-rw-r--r--  Documentation/acpi-hotkey.txt | 38
-rw-r--r--  Documentation/feature-removal-schedule.txt | 23
-rw-r--r--  Documentation/gpio.txt | 17
-rw-r--r--  Documentation/hrtimer/timer_stats.txt | 68
-rw-r--r--  Documentation/hrtimers/highres.txt | 249
-rw-r--r--  Documentation/hrtimers/hrtimers.txt (renamed from Documentation/hrtimers.txt) | 0
-rw-r--r--  Documentation/kernel-parameters.txt | 8
-rw-r--r--  Documentation/sony-laptop.txt | 106
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/kernel/irq.c | 3
-rw-r--r--  arch/arm/mach-imx/time.c | 2
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 2
-rw-r--r--  arch/arm/mach-netx/time.c | 2
-rw-r--r--  arch/arm/mach-pxa/time.c | 2
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1002.c | 9
-rw-r--r--  arch/avr32/kernel/syscall_table.S | 22
-rw-r--r--  arch/avr32/kernel/time.c | 2
-rw-r--r--  arch/avr32/mach-at32ap/at32ap7000.c | 144
-rw-r--r--  arch/avr32/mach-at32ap/clock.c | 6
-rw-r--r--  arch/i386/Kconfig | 16
-rw-r--r--  arch/i386/kernel/Makefile | 3
-rw-r--r--  arch/i386/kernel/acpi/boot.c | 30
-rw-r--r--  arch/i386/kernel/apic.c | 1629
-rw-r--r--  arch/i386/kernel/apm.c | 44
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig | 9
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile | 1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/e_powersaver.c | 334
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c | 359
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h | 153
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 6
-rw-r--r--  arch/i386/kernel/hpet.c | 498
-rw-r--r--  arch/i386/kernel/i8253.c | 96
-rw-r--r--  arch/i386/kernel/i8259.c | 7
-rw-r--r--  arch/i386/kernel/io_apic.c | 10
-rw-r--r--  arch/i386/kernel/irq.c | 22
-rw-r--r--  arch/i386/kernel/nmi.c | 9
-rw-r--r--  arch/i386/kernel/process.c | 3
-rw-r--r--  arch/i386/kernel/smpboot.c | 187
-rw-r--r--  arch/i386/kernel/time.c | 124
-rw-r--r--  arch/i386/kernel/tsc.c | 169
-rw-r--r--  arch/i386/kernel/tsc_sync.c | 1
-rw-r--r--  arch/i386/kernel/vmitime.c | 2
-rw-r--r--  arch/i386/mach-default/setup.c | 8
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 4
-rw-r--r--  arch/mips/kernel/time.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 2
-rw-r--r--  arch/sh/Kconfig | 17
-rw-r--r--  arch/sh/Makefile | 3
-rw-r--r--  arch/sh/boards/bigsur/Makefile | 6
-rw-r--r--  arch/sh/boards/bigsur/io.c | 120
-rw-r--r--  arch/sh/boards/bigsur/irq.c | 334
-rw-r--r--  arch/sh/boards/bigsur/led.c | 54
-rw-r--r--  arch/sh/boards/bigsur/setup.c | 88
-rw-r--r--  arch/sh/boards/ec3104/Makefile | 6
-rw-r--r--  arch/sh/boards/ec3104/io.c | 81
-rw-r--r--  arch/sh/boards/ec3104/irq.c | 196
-rw-r--r--  arch/sh/boards/ec3104/setup.c | 65
-rw-r--r--  arch/sh/boards/mpc1211/Makefile | 2
-rw-r--r--  arch/sh/boards/mpc1211/led.c | 63
-rw-r--r--  arch/sh/boards/mpc1211/setup.c | 31
-rw-r--r--  arch/sh/boards/renesas/r7780rp/Makefile | 1
-rw-r--r--  arch/sh/boards/renesas/r7780rp/io.c | 152
-rw-r--r--  arch/sh/boards/renesas/r7780rp/led.c | 43
-rw-r--r--  arch/sh/boards/renesas/r7780rp/setup.c | 49
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/Makefile | 3
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/io.c | 302
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/irq.c | 80
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/led.c | 44
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/setup.c | 142
-rw-r--r--  arch/sh/boards/se/7206/Makefile | 2
-rw-r--r--  arch/sh/boards/se/7206/led.c | 57
-rw-r--r--  arch/sh/boards/se/7206/setup.c | 34
-rw-r--r--  arch/sh/boards/se/7300/Makefile | 2
-rw-r--r--  arch/sh/boards/se/7300/led.c | 54
-rw-r--r--  arch/sh/boards/se/7300/setup.c | 36
-rw-r--r--  arch/sh/boards/se/73180/Makefile | 2
-rw-r--r--  arch/sh/boards/se/73180/led.c | 53
-rw-r--r--  arch/sh/boards/se/73180/setup.c | 31
-rw-r--r--  arch/sh/boards/se/7343/Makefile | 2
-rw-r--r--  arch/sh/boards/se/7343/led.c | 44
-rw-r--r--  arch/sh/boards/se/7343/setup.c | 26
-rw-r--r--  arch/sh/boards/se/770x/Makefile | 1
-rw-r--r--  arch/sh/boards/se/770x/irq.c | 108
-rw-r--r--  arch/sh/boards/se/770x/led.c | 52
-rw-r--r--  arch/sh/boards/se/770x/setup.c | 43
-rw-r--r--  arch/sh/boards/se/7751/Makefile | 1
-rw-r--r--  arch/sh/boards/se/7751/led.c | 51
-rw-r--r--  arch/sh/boards/se/7751/setup.c | 36
-rw-r--r--  arch/sh/boards/sh03/Makefile | 1
-rw-r--r--  arch/sh/boards/sh03/led.c | 48
-rw-r--r--  arch/sh/boards/sh03/setup.c | 30
-rw-r--r--  arch/sh/boards/shmin/setup.c | 12
-rw-r--r--  arch/sh/cchips/voyagergx/irq.c | 70
-rw-r--r--  arch/sh/cchips/voyagergx/setup.c | 4
-rw-r--r--  arch/sh/configs/rts7751r2d_defconfig | 308
-rw-r--r--  arch/sh/configs/se7750_defconfig | 140
-rw-r--r--  arch/sh/drivers/Makefile | 1
-rw-r--r--  arch/sh/drivers/dma/dma-sh.c | 45
-rw-r--r--  arch/sh/drivers/heartbeat.c | 132
-rw-r--r--  arch/sh/drivers/pci/Makefile | 1
-rw-r--r--  arch/sh/drivers/pci/ops-bigsur.c | 83
-rw-r--r--  arch/sh/drivers/pci/pci-sh7751.c | 9
-rw-r--r--  arch/sh/kernel/Makefile | 3
-rw-r--r--  arch/sh/kernel/cpu/init.c | 41
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c | 19
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 12
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 62
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 16
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 89
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 207
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 42
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7709.c | 21
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 183
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 4
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 66
-rw-r--r--  arch/sh/kernel/debugtraps.S | 41
-rw-r--r--  arch/sh/kernel/early_printk.c | 24
-rw-r--r--  arch/sh/kernel/entry-common.S | 119
-rw-r--r--  arch/sh/kernel/io_generic.c | 12
-rw-r--r--  arch/sh/kernel/kgdb_stub.c | 7
-rw-r--r--  arch/sh/kernel/process.c | 95
-rw-r--r--  arch/sh/kernel/setup.c | 49
-rw-r--r--  arch/sh/kernel/sh_ksyms.c | 1
-rw-r--r--  arch/sh/kernel/signal.c | 6
-rw-r--r--  arch/sh/kernel/syscalls.S | 8
-rw-r--r--  arch/sh/kernel/traps.c | 4
-rw-r--r--  arch/sh/mm/Kconfig | 5
-rw-r--r--  arch/sh/mm/cache-debugfs.c | 4
-rw-r--r--  arch/sh/mm/cache-sh3.c | 8
-rw-r--r--  arch/sh/mm/cache-sh4.c | 77
-rw-r--r--  arch/sh/mm/cache-sh7705.c | 29
-rw-r--r--  arch/sh/mm/fault.c | 87
-rw-r--r--  arch/sh/mm/init.c | 7
-rw-r--r--  arch/sh/mm/ioremap.c | 6
-rw-r--r--  arch/sh/mm/pg-sh4.c | 28
-rw-r--r--  arch/sh/mm/pg-sh7705.c | 37
-rw-r--r--  arch/sh/mm/tlb-flush.c | 101
-rw-r--r--  arch/sh/mm/tlb-nommu.c | 19
-rw-r--r--  arch/sh/mm/tlb-sh3.c | 67
-rw-r--r--  arch/sh/mm/tlb-sh4.c | 70
-rw-r--r--  arch/sh/oprofile/op_model_sh7750.c | 2
-rw-r--r--  arch/sh/tools/mach-types | 1
-rw-r--r--  arch/um/os-Linux/sigio.c | 38
-rw-r--r--  arch/x86_64/Kconfig | 8
-rw-r--r--  arch/x86_64/kernel/Makefile | 4
-rw-r--r--  arch/x86_64/kernel/apic.c | 5
-rw-r--r--  arch/x86_64/kernel/early-quirks.c | 4
-rw-r--r--  arch/x86_64/kernel/hpet.c (renamed from arch/i386/kernel/time_hpet.c) | 406
-rw-r--r--  arch/x86_64/kernel/i8259.c | 1
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 4
-rw-r--r--  arch/x86_64/kernel/pmtimer.c | 58
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 231
-rw-r--r--  arch/x86_64/kernel/time.c | 961
-rw-r--r--  arch/x86_64/kernel/tsc.c | 226
-rw-r--r--  arch/x86_64/kernel/tsc_sync.c | 187
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 28
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 121
-rw-r--r--  drivers/acpi/Kconfig | 10
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/ac.c | 9
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 9
-rw-r--r--  drivers/acpi/asus_acpi.c | 20
-rw-r--r--  drivers/acpi/battery.c | 9
-rw-r--r--  drivers/acpi/bay.c | 107
-rw-r--r--  drivers/acpi/bus.c | 2
-rw-r--r--  drivers/acpi/button.c | 7
-rw-r--r--  drivers/acpi/cm_sbs.c | 2
-rw-r--r--  drivers/acpi/container.c | 9
-rw-r--r--  drivers/acpi/debug.c | 2
-rw-r--r--  drivers/acpi/dispatcher/dsmethod.c | 12
-rw-r--r--  drivers/acpi/dock.c | 8
-rw-r--r--  drivers/acpi/ec.c | 9
-rw-r--r--  drivers/acpi/event.c | 2
-rw-r--r--  drivers/acpi/events/evgpe.c | 11
-rw-r--r--  drivers/acpi/events/evmisc.c | 11
-rw-r--r--  drivers/acpi/executer/exdump.c | 2
-rw-r--r--  drivers/acpi/executer/exmutex.c | 36
-rw-r--r--  drivers/acpi/fan.c | 9
-rw-r--r--  drivers/acpi/glue.c | 62
-rw-r--r--  drivers/acpi/hotkey.c | 1042
-rw-r--r--  drivers/acpi/i2c_ec.c | 5
-rw-r--r--  drivers/acpi/ibm_acpi.c | 18
-rw-r--r--  drivers/acpi/numa.c | 18
-rw-r--r--  drivers/acpi/osl.c | 35
-rw-r--r--  drivers/acpi/pci_bind.c | 2
-rw-r--r--  drivers/acpi/pci_irq.c | 2
-rw-r--r--  drivers/acpi/pci_link.c | 5
-rw-r--r--  drivers/acpi/pci_root.c | 5
-rw-r--r--  drivers/acpi/power.c | 5
-rw-r--r--  drivers/acpi/processor_core.c | 13
-rw-r--r--  drivers/acpi/processor_idle.c | 125
-rw-r--r--  drivers/acpi/processor_perflib.c | 3
-rw-r--r--  drivers/acpi/processor_thermal.c | 3
-rw-r--r--  drivers/acpi/processor_throttling.c | 3
-rw-r--r--  drivers/acpi/sbs.c | 5
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/system.c | 3
-rw-r--r--  drivers/acpi/tables.c | 41
-rw-r--r--  drivers/acpi/tables/tbxface.c | 9
-rw-r--r--  drivers/acpi/thermal.c | 9
-rw-r--r--  drivers/acpi/toshiba_acpi.c | 6
-rw-r--r--  drivers/acpi/utilities/utdelete.c | 1
-rw-r--r--  drivers/acpi/utils.c | 2
-rw-r--r--  drivers/acpi/video.c | 9
-rw-r--r--  drivers/ata/libata-core.c | 11
-rw-r--r--  drivers/ata/pata_legacy.c | 11
-rw-r--r--  drivers/ata/pata_qdi.c | 4
-rw-r--r--  drivers/ata/pata_sl82c105.c | 3
-rw-r--r--  drivers/ata/sata_nv.c | 6
-rw-r--r--  drivers/ata/sata_promise.c | 64
-rw-r--r--  drivers/ata/sata_vsc.c | 8
-rw-r--r--  drivers/char/agp/Makefile | 1
-rw-r--r--  drivers/char/agp/agp.h | 12
-rw-r--r--  drivers/char/agp/ali-agp.c | 2
-rw-r--r--  drivers/char/agp/alpha-agp.c | 4
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 11
-rw-r--r--  drivers/char/agp/ati-agp.c | 1
-rw-r--r--  drivers/char/agp/backend.c | 2
-rw-r--r--  drivers/char/agp/compat_ioctl.c | 282
-rw-r--r--  drivers/char/agp/compat_ioctl.h | 105
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 1
-rw-r--r--  drivers/char/agp/frontend.c | 34
-rw-r--r--  drivers/char/agp/generic.c | 125
-rw-r--r--  drivers/char/agp/hp-agp.c | 1
-rw-r--r--  drivers/char/agp/i460-agp.c | 7
-rw-r--r--  drivers/char/agp/intel-agp.c | 202
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 1
-rw-r--r--  drivers/char/agp/parisc-agp.c | 1
-rw-r--r--  drivers/char/agp/sgi-agp.c | 1
-rw-r--r--  drivers/char/agp/sis-agp.c | 1
-rw-r--r--  drivers/char/agp/sworks-agp.c | 1
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 2
-rw-r--r--  drivers/char/agp/via-agp.c | 2
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/sysrq.c | 14
-rw-r--r--  drivers/clocksource/acpi_pm.c | 20
-rw-r--r--  drivers/clocksource/cyclone.c | 2
-rw-r--r--  drivers/clocksource/scx200_hrt.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 258
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 64
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 2
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 11
-rw-r--r--  drivers/isdn/gigaset/Makefile | 2
-rw-r--r--  drivers/misc/Kconfig | 15
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/asus-laptop.c | 5
-rw-r--r--  drivers/misc/sony-laptop.c | 562
-rw-r--r--  drivers/pnp/pnpacpi/Kconfig | 16
-rw-r--r--  drivers/usb/misc/appledisplay.c | 4
-rw-r--r--  drivers/video/s3c2410fb.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 1
-rw-r--r--  fs/ecryptfs/keystore.c | 26
-rw-r--r--  fs/ecryptfs/main.c | 5
-rw-r--r--  fs/ecryptfs/messaging.c | 3
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/nfsd/nfs4acl.c | 491
-rw-r--r--  fs/nfsd/nfs4callback.c | 7
-rw-r--r--  fs/nfsd/nfs4xdr.c | 55
-rw-r--r--  fs/nfsd/vfs.c | 5
-rw-r--r--  include/acpi/acinterp.h | 3
-rw-r--r--  include/acpi/acobject.h | 2
-rw-r--r--  include/acpi/acpi_drivers.h | 30
-rw-r--r--  include/acpi/acpiosxf.h | 6
-rw-r--r--  include/acpi/processor.h | 1
-rw-r--r--  include/asm-avr32/arch-at32ap/board.h | 4
-rw-r--r--  include/asm-avr32/io.h | 23
-rw-r--r--  include/asm-avr32/unistd.h | 17
-rw-r--r--  include/asm-i386/acpi.h | 1
-rw-r--r--  include/asm-i386/apic.h | 9
-rw-r--r--  include/asm-i386/hpet.h | 16
-rw-r--r--  include/asm-i386/i8253.h | 15
-rw-r--r--  include/asm-i386/mach-default/do_timer.h | 78
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h | 27
-rw-r--r--  include/asm-i386/mpspec.h | 1
-rw-r--r--  include/asm-i386/msr.h | 3
-rw-r--r--  include/asm-i386/tsc.h | 49
-rw-r--r--  include/asm-ia64/libata-portmap.h | 12
-rw-r--r--  include/asm-sh/Kbuild | 2
-rw-r--r--  include/asm-sh/bigsur/bigsur.h | 80
-rw-r--r--  include/asm-sh/bigsur/io.h | 35
-rw-r--r--  include/asm-sh/bigsur/serial.h | 24
-rw-r--r--  include/asm-sh/bugs.h | 6
-rw-r--r--  include/asm-sh/cacheflush.h | 3
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h | 13
-rw-r--r--  include/asm-sh/cpu-sh4/dma.h | 11
-rw-r--r--  include/asm-sh/dma-mapping.h | 4
-rw-r--r--  include/asm-sh/ec3104/ec3104.h | 43
-rw-r--r--  include/asm-sh/ec3104/io.h | 16
-rw-r--r--  include/asm-sh/ec3104/keyboard.h | 15
-rw-r--r--  include/asm-sh/ec3104/serial.h | 20
-rw-r--r--  include/asm-sh/irq.h | 4
-rw-r--r--  include/asm-sh/kgdb.h | 8
-rw-r--r--  include/asm-sh/mmu.h | 20
-rw-r--r--  include/asm-sh/mmu_context.h | 61
-rw-r--r--  include/asm-sh/page.h | 3
-rw-r--r--  include/asm-sh/pgtable.h | 11
-rw-r--r--  include/asm-sh/processor.h | 6
-rw-r--r--  include/asm-sh/rts7751r2d.h | 4
-rw-r--r--  include/asm-sh/serial.h | 11
-rw-r--r--  include/asm-sh/thread_info.h | 16
-rw-r--r--  include/asm-sh/tlbflush.h | 38
-rw-r--r--  include/asm-sh/ubc.h | 2
-rw-r--r--  include/asm-sh/unistd.h | 20
-rw-r--r--  include/asm-sh/voyagergx.h | 5
-rw-r--r--  include/asm-x86_64/hpet.h | 7
-rw-r--r--  include/asm-x86_64/proto.h | 6
-rw-r--r--  include/asm-x86_64/timex.h | 35
-rw-r--r--  include/asm-x86_64/tsc.h | 66
-rw-r--r--  include/asm-x86_64/vsyscall.h | 29
-rw-r--r--  include/linux/acpi.h | 8
-rw-r--r--  include/linux/acpi_pmtmr.h | 38
-rw-r--r--  include/linux/agp_backend.h | 5
-rw-r--r--  include/linux/ata.h | 2
-rw-r--r--  include/linux/clockchips.h | 142
-rw-r--r--  include/linux/clocksource.h | 39
-rw-r--r--  include/linux/cpufreq.h | 10
-rw-r--r--  include/linux/hardirq.h | 9
-rw-r--r--  include/linux/hrtimer.h | 260
-rw-r--r--  include/linux/interrupt.h | 6
-rw-r--r--  include/linux/irq.h | 52
-rw-r--r--  include/linux/jiffies.h | 222
-rw-r--r--  include/linux/ktime.h | 3
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs4_acl.h | 9
-rw-r--r--  include/linux/tick.h | 109
-rw-r--r--  include/linux/time.h | 1
-rw-r--r--  include/linux/timer.h | 66
-rw-r--r--  include/linux/timex.h | 7
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/hrtimer.c | 824
-rw-r--r--  kernel/irq/chip.c | 25
-rw-r--r--  kernel/irq/manage.c | 44
-rw-r--r--  kernel/irq/proc.c | 24
-rw-r--r--  kernel/itimer.c | 18
-rw-r--r--  kernel/posix-cpu-timers.c | 15
-rw-r--r--  kernel/posix-timers.c | 15
-rw-r--r--  kernel/rtmutex.c | 2
-rw-r--r--  kernel/signal.c | 58
-rw-r--r--  kernel/softirq.c | 19
-rw-r--r--  kernel/time.c | 254
-rw-r--r--  kernel/time/Kconfig | 25
-rw-r--r--  kernel/time/Makefile | 9
-rw-r--r--  kernel/time/clockevents.c | 345
-rw-r--r--  kernel/time/clocksource.c | 246
-rw-r--r--  kernel/time/jiffies.c | 1
-rw-r--r--  kernel/time/ntp.c | 30
-rw-r--r--  kernel/time/tick-broadcast.c | 480
-rw-r--r--  kernel/time/tick-common.c | 346
-rw-r--r--  kernel/time/tick-internal.h | 110
-rw-r--r--  kernel/time/tick-oneshot.c | 84
-rw-r--r--  kernel/time/tick-sched.c | 563
-rw-r--r--  kernel/time/timer_list.c | 287
-rw-r--r--  kernel/time/timer_stats.c | 411
-rw-r--r--  kernel/timer.c | 286
-rw-r--r--  kernel/tsacct.c | 2
-rw-r--r--  kernel/workqueue.c | 7
-rw-r--r--  lib/Kconfig.debug | 11
-rw-r--r--  lib/devres.c | 8
-rw-r--r--  mm/filemap.c | 32
-rw-r--r--  mm/mincore.c | 22
371 files changed, 12840 insertions(+), 9733 deletions(-)
diff --git a/Documentation/acpi-hotkey.txt b/Documentation/acpi-hotkey.txt
deleted file mode 100644
index 38040fa37649..000000000000
--- a/Documentation/acpi-hotkey.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-driver/acpi/hotkey.c implement:
-1. /proc/acpi/hotkey/event_config
-(event based hotkey or event config interface):
-a. add a event based hotkey(event) :
-echo "0:bus::action:method:num:num" > event_config
-
-b. delete a event based hotkey(event):
-echo "1:::::num:num" > event_config
-
-c. modify a event based hotkey(event):
-echo "2:bus::action:method:num:num" > event_config
-
-2. /proc/acpi/hotkey/poll_config
-(polling based hotkey or event config interface):
-a.add a polling based hotkey(event) :
-echo "0:bus:method:action:method:num" > poll_config
-this adding command will create a proc file
-/proc/acpi/hotkey/method, which is used to get
-result of polling.
-
-b.delete a polling based hotkey(event):
-echo "1:::::num" > event_config
-
-c.modify a polling based hotkey(event):
-echo "2:bus:method:action:method:num" > poll_config
-
-3./proc/acpi/hotkey/action
-(interface to call aml method associated with a
-specific hotkey(event))
-echo "event_num:event_type:event_argument" >
-        /proc/acpi/hotkey/action.
-The result of the execution of this aml method is
-attached to /proc/acpi/hotkey/poll_method, which is dynamically
-created. Please use command "cat /proc/acpi/hotkey/polling_method"
-to retrieve it.
-
-Note: Use cmdline "acpi_generic_hotkey" to over-ride
-platform-specific with generic driver.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c585aa8d62b4..28f897fd3674 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -253,29 +253,6 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 
 ---------------------------
 
-<<<<<<< test:Documentation/feature-removal-schedule.txt
-What:   ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
-When:   2.6.21
-Why:    hotkey.c was an attempt to consolidate multiple drivers that use
-        ACPI to implement hotkeys.  However, hotkeys are not documented
-        in the ACPI specification, so the drivers used undocumented
-        vendor-specific hooks and turned out to be more different than
-        the same.
-
-        Further, the keys and the features supplied by each platform
-        are different, so there will always be a need for
-        platform-specific drivers.
-
-        So the new plan is to delete hotkey.c and instead, work on the
-        platform specific drivers to try to make them look the same
-        to the user when they supply the same features.
-
-        hotkey.c has always depended on CONFIG_EXPERIMENTAL
-
-Who:    Len Brown <len.brown@intel.com>
-
----------------------------
-
 What:   /sys/firmware/acpi/namespace
 When:   2.6.21
 Why:    The ACPI namespace is effectively the symbol list for
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 09dd510c4a5f..576ce463cf44 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -78,7 +78,8 @@ Identifying GPIOs
 -----------------
 GPIOs are identified by unsigned integers in the range 0..MAX_INT. That
 reserves "negative" numbers for other purposes like marking signals as
-"not available on this board", or indicating faults.
+"not available on this board", or indicating faults. Code that doesn't
+touch the underlying hardware treats these integers as opaque cookies.
 
 Platforms define how they use those integers, and usually #define symbols
 for the GPIO lines so that board-specific setup code directly corresponds
@@ -139,10 +140,10 @@ issues including wire-OR and output latencies.
 The get/set calls have no error returns because "invalid GPIO" should have
 been reported earlier in gpio_set_direction(). However, note that not all
 platforms can read the value of output pins; those that can't should always
-return zero. Also, these calls will be ignored for GPIOs that can't safely
-be accessed wihtout sleeping (see below).
+return zero. Also, using these calls for GPIOs that can't safely be accessed
+without sleeping (see below) is an error.
 
-Platform-specific implementations are encouraged to optimise the two
+Platform-specific implementations are encouraged to optimize the two
 calls to access the GPIO value in cases where the GPIO number (and for
 output, value) are constant. It's normal for them to need only a couple
 of instructions in such cases (reading or writing a hardware register),
@@ -239,7 +240,8 @@ options are part of the IRQ interface, e.g. IRQF_TRIGGER_FALLING, as are
 system wakeup capabilities.
 
 Non-error values returned from irq_to_gpio() would most commonly be used
-with gpio_get_value().
+with gpio_get_value(), for example to initialize or update driver state
+when the IRQ is edge-triggered.
 
 
 
@@ -260,9 +262,10 @@ pullups (or pulldowns) so that the on-chip ones should not be used.
 There are other system-specific mechanisms that are not specified here,
 like the aforementioned options for input de-glitching and wire-OR output.
 Hardware may support reading or writing GPIOs in gangs, but that's usually
-configuration dependednt: for GPIOs sharing the same bank. (GPIOs are
+configuration dependent: for GPIOs sharing the same bank. (GPIOs are
 commonly grouped in banks of 16 or 32, with a given SOC having several such
-banks.) Code relying on such mechanisms will necessarily be nonportable.
+banks.) Some systems can trigger IRQs from output GPIOs. Code relying on
+such mechanisms will necessarily be nonportable.
 
 Dynamic definition of GPIOs is not currently supported; for example, as
 a side effect of configuring an add-on board with some GPIO expanders.
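
As an aside for readers of the hunks above: a minimal sketch, not part of this
patch, of how irq_to_gpio() and gpio_get_value() pair up in an edge-triggered
interrupt handler. GPIO_BUTTON, button_isr() and button_init() are hypothetical
names; the gpio_*()/irq_to_gpio()/gpio_to_irq() calls are the ones gpio.txt
documents:

	#include <linux/interrupt.h>
	#include <asm/gpio.h>

	#define GPIO_BUTTON	42	/* hypothetical board-specific GPIO */

	static irqreturn_t button_isr(int irq, void *dev_id)
	{
		/* on an edge IRQ, sample the current level of the pin */
		int level = gpio_get_value(irq_to_gpio(irq));

		/* ... update driver state from 'level' ... */
		return IRQ_HANDLED;
	}

	static int __init button_init(void)
	{
		/* map the GPIO to its IRQ and request edge triggering */
		return request_irq(gpio_to_irq(GPIO_BUTTON), button_isr,
				   IRQF_TRIGGER_FALLING, "button", NULL);
	}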
diff --git a/Documentation/hrtimer/timer_stats.txt b/Documentation/hrtimer/timer_stats.txt
new file mode 100644
index 000000000000..27f782e3593f
--- /dev/null
+++ b/Documentation/hrtimer/timer_stats.txt
@@ -0,0 +1,68 @@
+timer_stats - timer usage statistics
+------------------------------------
+
+timer_stats is a debugging facility to make the timer (ab)usage in a Linux
+system visible to kernel and userspace developers. It is not intended for
+production usage as it adds significant overhead to the (hr)timer code and the
+(hr)timer data structures.
+
+timer_stats should be used by kernel and userspace developers to verify that
+their code does not make undue use of timers. This helps to avoid unnecessary
+wakeups, which should be avoided to optimize power consumption.
+
+It can be enabled by CONFIG_TIMER_STATS in the "Kernel hacking" configuration
+section.
+
+timer_stats collects information about the timer events which are fired in a
+Linux system over a sample period:
+
+- the pid of the task (process) which initialized the timer
+- the name of the process which initialized the timer
+- the function where the timer was initialized
+- the callback function which is associated with the timer
+- the number of events (callbacks)
+
+timer_stats adds an entry to /proc: /proc/timer_stats
+
+This entry is used to control the statistics functionality and to read out the
+sampled information.
+
+The timer_stats functionality is inactive on bootup.
+
+To activate a sample period issue:
+# echo 1 >/proc/timer_stats
+
+To stop a sample period issue:
+# echo 0 >/proc/timer_stats
+
+The statistics can be retrieved by:
+# cat /proc/timer_stats
+
+The readout of /proc/timer_stats automatically disables sampling. The sampled
+information is kept until a new sample period is started. This allows multiple
+readouts.
+
+Sample output of /proc/timer_stats:
+
+Timerstats sample period: 3.888770 s
+  12,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
+  15,     1 swapper          hcd_submit_urb (rh_timer_func)
+   4,   959 kedac            schedule_timeout (process_timeout)
+   1,     0 swapper          page_writeback_init (wb_timer_fn)
+  28,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
+  22,  2948 IRQ 4            tty_flip_buffer_push (delayed_work_timer_fn)
+   3,  3100 bash             schedule_timeout (process_timeout)
+   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
+   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
+   1,     1 swapper          neigh_table_init_no_netlink (neigh_periodic_timer)
+   1,  2292 ip               __netdev_watchdog_up (dev_watchdog)
+   1,    23 events/1         do_cache_clean (delayed_work_timer_fn)
+90 total events, 30.0 events/sec
+
+The first column is the number of events, the second column the pid, the third
+column is the name of the process. The fourth column shows the function which
+initialized the timer and in parentheses the callback function which was
+executed on expiry.
+
+    Thomas, Ingo
+
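
The /proc/timer_stats protocol described in the new document is simple enough
to drive programmatically. A minimal userspace sketch, not part of this patch,
that starts a sample period, stops it after a workload, and dumps the result:

	#include <stdio.h>
	#include <unistd.h>

	/* write "1" or "0" to /proc/timer_stats to start/stop sampling */
	static void timer_stats_ctl(const char *val)
	{
		FILE *f = fopen("/proc/timer_stats", "w");

		if (f) {
			fputs(val, f);
			fclose(f);
		}
	}

	int main(void)
	{
		char line[256];
		FILE *f;

		timer_stats_ctl("1");	/* start a sample period */
		sleep(5);		/* ... workload of interest runs ... */
		timer_stats_ctl("0");	/* stop sampling */

		/* readout; the data is kept until a new period starts */
		f = fopen("/proc/timer_stats", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}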
diff --git a/Documentation/hrtimers/highres.txt b/Documentation/hrtimers/highres.txt
new file mode 100644
index 000000000000..ce0e9a91e157
--- /dev/null
+++ b/Documentation/hrtimers/highres.txt
@@ -0,0 +1,249 @@
+High resolution timers and dynamic ticks design notes
+-----------------------------------------------------
+
+Further information can be found in the paper of the OLS 2006 talk "hrtimers
+and beyond". The paper is part of the OLS 2006 Proceedings Volume 1, which can
+be found on the OLS website:
+http://www.linuxsymposium.org/2006/linuxsymposium_procv1.pdf
+
+The slides to this talk are available from:
+http://tglx.de/projects/hrtimers/ols2006-hrtimers.pdf
+
+The slides contain five figures (pages 2, 15, 18, 20, 22), which illustrate the
+changes in the time(r) related Linux subsystems. Figure #1 (p. 2) shows the
+design of the Linux time(r) system before hrtimers and other building blocks
+got merged into mainline.
+
+Note: the paper and the slides are talking about "clock event source", while we
+switched to the name "clock event devices" in the meantime.
+
+The design contains the following basic building blocks:
+
+- hrtimer base infrastructure
+- timeofday and clock source management
+- clock event management
+- high resolution timer functionality
+- dynamic ticks
+
+
+hrtimer base infrastructure
+---------------------------
+
+The hrtimer base infrastructure was merged into the 2.6.16 kernel. Details of
+the base implementation are covered in Documentation/hrtimers/hrtimers.txt. See
+also figure #2 (OLS slides p. 15).
+
+The main differences from the timer wheel, which holds the armed timer_list type
+timers, are:
+ - time ordered enqueueing into a rb-tree
+ - independent of ticks (the processing is based on nanoseconds)
+
+
+timeofday and clock source management
+-------------------------------------
+
+John Stultz's Generic Time Of Day (GTOD) framework moves a large portion of
+code out of the architecture-specific areas into a generic management
+framework, as illustrated in figure #3 (OLS slides p. 18). The architecture
+specific portion is reduced to the low level hardware details of the clock
+sources, which are registered in the framework and selected on a quality based
+decision. The low level code provides hardware setup and readout routines and
+initializes data structures, which are used by the generic time keeping code to
+convert the clock ticks to nanosecond based time values. All other time keeping
+related functionality is moved into the generic code. The GTOD base patch got
+merged into the 2.6.18 kernel.
+
+Further information about the Generic Time Of Day framework is available in the
+OLS 2005 Proceedings Volume 1:
+http://www.linuxsymposium.org/2005/linuxsymposium_procv1.pdf
+
+The paper "We Are Not Getting Any Younger: A New Approach to Time and
+Timers" was written by J. Stultz, D.V. Hart, & N. Aravamudan.
+
+Figure #3 (OLS slides p.18) illustrates the transformation.
+
+
+clock event management
+----------------------
+
+While clock sources provide read access to the monotonically increasing time
+value, clock event devices are used to schedule the next event
+interrupt(s). The next event is currently defined to be periodic, with its
+period defined at compile time. The setup and selection of the event device
+for various event driven functionalities is hardwired into the architecture
+dependent code. This results in duplicated code across all architectures and
+makes it extremely difficult to change the configuration of the system to use
+event interrupt devices other than those already built into the
+architecture. Another implication of the current design is that it is necessary
+to touch all the architecture-specific implementations in order to provide new
+functionality like high resolution timers or dynamic ticks.
+
+The clock events subsystem tries to address this problem by providing a generic
+solution to manage clock event devices and their usage for the various clock
+event driven kernel functionalities. The goal of the clock event subsystem is
+to minimize the clock event related architecture dependent code to the pure
+hardware related handling and to allow easy addition and utilization of new
+clock event devices. It also minimizes the duplicated code across the
+architectures as it provides generic functionality down to the interrupt
+service handler, which is almost inherently hardware dependent.
+
+Clock event devices are registered either by the architecture dependent boot
+code or at module insertion time. Each clock event device fills a data
+structure with clock-specific property parameters and callback functions. The
+clock event management decides, by using the specified property parameters, the
+set of system functions a clock event device will be used to support. This
+includes the distinction of per-CPU and per-system global event devices.
+
+System-level global event devices are used for the Linux periodic tick. Per-CPU
+event devices are used to provide local CPU functionality such as process
+accounting, profiling, and high resolution timers.
+
+The management layer assigns one or more of the following functions to a clock
+event device:
+ - system global periodic tick (jiffies update)
+ - cpu local update_process_times
+ - cpu local profiling
+ - cpu local next event interrupt (non periodic mode)
+
+The clock event device delegates the selection of those timer interrupt related
+functions completely to the management layer. The clock management layer stores
+a function pointer in the device description structure, which has to be called
+from the hardware level handler. This removes a lot of duplicated code from the
+architecture specific timer interrupt handlers and hands the control over the
+clock event devices and the assignment of timer interrupt related functionality
+to the core code.
+
+The clock event layer API is rather small. Aside from the clock event device
+registration interface it provides functions to schedule the next event
+interrupt, clock event device notification service and support for suspend and
+resume.
+
+The framework adds about 700 lines of code which results in a 2KB increase of
+the kernel binary size. The conversion of i386 removes about 100 lines of
+code. The binary size decrease is in the range of 400 bytes. We believe that
+the increase of flexibility and the avoidance of duplicated code across
+architectures justifies the slight increase of the binary size.
+
+The conversion of an architecture has no functional impact, but allows the
+high resolution and dynamic tick functionalities to be used without any change
+to the clock event device and timer interrupt code. After the conversion the
+enabling of high resolution timers and dynamic ticks is simply provided by
+adding the kernel/time/Kconfig file to the architecture specific Kconfig and
+adding the dynamic tick specific calls to the idle routine (a total of 3 lines
+added to the idle function and the Kconfig file).
+
+Figure #4 (OLS slides p.20) illustrates the transformation.
+
+
+high resolution timer functionality
+-----------------------------------
+
+During system boot it is not possible to use the high resolution timer
+functionality, while making it possible would be difficult and would serve no
+useful function. The initialization of the clock event device framework, the
+clock source framework (GTOD) and hrtimers itself has to be done and
+appropriate clock sources and clock event devices have to be registered before
+the high resolution functionality can work. Up to the point where hrtimers are
+initialized, the system works in the usual low resolution periodic mode. The
+clock source and the clock event device layers provide notification functions
+which inform hrtimers about availability of new hardware. hrtimers validates
+the usability of the registered clock sources and clock event devices before
+switching to high resolution mode. This also ensures that a kernel which is
+configured for high resolution timers can run on a system which lacks the
+necessary hardware support.
+
+The high resolution timer code does not support SMP machines which have only
+global clock event devices. The support of such hardware would involve IPI
+calls when an interrupt happens. The overhead would be much larger than the
+benefit. This is the reason why we currently disable high resolution and
+dynamic ticks on i386 SMP systems which stop the local APIC in C3 power
+state. A workaround is available as an idea, but the problem has not been
+tackled yet.
+
+The time ordered insertion of timers provides all the infrastructure to decide
+whether the event device has to be reprogrammed when a timer is added. The
+decision is made per timer base and synchronized across per-cpu timer bases in
+a support function. The design allows the system to utilize separate per-CPU
+clock event devices for the per-CPU timer bases, but currently only one
+reprogrammable clock event device per-CPU is utilized.
+
+When the timer interrupt happens, the next event interrupt handler is called
+from the clock event distribution code and moves expired timers from the
+red-black tree to a separate double linked list and invokes the softirq
+handler. An additional mode field in the hrtimer structure allows the system to
+execute callback functions directly from the next event interrupt handler. This
+is restricted to code which can safely be executed in the hard interrupt
+context. This applies, for example, to the common case of a wakeup function as
+used by nanosleep. The advantage of executing the handler in the interrupt
+context is the avoidance of up to two context switches - from the interrupted
+context to the softirq and to the task which is woken up by the expired
+timer.
+
+Once a system has switched to high resolution mode, the periodic tick is
+switched off. This disables the per system global periodic clock event device -
+e.g. the PIT on i386 SMP systems.
+
+The periodic tick functionality is provided by a per-cpu hrtimer. The callback
+function is executed in the next event interrupt context and updates jiffies,
+calls update_process_times and runs profiling. The implementation of the
+hrtimer based periodic tick is designed to be extended with dynamic tick
+functionality. This allows a single clock event device to be used to schedule
+high resolution timer and periodic events (jiffies tick, profiling, process
+accounting) on UP systems. This has been proved to work with the PIT on i386
+and the Incrementer on PPC.
+
+The softirq for running the hrtimer queues and executing the callbacks has been
+separated from the tick bound timer softirq to allow accurate delivery of high
+resolution timer signals which are used by itimer and POSIX interval
+timers. The execution of this softirq can still be delayed by other softirqs,
+but the overall latencies have been significantly improved by this separation.
+
+Figure #5 (OLS slides p.22) illustrates the transformation.
+
+
+dynamic ticks
+-------------
+
+Dynamic ticks are the logical consequence of the hrtimer based periodic tick
+replacement (sched_tick). The functionality of the sched_tick hrtimer is
+extended by three functions:
+
+- hrtimer_stop_sched_tick
+- hrtimer_restart_sched_tick
+- hrtimer_update_jiffies
+
+hrtimer_stop_sched_tick() is called when a CPU goes into idle state. The code
+evaluates the next scheduled timer event (from both hrtimers and the timer
+wheel) and, in case the next event is further away than the next tick, it
+reprograms the sched_tick to this future event, to allow longer idle sleeps
+without needless interruption by the periodic tick. The function is also
+called when an interrupt happens during the idle period, which does not cause a
+reschedule. The call is necessary as the interrupt handler might have armed a
+new timer whose expiry time is before the time which was identified as the
+nearest event in the previous call to hrtimer_stop_sched_tick.
+
+hrtimer_restart_sched_tick() is called when the CPU leaves the idle state before
+it calls schedule(). hrtimer_restart_sched_tick() resumes the periodic tick,
+which is kept active until the next call to hrtimer_stop_sched_tick().
+
+hrtimer_update_jiffies() is called from irq_enter() when an interrupt happens
+in the idle period to make sure that jiffies are up to date and the interrupt
+handler does not have to deal with a possibly stale jiffy value.
+
+The dynamic tick feature provides statistical values which are exported to
+userspace via /proc/stats and can be made available for enhanced power
+management control.
+
+The implementation leaves room for further development like full tickless
+systems, where the time slice is controlled by the scheduler, variable
+frequency profiling, and a complete removal of jiffies in the future.
+
+
+Aside from the current initial submission of i386 support, the patchset has
+been extended to x86_64 and ARM already. Initial (work in progress) support is
+also available for MIPS and PowerPC.
+
+    Thomas, Ingo
+
+
+
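
To make the registration flow above concrete - "each clock event device fills a
data structure with clock-specific property parameters and callback functions" -
here is a hedged sketch of a driver hooking into the new framework. The
"my_timer" hardware and MY_TIMER_FREQ_HZ are hypothetical; the structure fields,
div_sc(), clockevent_delta2ns() and clockevents_register_device() are the
interfaces this series adds in include/linux/clockchips.h and
kernel/time/clockevents.c:

	#include <linux/clockchips.h>
	#include <linux/interrupt.h>
	#include <linux/init.h>

	#define MY_TIMER_FREQ_HZ	1000000	/* hypothetical input clock */

	static void my_timer_set_mode(enum clock_event_mode mode,
				      struct clock_event_device *evt)
	{
		/* program the hardware for periodic, oneshot or shutdown */
	}

	static int my_timer_set_next_event(unsigned long delta,
					   struct clock_event_device *evt)
	{
		/* arm the match register 'delta' timer cycles from now */
		return 0;
	}

	static struct clock_event_device my_clockevent = {
		.name		= "my_timer",
		.features	= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
		.shift		= 32,
		.rating		= 300,
		.set_mode	= my_timer_set_mode,
		.set_next_event	= my_timer_set_next_event,
	};

	static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
	{
		/*
		 * The management layer stored the function to run in
		 * event_handler; the hardware handler just delegates,
		 * as the text above describes.
		 */
		my_clockevent.event_handler(&my_clockevent);
		return IRQ_HANDLED;
	}

	void __init my_timer_init(void)
	{
		my_clockevent.mult = div_sc(MY_TIMER_FREQ_HZ, NSEC_PER_SEC,
					    my_clockevent.shift);
		my_clockevent.max_delta_ns =
			clockevent_delta2ns(0xffffffff, &my_clockevent);
		my_clockevent.min_delta_ns =
			clockevent_delta2ns(0xf, &my_clockevent);
		my_clockevent.cpumask = cpumask_of_cpu(0);

		clockevents_register_device(&my_clockevent);
	}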
diff --git a/Documentation/hrtimers.txt b/Documentation/hrtimers/hrtimers.txt
index ce31f65e12e7..ce31f65e12e7 100644
--- a/Documentation/hrtimers.txt
+++ b/Documentation/hrtimers/hrtimers.txt
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 22b19962a1a2..abd575cfc759 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -609,6 +609,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			highmem otherwise. This also works to reduce highmem
 			size on bigger boxes.
 
+	highres=	[KNL] Enable/disable high resolution timer mode.
+			Valid parameters: "on", "off"
+			Default: "on"
+
 	hisax=		[HW,ISDN]
 			See Documentation/isdn/README.HiSax.
 
@@ -1078,6 +1082,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			in certain environments such as networked servers or
 			real-time systems.
 
+	nohz=		[KNL] Boottime enable/disable dynamic ticks
+			Valid arguments: on, off
+			Default: on
+
 	noirqbalance	[IA-32,SMP,KNL] Disable kernel irq balancing
 
 	noirqdebug	[IA-32] Disables the code which attempts to detect and
diff --git a/Documentation/sony-laptop.txt b/Documentation/sony-laptop.txt
new file mode 100644
index 000000000000..dfd26df056f4
--- /dev/null
+++ b/Documentation/sony-laptop.txt
@@ -0,0 +1,106 @@
+Sony Notebook Control Driver (SNC) Readme
+-----------------------------------------
+	Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+	Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
+
+This mini-driver drives the SNC device present in the ACPI BIOS of
+the Sony Vaio laptops.
+
+It gives access to some extra laptop functionalities. In its current
+form, this driver lets the user set or query the screen brightness
+through the backlight subsystem and remove/apply power to some devices.
+
+Backlight control:
+------------------
+If your laptop model supports it, you will find sysfs files in the
+/sys/class/backlight/sony/
+directory. You will be able to query and set the current screen
+brightness:
+	brightness		get/set screen brightness (an integer
+				between 0 and 7)
+	actual_brightness	reading from this file will query the HW
+				to get the real brightness value
+	max_brightness		the maximum brightness value
+
+
+Platform specific:
+------------------
+Loading the sony-laptop module will create a
+/sys/devices/platform/sony-laptop/
+directory populated with some files.
+
+You then read/write integer values from/to those files by using
+standard UNIX tools.
+
+The files are:
+	brightness_default	screen brightness which will be set
+				when the laptop is rebooted
+	cdpower			power on/off the internal CD drive
+	audiopower		power on/off the internal sound card
+	lanpower		power on/off the internal ethernet card
+				(only in debug mode)
+
+Note that some files may be missing if they are not supported
+by your particular laptop model.
+
+Example usage:
+	# echo "1" > /sys/devices/platform/sony-laptop/brightness_default
+sets the lowest screen brightness for the next and later reboots,
+	# echo "8" > /sys/devices/platform/sony-laptop/brightness_default
+sets the highest screen brightness for the next and later reboots,
+	# cat /sys/devices/platform/sony-laptop/brightness_default
+retrieves the value.
+
+	# echo "0" > /sys/devices/platform/sony-laptop/audiopower
+powers off the sound card,
+	# echo "1" > /sys/devices/platform/sony-laptop/audiopower
+powers on the sound card.
+
+Development:
+------------
+
+If you want to help with the development of this driver (and
+you are not afraid of any side effects doing strange things with
+your ACPI BIOS could have on your laptop), load the driver and
+pass the option 'debug=1'.
+
+REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS.
+
+In your kernel logs you will find the list of all ACPI methods
+the SNC device has on your laptop. You can see the GCDP/GCDP methods
+used to power on/off the CD drive, but there are others.
+
+I HAVE NO IDEA WHAT THOSE METHODS DO.
+
+The sony-laptop driver creates, for some of those methods (the most
+current ones found on several Vaio models), an entry under
+/sys/devices/platform/sony-laptop, just like the 'cdpower' one.
+You can create other entries corresponding to your own laptop methods by
+further editing the source (see the 'sony_acpi_values' table, and add a new
+entry to this table with your get/set method names using the
+HANDLE_NAMES macro).
+
+Your mission, should you accept it, is to try finding out what
+those entries are for, by reading/writing random values from/to those
+files and finding out what the impact on your laptop is.
+
+Should you find anything interesting, please report it back to me,
+I will not disavow all knowledge of your actions :)
+
+Bugs/Limitations:
+-----------------
+
+* This driver is not based on official documentation from Sony
+  (because there is none), so there is no guarantee this driver
+  will work at all, or do the right thing. Although this hasn't
+  happened to me, this driver could do very bad things to your
+  laptop, including permanent damage.
+
+* The sony-laptop and sonypi drivers do not interact at all. In the
+  future, sonypi could use sony-laptop to do (part of) its business.
+
+* spicctrl, which is the userspace tool used to communicate with the
+  sonypi driver (through /dev/sonypi) does not try to use the
+  sony-laptop driver. In the future, spicctrl could try sonypi first,
+  and if it isn't present, try sony-laptop instead.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index b0fd71b3f66f..a384551a5978 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -247,6 +247,13 @@ L: linux-acpi@vger.kernel.org
 W:	http://acpi.sourceforge.net/
 S:	Supported
 
+ACPI VIDEO DRIVER
+P:	Luming Yu
+M:	luming.yu@intel.com
+L:	linux-acpi@vger.kernel.org
+W:	http://acpi.sourceforge.net/
+S:	Supported
+
 AD1816 SOUND DRIVER
 P:	Thorsten Knabe
 M:	Thorsten Knabe <linux@thorsten-knabe.de>
@@ -3061,6 +3068,8 @@ S: Maintained
 SONY VAIO CONTROL DEVICE DRIVER
 P:	Stelian Pop
 M:	stelian@popies.net
+P:	Mattia Dongili
+M:	malattia@linux.it
 W:	http://popies.net/sonypi/
 S:	Maintained
 
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ec01f08f5642..e101846ab7dd 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -159,8 +159,7 @@ void __init init_IRQ(void)
 	int irq;
 
 	for (irq = 0; irq < NR_IRQS; irq++)
-		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE |
-			IRQ_NOPROBE;
+		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
 	bad_irq_desc.affinity = CPU_MASK_ALL;
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index 40039b2a90b3..2703a730baf7 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -87,7 +87,7 @@ static struct clocksource clocksource_imx = {
 	.read		= imx_get_cycles,
 	.mask		= 0xFFFFFFFF,
 	.shift		= 20,
-	.is_continuous	= 1,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static int __init imx_clocksource_init(void)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 2ec9a9e9a04d..45068c3d8dcc 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -395,7 +395,7 @@ static struct clocksource clocksource_ixp4xx = {
 	.read		= ixp4xx_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
 	.shift		= 20,
-	.is_continuous	= 1,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 unsigned long ixp4xx_timer_freq = FREQ;
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
index 5773b55ef4a6..7e132fcccd47 100644
--- a/arch/arm/mach-netx/time.c
+++ b/arch/arm/mach-netx/time.c
@@ -62,7 +62,7 @@ static struct clocksource clocksource_netx = {
 	.read		= netx_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
 	.shift		= 20,
-	.is_continuous	= 1,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 /*
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index ee2beb400414..fc3b82a740a0 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -112,7 +112,7 @@ static struct clocksource clocksource_pxa = {
 	.read		= pxa_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
 	.shift		= 20,
-	.is_continuous	= 1,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static void __init pxa_timer_init(void)
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index d47e39f0e971..5974768a59e5 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -8,7 +8,6 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk.h>
-#include <linux/device.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -36,12 +35,11 @@ static struct eth_addr __initdata hw_addr[2];
 static struct eth_platform_data __initdata eth_data[2];
 extern struct lcdc_platform_data atstk1000_fb0_data;
 
-static struct spi_board_info spi_board_info[] __initdata = {
+static struct spi_board_info spi0_board_info[] __initdata = {
 	{
+		/* QVGA display */
 		.modalias	= "ltv350qv",
-		.controller_data = (void *)GPIO_PIN_PA(4),
 		.max_speed_hz	= 16000000,
-		.bus_num	= 0,
 		.chip_select	= 1,
 	},
 };
@@ -149,8 +147,7 @@ static int __init atstk1002_init(void)
 
 	set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
 
-	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-	at32_add_device_spi(0);
+	at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
 	at32_add_device_lcdc(0, &atstk1000_fb0_data);
 
 	return 0;
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index db8f8b55ffdf..7c279586fbba 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -8,14 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
-#define sys_nfsservctl sys_ni_syscall
-#endif
-
-#if !defined(CONFIG_SYSV_IPC)
-# define sys_ipc sys_ni_syscall
-#endif
-
 	.section .rodata,"a",@progbits
 	.type	sys_call_table,@object
 	.global	sys_call_table
@@ -129,7 +121,7 @@ sys_call_table:
 	.long	sys_getitimer		/* 105 */
 	.long	sys_swapoff
 	.long	sys_sysinfo
-	.long	sys_ipc
+	.long	sys_ni_syscall		/* was sys_ipc briefly */
 	.long	sys_sendfile
 	.long	sys_setdomainname	/* 110 */
 	.long	sys_newuname
@@ -287,4 +279,16 @@ sys_call_table:
 	.long	sys_tee
 	.long	sys_vmsplice
 	.long	__sys_epoll_pwait	/* 265 */
+	.long	sys_msgget
+	.long	sys_msgsnd
+	.long	sys_msgrcv
+	.long	sys_msgctl
+	.long	sys_semget		/* 270 */
+	.long	sys_semop
+	.long	sys_semctl
+	.long	sys_semtimedop
+	.long	sys_shmat
+	.long	sys_shmget		/* 275 */
+	.long	sys_shmdt
+	.long	sys_shmctl
 	.long	sys_ni_syscall		/* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index a2f74affaa98..c10833f2ee0c 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -37,7 +37,7 @@ static struct clocksource clocksource_avr32 = {
37 .read = read_cycle_count, 37 .read = read_cycle_count,
38 .mask = CLOCKSOURCE_MASK(32), 38 .mask = CLOCKSOURCE_MASK(32),
39 .shift = 16, 39 .shift = 16,
40 .is_continuous = 1, 40 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
41}; 41};
42 42
43/* 43/*
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index c1e477ec7576..bc235507c5c7 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/spi/spi.h>
 
 #include <asm/io.h>
 
@@ -310,8 +311,6 @@ static void genclk_mode(struct clk *clk, int enabled)
 {
 	u32 control;
 
-	BUG_ON(clk->index > 7);
-
 	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
 	if (enabled)
 		control |= SM_BIT(CEN);
@@ -325,11 +324,6 @@ static unsigned long genclk_get_rate(struct clk *clk)
 	u32 control;
 	unsigned long div = 1;
 
-	BUG_ON(clk->index > 7);
-
-	if (!clk->parent)
-		return 0;
-
 	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
 	if (control & SM_BIT(DIVEN))
 		div = 2 * (SM_BFEXT(DIV, control) + 1);
@@ -342,11 +336,6 @@ static long genclk_set_rate(struct clk *clk, unsigned long rate, int apply)
 	u32 control;
 	unsigned long parent_rate, actual_rate, div;
 
-	BUG_ON(clk->index > 7);
-
-	if (!clk->parent)
-		return 0;
-
 	parent_rate = clk->parent->get_rate(clk->parent);
 	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
 
@@ -373,11 +362,8 @@ int genclk_set_parent(struct clk *clk, struct clk *parent)
 {
 	u32 control;
 
-	BUG_ON(clk->index > 7);
-
 	printk("clk %s: new parent %s (was %s)\n",
-	       clk->name, parent->name,
-	       clk->parent ? clk->parent->name : "(null)");
+	       clk->name, parent->name, clk->parent->name);
 
 	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
 
@@ -399,6 +385,22 @@ int genclk_set_parent(struct clk *clk, struct clk *parent)
 	return 0;
 }
 
+static void __init genclk_init_parent(struct clk *clk)
+{
+	u32 control;
+	struct clk *parent;
+
+	BUG_ON(clk->index > 7);
+
+	control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
+	if (control & SM_BIT(OSCSEL))
+		parent = (control & SM_BIT(PLLSEL)) ? &pll1 : &osc1;
+	else
+		parent = (control & SM_BIT(PLLSEL)) ? &pll0 : &osc0;
+
+	clk->parent = parent;
+}
+
 /* --------------------------------------------------------------------
  * System peripherals
  * -------------------------------------------------------------------- */
@@ -750,8 +752,41 @@ static struct resource atmel_spi1_resource[] = {
 DEFINE_DEV(atmel_spi, 1);
 DEV_CLK(spi_clk, atmel_spi1, pba, 1);
 
-struct platform_device *__init at32_add_device_spi(unsigned int id)
+static void
+at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
+		      unsigned int n, const u8 *pins)
+{
+	unsigned int pin, mode;
+
+	for (; n; n--, b++) {
+		b->bus_num = bus_num;
+		if (b->chip_select >= 4)
+			continue;
+		pin = (unsigned)b->controller_data;
+		if (!pin) {
+			pin = pins[b->chip_select];
+			b->controller_data = (void *)pin;
+		}
+		mode = AT32_GPIOF_OUTPUT;
+		if (!(b->mode & SPI_CS_HIGH))
+			mode |= AT32_GPIOF_HIGH;
+		at32_select_gpio(pin, mode);
+	}
+}
+
+struct platform_device *__init
+at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
 {
+	/*
+	 * Manage the chipselects as GPIOs, normally using the same pins
+	 * the SPI controller expects; but boards can use other pins.
+	 */
+	static u8 __initdata spi0_pins[] =
+		{ GPIO_PIN_PA(3), GPIO_PIN_PA(4),
+		  GPIO_PIN_PA(5), GPIO_PIN_PA(20), };
+	static u8 __initdata spi1_pins[] =
+		{ GPIO_PIN_PB(2), GPIO_PIN_PB(3),
+		  GPIO_PIN_PB(4), GPIO_PIN_PA(27), };
 	struct platform_device *pdev;
 
 	switch (id) {
@@ -760,14 +795,7 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
 		select_peripheral(PA(0), PERIPH_A, 0);	/* MISO */
 		select_peripheral(PA(1), PERIPH_A, 0);	/* MOSI */
 		select_peripheral(PA(2), PERIPH_A, 0);	/* SCK */
-
-		/* NPCS[2:0] */
-		at32_select_gpio(GPIO_PIN_PA(3),
-				 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
-		at32_select_gpio(GPIO_PIN_PA(4),
-				 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
-		at32_select_gpio(GPIO_PIN_PA(5),
-				 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+		at32_spi_setup_slaves(0, b, n, spi0_pins);
 		break;
 
 	case 1:
@@ -775,20 +803,14 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
775 select_peripheral(PB(0), PERIPH_B, 0); /* MISO */ 803 select_peripheral(PB(0), PERIPH_B, 0); /* MISO */
776 select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */ 804 select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */
777 select_peripheral(PB(5), PERIPH_B, 0); /* SCK */ 805 select_peripheral(PB(5), PERIPH_B, 0); /* SCK */
778 806 at32_spi_setup_slaves(1, b, n, spi1_pins);
779 /* NPCS[2:0] */
780 at32_select_gpio(GPIO_PIN_PB(2),
781 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
782 at32_select_gpio(GPIO_PIN_PB(3),
783 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
784 at32_select_gpio(GPIO_PIN_PB(4),
785 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
786 break; 807 break;
787 808
788 default: 809 default:
789 return NULL; 810 return NULL;
790 } 811 }
791 812
813 spi_register_board_info(b, n);
792 platform_device_register(pdev); 814 platform_device_register(pdev);
793 return pdev; 815 return pdev;
794} 816}
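As a usage sketch, board code would now hand its slave table straight to the new entry point. This is not code from the patch; the modalias strings, rates, and the PB(10) override pin are invented for the example, while at32_add_device_spi() and the spi_board_info fields it reads come from the change above:

static struct spi_board_info __initdata board_spi0_slaves[] = {
	{
		.modalias	= "mtd_dataflash",	/* CS0, default pin GPIO_PIN_PA(3) */
		.max_speed_hz	= 10000000,
		.chip_select	= 0,
	},
	{
		.modalias	= "spidev",		/* CS1 on a non-default pin */
		.max_speed_hz	= 1000000,
		.chip_select	= 1,
		.controller_data = (void *)GPIO_PIN_PB(10),
		.mode		= SPI_CS_HIGH,	/* GPIO idles low for this slave */
	},
};

static int __init board_devices_init(void)
{
	at32_add_device_spi(0, board_spi0_slaves,
			    ARRAY_SIZE(board_spi0_slaves));
	return 0;
}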
@@ -872,6 +894,50 @@ at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data)
872 return pdev; 894 return pdev;
873} 895}
874 896
897/* --------------------------------------------------------------------
898 * GCLK
899 * -------------------------------------------------------------------- */
900static struct clk gclk0 = {
901 .name = "gclk0",
902 .mode = genclk_mode,
903 .get_rate = genclk_get_rate,
904 .set_rate = genclk_set_rate,
905 .set_parent = genclk_set_parent,
906 .index = 0,
907};
908static struct clk gclk1 = {
909 .name = "gclk1",
910 .mode = genclk_mode,
911 .get_rate = genclk_get_rate,
912 .set_rate = genclk_set_rate,
913 .set_parent = genclk_set_parent,
914 .index = 1,
915};
916static struct clk gclk2 = {
917 .name = "gclk2",
918 .mode = genclk_mode,
919 .get_rate = genclk_get_rate,
920 .set_rate = genclk_set_rate,
921 .set_parent = genclk_set_parent,
922 .index = 2,
923};
924static struct clk gclk3 = {
925 .name = "gclk3",
926 .mode = genclk_mode,
927 .get_rate = genclk_get_rate,
928 .set_rate = genclk_set_rate,
929 .set_parent = genclk_set_parent,
930 .index = 3,
931};
932static struct clk gclk4 = {
933 .name = "gclk4",
934 .mode = genclk_mode,
935 .get_rate = genclk_get_rate,
936 .set_rate = genclk_set_rate,
937 .set_parent = genclk_set_parent,
938 .index = 4,
939};
940
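A consumer-side sketch for the new generic clocks (an assumed usage pattern, not code from this patch; it presumes the avr32 clk framework resolves clk_get(NULL, "gclk0") against at32_clock_list[] below):

static int __init route_gclk0(void)
{
	struct clk *gclk = clk_get(NULL, "gclk0");
	struct clk *pll = clk_get(NULL, "pll0");

	if (IS_ERR(gclk) || IS_ERR(pll))
		return -ENODEV;

	clk_set_parent(gclk, pll);	/* selects pll0 via the GCCTRL bits */
	clk_set_rate(gclk, 12000000);	/* rounded to a reachable divider */
	clk_enable(gclk);		/* users: 0 -> 1 */
	return 0;
}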
875struct clk *at32_clock_list[] = { 941struct clk *at32_clock_list[] = {
876 &osc32k, 942 &osc32k,
877 &osc0, 943 &osc0,
@@ -908,6 +974,11 @@ struct clk *at32_clock_list[] = {
908 &atmel_spi1_spi_clk, 974 &atmel_spi1_spi_clk,
909 &lcdc0_hclk, 975 &lcdc0_hclk,
910 &lcdc0_pixclk, 976 &lcdc0_pixclk,
977 &gclk0,
978 &gclk1,
979 &gclk2,
980 &gclk3,
981 &gclk4,
911}; 982};
912unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list); 983unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list);
913 984
@@ -936,6 +1007,13 @@ void __init at32_clock_init(void)
936 if (sm_readl(sm, PM_PLL1) & SM_BIT(PLLOSC)) 1007 if (sm_readl(sm, PM_PLL1) & SM_BIT(PLLOSC))
937 pll1.parent = &osc1; 1008 pll1.parent = &osc1;
938 1009
1010 genclk_init_parent(&gclk0);
1011 genclk_init_parent(&gclk1);
1012 genclk_init_parent(&gclk2);
1013 genclk_init_parent(&gclk3);
1014 genclk_init_parent(&gclk4);
1015 genclk_init_parent(&lcdc0_pixclk);
1016
939 /* 1017 /*
940 * Turn on all clocks that have at least one user already, and 1018 * Turn on all clocks that have at least one user already, and
941 * turn off everything else. We only do this for module 1019 * turn off everything else. We only do this for module
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 3d0d1097389f..49e7b12fe710 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -63,7 +63,11 @@ EXPORT_SYMBOL(clk_enable);
63 63
64static void __clk_disable(struct clk *clk) 64static void __clk_disable(struct clk *clk)
65{ 65{
66 BUG_ON(clk->users == 0); 66 if (clk->users == 0) {
67 printk(KERN_ERR "%s: mismatched disable\n", clk->name);
68 WARN_ON(1);
69 return;
70 }
67 71
68 if (--clk->users == 0 && clk->mode) 72 if (--clk->users == 0 && clk->mode)
69 clk->mode(clk, 0); 73 clk->mode(clk, 0);
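The new path trades BUG_ON() for a log line plus WARN_ON(), so an unbalanced disable is diagnosable instead of fatal. A sketch of the contract the users refcount enforces (illustrative only, hypothetical caller):

static void example_xfer(struct clk *clk)
{
	clk_enable(clk);	/* users: 0 -> 1, clk->mode(clk, 1) gates the clock on */
	/* ... talk to the peripheral ... */
	clk_disable(clk);	/* users: 1 -> 0, clk->mode(clk, 0) gates it off */
	/*
	 * A second clk_disable() here would now print
	 * "<name>: mismatched disable", warn and return,
	 * leaving the refcount at zero.
	 */
}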
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 595fb771366e..1df4a1f14289 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -18,6 +18,18 @@ config GENERIC_TIME
18 bool 18 bool
19 default y 19 default y
20 20
21config CLOCKSOURCE_WATCHDOG
22 bool
23 default y
24
25config GENERIC_CLOCKEVENTS
26 bool
27 default y
28
29config GENERIC_CLOCKEVENTS_BROADCAST
30 bool
31 default y
32
21config LOCKDEP_SUPPORT 33config LOCKDEP_SUPPORT
22 bool 34 bool
23 default y 35 default y
@@ -74,6 +86,8 @@ source "init/Kconfig"
74 86
75menu "Processor type and features" 87menu "Processor type and features"
76 88
89source "kernel/time/Kconfig"
90
77config SMP 91config SMP
78 bool "Symmetric multi-processing support" 92 bool "Symmetric multi-processing support"
79 ---help--- 93 ---help---
@@ -205,7 +219,7 @@ config PARAVIRT
205 219
206config VMI 220config VMI
207 bool "VMI Paravirt-ops support" 221 bool "VMI Paravirt-ops support"
208 depends on PARAVIRT 222 depends on PARAVIRT && !NO_HZ
209 default y 223 default y
210 help 224 help
211 VMI provides a paravirtualized interface to multiple hypervisors 225 VMI provides a paravirtualized interface to multiple hypervisors
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index cbe4e601885c..4ae3dcf1d2f0 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_X86_MSR) += msr.o
18obj-$(CONFIG_X86_CPUID) += cpuid.o 18obj-$(CONFIG_X86_CPUID) += cpuid.o
19obj-$(CONFIG_MICROCODE) += microcode.o 19obj-$(CONFIG_MICROCODE) += microcode.o
20obj-$(CONFIG_APM) += apm.o 20obj-$(CONFIG_APM) += apm.o
21obj-$(CONFIG_X86_SMP) += smp.o smpboot.o 21obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
22obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o 22obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
23obj-$(CONFIG_X86_MPPARSE) += mpparse.o 23obj-$(CONFIG_X86_MPPARSE) += mpparse.o
24obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o 24obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
@@ -32,7 +32,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
32obj-$(CONFIG_MODULES) += module.o 32obj-$(CONFIG_MODULES) += module.o
33obj-y += sysenter.o vsyscall.o 33obj-y += sysenter.o vsyscall.o
34obj-$(CONFIG_ACPI_SRAT) += srat.o 34obj-$(CONFIG_ACPI_SRAT) += srat.o
35obj-$(CONFIG_HPET_TIMER) += time_hpet.o
36obj-$(CONFIG_EFI) += efi.o efi_stub.o 35obj-$(CONFIG_EFI) += efi.o efi_stub.o
37obj-$(CONFIG_DOUBLEFAULT) += doublefault.o 36obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
38obj-$(CONFIG_VM86) += vm86.o 37obj-$(CONFIG_VM86) += vm86.o
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index e94aff6888ca..e5eb97a910ed 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/acpi.h> 27#include <linux/acpi.h>
28#include <linux/acpi_pmtmr.h>
28#include <linux/efi.h> 29#include <linux/efi.h>
29#include <linux/cpumask.h> 30#include <linux/cpumask.h>
30#include <linux/module.h> 31#include <linux/module.h>
@@ -615,6 +616,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
615} 616}
616 617
617#ifdef CONFIG_HPET_TIMER 618#ifdef CONFIG_HPET_TIMER
619#include <asm/hpet.h>
618 620
619static int __init acpi_parse_hpet(struct acpi_table_header *table) 621static int __init acpi_parse_hpet(struct acpi_table_header *table)
620{ 622{
@@ -645,24 +647,11 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
645 hpet_res->end = (1 * 1024) - 1; 647 hpet_res->end = (1 * 1024) - 1;
646 } 648 }
647 649
648#ifdef CONFIG_X86_64 650 hpet_address = hpet_tbl->address.address;
649 vxtime.hpet_address = hpet_tbl->address.address;
650
651 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 651 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
652 hpet_tbl->id, vxtime.hpet_address); 652 hpet_tbl->id, hpet_address);
653
654 res_start = vxtime.hpet_address;
655#else /* X86 */
656 {
657 extern unsigned long hpet_address;
658 653
659 hpet_address = hpet_tbl->address.address; 654 res_start = hpet_address;
660 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
661 hpet_tbl->id, hpet_address);
662
663 res_start = hpet_address;
664 }
665#endif /* X86 */
666 655
667 if (hpet_res) { 656 if (hpet_res) {
668 hpet_res->start = res_start; 657 hpet_res->start = res_start;
@@ -676,10 +665,6 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
676#define acpi_parse_hpet NULL 665#define acpi_parse_hpet NULL
677#endif 666#endif
678 667
679#ifdef CONFIG_X86_PM_TIMER
680extern u32 pmtmr_ioport;
681#endif
682
683static int __init acpi_parse_fadt(struct acpi_table_header *table) 668static int __init acpi_parse_fadt(struct acpi_table_header *table)
684{ 669{
685 670
@@ -865,10 +850,9 @@ static inline int acpi_parse_madt_ioapic_entries(void)
865static void __init acpi_process_madt(void) 850static void __init acpi_process_madt(void)
866{ 851{
867#ifdef CONFIG_X86_LOCAL_APIC 852#ifdef CONFIG_X86_LOCAL_APIC
868 int count, error; 853 int error;
869 854
870 count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt); 855 if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
871 if (count >= 1) {
872 856
873 /* 857 /*
874 * Parse MADT LAPIC entries 858 * Parse MADT LAPIC entries
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index f4159e0a7ae9..9655c233e6f1 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -25,6 +25,8 @@
25#include <linux/kernel_stat.h> 25#include <linux/kernel_stat.h>
26#include <linux/sysdev.h> 26#include <linux/sysdev.h>
27#include <linux/cpu.h> 27#include <linux/cpu.h>
28#include <linux/clockchips.h>
29#include <linux/acpi_pmtmr.h>
28#include <linux/module.h> 30#include <linux/module.h>
29 31
30#include <asm/atomic.h> 32#include <asm/atomic.h>
@@ -45,128 +47,549 @@
45#include "io_ports.h" 47#include "io_ports.h"
46 48
47/* 49/*
 48 * cpu_mask that denotes the CPUs that need timer interrupts coming in as 50 * Sanity check
49 * IPIs in place of local APIC timers
50 */ 51 */
51static cpumask_t timer_bcast_ipi; 52#if (SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F
53# error SPURIOUS_APIC_VECTOR definition error
54#endif
52 55
53/* 56/*
54 * Knob to control our willingness to enable the local APIC. 57 * Knob to control our willingness to enable the local APIC.
58 *
59 * -1=force-disable, +1=force-enable
55 */ 60 */
56static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */ 61static int enable_local_apic __initdata = 0;
57
58static inline void lapic_disable(void)
59{
60 enable_local_apic = -1;
61 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
62}
63 62
64static inline void lapic_enable(void) 63/* Local APIC timer verification ok */
65{ 64static int local_apic_timer_verify_ok;
66 enable_local_apic = 1;
67}
68 65
69/* 66/*
70 * Debug level 67 * Debug level, exported for io_apic.c
71 */ 68 */
72int apic_verbosity; 69int apic_verbosity;
73 70
71static unsigned int calibration_result;
74 72
73static int lapic_next_event(unsigned long delta,
74 struct clock_event_device *evt);
75static void lapic_timer_setup(enum clock_event_mode mode,
76 struct clock_event_device *evt);
77static void lapic_timer_broadcast(cpumask_t mask);
75static void apic_pm_activate(void); 78static void apic_pm_activate(void);
76 79
80/*
81 * The local apic timer can be used for any function which is CPU local.
82 */
83static struct clock_event_device lapic_clockevent = {
84 .name = "lapic",
85 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
86 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
87 .shift = 32,
88 .set_mode = lapic_timer_setup,
89 .set_next_event = lapic_next_event,
90 .broadcast = lapic_timer_broadcast,
91 .rating = 100,
92 .irq = -1,
93};
94static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
95
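For context: the .mult/.shift pair (filled in later by setup_boot_APIC_clock() via div_sc()) is what lets the clockevents core turn a nanosecond delta into the raw count handed to lapic_next_event(). A sketch of that conversion, assuming it matches the generic framework of this era (kernel/time/clockevents.c):

static unsigned long lapic_ns_to_ticks(struct clock_event_device *evt,
				       unsigned long long ns)
{
	/* clockevent_delta2ns() is the inverse: (ticks << shift) / mult */
	return (unsigned long)((ns * evt->mult) >> evt->shift);
}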
96/* Local APIC was disabled by the BIOS and enabled by the kernel */
97static int enabled_via_apicbase;
98
99/*
100 * Get the LAPIC version
101 */
102static inline int lapic_get_version(void)
103{
104 return GET_APIC_VERSION(apic_read(APIC_LVR));
105}
106
107/*
 108 * Check whether the APIC is integrated or a separate chip
109 */
110static inline int lapic_is_integrated(void)
111{
112 return APIC_INTEGRATED(lapic_get_version());
113}
114
115/*
 116 * Check whether this is a modern or a first-generation APIC
117 */
77static int modern_apic(void) 118static int modern_apic(void)
78{ 119{
79 unsigned int lvr, version;
80 /* AMD systems use old APIC versions, so check the CPU */ 120 /* AMD systems use old APIC versions, so check the CPU */
81 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && 121 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
82 boot_cpu_data.x86 >= 0xf) 122 boot_cpu_data.x86 >= 0xf)
83 return 1; 123 return 1;
84 lvr = apic_read(APIC_LVR); 124 return lapic_get_version() >= 0x14;
85 version = GET_APIC_VERSION(lvr); 125}
86 return version >= 0x14; 126
127/**
128 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
129 */
130void enable_NMI_through_LVT0 (void * dummy)
131{
132 unsigned int v = APIC_DM_NMI;
133
134 /* Level triggered for 82489DX */
135 if (!lapic_is_integrated())
136 v |= APIC_LVT_LEVEL_TRIGGER;
137 apic_write_around(APIC_LVT0, v);
138}
139
140/**
141 * get_physical_broadcast - Get number of physical broadcast IDs
142 */
143int get_physical_broadcast(void)
144{
145 return modern_apic() ? 0xff : 0xf;
146}
147
148/**
149 * lapic_get_maxlvt - get the maximum number of local vector table entries
150 */
151int lapic_get_maxlvt(void)
152{
153 unsigned int v = apic_read(APIC_LVR);
154
155 /* 82489DXs do not report # of LVT entries. */
156 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
87} 157}
88 158
89/* 159/*
90 * 'what should we do if we get a hw irq event on an illegal vector'. 160 * Local APIC timer
91 * each architecture has to answer this themselves.
92 */ 161 */
93void ack_bad_irq(unsigned int irq) 162
163/* Clock divisor is set to 16 */
164#define APIC_DIVISOR 16
165
166/*
167 * This function sets up the local APIC timer, with a timeout of
 168 * 'clocks' APIC bus clock cycles. During calibration we actually call
 169 * this function twice on the boot CPU, once with a bogus timeout
 170 * value, and a second time for real. The other (non-calibrating) CPUs
171 * call this function only once, with the real, calibrated value.
172 *
173 * We do reads before writes even if unnecessary, to get around the
174 * P5 APIC double write bug.
175 */
176static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
94{ 177{
95 printk("unexpected IRQ trap at vector %02x\n", irq); 178 unsigned int lvtt_value, tmp_value;
179
180 lvtt_value = LOCAL_TIMER_VECTOR;
181 if (!oneshot)
182 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
183 if (!lapic_is_integrated())
184 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
185
186 if (!irqen)
187 lvtt_value |= APIC_LVT_MASKED;
188
189 apic_write_around(APIC_LVTT, lvtt_value);
190
96 /* 191 /*
97 * Currently unexpected vectors happen only on SMP and APIC. 192 * Divide PICLK by 16
98 * We _must_ ack these because every local APIC has only N
99 * irq slots per priority level, and a 'hanging, unacked' IRQ
100 * holds up an irq slot - in excessive cases (when multiple
101 * unexpected vectors occur) that might lock up the APIC
102 * completely.
103 * But only ack when the APIC is enabled -AK
104 */ 193 */
105 if (cpu_has_apic) 194 tmp_value = apic_read(APIC_TDCR);
106 ack_APIC_irq(); 195 apic_write_around(APIC_TDCR, (tmp_value
196 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
197 | APIC_TDR_DIV_16);
198
199 if (!oneshot)
200 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
107} 201}
108 202
109void __init apic_intr_init(void) 203/*
204 * Program the next event, relative to now
205 */
206static int lapic_next_event(unsigned long delta,
207 struct clock_event_device *evt)
208{
209 apic_write_around(APIC_TMICT, delta);
210 return 0;
211}
212
213/*
 214 * Set up the lapic timer in periodic or oneshot mode
215 */
216static void lapic_timer_setup(enum clock_event_mode mode,
217 struct clock_event_device *evt)
218{
219 unsigned long flags;
220 unsigned int v;
221
 222 /* Lapic used for broadcast? */
223 if (!local_apic_timer_verify_ok)
224 return;
225
226 local_irq_save(flags);
227
228 switch (mode) {
229 case CLOCK_EVT_MODE_PERIODIC:
230 case CLOCK_EVT_MODE_ONESHOT:
231 __setup_APIC_LVTT(calibration_result,
232 mode != CLOCK_EVT_MODE_PERIODIC, 1);
233 break;
234 case CLOCK_EVT_MODE_UNUSED:
235 case CLOCK_EVT_MODE_SHUTDOWN:
236 v = apic_read(APIC_LVTT);
237 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
238 apic_write_around(APIC_LVTT, v);
239 break;
240 }
241
242 local_irq_restore(flags);
243}
244
245/*
246 * Local APIC timer broadcast function
247 */
248static void lapic_timer_broadcast(cpumask_t mask)
110{ 249{
111#ifdef CONFIG_SMP 250#ifdef CONFIG_SMP
112 smp_intr_init(); 251 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
113#endif 252#endif
114 /* self generated IPI for local APIC timer */ 253}
115 set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
116 254
117 /* IPI vectors for APIC spurious and error interrupts */ 255/*
 118 set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 256 * Set up the local APIC timer for this CPU. Copy the initialized values
119 set_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 257 * of the boot CPU and register the clock event in the framework.
258 */
259static void __devinit setup_APIC_timer(void)
260{
261 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
120 262
121 /* thermal monitor LVT interrupt */ 263 memcpy(levt, &lapic_clockevent, sizeof(*levt));
122#ifdef CONFIG_X86_MCE_P4THERMAL 264 levt->cpumask = cpumask_of_cpu(smp_processor_id());
123 set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 265
124#endif 266 clockevents_register_device(levt);
125} 267}
126 268
127/* Using APIC to generate smp_local_timer_interrupt? */ 269/*
 128int using_apic_timer __read_mostly = 0; 270 * In this function we calibrate the APIC bus clock to the external timer.
271 *
272 * We want to do the calibration only once since we want to have local timer
 273 * irqs synchronized. CPUs connected by the same APIC bus have the very same bus
274 * frequency.
275 *
 276 * This was previously done by reading the PIT/HPET and waiting for a
 277 * wraparound to find out that a tick has elapsed. I have a box where the PIT
 278 * readout is broken, so it never gets out of the wait loop again. This was
279 * also reported by others.
280 *
281 * Monitoring the jiffies value is inaccurate and the clockevents
282 * infrastructure allows us to do a simple substitution of the interrupt
283 * handler.
284 *
285 * The calibration routine also uses the pm_timer when possible, as the PIT
286 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
287 * back to normal later in the boot process).
288 */
129 289
130static int enabled_via_apicbase; 290#define LAPIC_CAL_LOOPS (HZ/10)
131 291
132void enable_NMI_through_LVT0 (void * dummy) 292static __initdata volatile int lapic_cal_loops = -1;
293static __initdata long lapic_cal_t1, lapic_cal_t2;
294static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
295static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
296static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
297
298/*
299 * Temporary interrupt handler.
300 */
301static void __init lapic_cal_handler(struct clock_event_device *dev)
133{ 302{
134 unsigned int v, ver; 303 unsigned long long tsc = 0;
304 long tapic = apic_read(APIC_TMCCT);
305 unsigned long pm = acpi_pm_read_early();
135 306
136 ver = apic_read(APIC_LVR); 307 if (cpu_has_tsc)
137 ver = GET_APIC_VERSION(ver); 308 rdtscll(tsc);
138 v = APIC_DM_NMI; /* unmask and set to NMI */ 309
139 if (!APIC_INTEGRATED(ver)) /* 82489DX */ 310 switch (lapic_cal_loops++) {
140 v |= APIC_LVT_LEVEL_TRIGGER; 311 case 0:
141 apic_write_around(APIC_LVT0, v); 312 lapic_cal_t1 = tapic;
313 lapic_cal_tsc1 = tsc;
314 lapic_cal_pm1 = pm;
315 lapic_cal_j1 = jiffies;
316 break;
317
318 case LAPIC_CAL_LOOPS:
319 lapic_cal_t2 = tapic;
320 lapic_cal_tsc2 = tsc;
321 if (pm < lapic_cal_pm1)
322 pm += ACPI_PM_OVRRUN;
323 lapic_cal_pm2 = pm;
324 lapic_cal_j2 = jiffies;
325 break;
326 }
142} 327}
143 328
144int get_physical_broadcast(void) 329/*
 330 * Set up the boot APIC
331 *
332 * Calibrate and verify the result.
333 */
334void __init setup_boot_APIC_clock(void)
145{ 335{
146 if (modern_apic()) 336 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
147 return 0xff; 337 const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
148 else 338 const long pm_thresh = pm_100ms/100;
149 return 0xf; 339 void (*real_handler)(struct clock_event_device *dev);
340 unsigned long deltaj;
341 long delta, deltapm;
342
343 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
344 "calibrating APIC timer ...\n");
345
346 local_irq_disable();
347
348 /* Replace the global interrupt handler */
349 real_handler = global_clock_event->event_handler;
350 global_clock_event->event_handler = lapic_cal_handler;
351
352 /*
353 * Setup the APIC counter to 1e9. There is no way the lapic
354 * can underflow in the 100ms detection time frame
355 */
356 __setup_APIC_LVTT(1000000000, 0, 0);
357
358 /* Let the interrupts run */
359 local_irq_enable();
360
361 while(lapic_cal_loops <= LAPIC_CAL_LOOPS);
362
363 local_irq_disable();
364
365 /* Restore the real event handler */
366 global_clock_event->event_handler = real_handler;
367
368 /* Build delta t1-t2 as apic timer counts down */
369 delta = lapic_cal_t1 - lapic_cal_t2;
370 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
371
 372 /* Check if the PM timer is available */
373 deltapm = lapic_cal_pm2 - lapic_cal_pm1;
374 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
375
376 if (deltapm) {
377 unsigned long mult;
378 u64 res;
379
380 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
381
382 if (deltapm > (pm_100ms - pm_thresh) &&
383 deltapm < (pm_100ms + pm_thresh)) {
384 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
385 } else {
386 res = (((u64) deltapm) * mult) >> 22;
387 do_div(res, 1000000);
388 printk(KERN_WARNING "APIC calibration not consistent "
389 "with PM Timer: %ldms instead of 100ms\n",
390 (long)res);
391 /* Correct the lapic counter value */
392 res = (((u64) delta ) * pm_100ms);
393 do_div(res, deltapm);
394 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
395 "%lu (%ld)\n", (unsigned long) res, delta);
396 delta = (long) res;
397 }
398 }
399
400 /* Calculate the scaled math multiplication factor */
401 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, 32);
402 lapic_clockevent.max_delta_ns =
403 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
404 lapic_clockevent.min_delta_ns =
405 clockevent_delta2ns(0xF, &lapic_clockevent);
406
407 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
408
409 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
410 apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
411 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
412 calibration_result);
413
414 if (cpu_has_tsc) {
415 delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
416 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
417 "%ld.%04ld MHz.\n",
418 (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
419 (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
420 }
421
422 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
423 "%u.%04u MHz.\n",
424 calibration_result / (1000000 / HZ),
425 calibration_result % (1000000 / HZ));
426
427
428 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
429
430 /*
 431 * Set up the apic timer manually
432 */
433 local_apic_timer_verify_ok = 1;
434 levt->event_handler = lapic_cal_handler;
435 lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
436 lapic_cal_loops = -1;
437
438 /* Let the interrupts run */
439 local_irq_enable();
440
441 while(lapic_cal_loops <= LAPIC_CAL_LOOPS);
442
443 local_irq_disable();
444
445 /* Stop the lapic timer */
446 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
447
448 local_irq_enable();
449
450 /* Jiffies delta */
451 deltaj = lapic_cal_j2 - lapic_cal_j1;
452 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
453
 454 /* Check if the PM timer is available */
455 deltapm = lapic_cal_pm2 - lapic_cal_pm1;
456 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
457
458 local_apic_timer_verify_ok = 0;
459
460 if (deltapm) {
461 if (deltapm > (pm_100ms - pm_thresh) &&
462 deltapm < (pm_100ms + pm_thresh)) {
463 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
 464 /* Check if the jiffies result is consistent */
465 if (deltaj < LAPIC_CAL_LOOPS-2 ||
466 deltaj > LAPIC_CAL_LOOPS+2) {
467 /*
 468 * Not sure what we can do about this one.
 469 * When high resolution timers are active
470 * and the lapic timer does not stop in C3
471 * we are fine. Otherwise more trouble might
472 * be waiting. -- tglx
473 */
474 printk(KERN_WARNING "Global event device %s "
475 "has wrong frequency "
476 "(%lu ticks instead of %d)\n",
477 global_clock_event->name, deltaj,
478 LAPIC_CAL_LOOPS);
479 }
480 local_apic_timer_verify_ok = 1;
481 }
482 } else {
 483 /* Check if the jiffies result is consistent */
484 if (deltaj >= LAPIC_CAL_LOOPS-2 &&
485 deltaj <= LAPIC_CAL_LOOPS+2) {
486 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
487 local_apic_timer_verify_ok = 1;
488 }
489 }
490
491 if (!local_apic_timer_verify_ok) {
492 printk(KERN_WARNING
493 "APIC timer disabled due to verification failure.\n");
494 /* No broadcast on UP ! */
495 if (num_possible_cpus() == 1)
496 return;
497 } else
498 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
499
 500 /* Set up the lapic or request the broadcast */
501 setup_APIC_timer();
502}
503
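A worked example of the calibration arithmetic above, with illustrative numbers (HZ = 1000 and a 200 MHz APIC bus clock are assumptions, not measurements from this patch):

/*
 * LAPIC_CAL_LOOPS = HZ/10 = 100 ticks, i.e. 100 ms of wall time.
 * APIC_TMCCT counts down at bus_clk / APIC_DIVISOR = 200 MHz / 16,
 * so over those 100 ms:
 *
 *	delta = t1 - t2 = 200e6 * 0.1 / 16 = 1,250,000 counts
 *
 *	calibration_result = delta * APIC_DIVISOR / LAPIC_CAL_LOOPS
 *			   = 1,250,000 * 16 / 100
 *			   = 200,000 bus clocks per tick (200 MHz / HZ)
 *
 * which is exactly what __setup_APIC_LVTT() divides back down by
 * APIC_DIVISOR (200,000 / 16 = 12,500) when programming APIC_TMICT
 * for periodic mode.
 */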
504void __devinit setup_secondary_APIC_clock(void)
505{
506 setup_APIC_timer();
150} 507}
151 508
152int get_maxlvt(void) 509/*
510 * The guts of the apic timer interrupt
511 */
512static void local_apic_timer_interrupt(void)
153{ 513{
154 unsigned int v, ver, maxlvt; 514 int cpu = smp_processor_id();
515 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
155 516
156 v = apic_read(APIC_LVR); 517 /*
 157 ver = GET_APIC_VERSION(v); 518 * Normally we should not be here until the LAPIC has been initialized, but
 158 /* 82489DXs do not report # of LVT entries. */ 519 * in some cases like kdump, it's possible that there is a pending LAPIC
 159 maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2; 520 * timer interrupt from the previous kernel's context that is delivered in
 160 return maxlvt; 521 * the new kernel the moment interrupts are enabled.
522 *
 523 * Interrupts are enabled early and the LAPIC is set up much later, hence
 524 * it's possible that when we get here evt->event_handler is NULL.
525 * Check for event_handler being NULL and discard the interrupt as
526 * spurious.
527 */
528 if (!evt->event_handler) {
529 printk(KERN_WARNING
530 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
531 /* Switch it off */
532 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
533 return;
534 }
535
536 per_cpu(irq_stat, cpu).apic_timer_irqs++;
537
538 evt->event_handler(evt);
539}
540
541/*
542 * Local APIC timer interrupt. This is the most natural way for doing
543 * local interrupts, but local timer interrupts can be emulated by
544 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
545 *
546 * [ if a single-CPU system runs an SMP kernel then we call the local
547 * interrupt as well. Thus we cannot inline the local irq ... ]
548 */
549
550void fastcall smp_apic_timer_interrupt(struct pt_regs *regs)
551{
552 struct pt_regs *old_regs = set_irq_regs(regs);
553
554 /*
555 * NOTE! We'd better ACK the irq immediately,
556 * because timer handling can be slow.
557 */
558 ack_APIC_irq();
559 /*
560 * update_process_times() expects us to have done irq_enter().
 561 * Besides, if we don't, timer interrupts ignore the global
562 * interrupt lock, which is the WrongThing (tm) to do.
563 */
564 exit_idle();
565 irq_enter();
566 local_apic_timer_interrupt();
567 irq_exit();
568
569 set_irq_regs(old_regs);
161} 570}
162 571
572int setup_profiling_timer(unsigned int multiplier)
573{
574 return -EINVAL;
575}
576
577/*
578 * Local APIC start and shutdown
579 */
580
581/**
582 * clear_local_APIC - shutdown the local APIC
583 *
 584 * This is called when a CPU is disabled and before rebooting, so the state of
 585 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
586 * leftovers during boot.
587 */
163void clear_local_APIC(void) 588void clear_local_APIC(void)
164{ 589{
165 int maxlvt; 590 int maxlvt = lapic_get_maxlvt();
166 unsigned long v; 591 unsigned long v;
167 592
168 maxlvt = get_maxlvt();
169
170 /* 593 /*
171 * Masking an LVT entry can trigger a local APIC error 594 * Masking an LVT entry can trigger a local APIC error
172 * if the vector is zero. Mask LVTERR first to prevent this. 595 * if the vector is zero. Mask LVTERR first to prevent this.
@@ -190,7 +613,7 @@ void clear_local_APIC(void)
190 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED); 613 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
191 } 614 }
192 615
 193/* let's not touch this if we didn't frob it */ 616 /* let's not touch this if we didn't frob it */
194#ifdef CONFIG_X86_MCE_P4THERMAL 617#ifdef CONFIG_X86_MCE_P4THERMAL
195 if (maxlvt >= 5) { 618 if (maxlvt >= 5) {
196 v = apic_read(APIC_LVTTHMR); 619 v = apic_read(APIC_LVTTHMR);
@@ -212,85 +635,18 @@ void clear_local_APIC(void)
212 if (maxlvt >= 5) 635 if (maxlvt >= 5)
213 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED); 636 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
214#endif 637#endif
215 v = GET_APIC_VERSION(apic_read(APIC_LVR)); 638 /* Integrated APIC (!82489DX) ? */
216 if (APIC_INTEGRATED(v)) { /* !82489DX */ 639 if (lapic_is_integrated()) {
217 if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */ 640 if (maxlvt > 3)
641 /* Clear ESR due to Pentium errata 3AP and 11AP */
218 apic_write(APIC_ESR, 0); 642 apic_write(APIC_ESR, 0);
219 apic_read(APIC_ESR); 643 apic_read(APIC_ESR);
220 } 644 }
221} 645}
222 646
223void __init connect_bsp_APIC(void) 647/**
224{ 648 * disable_local_APIC - clear and disable the local APIC
225 if (pic_mode) { 649 */
226 /*
227 * Do not trust the local APIC being empty at bootup.
228 */
229 clear_local_APIC();
230 /*
231 * PIC mode, enable APIC mode in the IMCR, i.e.
232 * connect BSP's local APIC to INT and NMI lines.
233 */
234 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
235 "enabling APIC mode.\n");
236 outb(0x70, 0x22);
237 outb(0x01, 0x23);
238 }
239 enable_apic_mode();
240}
241
242void disconnect_bsp_APIC(int virt_wire_setup)
243{
244 if (pic_mode) {
245 /*
246 * Put the board back into PIC mode (has an effect
247 * only on certain older boards). Note that APIC
248 * interrupts, including IPIs, won't work beyond
249 * this point! The only exception are INIT IPIs.
250 */
251 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
252 "entering PIC mode.\n");
253 outb(0x70, 0x22);
254 outb(0x00, 0x23);
255 }
256 else {
257 /* Go back to Virtual Wire compatibility mode */
258 unsigned long value;
259
260 /* For the spurious interrupt use vector F, and enable it */
261 value = apic_read(APIC_SPIV);
262 value &= ~APIC_VECTOR_MASK;
263 value |= APIC_SPIV_APIC_ENABLED;
264 value |= 0xf;
265 apic_write_around(APIC_SPIV, value);
266
267 if (!virt_wire_setup) {
268 /* For LVT0 make it edge triggered, active high, external and enabled */
269 value = apic_read(APIC_LVT0);
270 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
271 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
272 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
273 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
274 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
275 apic_write_around(APIC_LVT0, value);
276 }
277 else {
278 /* Disable LVT0 */
279 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
280 }
281
282 /* For LVT1 make it edge triggered, active high, nmi and enabled */
283 value = apic_read(APIC_LVT1);
284 value &= ~(
285 APIC_MODE_MASK | APIC_SEND_PENDING |
286 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
287 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
288 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
289 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
290 apic_write_around(APIC_LVT1, value);
291 }
292}
293
294void disable_local_APIC(void) 650void disable_local_APIC(void)
295{ 651{
296 unsigned long value; 652 unsigned long value;
@@ -305,8 +661,13 @@ void disable_local_APIC(void)
305 value &= ~APIC_SPIV_APIC_ENABLED; 661 value &= ~APIC_SPIV_APIC_ENABLED;
306 apic_write_around(APIC_SPIV, value); 662 apic_write_around(APIC_SPIV, value);
307 663
664 /*
665 * When LAPIC was disabled by the BIOS and enabled by the kernel,
666 * restore the disabled state.
667 */
308 if (enabled_via_apicbase) { 668 if (enabled_via_apicbase) {
309 unsigned int l, h; 669 unsigned int l, h;
670
310 rdmsr(MSR_IA32_APICBASE, l, h); 671 rdmsr(MSR_IA32_APICBASE, l, h);
311 l &= ~MSR_IA32_APICBASE_ENABLE; 672 l &= ~MSR_IA32_APICBASE_ENABLE;
312 wrmsr(MSR_IA32_APICBASE, l, h); 673 wrmsr(MSR_IA32_APICBASE, l, h);
@@ -314,6 +675,28 @@ void disable_local_APIC(void)
314} 675}
315 676
316/* 677/*
 678 * If Linux enabled the LAPIC against the BIOS default, disable it again before
679 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
680 * not power-off. Additionally clear all LVT entries before disable_local_APIC
681 * for the case where Linux didn't enable the LAPIC.
682 */
683void lapic_shutdown(void)
684{
685 unsigned long flags;
686
687 if (!cpu_has_apic)
688 return;
689
690 local_irq_save(flags);
691 clear_local_APIC();
692
693 if (enabled_via_apicbase)
694 disable_local_APIC();
695
696 local_irq_restore(flags);
697}
698
699/*
317 * This is to verify that we're looking at a real local APIC. 700 * This is to verify that we're looking at a real local APIC.
318 * Check these against your board if the CPUs aren't getting 701 * Check these against your board if the CPUs aren't getting
319 * started for no apparent reason. 702 * started for no apparent reason.
@@ -345,7 +728,7 @@ int __init verify_local_APIC(void)
345 reg1 = GET_APIC_VERSION(reg0); 728 reg1 = GET_APIC_VERSION(reg0);
346 if (reg1 == 0x00 || reg1 == 0xff) 729 if (reg1 == 0x00 || reg1 == 0xff)
347 return 0; 730 return 0;
348 reg1 = get_maxlvt(); 731 reg1 = lapic_get_maxlvt();
349 if (reg1 < 0x02 || reg1 == 0xff) 732 if (reg1 < 0x02 || reg1 == 0xff)
350 return 0; 733 return 0;
351 734
@@ -368,10 +751,15 @@ int __init verify_local_APIC(void)
368 return 1; 751 return 1;
369} 752}
370 753
754/**
755 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
756 */
371void __init sync_Arb_IDs(void) 757void __init sync_Arb_IDs(void)
372{ 758{
373 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 759 /*
 374 And not needed on AMD */ 760 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1, and not
761 * needed on AMD.
762 */
375 if (modern_apic()) 763 if (modern_apic())
376 return; 764 return;
377 /* 765 /*
@@ -384,14 +772,12 @@ void __init sync_Arb_IDs(void)
384 | APIC_DM_INIT); 772 | APIC_DM_INIT);
385} 773}
386 774
387extern void __error_in_apic_c (void);
388
389/* 775/*
390 * An initial setup of the virtual wire mode. 776 * An initial setup of the virtual wire mode.
391 */ 777 */
392void __init init_bsp_APIC(void) 778void __init init_bsp_APIC(void)
393{ 779{
394 unsigned long value, ver; 780 unsigned long value;
395 781
396 /* 782 /*
397 * Don't do the setup now if we have a SMP BIOS as the 783 * Don't do the setup now if we have a SMP BIOS as the
@@ -400,9 +786,6 @@ void __init init_bsp_APIC(void)
400 if (smp_found_config || !cpu_has_apic) 786 if (smp_found_config || !cpu_has_apic)
401 return; 787 return;
402 788
403 value = apic_read(APIC_LVR);
404 ver = GET_APIC_VERSION(value);
405
406 /* 789 /*
407 * Do not trust the local APIC being empty at bootup. 790 * Do not trust the local APIC being empty at bootup.
408 */ 791 */
@@ -414,9 +797,10 @@ void __init init_bsp_APIC(void)
414 value = apic_read(APIC_SPIV); 797 value = apic_read(APIC_SPIV);
415 value &= ~APIC_VECTOR_MASK; 798 value &= ~APIC_VECTOR_MASK;
416 value |= APIC_SPIV_APIC_ENABLED; 799 value |= APIC_SPIV_APIC_ENABLED;
417 800
418 /* This bit is reserved on P4/Xeon and should be cleared */ 801 /* This bit is reserved on P4/Xeon and should be cleared */
419 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15)) 802 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
803 (boot_cpu_data.x86 == 15))
420 value &= ~APIC_SPIV_FOCUS_DISABLED; 804 value &= ~APIC_SPIV_FOCUS_DISABLED;
421 else 805 else
422 value |= APIC_SPIV_FOCUS_DISABLED; 806 value |= APIC_SPIV_FOCUS_DISABLED;
@@ -428,14 +812,17 @@ void __init init_bsp_APIC(void)
428 */ 812 */
429 apic_write_around(APIC_LVT0, APIC_DM_EXTINT); 813 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
430 value = APIC_DM_NMI; 814 value = APIC_DM_NMI;
431 if (!APIC_INTEGRATED(ver)) /* 82489DX */ 815 if (!lapic_is_integrated()) /* 82489DX */
432 value |= APIC_LVT_LEVEL_TRIGGER; 816 value |= APIC_LVT_LEVEL_TRIGGER;
433 apic_write_around(APIC_LVT1, value); 817 apic_write_around(APIC_LVT1, value);
434} 818}
435 819
820/**
821 * setup_local_APIC - setup the local APIC
822 */
436void __devinit setup_local_APIC(void) 823void __devinit setup_local_APIC(void)
437{ 824{
438 unsigned long oldvalue, value, ver, maxlvt; 825 unsigned long oldvalue, value, maxlvt, integrated;
439 int i, j; 826 int i, j;
440 827
441 /* Pound the ESR really hard over the head with a big hammer - mbligh */ 828 /* Pound the ESR really hard over the head with a big hammer - mbligh */
@@ -446,11 +833,7 @@ void __devinit setup_local_APIC(void)
446 apic_write(APIC_ESR, 0); 833 apic_write(APIC_ESR, 0);
447 } 834 }
448 835
449 value = apic_read(APIC_LVR); 836 integrated = lapic_is_integrated();
450 ver = GET_APIC_VERSION(value);
451
452 if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
453 __error_in_apic_c();
454 837
455 /* 838 /*
456 * Double-check whether this APIC is really registered. 839 * Double-check whether this APIC is really registered.
@@ -521,13 +904,10 @@ void __devinit setup_local_APIC(void)
521 * like LRU than MRU (the short-term load is more even across CPUs). 904 * like LRU than MRU (the short-term load is more even across CPUs).
522 * See also the comment in end_level_ioapic_irq(). --macro 905 * See also the comment in end_level_ioapic_irq(). --macro
523 */ 906 */
524#if 1 907
525 /* Enable focus processor (bit==0) */ 908 /* Enable focus processor (bit==0) */
526 value &= ~APIC_SPIV_FOCUS_DISABLED; 909 value &= ~APIC_SPIV_FOCUS_DISABLED;
527#else 910
528 /* Disable focus processor (bit==1) */
529 value |= APIC_SPIV_FOCUS_DISABLED;
530#endif
531 /* 911 /*
532 * Set spurious IRQ vector 912 * Set spurious IRQ vector
533 */ 913 */
@@ -563,17 +943,18 @@ void __devinit setup_local_APIC(void)
563 value = APIC_DM_NMI; 943 value = APIC_DM_NMI;
564 else 944 else
565 value = APIC_DM_NMI | APIC_LVT_MASKED; 945 value = APIC_DM_NMI | APIC_LVT_MASKED;
566 if (!APIC_INTEGRATED(ver)) /* 82489DX */ 946 if (!integrated) /* 82489DX */
567 value |= APIC_LVT_LEVEL_TRIGGER; 947 value |= APIC_LVT_LEVEL_TRIGGER;
568 apic_write_around(APIC_LVT1, value); 948 apic_write_around(APIC_LVT1, value);
569 949
570 if (APIC_INTEGRATED(ver) && !esr_disable) { /* !82489DX */ 950 if (integrated && !esr_disable) { /* !82489DX */
571 maxlvt = get_maxlvt(); 951 maxlvt = lapic_get_maxlvt();
572 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 952 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
573 apic_write(APIC_ESR, 0); 953 apic_write(APIC_ESR, 0);
574 oldvalue = apic_read(APIC_ESR); 954 oldvalue = apic_read(APIC_ESR);
575 955
576 value = ERROR_APIC_VECTOR; // enables sending errors 956 /* enables sending errors */
957 value = ERROR_APIC_VECTOR;
577 apic_write_around(APIC_LVTERR, value); 958 apic_write_around(APIC_LVTERR, value);
578 /* 959 /*
579 * spec says clear errors after enabling vector. 960 * spec says clear errors after enabling vector.
@@ -586,207 +967,30 @@ void __devinit setup_local_APIC(void)
586 "vector: 0x%08lx after: 0x%08lx\n", 967 "vector: 0x%08lx after: 0x%08lx\n",
587 oldvalue, value); 968 oldvalue, value);
588 } else { 969 } else {
589 if (esr_disable) 970 if (esr_disable)
590 /* 971 /*
 591 * Something untraceable is creating bad interrupts on 972 * Something untraceable is creating bad interrupts on
592 * secondary quads ... for the moment, just leave the 973 * secondary quads ... for the moment, just leave the
593 * ESR disabled - we can't do anything useful with the 974 * ESR disabled - we can't do anything useful with the
594 * errors anyway - mbligh 975 * errors anyway - mbligh
595 */ 976 */
596 printk("Leaving ESR disabled.\n"); 977 printk(KERN_INFO "Leaving ESR disabled.\n");
597 else 978 else
598 printk("No ESR for 82489DX.\n"); 979 printk(KERN_INFO "No ESR for 82489DX.\n");
599 } 980 }
600 981
982 /* Disable the local apic timer */
983 value = apic_read(APIC_LVTT);
984 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
985 apic_write_around(APIC_LVTT, value);
986
601 setup_apic_nmi_watchdog(NULL); 987 setup_apic_nmi_watchdog(NULL);
602 apic_pm_activate(); 988 apic_pm_activate();
603} 989}
604 990
605/* 991/*
606 * If Linux enabled the LAPIC against the BIOS default 992 * Detect and initialize APIC
607 * disable it down before re-entering the BIOS on shutdown.
608 * Otherwise the BIOS may get confused and not power-off.
609 * Additionally clear all LVT entries before disable_local_APIC
610 * for the case where Linux didn't enable the LAPIC.
611 */
612void lapic_shutdown(void)
613{
614 unsigned long flags;
615
616 if (!cpu_has_apic)
617 return;
618
619 local_irq_save(flags);
620 clear_local_APIC();
621
622 if (enabled_via_apicbase)
623 disable_local_APIC();
624
625 local_irq_restore(flags);
626}
627
628#ifdef CONFIG_PM
629
630static struct {
631 int active;
632 /* r/w apic fields */
633 unsigned int apic_id;
634 unsigned int apic_taskpri;
635 unsigned int apic_ldr;
636 unsigned int apic_dfr;
637 unsigned int apic_spiv;
638 unsigned int apic_lvtt;
639 unsigned int apic_lvtpc;
640 unsigned int apic_lvt0;
641 unsigned int apic_lvt1;
642 unsigned int apic_lvterr;
643 unsigned int apic_tmict;
644 unsigned int apic_tdcr;
645 unsigned int apic_thmr;
646} apic_pm_state;
647
648static int lapic_suspend(struct sys_device *dev, pm_message_t state)
649{
650 unsigned long flags;
651 int maxlvt;
652
653 if (!apic_pm_state.active)
654 return 0;
655
656 maxlvt = get_maxlvt();
657
658 apic_pm_state.apic_id = apic_read(APIC_ID);
659 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
660 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
661 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
662 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
663 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
664 if (maxlvt >= 4)
665 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
666 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
667 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
668 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
669 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
670 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
671#ifdef CONFIG_X86_MCE_P4THERMAL
672 if (maxlvt >= 5)
673 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
674#endif
675
676 local_irq_save(flags);
677 disable_local_APIC();
678 local_irq_restore(flags);
679 return 0;
680}
681
682static int lapic_resume(struct sys_device *dev)
683{
684 unsigned int l, h;
685 unsigned long flags;
686 int maxlvt;
687
688 if (!apic_pm_state.active)
689 return 0;
690
691 maxlvt = get_maxlvt();
692
693 local_irq_save(flags);
694
695 /*
696 * Make sure the APICBASE points to the right address
697 *
698 * FIXME! This will be wrong if we ever support suspend on
699 * SMP! We'll need to do this as part of the CPU restore!
700 */
701 rdmsr(MSR_IA32_APICBASE, l, h);
702 l &= ~MSR_IA32_APICBASE_BASE;
703 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
704 wrmsr(MSR_IA32_APICBASE, l, h);
705
706 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
707 apic_write(APIC_ID, apic_pm_state.apic_id);
708 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
709 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
710 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
711 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
712 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
713 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
714#ifdef CONFIG_X86_MCE_P4THERMAL
715 if (maxlvt >= 5)
716 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
717#endif
718 if (maxlvt >= 4)
719 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
720 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
721 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
722 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
723 apic_write(APIC_ESR, 0);
724 apic_read(APIC_ESR);
725 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
726 apic_write(APIC_ESR, 0);
727 apic_read(APIC_ESR);
728 local_irq_restore(flags);
729 return 0;
730}
731
732/*
733 * This device has no shutdown method - fully functioning local APICs
734 * are needed on every CPU up until machine_halt/restart/poweroff.
735 */ 993 */
736
737static struct sysdev_class lapic_sysclass = {
738 set_kset_name("lapic"),
739 .resume = lapic_resume,
740 .suspend = lapic_suspend,
741};
742
743static struct sys_device device_lapic = {
744 .id = 0,
745 .cls = &lapic_sysclass,
746};
747
748static void __devinit apic_pm_activate(void)
749{
750 apic_pm_state.active = 1;
751}
752
753static int __init init_lapic_sysfs(void)
754{
755 int error;
756
757 if (!cpu_has_apic)
758 return 0;
759 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
760
761 error = sysdev_class_register(&lapic_sysclass);
762 if (!error)
763 error = sysdev_register(&device_lapic);
764 return error;
765}
766device_initcall(init_lapic_sysfs);
767
768#else /* CONFIG_PM */
769
770static void apic_pm_activate(void) { }
771
772#endif /* CONFIG_PM */
773
774/*
775 * Detect and enable local APICs on non-SMP boards.
776 * Original code written by Keir Fraser.
777 */
778
779static int __init apic_set_verbosity(char *str)
780{
781 if (strcmp("debug", str) == 0)
782 apic_verbosity = APIC_DEBUG;
783 else if (strcmp("verbose", str) == 0)
784 apic_verbosity = APIC_VERBOSE;
785 return 1;
786}
787
788__setup("apic=", apic_set_verbosity);
789
790static int __init detect_init_APIC (void) 994static int __init detect_init_APIC (void)
791{ 995{
792 u32 h, l, features; 996 u32 h, l, features;
@@ -798,7 +1002,7 @@ static int __init detect_init_APIC (void)
798 switch (boot_cpu_data.x86_vendor) { 1002 switch (boot_cpu_data.x86_vendor) {
799 case X86_VENDOR_AMD: 1003 case X86_VENDOR_AMD:
800 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || 1004 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
801 (boot_cpu_data.x86 == 15)) 1005 (boot_cpu_data.x86 == 15))
802 break; 1006 break;
803 goto no_apic; 1007 goto no_apic;
804 case X86_VENDOR_INTEL: 1008 case X86_VENDOR_INTEL:
@@ -812,23 +1016,23 @@ static int __init detect_init_APIC (void)
812 1016
813 if (!cpu_has_apic) { 1017 if (!cpu_has_apic) {
814 /* 1018 /*
 815 * Override BIOS and try to enable the local 1019 * Override the BIOS and try to enable the local APIC only if
 816 * APIC only if "lapic" specified. 1020 * "lapic" was specified.
817 */ 1021 */
818 if (enable_local_apic <= 0) { 1022 if (enable_local_apic <= 0) {
819 printk("Local APIC disabled by BIOS -- " 1023 printk(KERN_INFO "Local APIC disabled by BIOS -- "
820 "you can enable it with \"lapic\"\n"); 1024 "you can enable it with \"lapic\"\n");
821 return -1; 1025 return -1;
822 } 1026 }
823 /* 1027 /*
824 * Some BIOSes disable the local APIC in the 1028 * Some BIOSes disable the local APIC in the APIC_BASE
825 * APIC_BASE MSR. This can only be done in 1029 * MSR. This can only be done in software for Intel P6 or later
826 * software for Intel P6 or later and AMD K7 1030 * and AMD K7 (Model > 1) or later.
827 * (Model > 1) or later.
828 */ 1031 */
829 rdmsr(MSR_IA32_APICBASE, l, h); 1032 rdmsr(MSR_IA32_APICBASE, l, h);
830 if (!(l & MSR_IA32_APICBASE_ENABLE)) { 1033 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
831 printk("Local APIC disabled by BIOS -- reenabling.\n"); 1034 printk(KERN_INFO
1035 "Local APIC disabled by BIOS -- reenabling.\n");
832 l &= ~MSR_IA32_APICBASE_BASE; 1036 l &= ~MSR_IA32_APICBASE_BASE;
833 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; 1037 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
834 wrmsr(MSR_IA32_APICBASE, l, h); 1038 wrmsr(MSR_IA32_APICBASE, l, h);
@@ -841,7 +1045,7 @@ static int __init detect_init_APIC (void)
841 */ 1045 */
842 features = cpuid_edx(1); 1046 features = cpuid_edx(1);
843 if (!(features & (1 << X86_FEATURE_APIC))) { 1047 if (!(features & (1 << X86_FEATURE_APIC))) {
844 printk("Could not enable APIC!\n"); 1048 printk(KERN_WARNING "Could not enable APIC!\n");
845 return -1; 1049 return -1;
846 } 1050 }
847 set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1051 set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
@@ -855,17 +1059,20 @@ static int __init detect_init_APIC (void)
855 if (nmi_watchdog != NMI_NONE) 1059 if (nmi_watchdog != NMI_NONE)
856 nmi_watchdog = NMI_LOCAL_APIC; 1060 nmi_watchdog = NMI_LOCAL_APIC;
857 1061
858 printk("Found and enabled local APIC!\n"); 1062 printk(KERN_INFO "Found and enabled local APIC!\n");
859 1063
860 apic_pm_activate(); 1064 apic_pm_activate();
861 1065
862 return 0; 1066 return 0;
863 1067
864no_apic: 1068no_apic:
865 printk("No local APIC present or hardware disabled\n"); 1069 printk(KERN_INFO "No local APIC present or hardware disabled\n");
866 return -1; 1070 return -1;
867} 1071}
868 1072
1073/**
1074 * init_apic_mappings - initialize APIC mappings
1075 */
869void __init init_apic_mappings(void) 1076void __init init_apic_mappings(void)
870{ 1077{
871 unsigned long apic_phys; 1078 unsigned long apic_phys;
@@ -925,385 +1132,92 @@ fake_ioapic_page:
925} 1132}
926 1133
927/* 1134/*
928 * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts 1135 * This initializes the IO-APIC and APIC hardware if this is
929 * per second. We assume that the caller has already set up the local 1136 * a UP kernel.
930 * APIC.
931 *
932 * The APIC timer is not exactly sync with the external timer chip, it
933 * closely follows bus clocks.
934 */
935
936/*
937 * The timer chip is already set up at HZ interrupts per second here,
938 * but we do not accept timer interrupts yet. We only allow the BP
939 * to calibrate.
940 */
941static unsigned int __devinit get_8254_timer_count(void)
942{
943 unsigned long flags;
944
945 unsigned int count;
946
947 spin_lock_irqsave(&i8253_lock, flags);
948
949 outb_p(0x00, PIT_MODE);
950 count = inb_p(PIT_CH0);
951 count |= inb_p(PIT_CH0) << 8;
952
953 spin_unlock_irqrestore(&i8253_lock, flags);
954
955 return count;
956}
957
958/* next tick in 8254 can be caught by catching timer wraparound */
959static void __devinit wait_8254_wraparound(void)
960{
961 unsigned int curr_count, prev_count;
962
963 curr_count = get_8254_timer_count();
964 do {
965 prev_count = curr_count;
966 curr_count = get_8254_timer_count();
967
968 /* workaround for broken Mercury/Neptune */
969 if (prev_count >= curr_count + 0x100)
970 curr_count = get_8254_timer_count();
971
972 } while (prev_count >= curr_count);
973}
974
975/*
976 * Default initialization for 8254 timers. If we use other timers like HPET,
977 * we override this later
978 */
979void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
980
981/*
982 * This function sets up the local APIC timer, with a timeout of
983 * 'clocks' APIC bus clock. During calibration we actually call
984 * this function twice on the boot CPU, once with a bogus timeout
985 * value, second time for real. The other (noncalibrating) CPUs
986 * call this function only once, with the real, calibrated value.
987 *
988 * We do reads before writes even if unnecessary, to get around the
989 * P5 APIC double write bug.
990 */ 1137 */
991 1138int __init APIC_init_uniprocessor (void)
992#define APIC_DIVISOR 16
993
994static void __setup_APIC_LVTT(unsigned int clocks)
995{ 1139{
996 unsigned int lvtt_value, tmp_value, ver; 1140 if (enable_local_apic < 0)
997 int cpu = smp_processor_id(); 1141 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
998
999 ver = GET_APIC_VERSION(apic_read(APIC_LVR));
1000 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
1001 if (!APIC_INTEGRATED(ver))
1002 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
1003
1004 if (cpu_isset(cpu, timer_bcast_ipi))
1005 lvtt_value |= APIC_LVT_MASKED;
1006 1142
1007 apic_write_around(APIC_LVTT, lvtt_value); 1143 if (!smp_found_config && !cpu_has_apic)
1144 return -1;
1008 1145
1009 /* 1146 /*
1010 * Divide PICLK by 16 1147 * Complain if the BIOS pretends there is one.
1011 */ 1148 */
1012 tmp_value = apic_read(APIC_TDCR); 1149 if (!cpu_has_apic &&
1013 apic_write_around(APIC_TDCR, (tmp_value 1150 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1014 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) 1151 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1015 | APIC_TDR_DIV_16); 1152 boot_cpu_physical_apicid);
1016 1153 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1017 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); 1154 return -1;
1018} 1155 }
1019 1156
1020static void __devinit setup_APIC_timer(unsigned int clocks) 1157 verify_local_APIC();
1021{
1022 unsigned long flags;
1023 1158
1024 local_irq_save(flags); 1159 connect_bsp_APIC();
1025 1160
1026 /* 1161 /*
1027 * Wait for IRQ0's slice: 1162 * Hack: In case of kdump, after a crash, kernel might be booting
1163 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1164 * might be zero if read from MP tables. Get it from LAPIC.
1028 */ 1165 */
1029 wait_timer_tick(); 1166#ifdef CONFIG_CRASH_DUMP
1167 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1168#endif
1169 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
1030 1170
1031 __setup_APIC_LVTT(clocks); 1171 setup_local_APIC();
1032 1172
1033 local_irq_restore(flags); 1173#ifdef CONFIG_X86_IO_APIC
1174 if (smp_found_config)
1175 if (!skip_ioapic_setup && nr_ioapics)
1176 setup_IO_APIC();
1177#endif
1178 setup_boot_clock();
1179
1180 return 0;
1034} 1181}
1035 1182
1036/* 1183/*
1037 * In this function we calibrate APIC bus clocks to the external 1184 * APIC command line parameters
1038 * timer. Unfortunately we cannot use jiffies and the timer irq
1039 * to calibrate, since some later bootup code depends on getting
1040 * the first irq? Ugh.
1041 *
1042 * We want to do the calibration only once since we
1043 * want to have local timer irqs syncron. CPUs connected
1044 * by the same APIC bus have the very same bus frequency.
1045 * And we want to have irqs off anyways, no accidental
1046 * APIC irq that way.
1047 */ 1185 */
1048 1186static int __init parse_lapic(char *arg)
1049static int __init calibrate_APIC_clock(void)
1050{
1051 unsigned long long t1 = 0, t2 = 0;
1052 long tt1, tt2;
1053 long result;
1054 int i;
1055 const int LOOPS = HZ/10;
1056
1057 apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n");
1058
1059 /*
1060 * Put whatever arbitrary (but long enough) timeout
1061 * value into the APIC clock, we just want to get the
1062 * counter running for calibration.
1063 */
1064 __setup_APIC_LVTT(1000000000);
1065
1066 /*
1067 * The timer chip counts down to zero. Let's wait
1068 * for a wraparound to start exact measurement:
1069 * (the current tick might have been already half done)
1070 */
1071
1072 wait_timer_tick();
1073
1074 /*
1075 * We wrapped around just now. Let's start:
1076 */
1077 if (cpu_has_tsc)
1078 rdtscll(t1);
1079 tt1 = apic_read(APIC_TMCCT);
1080
1081 /*
 1082 * Let's wait LOOPS wraparounds:
1083 */
1084 for (i = 0; i < LOOPS; i++)
1085 wait_timer_tick();
1086
1087 tt2 = apic_read(APIC_TMCCT);
1088 if (cpu_has_tsc)
1089 rdtscll(t2);
1090
1091 /*
1092 * The APIC bus clock counter is 32 bits only, it
1093 * might have overflown, but note that we use signed
1094 * longs, thus no extra care needed.
1095 *
1096 * underflown to be exact, as the timer counts down ;)
1097 */
1098
1099 result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
1100
1101 if (cpu_has_tsc)
1102 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
1103 "%ld.%04ld MHz.\n",
1104 ((long)(t2-t1)/LOOPS)/(1000000/HZ),
1105 ((long)(t2-t1)/LOOPS)%(1000000/HZ));
1106
1107 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
1108 "%ld.%04ld MHz.\n",
1109 result/(1000000/HZ),
1110 result%(1000000/HZ));
1111
1112 return result;
1113}
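The calibration above reduces to simple integer arithmetic: the APIC timer counts down, so tt1 - tt2 is the number of divided bus clocks burned during LOOPS external timer ticks, and scaling by APIC_DIVISOR/LOOPS yields bus clocks per jiffy. A minimal user-space sketch with invented counter values (APIC_DIVISOR = 16 and HZ = 100 mirror the i386 defaults; the TMCCT readings are made up for the demo):

    #include <stdio.h>

    #define APIC_DIVISOR 16
    #define HZ 100

    int main(void)
    {
        const int LOOPS = HZ / 10;          /* 10 ticks, as above */
        long tt1 = 4000000, tt2 = 3166800;  /* hypothetical TMCCT reads */
        /* same formula as calibrate_APIC_clock(): bus clocks per jiffy */
        long result = (tt1 - tt2) * APIC_DIVISOR / LOOPS;

        printf("host bus clock: %ld.%04ld MHz\n",
               result / (1000000 / HZ), result % (1000000 / HZ));
        return 0;
    }

With these sample numbers the sketch prints 133.3120 MHz, a plausible host bus speed for this era of hardware.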
1114
1115static unsigned int calibration_result;
1116
1117void __init setup_boot_APIC_clock(void)
1118{
1119 unsigned long flags;
1120 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
1121 using_apic_timer = 1;
1122
1123 local_irq_save(flags);
1124
1125 calibration_result = calibrate_APIC_clock();
1126 /*
1127 * Now set up the timer for real.
1128 */
1129 setup_APIC_timer(calibration_result);
1130
1131 local_irq_restore(flags);
1132}
1133
1134void __devinit setup_secondary_APIC_clock(void)
1135{
1136 setup_APIC_timer(calibration_result);
1137}
1138
1139void disable_APIC_timer(void)
1140{
1141 if (using_apic_timer) {
1142 unsigned long v;
1143
1144 v = apic_read(APIC_LVTT);
1145 /*
1146 * When an illegal vector value (0-15) is written to an LVT
1147 * entry and delivery mode is Fixed, the APIC may signal an
1148	 * illegal vector error, without regard to whether the mask
1149 * bit is set or whether an interrupt is actually seen on input.
1150 *
1151 * Boot sequence might call this function when the LVTT has
1152 * '0' vector value. So make sure vector field is set to
1153 * valid value.
1154 */
1155 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1156 apic_write_around(APIC_LVTT, v);
1157 }
1158}
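The comment above describes a real APIC quirk: masking alone is not enough if the vector field still holds an illegal value (0-15). A plain bit-arithmetic sketch of the fix, using illustrative stand-in constants rather than the kernel's real register definitions:

    #include <stdio.h>
    #include <assert.h>

    /* Illustrative stand-ins for the LVT bit layout discussed above. */
    #define APIC_LVT_MASKED     (1u << 16)
    #define LOCAL_TIMER_VECTOR  0xef        /* any vector >= 16 is legal */

    int main(void)
    {
        unsigned int v = 0;             /* LVTT as left by early boot code */

        /* Masking alone would leave an illegal vector (0-15) in place;
         * OR in a valid vector together with the mask bit, as the driver does. */
        v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
        assert((v & 0xff) >= 16);       /* vector field now legal */
        assert(v & APIC_LVT_MASKED);    /* and the entry is masked */
        printf("LVTT = %#x\n", v);
        return 0;
    }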
1159
-void enable_APIC_timer(void)
-{
-	int cpu = smp_processor_id();
-
-	if (using_apic_timer &&
-	    !cpu_isset(cpu, timer_bcast_ipi)) {
-		unsigned long v;
-
-		v = apic_read(APIC_LVTT);
-		apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
-	}
-}
-
-void switch_APIC_timer_to_ipi(void *cpumask)
-{
-	cpumask_t mask = *(cpumask_t *)cpumask;
-	int cpu = smp_processor_id();
-
-	if (cpu_isset(cpu, mask) &&
-	    !cpu_isset(cpu, timer_bcast_ipi)) {
-		disable_APIC_timer();
-		cpu_set(cpu, timer_bcast_ipi);
-	}
-}
-EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
-
-void switch_ipi_to_APIC_timer(void *cpumask)
-{
-	cpumask_t mask = *(cpumask_t *)cpumask;
-	int cpu = smp_processor_id();
-
-	if (cpu_isset(cpu, mask) &&
-	    cpu_isset(cpu, timer_bcast_ipi)) {
-		cpu_clear(cpu, timer_bcast_ipi);
-		enable_APIC_timer();
-	}
-}
-EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
-
-#undef APIC_DIVISOR
+{
+	enable_local_apic = 1;
+	return 0;
+}
+early_param("lapic", parse_lapic);
+
+static int __init parse_nolapic(char *arg)
+{
+	enable_local_apic = -1;
+	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+	return 0;
+}
+early_param("nolapic", parse_nolapic);
+
+static int __init apic_set_verbosity(char *str)
+{
+	if (strcmp("debug", str) == 0)
+		apic_verbosity = APIC_DEBUG;
+	else if (strcmp("verbose", str) == 0)
+		apic_verbosity = APIC_VERBOSE;
+	return 1;
+}
+
+__setup("apic=", apic_set_verbosity);
1200
1201/*
1202 * Local timer interrupt handler. It does both profiling and
1203 * process statistics/rescheduling.
1204 *
1205 * We do profiling in every local tick, statistics/rescheduling
1206 * happen only every 'profiling multiplier' ticks. The default
1207 * multiplier is 1 and it can be changed by writing the new multiplier
1208 * value into /proc/profile.
1209 */
1210
1211inline void smp_local_timer_interrupt(void)
1212{
1213 profile_tick(CPU_PROFILING);
1214#ifdef CONFIG_SMP
1215 update_process_times(user_mode_vm(get_irq_regs()));
1216#endif
1218 /*
1219 * We take the 'long' return path, and there every subsystem
1220	 * grabs the appropriate locks (kernel lock / irq lock).
1221 *
1222 * we might want to decouple profiling from the 'long path',
1223 * and do the profiling totally in assembly.
1224 *
1225 * Currently this isn't too much of an issue (performance wise),
1226 * we can take more than 100K local irqs per second on a 100 MHz P5.
1227 */
1228}
 /*
- * Local APIC timer interrupt. This is the most natural way for doing
- * local interrupts, but local timer interrupts can be emulated by
- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
- *
- * [ if a single-CPU system runs an SMP kernel then we call the local
- *   interrupt as well. Thus we cannot inline the local irq ... ]
+ * Local APIC interrupts
  */
 
1239fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
1240{
1241 struct pt_regs *old_regs = set_irq_regs(regs);
1242 int cpu = smp_processor_id();
1243
1244 /*
1245 * the NMI deadlock-detector uses this.
1246 */
1247 per_cpu(irq_stat, cpu).apic_timer_irqs++;
1248
1249 /*
1250 * NOTE! We'd better ACK the irq immediately,
1251 * because timer handling can be slow.
1252 */
1253 ack_APIC_irq();
1254 /*
1255 * update_process_times() expects us to have done irq_enter().
1256	 * Besides, if we don't, timer interrupts ignore the global
1257 * interrupt lock, which is the WrongThing (tm) to do.
1258 */
1259 exit_idle();
1260 irq_enter();
1261 smp_local_timer_interrupt();
1262 irq_exit();
1263 set_irq_regs(old_regs);
1264}
1265
1266#ifndef CONFIG_SMP
1267static void up_apic_timer_interrupt_call(void)
1268{
1269 int cpu = smp_processor_id();
1270
1271 /*
1272 * the NMI deadlock-detector uses this.
1273 */
1274 per_cpu(irq_stat, cpu).apic_timer_irqs++;
1275
1276 smp_local_timer_interrupt();
1277}
1278#endif
1279
1280void smp_send_timer_broadcast_ipi(void)
1281{
1282 cpumask_t mask;
1283
1284 cpus_and(mask, cpu_online_map, timer_bcast_ipi);
1285 if (!cpus_empty(mask)) {
1286#ifdef CONFIG_SMP
1287 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
1288#else
1289 /*
1290 * We can directly call the apic timer interrupt handler
1291	 * in the UP case, minus all irq-related functions
1292 */
1293 up_apic_timer_interrupt_call();
1294#endif
1295 }
1296}
1297
1298int setup_profiling_timer(unsigned int multiplier)
1299{
1300 return -EINVAL;
1301}
1302
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-fastcall void smp_spurious_interrupt(struct pt_regs *regs)
+void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	unsigned long v;
 
@@ -1319,16 +1233,15 @@ fastcall void smp_spurious_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 
 	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
-	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
-			smp_processor_id());
+	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
+		"should never happen.\n", smp_processor_id());
 	irq_exit();
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-
-fastcall void smp_error_interrupt(struct pt_regs *regs)
+void smp_error_interrupt(struct pt_regs *regs)
 {
 	unsigned long v, v1;
 
@@ -1352,69 +1265,261 @@ fastcall void smp_error_interrupt(struct pt_regs *regs)
 	   7: Illegal register address
 	*/
 	printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
 		smp_processor_id(), v , v1);
 	irq_exit();
 }
 
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
-int __init APIC_init_uniprocessor (void)
-{
-	if (enable_local_apic < 0)
-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-
-	if (!smp_found_config && !cpu_has_apic)
-		return -1;
-
-	/*
-	 * Complain if the BIOS pretends there is one.
-	 */
-	if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-		       boot_cpu_physical_apicid);
-		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-		return -1;
-	}
-
+/*
+ * Initialize APIC interrupts
+ */
+void __init apic_intr_init(void)
+{
+#ifdef CONFIG_SMP
+	smp_intr_init();
+#endif
+	/* self generated IPI for local APIC timer */
+	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+
+	/* IPI vectors for APIC spurious and error interrupts */
+	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+	set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+	/* thermal monitor LVT interrupt */
+#ifdef CONFIG_X86_MCE_P4THERMAL
+	set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+#endif
+}
+
+/**
+ * connect_bsp_APIC - attach the APIC to the interrupt system
+ */
+void __init connect_bsp_APIC(void)
+{
+	if (pic_mode) {
+		/*
+		 * Do not trust the local APIC being empty at bootup.
+		 */
+		clear_local_APIC();
+		/*
+		 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
+		 * local APIC to INT and NMI lines.
+		 */
+		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
+				"enabling APIC mode.\n");
+		outb(0x70, 0x22);
+		outb(0x01, 0x23);
+	}
+	enable_apic_mode();
+}
+
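The outb() pair in connect_bsp_APIC() programs the IMCR (Interrupt Mode Configuration Register): port 0x22 selects the register (index 0x70), port 0x23 takes the data, where 0x01 routes INT/NMI through the APIC and 0x00 restores PIC mode. A sketch with stubbed port I/O (real port access would need iopl()/ioperm() and ring-0 style privileges, so the stub only records the writes):

    #include <stdio.h>

    /* Stubbed port write: records what the IMCR sequence would do. */
    static void outb_stub(unsigned char val, unsigned short port)
    {
        printf("outb(0x%02x, 0x%02x)\n", val, port);
    }

    int main(void)
    {
        /* Select the IMCR, then write 0x01 = route INT/NMI through the
         * APIC (connect_bsp_APIC); 0x00 would fall back to PIC mode
         * (disconnect_bsp_APIC below). */
        outb_stub(0x70, 0x22);
        outb_stub(0x01, 0x23);
        return 0;
    }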
-	verify_local_APIC();
-
-	connect_bsp_APIC();
-
-	/*
-	 * Hack: In case of kdump, after a crash, kernel might be booting
-	 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
-	 * might be zero if read from MP tables. Get it from LAPIC.
-	 */
-#ifdef CONFIG_CRASH_DUMP
-	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-#endif
-	phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
-
-	setup_local_APIC();
-
+/**
+ * disconnect_bsp_APIC - detach the APIC from the interrupt system
+ * @virt_wire_setup:	indicates, whether virtual wire mode is selected
+ *
+ * Virtual wire mode is necessary to deliver legacy interrupts even when the
+ * APIC is disabled.
+ */
+void disconnect_bsp_APIC(int virt_wire_setup)
+{
+	if (pic_mode) {
+		/*
+		 * Put the board back into PIC mode (has an effect only on
+		 * certain older boards). Note that APIC interrupts, including
+		 * IPIs, won't work beyond this point! The only exception are
+		 * INIT IPIs.
+		 */
+		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
+				"entering PIC mode.\n");
+		outb(0x70, 0x22);
+		outb(0x00, 0x23);
+	} else {
+		/* Go back to Virtual Wire compatibility mode */
+		unsigned long value;
+
+		/* For the spurious interrupt use vector F, and enable it */
+		value = apic_read(APIC_SPIV);
+		value &= ~APIC_VECTOR_MASK;
+		value |= APIC_SPIV_APIC_ENABLED;
+		value |= 0xf;
+		apic_write_around(APIC_SPIV, value);
+
+		if (!virt_wire_setup) {
+			/*
+			 * For LVT0 make it edge triggered, active high,
+			 * external and enabled
+			 */
+			value = apic_read(APIC_LVT0);
+			value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+				APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+				APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
+			value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+			value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
+			apic_write_around(APIC_LVT0, value);
+		} else {
+			/* Disable LVT0 */
+			apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
+		}
+
+		/*
+		 * For LVT1 make it edge triggered, active high, nmi and
+		 * enabled
+		 */
+		value = apic_read(APIC_LVT1);
+		value &= ~(
+			APIC_MODE_MASK | APIC_SEND_PENDING |
+			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
+		apic_write_around(APIC_LVT1, value);
+	}
+}
+
-#ifdef CONFIG_X86_IO_APIC
-	if (smp_found_config)
-		if (!skip_ioapic_setup && nr_ioapics)
-			setup_IO_APIC();
-#endif
-	setup_boot_clock();
-
-	return 0;
-}
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static struct {
+	int active;
+	/* r/w apic fields */
+	unsigned int apic_id;
+	unsigned int apic_taskpri;
+	unsigned int apic_ldr;
+	unsigned int apic_dfr;
+	unsigned int apic_spiv;
+	unsigned int apic_lvtt;
+	unsigned int apic_lvtpc;
+	unsigned int apic_lvt0;
+	unsigned int apic_lvt1;
+	unsigned int apic_lvterr;
+	unsigned int apic_tmict;
+	unsigned int apic_tdcr;
+	unsigned int apic_thmr;
+} apic_pm_state;
+
+static int lapic_suspend(struct sys_device *dev, pm_message_t state)
+{
+	unsigned long flags;
+	int maxlvt;
+
+	if (!apic_pm_state.active)
+		return 0;
+
+	maxlvt = lapic_get_maxlvt();
+
+	apic_pm_state.apic_id = apic_read(APIC_ID);
+	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
+	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
+	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
+	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
+	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
+	if (maxlvt >= 4)
+		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
+	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
+	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
+	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
+	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+	if (maxlvt >= 5)
+		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
+
+	local_irq_save(flags);
+	disable_local_APIC();
+	local_irq_restore(flags);
+	return 0;
+}
-
-static int __init parse_lapic(char *arg)
-{
-	lapic_enable();
-	return 0;
-}
-early_param("lapic", parse_lapic);
-
-static int __init parse_nolapic(char *arg)
-{
-	lapic_disable();
-	return 0;
-}
-early_param("nolapic", parse_nolapic);
-
+
+static int lapic_resume(struct sys_device *dev)
+{
+	unsigned int l, h;
+	unsigned long flags;
+	int maxlvt;
+
+	if (!apic_pm_state.active)
+		return 0;
+
+	maxlvt = lapic_get_maxlvt();
+
+	local_irq_save(flags);
+
+	/*
+	 * Make sure the APICBASE points to the right address
+	 *
+	 * FIXME! This will be wrong if we ever support suspend on
+	 * SMP! We'll need to do this as part of the CPU restore!
+	 */
+	rdmsr(MSR_IA32_APICBASE, l, h);
+	l &= ~MSR_IA32_APICBASE_BASE;
+	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+	wrmsr(MSR_IA32_APICBASE, l, h);
+
+	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
+	apic_write(APIC_ID, apic_pm_state.apic_id);
+	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
+	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
+	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
+	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
+	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
+	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+	if (maxlvt >= 5)
+		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+	if (maxlvt >= 4)
+		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
+	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
+	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
+	apic_write(APIC_ESR, 0);
+	apic_read(APIC_ESR);
+	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
+	apic_write(APIC_ESR, 0);
+	apic_read(APIC_ESR);
+	local_irq_restore(flags);
+	return 0;
+}
+
+/*
+ * This device has no shutdown method - fully functioning local APICs
+ * are needed on every CPU up until machine_halt/restart/poweroff.
+ */
+
+static struct sysdev_class lapic_sysclass = {
+	set_kset_name("lapic"),
+	.resume		= lapic_resume,
+	.suspend	= lapic_suspend,
+};
+
+static struct sys_device device_lapic = {
+	.id	= 0,
+	.cls	= &lapic_sysclass,
+};
+
+static void __devinit apic_pm_activate(void)
+{
+	apic_pm_state.active = 1;
+}
+
1506static int __init init_lapic_sysfs(void)
1507{
1508 int error;
1509
1510 if (!cpu_has_apic)
1511 return 0;
1512 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1513
1514 error = sysdev_class_register(&lapic_sysclass);
1515 if (!error)
1516 error = sysdev_register(&device_lapic);
1517 return error;
1518}
1519device_initcall(init_lapic_sysfs);
1520
1521#else /* CONFIG_PM */
1522
1523static void apic_pm_activate(void) { }
1524
1525#endif /* CONFIG_PM */
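lapic_suspend()/lapic_resume() above follow a generic snapshot-and-replay pattern: read every writable register into a static struct before power-down, then replay the values in a safe order on resume. A toy version against a fake two-register device (everything here is invented; it only illustrates the shape of the pattern):

    #include <stdio.h>

    /* Toy device: two registers standing in for the APIC register file. */
    static unsigned int reg[2];

    static unsigned int dev_read(int i)          { return reg[i]; }
    static void dev_write(int i, unsigned int v) { reg[i] = v; }

    static struct { int active; unsigned int r0, r1; } pm_state;

    static void dev_suspend(void)
    {
        pm_state.r0 = dev_read(0);   /* snapshot while still powered */
        pm_state.r1 = dev_read(1);
        pm_state.active = 1;
    }

    static void dev_resume(void)
    {
        if (!pm_state.active)
            return;                  /* never activated: nothing to replay */
        dev_write(0, pm_state.r0);   /* replay in a known-safe order */
        dev_write(1, pm_state.r1);
    }

    int main(void)
    {
        dev_write(0, 0x1f); dev_write(1, 0xef);
        dev_suspend();
        reg[0] = reg[1] = 0;         /* power loss wipes the device */
        dev_resume();
        printf("r0=%#x r1=%#x\n", dev_read(0), dev_read(1));
        return 0;
    }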
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index f9ba0af7ee1f..064bbf2861f4 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -236,7 +236,6 @@
 
 #include "io_ports.h"
 
-extern unsigned long get_cmos_time(void);
 extern void machine_real_restart(unsigned char *, int);
 
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
@@ -1176,28 +1175,6 @@ out:
 	spin_unlock(&user_list_lock);
 }
 
-static void set_time(void)
-{
-	struct timespec ts;
-	if (got_clock_diff) {	/* Must know time zone in order to set clock */
-		ts.tv_sec = get_cmos_time() + clock_cmos_diff;
-		ts.tv_nsec = 0;
-		do_settimeofday(&ts);
-	}
-}
-
-static void get_time_diff(void)
-{
-#ifndef CONFIG_APM_RTC_IS_GMT
-	/*
-	 * Estimate time zone so that set_time can update the clock
-	 */
-	clock_cmos_diff = -get_cmos_time();
-	clock_cmos_diff += get_seconds();
-	got_clock_diff = 1;
-#endif
-}
-
 static void reinit_timer(void)
 {
 #ifdef INIT_TIMER_AFTER_SUSPEND
@@ -1237,19 +1214,6 @@ static int suspend(int vetoable)
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
 
-	/* serialize with the timer interrupt */
-	write_seqlock(&xtime_lock);
-
-	/* protect against access to timer chip registers */
-	spin_lock(&i8253_lock);
-
-	get_time_diff();
-	/*
-	 * Irq spinlock must be dropped around set_system_power_state.
-	 * We'll undo any timer changes due to interrupts below.
-	 */
-	spin_unlock(&i8253_lock);
-	write_sequnlock(&xtime_lock);
 	local_irq_enable();
 
 	save_processor_state();
@@ -1258,7 +1222,6 @@ static int suspend(int vetoable)
 	restore_processor_state();
 
 	local_irq_disable();
-	set_time();
 	reinit_timer();
 
 	if (err == APM_NO_ERROR)
@@ -1288,11 +1251,6 @@ static void standby(void)
 
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
-	/* serialize with the timer interrupt */
-	write_seqlock(&xtime_lock);
-	/* If needed, notify drivers here */
-	get_time_diff();
-	write_sequnlock(&xtime_lock);
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1386,7 +1344,6 @@ static void check_events(void)
 			ignore_bounce = 1;
 		if ((event != APM_NORMAL_RESUME)
 		    || (ignore_normal_resume == 0)) {
-			set_time();
 			device_resume();
 			pm_send_all(PM_RESUME, (void *)0);
 			queue_event(event, NULL);
@@ -1402,7 +1359,6 @@ static void check_events(void)
 			break;
 
 		case APM_UPDATE_TIME:
-			set_time();
 			break;
 
 		case APM_CRITICAL_SUSPEND:
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 5299c5bf4454..6c52182ca323 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -217,6 +217,15 @@ config X86_LONGHAUL
 
 	  If in doubt, say N.
 
+config X86_E_POWERSAVER
+	tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
+	select CPU_FREQ_TABLE
+	depends on EXPERIMENTAL
+	help
+	  This adds the CPUFreq driver for VIA C7 processors.
+
+	  If in doubt, say N.
+
 comment "shared options"
 
 config X86_ACPI_CPUFREQ_PROC_INTF
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 8de3abe322a9..560f7760dae5 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
 obj-$(CONFIG_X86_POWERNOW_K7)	+= powernow-k7.o
 obj-$(CONFIG_X86_POWERNOW_K8)	+= powernow-k8.o
 obj-$(CONFIG_X86_LONGHAUL)	+= longhaul.o
+obj-$(CONFIG_X86_E_POWERSAVER)	+= e_powersaver.o
 obj-$(CONFIG_ELAN_CPUFREQ)	+= elanfreq.o
 obj-$(CONFIG_SC520_CPUFREQ)	+= sc520_freq.o
 obj-$(CONFIG_X86_LONGRUN)	+= longrun.o
diff --git a/arch/i386/kernel/cpu/cpufreq/e_powersaver.c b/arch/i386/kernel/cpu/cpufreq/e_powersaver.c
new file mode 100644
index 000000000000..f43d98e11cc7
--- /dev/null
+++ b/arch/i386/kernel/cpu/cpufreq/e_powersaver.c
@@ -0,0 +1,334 @@
1/*
2 * Based on documentation provided by Dave Jones. Thanks!
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/cpufreq.h>
13#include <linux/ioport.h>
14#include <linux/slab.h>
15
16#include <asm/msr.h>
17#include <asm/tsc.h>
18#include <asm/timex.h>
19#include <asm/io.h>
20#include <asm/delay.h>
21
22#define EPS_BRAND_C7M 0
23#define EPS_BRAND_C7 1
24#define EPS_BRAND_EDEN 2
25#define EPS_BRAND_C3 3
26
27struct eps_cpu_data {
28 u32 fsb;
29 struct cpufreq_frequency_table freq_table[];
30};
31
32static struct eps_cpu_data *eps_cpu[NR_CPUS];
33
34
35static unsigned int eps_get(unsigned int cpu)
36{
37 struct eps_cpu_data *centaur;
38 u32 lo, hi;
39
40 if (cpu)
41 return 0;
42 centaur = eps_cpu[cpu];
43 if (centaur == NULL)
44 return 0;
45
46 /* Return current frequency */
47 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
48 return centaur->fsb * ((lo >> 8) & 0xff);
49}
50
51static int eps_set_state(struct eps_cpu_data *centaur,
52 unsigned int cpu,
53 u32 dest_state)
54{
55 struct cpufreq_freqs freqs;
56 u32 lo, hi;
57 int err = 0;
58 int i;
59
60 freqs.old = eps_get(cpu);
61 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
62 freqs.cpu = cpu;
63 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
64
65 /* Wait while CPU is busy */
66 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
67 i = 0;
68 while (lo & ((1 << 16) | (1 << 17))) {
69 udelay(16);
70 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
71 i++;
72 if (unlikely(i > 64)) {
73 err = -ENODEV;
74 goto postchange;
75 }
76 }
77 /* Set new multiplier and voltage */
78 wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
79 /* Wait until transition end */
80 i = 0;
81 do {
82 udelay(16);
83 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
84 i++;
85 if (unlikely(i > 64)) {
86 err = -ENODEV;
87 goto postchange;
88 }
89 } while (lo & ((1 << 16) | (1 << 17)));
90
91 /* Return current frequency */
92postchange:
93 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
94 freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
95
96 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
97 return err;
98}
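Both waits in eps_set_state() are the same bounded-polling idiom: re-read a busy flag up to 64 times, 16 microseconds apart, then give up with -ENODEV. A sketch with a simulated status word standing in for MSR_IA32_PERF_STATUS (the clear-after-5-polls behaviour is invented for the demo):

    #include <stdio.h>

    #define EPS_BUSY ((1u << 16) | (1u << 17))  /* same busy bits as above */

    static unsigned int fake_status = EPS_BUSY;

    /* Stand-in for rdmsr(MSR_IA32_PERF_STATUS, ...); clears after 5 polls. */
    static unsigned int read_status(void)
    {
        static int calls;
        if (++calls > 5)
            fake_status &= ~EPS_BUSY;
        return fake_status;
    }

    static int wait_not_busy(void)
    {
        int i = 0;
        while (read_status() & EPS_BUSY) {
            /* udelay(16) would go here */
            if (++i > 64)
                return -1;           /* give up, as the driver does */
        }
        return 0;
    }

    int main(void)
    {
        printf("wait_not_busy() = %d\n", wait_not_busy());
        return 0;
    }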
99
100static int eps_target(struct cpufreq_policy *policy,
101 unsigned int target_freq,
102 unsigned int relation)
103{
104 struct eps_cpu_data *centaur;
105 unsigned int newstate = 0;
106 unsigned int cpu = policy->cpu;
107 unsigned int dest_state;
108 int ret;
109
110 if (unlikely(eps_cpu[cpu] == NULL))
111 return -ENODEV;
112 centaur = eps_cpu[cpu];
113
114 if (unlikely(cpufreq_frequency_table_target(policy,
115 &eps_cpu[cpu]->freq_table[0],
116 target_freq,
117 relation,
118 &newstate))) {
119 return -EINVAL;
120 }
121
122 /* Make frequency transition */
123 dest_state = centaur->freq_table[newstate].index & 0xffff;
124 ret = eps_set_state(centaur, cpu, dest_state);
125 if (ret)
126 printk(KERN_ERR "eps: Timeout!\n");
127 return ret;
128}
129
130static int eps_verify(struct cpufreq_policy *policy)
131{
132 return cpufreq_frequency_table_verify(policy,
133 &eps_cpu[policy->cpu]->freq_table[0]);
134}
135
136static int eps_cpu_init(struct cpufreq_policy *policy)
137{
138 unsigned int i;
139 u32 lo, hi;
140 u64 val;
141 u8 current_multiplier, current_voltage;
142 u8 max_multiplier, max_voltage;
143 u8 min_multiplier, min_voltage;
144 u8 brand;
145 u32 fsb;
146 struct eps_cpu_data *centaur;
147 struct cpufreq_frequency_table *f_table;
148 int k, step, voltage;
149 int ret;
150 int states;
151
152 if (policy->cpu != 0)
153 return -ENODEV;
154
155 /* Check brand */
156 printk("eps: Detected VIA ");
157 rdmsr(0x1153, lo, hi);
158 brand = (((lo >> 2) ^ lo) >> 18) & 3;
159 switch(brand) {
160 case EPS_BRAND_C7M:
161 printk("C7-M\n");
162 break;
163 case EPS_BRAND_C7:
164 printk("C7\n");
165 break;
166 case EPS_BRAND_EDEN:
167 printk("Eden\n");
168 break;
169 case EPS_BRAND_C3:
170 printk("C3\n");
171 return -ENODEV;
172 break;
173 }
174 /* Enable Enhanced PowerSaver */
175 rdmsrl(MSR_IA32_MISC_ENABLE, val);
176 if (!(val & 1 << 16)) {
177 val |= 1 << 16;
178 wrmsrl(MSR_IA32_MISC_ENABLE, val);
179 /* Can be locked at 0 */
180 rdmsrl(MSR_IA32_MISC_ENABLE, val);
181 if (!(val & 1 << 16)) {
182 printk("eps: Can't enable Enhanced PowerSaver\n");
183 return -ENODEV;
184 }
185 }
186
187 /* Print voltage and multiplier */
188 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
189 current_voltage = lo & 0xff;
190 printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
191 current_multiplier = (lo >> 8) & 0xff;
192 printk("eps: Current multiplier = %d\n", current_multiplier);
193
194 /* Print limits */
195 max_voltage = hi & 0xff;
196 printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
197 max_multiplier = (hi >> 8) & 0xff;
198 printk("eps: Highest multiplier = %d\n", max_multiplier);
199 min_voltage = (hi >> 16) & 0xff;
200 printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
201 min_multiplier = (hi >> 24) & 0xff;
202 printk("eps: Lowest multiplier = %d\n", min_multiplier);
203
204 /* Sanity checks */
205 if (current_multiplier == 0 || max_multiplier == 0
206 || min_multiplier == 0)
207 return -EINVAL;
208 if (current_multiplier > max_multiplier
209 || max_multiplier <= min_multiplier)
210 return -EINVAL;
211 if (current_voltage > 0x1c || max_voltage > 0x1c)
212 return -EINVAL;
213 if (max_voltage < min_voltage)
214 return -EINVAL;
215
216 /* Calc FSB speed */
217 fsb = cpu_khz / current_multiplier;
218 /* Calc number of p-states supported */
219 if (brand == EPS_BRAND_C7M)
220 states = max_multiplier - min_multiplier + 1;
221 else
222 states = 2;
223
224 /* Allocate private data and frequency table for current cpu */
225 centaur = kzalloc(sizeof(struct eps_cpu_data)
226 + (states + 1) * sizeof(struct cpufreq_frequency_table),
227 GFP_KERNEL);
228 if (!centaur)
229 return -ENOMEM;
230 eps_cpu[0] = centaur;
231
232 /* Copy basic values */
233 centaur->fsb = fsb;
234
235 /* Fill frequency and MSR value table */
236 f_table = &centaur->freq_table[0];
237 if (brand != EPS_BRAND_C7M) {
238 f_table[0].frequency = fsb * min_multiplier;
239 f_table[0].index = (min_multiplier << 8) | min_voltage;
240 f_table[1].frequency = fsb * max_multiplier;
241 f_table[1].index = (max_multiplier << 8) | max_voltage;
242 f_table[2].frequency = CPUFREQ_TABLE_END;
243 } else {
244 k = 0;
245 step = ((max_voltage - min_voltage) * 256)
246 / (max_multiplier - min_multiplier);
247 for (i = min_multiplier; i <= max_multiplier; i++) {
248 voltage = (k * step) / 256 + min_voltage;
249 f_table[k].frequency = fsb * i;
250 f_table[k].index = (i << 8) | voltage;
251 k++;
252 }
253 f_table[k].frequency = CPUFREQ_TABLE_END;
254 }
255
256 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
257 policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
258 policy->cur = fsb * current_multiplier;
259
260 ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
261 if (ret) {
262 kfree(centaur);
263 return ret;
264 }
265
266 cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
267 return 0;
268}
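For the C7-M branch of eps_cpu_init() above, the voltage assigned to each multiplier is an 8-bit fixed-point interpolation between the minimum and maximum VID. A worked sketch with invented but realistic limits (the VID-to-millivolt formula mV = VID*16 + 700 is the one the driver itself prints with):

    #include <stdio.h>

    int main(void)
    {
        /* Invented but realistic C7-M limits: 4x..16x, VID codes 4..13. */
        int min_multiplier = 4, max_multiplier = 16;
        int min_voltage = 4, max_voltage = 13;
        unsigned int fsb = 100000;  /* kHz */

        /* 8-bit fixed point, exactly as in eps_cpu_init() */
        int step = ((max_voltage - min_voltage) * 256)
                 / (max_multiplier - min_multiplier);
        int i, k = 0;

        for (i = min_multiplier; i <= max_multiplier; i++, k++) {
            int voltage = (k * step) / 256 + min_voltage;
            printf("state %2d: %7u kHz, VID %2d (%4d mV)\n",
                   k, fsb * i, voltage, voltage * 16 + 700);
        }
        return 0;
    }

With these numbers step = 192/256 of a VID code per multiplier step, so the last state lands exactly on the maximum VID.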
269
270static int eps_cpu_exit(struct cpufreq_policy *policy)
271{
272 unsigned int cpu = policy->cpu;
273 struct eps_cpu_data *centaur;
274 u32 lo, hi;
275
276 if (eps_cpu[cpu] == NULL)
277 return -ENODEV;
278 centaur = eps_cpu[cpu];
279
280 /* Get max frequency */
281 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
282 /* Set max frequency */
283 eps_set_state(centaur, cpu, hi & 0xffff);
284 /* Bye */
285 cpufreq_frequency_table_put_attr(policy->cpu);
286 kfree(eps_cpu[cpu]);
287 eps_cpu[cpu] = NULL;
288 return 0;
289}
290
291static struct freq_attr* eps_attr[] = {
292 &cpufreq_freq_attr_scaling_available_freqs,
293 NULL,
294};
295
296static struct cpufreq_driver eps_driver = {
297 .verify = eps_verify,
298 .target = eps_target,
299 .init = eps_cpu_init,
300 .exit = eps_cpu_exit,
301 .get = eps_get,
302 .name = "e_powersaver",
303 .owner = THIS_MODULE,
304 .attr = eps_attr,
305};
306
307static int __init eps_init(void)
308{
309 struct cpuinfo_x86 *c = cpu_data;
310
311 /* This driver will work only on Centaur C7 processors with
312 * Enhanced SpeedStep/PowerSaver registers */
313 if (c->x86_vendor != X86_VENDOR_CENTAUR
314 || c->x86 != 6 || c->x86_model != 10)
315 return -ENODEV;
316 if (!cpu_has(c, X86_FEATURE_EST))
317 return -ENODEV;
318
319 if (cpufreq_register_driver(&eps_driver))
320 return -EINVAL;
321 return 0;
322}
323
324static void __exit eps_exit(void)
325{
326 cpufreq_unregister_driver(&eps_driver);
327}
328
329MODULE_AUTHOR("Rafał Bilski <rafalbilski@interia.pl>");
330MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs.");
331MODULE_LICENSE("GPL");
332
333module_init(eps_init);
334module_exit(eps_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a3db9332d652..b59878a0d9b3 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -8,12 +8,11 @@
  * VIA have currently 3 different versions of Longhaul.
  * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
  * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
- * Version 2 of longhaul is the same as v1, but adds voltage scaling.
- * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C)
- * voltage scaling support has currently been disabled in this driver
- * until we have code that gets it right.
+ * Version 2 of longhaul is backward compatible with v1, but adds the
+ * LONGHAUL MSR for both frequency and voltage scaling.
+ * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
  * Version 3 of longhaul got renamed to Powersaver and redesigned
- * to use the POWERSAVER MSR at 0x110a.
+ * to use only the POWERSAVER MSR at 0x110a.
  * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
  * It's pretty much the same feature-wise as longhaul v2, though
  * there is provision for scaling FSB too, but this doesn't work
@@ -51,10 +50,12 @@
 #define CPU_EZRA	3
 #define CPU_EZRA_T	4
 #define CPU_NEHEMIAH	5
+#define CPU_NEHEMIAH_C	6
 
 /* Flags */
 #define USE_ACPI_C3	 (1 << 1)
 #define USE_NORTHBRIDGE	 (1 << 2)
+#define USE_VT8235	 (1 << 3)
 
 static int cpu_model;
 static unsigned int numscales=16;
@@ -63,7 +64,8 @@ static unsigned int fsb;
 static struct mV_pos *vrm_mV_table;
 static unsigned char *mV_vrm_table;
 struct f_msr {
-	unsigned char vrm;
+	u8 vrm;
+	u8 pos;
 };
 static struct f_msr f_msr_table[32];
 
@@ -73,10 +75,10 @@ static int can_scale_voltage;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
 static u8 longhaul_flags;
+static u8 longhaul_pos;
 
 /* Module parameters */
 static int scale_voltage;
-static int ignore_latency;
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
 
@@ -164,26 +166,47 @@ static void do_longhaul1(unsigned int clock_ratio_index)
 static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 {
 	union msr_longhaul longhaul;
+	u8 dest_pos;
 	u32 t;
 
+	dest_pos = f_msr_table[clock_ratio_index].pos;
+
 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	/* Setup new frequency */
 	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
 	longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
 	longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-	longhaul.bits.EnableSoftBusRatio = 1;
-
-	if (can_scale_voltage) {
+	/* Setup new voltage */
+	if (can_scale_voltage)
 		longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm;
+	/* Sync to timer tick */
+	safe_halt();
+	/* Raise voltage if necessary */
+	if (can_scale_voltage && longhaul_pos < dest_pos) {
 		longhaul.bits.EnableSoftVID = 1;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		/* Change voltage */
+		if (!cx_address) {
+			ACPI_FLUSH_CPU_CACHE();
+			halt();
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+			/* Invoke C3 */
+			inb(cx_address);
+			/* Dummy op - must do something useless after P_LVL3
+			 * read */
+			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		}
+		longhaul.bits.EnableSoftVID = 0;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		longhaul_pos = dest_pos;
 	}
 
-	/* Sync to timer tick */
-	safe_halt();
 	/* Change frequency on next halt or sleep */
+	longhaul.bits.EnableSoftBusRatio = 1;
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	if (!cx_address) {
 		ACPI_FLUSH_CPU_CACHE();
-		/* Invoke C1 */
 		halt();
 	} else {
 		ACPI_FLUSH_CPU_CACHE();
@@ -193,12 +216,29 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 		t = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
 	/* Disable bus ratio bit */
-	local_irq_disable();
-	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
 	longhaul.bits.EnableSoftBusRatio = 0;
-	longhaul.bits.EnableSoftBSEL = 0;
-	longhaul.bits.EnableSoftVID = 0;
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+
+	/* Reduce voltage if necessary */
+	if (can_scale_voltage && longhaul_pos > dest_pos) {
+		longhaul.bits.EnableSoftVID = 1;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		/* Change voltage */
+		if (!cx_address) {
+			ACPI_FLUSH_CPU_CACHE();
+			halt();
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+			/* Invoke C3 */
+			inb(cx_address);
+			/* Dummy op - must do something useless after P_LVL3
+			 * read */
+			t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+		}
+		longhaul.bits.EnableSoftVID = 0;
+		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+		longhaul_pos = dest_pos;
+	}
 }
 
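The restructured do_powersaver() encodes the classic DVFS ordering rule: raise voltage before a frequency increase, and lower it only after a frequency decrease, so the core never runs fast at a voltage chosen for a slower state. Stripped of the MSR and ACPI details, the control flow reduces to this sketch (state values invented):

    #include <stdio.h>

    static int cur_pos = 8;          /* invented current VID position */

    static void set_voltage(int pos) { cur_pos = pos; printf("VID  -> %d\n", pos); }
    static void set_freq(int mult)   { printf("mult -> %d\n", mult); }

    /* Same ordering as do_powersaver(): never run a high multiplier
     * at a voltage that was picked for a lower one. */
    static void transition(int dest_mult, int dest_pos)
    {
        if (dest_pos > cur_pos)
            set_voltage(dest_pos);   /* raise voltage first */
        set_freq(dest_mult);
        if (dest_pos < cur_pos)
            set_voltage(dest_pos);   /* lower voltage last */
    }

    int main(void)
    {
        transition(100, 12);         /* speed up: VID then multiplier */
        transition(40, 4);           /* slow down: multiplier then VID */
        return 0;
    }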
 /**
@@ -257,26 +297,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	/*
 	 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
 	 * Software controlled multipliers only.
-	 *
-	 * *NB* Until we get voltage scaling working v1 & v2 are the same code.
-	 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
 	 */
 	case TYPE_LONGHAUL_V1:
-	case TYPE_LONGHAUL_V2:
 		do_longhaul1(clock_ratio_index);
 		break;
 
 	/*
+	 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
+	 *
 	 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
-	 * We can scale voltage with this too, but that's currently
-	 * disabled until we come up with a decent 'match freq to voltage'
-	 * algorithm.
-	 * When we add voltage scaling, we will also need to do the
-	 * voltage/freq setting in order depending on the direction
-	 * of scaling (like we do in powernow-k7.c)
 	 * Nehemiah can do FSB scaling too, but this has never been proven
 	 * to work in practice.
 	 */
+	case TYPE_LONGHAUL_V2:
 	case TYPE_POWERSAVER:
 		if (longhaul_flags & USE_ACPI_C3) {
 			/* Don't allow wakeup */
@@ -301,6 +334,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	local_irq_restore(flags);
 	preempt_enable();
 
+	freqs.new = calc_speed(longhaul_get_cpu_mult());
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 }
 
@@ -315,31 +349,19 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 
 #define ROUNDING	0xf
 
-static int _guess(int guess, int mult)
-{
-	int target;
-
-	target = ((mult/10)*guess);
-	if (mult%10 != 0)
-		target += (guess/2);
-	target += ROUNDING/2;
-	target &= ~ROUNDING;
-	return target;
-}
-
-
 static int guess_fsb(int mult)
 {
-	int speed = (cpu_khz/1000);
+	int speed = cpu_khz / 1000;
 	int i;
-	int speeds[] = { 66, 100, 133, 200 };
-
-	speed += ROUNDING/2;
-	speed &= ~ROUNDING;
-
-	for (i=0; i<4; i++) {
-		if (_guess(speeds[i], mult) == speed)
-			return speeds[i];
+	int speeds[] = { 666, 1000, 1333, 2000 };
+	int f_max, f_min;
+
+	for (i = 0; i < 4; i++) {
+		f_max = ((speeds[i] * mult) + 50) / 100;
+		f_max += (ROUNDING / 2);
+		f_min = f_max - ROUNDING;
+		if ((speed <= f_max) && (speed >= f_min))
+			return speeds[i] / 10;
 	}
 	return 0;
 }
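The rewritten guess_fsb() keeps the candidate buses as MHz x 10 so that 133.3 and 66.6 MHz survive integer math, and accepts any measured speed within a ROUNDING-wide window around speeds[i] * mult. A worked check with invented numbers:

    #include <stdio.h>

    #define ROUNDING 0xf

    /* Same logic as the new guess_fsb() above. */
    static int guess_fsb(int speed /* MHz */, int mult /* x10 */)
    {
        int speeds[] = { 666, 1000, 1333, 2000 };   /* FSB in MHz*10 */
        int i, f_max, f_min;

        for (i = 0; i < 4; i++) {
            f_max = ((speeds[i] * mult) + 50) / 100;
            f_max += ROUNDING / 2;
            f_min = f_max - ROUNDING;
            if (speed <= f_max && speed >= f_min)
                return speeds[i] / 10;
        }
        return 0;
    }

    int main(void)
    {
        /* A 1.2 GHz part with a 9.0x multiplier: 1333 * 90 / 100 = 1199,
         * so a measured cpu_khz/1000 of 1200 MHz falls inside the
         * [1192, 1207] window for the 133 MHz candidate. */
        printf("fsb = %d MHz\n", guess_fsb(1200, 90));
        return 0;
    }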
@@ -347,67 +369,40 @@ static int guess_fsb(int mult)
 
 static int __init longhaul_get_ranges(void)
 {
-	unsigned long invalue;
-	unsigned int ezra_t_multipliers[32]= {
-			90,  30,  40, 100,  55,  35,  45,  95,
-			50,  70,  80,  60, 120,  75,  85,  65,
-			-1, 110, 120,  -1, 135, 115, 125, 105,
-		       130, 150, 160, 140,  -1, 155,  -1, 145 };
 	unsigned int j, k = 0;
-	union msr_longhaul longhaul;
-	int mult = 0;
+	int mult;
 
-	switch (longhaul_version) {
-	case TYPE_LONGHAUL_V1:
-	case TYPE_LONGHAUL_V2:
-		/* Ugh, Longhaul v1 didn't have the min/max MSRs.
-		   Assume min=3.0x & max = whatever we booted at. */
+	/* Get current frequency */
+	mult = longhaul_get_cpu_mult();
+	if (mult == -1) {
+		printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+		return -EINVAL;
+	}
+	fsb = guess_fsb(mult);
+	if (fsb == 0) {
+		printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+		return -EINVAL;
+	}
+	/* Get max multiplier - as we always did.
+	 * Longhaul MSR is useful only when voltage scaling is enabled.
+	 * C3 is booting at max anyway. */
+	maxmult = mult;
+	/* Get min multiplier */
+	switch (cpu_model) {
+	case CPU_NEHEMIAH:
+		minmult = 50;
+		break;
+	case CPU_NEHEMIAH_C:
+		minmult = 40;
+		break;
+	default:
 		minmult = 30;
-		maxmult = mult = longhaul_get_cpu_mult();
 		break;
-
-	case TYPE_POWERSAVER:
-		/* Ezra-T */
-		if (cpu_model==CPU_EZRA_T) {
-			minmult = 30;
-			rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-			invalue = longhaul.bits.MaxMHzBR;
-			if (longhaul.bits.MaxMHzBR4)
-				invalue += 16;
-			maxmult = mult = ezra_t_multipliers[invalue];
-			break;
-		}
-
-		/* Nehemiah */
-		if (cpu_model==CPU_NEHEMIAH) {
-			rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
-
-			/*
-			 * TODO: This code works, but raises a lot of questions.
-			 * - Some Nehemiah's seem to have broken Min/MaxMHzBR's.
-			 *   We get around this by using a hardcoded multiplier of 4.0x
-			 *   for the minimum speed, and the speed we booted up at for the max.
-			 *   This is done in longhaul_get_cpu_mult() by reading the EBLCR register.
-			 * - According to some VIA documentation EBLCR is only
-			 *   in pre-Nehemiah C3s. How this still works is a mystery.
-			 *   We're possibly using something undocumented and unsupported,
-			 *   But it works, so we don't grumble.
-			 */
-			minmult=40;
-			maxmult = mult = longhaul_get_cpu_mult();
-			break;
-		}
 	}
-	fsb = guess_fsb(mult);
 
 	dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
 		 minmult/10, minmult%10, maxmult/10, maxmult%10);
 
-	if (fsb == 0) {
-		printk (KERN_INFO PFX "Invalid (reserved) FSB!\n");
-		return -EINVAL;
-	}
-
 	highest_speed = calc_speed(maxmult);
 	lowest_speed = calc_speed(minmult);
 	dprintk ("FSB:%dMHz  Lowest speed: %s  Highest speed:%s\n", fsb,
@@ -455,6 +450,7 @@ static void __init longhaul_setup_voltagescaling(void)
 	union msr_longhaul longhaul;
 	struct mV_pos minvid, maxvid;
 	unsigned int j, speed, pos, kHz_step, numvscales;
+	int min_vid_speed;
 
 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	if (!(longhaul.bits.RevisionID & 1)) {
@@ -468,14 +464,14 @@ static void __init longhaul_setup_voltagescaling(void)
 		mV_vrm_table = &mV_vrm85[0];
 	} else {
 		printk (KERN_INFO PFX "Mobile VRM\n");
+		if (cpu_model < CPU_NEHEMIAH)
+			return;
 		vrm_mV_table = &mobilevrm_mV[0];
 		mV_vrm_table = &mV_mobilevrm[0];
 	}
 
 	minvid = vrm_mV_table[longhaul.bits.MinimumVID];
 	maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
-	numvscales = maxvid.pos - minvid.pos + 1;
-	kHz_step = (highest_speed - lowest_speed) / numvscales;
 
 	if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
 		printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
@@ -491,20 +487,59 @@ static void __init longhaul_setup_voltagescaling(void)
 		return;
 	}
 
-	printk(KERN_INFO PFX "Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
+	/* How many voltage steps */
+	numvscales = maxvid.pos - minvid.pos + 1;
+	printk(KERN_INFO PFX
+		"Max VID=%d.%03d  "
+		"Min VID=%d.%03d, "
+		"%d possible voltage scales\n",
 		maxvid.mV/1000, maxvid.mV%1000,
 		minvid.mV/1000, minvid.mV%1000,
 		numvscales);
 
+	/* Calculate max frequency at min voltage */
+	j = longhaul.bits.MinMHzBR;
+	if (longhaul.bits.MinMHzBR4)
+		j += 16;
+	min_vid_speed = eblcr_table[j];
+	if (min_vid_speed == -1)
+		return;
+	switch (longhaul.bits.MinMHzFSB) {
+	case 0:
+		min_vid_speed *= 13333;
+		break;
+	case 1:
+		min_vid_speed *= 10000;
+		break;
+	case 3:
+		min_vid_speed *= 6666;
+		break;
+	default:
+		return;
+		break;
+	}
+	if (min_vid_speed >= highest_speed)
+		return;
+	/* Calculate kHz for one voltage step */
+	kHz_step = (highest_speed - min_vid_speed) / numvscales;
+
+
 	j = 0;
 	while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
 		speed = longhaul_table[j].frequency;
-		pos = (speed - lowest_speed) / kHz_step + minvid.pos;
+		if (speed > min_vid_speed)
+			pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
+		else
+			pos = minvid.pos;
 		f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos];
+		f_msr_table[longhaul_table[j].index].pos = pos;
 		j++;
 	}
 
+	longhaul_pos = maxvid.pos;
 	can_scale_voltage = 1;
+	printk(KERN_INFO PFX "Voltage scaling enabled. "
+		"Use of \"conservative\" governor is highly recommended.\n");
 }
 
 
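A worked pass through the new voltage-step math (all numbers invented): with EBLCR reporting 5.0x at minimum VID on a 133 MHz bus (case 0), min_vid_speed = 50 * 13333 kHz, and each VID position between minvid.pos and maxvid.pos covers one kHz_step of the range up to the highest speed:

    #include <stdio.h>

    int main(void)
    {
        /* Invented: EBLCR says 5.0x at min VID, 133 MHz FSB (case 0). */
        int min_vid_speed = 50 * 13333;     /* = 666650 kHz, as above */
        int highest_speed = 1333000;        /* 10.0x * 133.3 MHz, in kHz */
        int minvid_pos = 4, maxvid_pos = 12;
        int numvscales = maxvid_pos - minvid_pos + 1;   /* 9 VID steps */
        int kHz_step = (highest_speed - min_vid_speed) / numvscales;
        int speed;

        /* Frequencies at or below min_vid_speed run at the minimum VID;
         * above it, one VID position per kHz_step, as in the loop above. */
        for (speed = 600000; speed <= 1200000; speed += 200000) {
            int pos = (speed > min_vid_speed)
                    ? (speed - min_vid_speed) / kHz_step + minvid_pos
                    : minvid_pos;
            printf("%7d kHz -> VID pos %d\n", speed, pos);
        }
        return 0;
    }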
@@ -573,20 +608,51 @@ static int enable_arbiter_disable(void)
 	if (dev != NULL) {
 		/* Enable access to port 0x22 */
 		pci_read_config_byte(dev, reg, &pci_cmd);
-		if ( !(pci_cmd & 1<<7) ) {
+		if (!(pci_cmd & 1<<7)) {
 			pci_cmd |= 1<<7;
 			pci_write_config_byte(dev, reg, pci_cmd);
+			pci_read_config_byte(dev, reg, &pci_cmd);
+			if (!(pci_cmd & 1<<7)) {
+				printk(KERN_ERR PFX
+					"Can't enable access to port 0x22.\n");
+				return 0;
+			}
 		}
 		return 1;
 	}
 	return 0;
 }
 
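The read-back added to enable_arbiter_disable() above is the standard write-then-verify idiom for configuration bits that firmware may have locked. A sketch against a fake, write-locked config byte (the locking behaviour is invented to show the failure path):

    #include <stdio.h>

    /* A fake, write-locked config register standing in for PCI config space. */
    static unsigned char cfg = 0x00;

    static unsigned char cfg_read(void)    { return cfg; }
    static void cfg_write(unsigned char v) { (void)v; /* writes ignored */ }

    int main(void)
    {
        unsigned char pci_cmd = cfg_read();

        if (!(pci_cmd & 1 << 7)) {
            pci_cmd |= 1 << 7;
            cfg_write(pci_cmd);
            pci_cmd = cfg_read();          /* verify, as the patch adds */
            if (!(pci_cmd & 1 << 7)) {
                printf("Can't enable access to port 0x22.\n");
                return 1;
            }
        }
        printf("port 0x22 access enabled\n");
        return 0;
    }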
626static int longhaul_setup_vt8235(void)
627{
628 struct pci_dev *dev;
629 u8 pci_cmd;
630
631 /* Find VT8235 southbridge */
632 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
633 if (dev != NULL) {
634 /* Set transition time to max */
635 pci_read_config_byte(dev, 0xec, &pci_cmd);
636 pci_cmd &= ~(1 << 2);
637 pci_write_config_byte(dev, 0xec, pci_cmd);
638 pci_read_config_byte(dev, 0xe4, &pci_cmd);
639 pci_cmd &= ~(1 << 7);
640 pci_write_config_byte(dev, 0xe4, pci_cmd);
641 pci_read_config_byte(dev, 0xe5, &pci_cmd);
642 pci_cmd |= 1 << 7;
643 pci_write_config_byte(dev, 0xe5, pci_cmd);
644 return 1;
645 }
646 return 0;
647}
648
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpuinfo_x86 *c = cpu_data;
 	char *cpuname=NULL;
 	int ret;
+	u32 lo, hi;
+	int vt8235_present;
 
 	/* Check what we have on this motherboard */
 	switch (c->x86_model) {
@@ -599,16 +665,20 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;
 
 	case 7:
-		longhaul_version = TYPE_LONGHAUL_V1;
 		switch (c->x86_mask) {
 		case 0:
+			longhaul_version = TYPE_LONGHAUL_V1;
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
-			/* Note, this is not a typo, early Samuel2's had Samuel1 ratios. */
-			memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio));
-			memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr));
+			/* Note, this is not a typo, early Samuel2's had
+			 * Samuel1 ratios. */
+			memcpy(clock_ratio, samuel1_clock_ratio,
+				sizeof(samuel1_clock_ratio));
+			memcpy(eblcr_table, samuel2_eblcr,
+				sizeof(samuel2_eblcr));
 			break;
 		case 1 ... 15:
+			longhaul_version = TYPE_LONGHAUL_V2;
 			if (c->x86_mask < 8) {
 				cpu_model = CPU_SAMUEL2;
 				cpuname = "C3 'Samuel 2' [C5B]";
@@ -616,8 +686,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 				cpu_model = CPU_EZRA;
 				cpuname = "C3 'Ezra' [C5C]";
 			}
-			memcpy (clock_ratio, ezra_clock_ratio, sizeof(ezra_clock_ratio));
-			memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr));
+			memcpy(clock_ratio, ezra_clock_ratio,
+				sizeof(ezra_clock_ratio));
+			memcpy(eblcr_table, ezra_eblcr,
+				sizeof(ezra_eblcr));
 			break;
 		}
 		break;
@@ -632,24 +704,24 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;
 
 	case 9:
-		cpu_model = CPU_NEHEMIAH;
 		longhaul_version = TYPE_POWERSAVER;
-		numscales=32;
+		numscales = 32;
+		memcpy(clock_ratio,
+		       nehemiah_clock_ratio,
+		       sizeof(nehemiah_clock_ratio));
+		memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr));
 		switch (c->x86_mask) {
 		case 0 ... 1:
-			cpuname = "C3 'Nehemiah A' [C5N]";
-			memcpy (clock_ratio, nehemiah_a_clock_ratio, sizeof(nehemiah_a_clock_ratio));
-			memcpy (eblcr_table, nehemiah_a_eblcr, sizeof(nehemiah_a_eblcr));
+			cpu_model = CPU_NEHEMIAH;
+			cpuname = "C3 'Nehemiah A' [C5XLOE]";
 			break;
 		case 2 ... 4:
-			cpuname = "C3 'Nehemiah B' [C5N]";
-			memcpy (clock_ratio, nehemiah_b_clock_ratio, sizeof(nehemiah_b_clock_ratio));
-			memcpy (eblcr_table, nehemiah_b_eblcr, sizeof(nehemiah_b_eblcr));
+			cpu_model = CPU_NEHEMIAH;
+			cpuname = "C3 'Nehemiah B' [C5XLOH]";
 			break;
 		case 5 ... 15:
-			cpuname = "C3 'Nehemiah C' [C5N]";
-			memcpy (clock_ratio, nehemiah_c_clock_ratio, sizeof(nehemiah_c_clock_ratio));
-			memcpy (eblcr_table, nehemiah_c_eblcr, sizeof(nehemiah_c_eblcr));
+			cpu_model = CPU_NEHEMIAH_C;
+			cpuname = "C3 'Nehemiah C' [C5P]";
 			break;
 		}
 		break;
@@ -658,6 +730,13 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		cpuname = "Unknown";
 		break;
 	}
+	/* Check Longhaul ver. 2 */
+	if (longhaul_version == TYPE_LONGHAUL_V2) {
+		rdmsr(MSR_VIA_LONGHAUL, lo, hi);
+		if (lo == 0 && hi == 0)
+			/* Looks like MSR isn't present */
+			longhaul_version = TYPE_LONGHAUL_V1;
+	}
 
 	printk (KERN_INFO PFX "VIA %s CPU detected.  ", cpuname);
 	switch (longhaul_version) {
@@ -670,15 +749,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		break;
 	};
 
+	/* Doesn't hurt */
+	vt8235_present = longhaul_setup_vt8235();
+
 	/* Find ACPI data for processor */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
-			    &longhaul_walk_callback, NULL, (void *)&pr);
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+			    ACPI_UINT32_MAX, &longhaul_walk_callback,
+			    NULL, (void *)&pr);
 
 	/* Check ACPI support for C3 state */
-	if ((pr != NULL) && (longhaul_version == TYPE_POWERSAVER)) {
+	if (pr != NULL && longhaul_version != TYPE_LONGHAUL_V1) {
 		cx = &pr->power.states[ACPI_STATE_C3];
-		if (cx->address > 0 &&
-		   (cx->latency <= 1000 || ignore_latency != 0) ) {
+		if (cx->address > 0 && cx->latency <= 1000) {
 			longhaul_flags |= USE_ACPI_C3;
 			goto print_support_type;
 		}
@@ -688,8 +770,11 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		longhaul_flags |= USE_NORTHBRIDGE;
 		goto print_support_type;
 	}
-
-	/* No ACPI C3 or we can't use it */
+	/* Use VT8235 southbridge if present */
+	if (longhaul_version == TYPE_POWERSAVER && vt8235_present) {
+		longhaul_flags |= USE_VT8235;
+		goto print_support_type;
+	}
 	/* Check ACPI support for bus master arbiter disable */
 	if ((pr == NULL) || !(pr->flags.bm_control)) {
 		printk(KERN_ERR PFX
@@ -698,18 +783,18 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	}
 
 print_support_type:
-	if (!(longhaul_flags & USE_NORTHBRIDGE)) {
-		printk (KERN_INFO PFX "Using ACPI support.\n");
-	} else {
+	if (longhaul_flags & USE_NORTHBRIDGE)
 		printk (KERN_INFO PFX "Using northbridge support.\n");
-	}
+	else if (longhaul_flags & USE_VT8235)
+		printk (KERN_INFO PFX "Using VT8235 support.\n");
+	else
+		printk (KERN_INFO PFX "Using ACPI support.\n");
 
 	ret = longhaul_get_ranges();
 	if (ret != 0)
 		return ret;
 
-	if ((longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) &&
-	    (scale_voltage != 0))
+	if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
 		longhaul_setup_voltagescaling();
 
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -797,8 +882,6 @@ static void __exit longhaul_exit(void)
797 882
798module_param (scale_voltage, int, 0644); 883module_param (scale_voltage, int, 0644);
799MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); 884MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
800module_param(ignore_latency, int, 0644);
801MODULE_PARM_DESC(ignore_latency, "Skip ACPI C3 latency test");
802 885
803MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); 886MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
804MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); 887MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
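The init path above picks exactly one transition mechanism, in a fixed priority order. A condensed sketch of that ladder (illustrative only: the enum and helper below are hypothetical, the names mirror the flags in the hunk):

enum support { ACPI_C3, NORTHBRIDGE, VT8235, BM_CONTROL, NONE };

static enum support pick_support(int have_c3, int have_nb,
				 int have_vt8235, int bm_control)
{
	if (have_c3)
		return ACPI_C3;		/* idle the CPU via ACPI C3 */
	if (have_nb)
		return NORTHBRIDGE;	/* northbridge-assisted transition */
	if (have_vt8235)
		return VT8235;		/* VT8235 southbridge support */
	if (bm_control)
		return BM_CONTROL;	/* bus-master arbiter disable */
	return NONE;
}

The ordering matters: earlier options are cheaper or more reliable, and the first one that is available wins, which is why the code uses goto print_support_type rather than falling through.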
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index bc4682aad69b..bb0a04b1d1ab 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -235,84 +235,14 @@ static int __initdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static int __initdata nehemiah_a_clock_ratio[32] = {
+static int __initdata nehemiah_clock_ratio[32] = {
 	100, /* 0000 -> 10.0x */
 	160, /* 0001 -> 16.0x */
-	-1,  /* 0010 -> RESERVED */
-	90,  /* 0011 -> 9.0x */
-	95,  /* 0100 -> 9.5x */
-	-1,  /* 0101 -> RESERVED */
-	-1,  /* 0110 -> RESERVED */
-	55,  /* 0111 -> 5.5x */
-	60,  /* 1000 -> 6.0x */
-	70,  /* 1001 -> 7.0x */
-	80,  /* 1010 -> 8.0x */
-	50,  /* 1011 -> 5.0x */
-	65,  /* 1100 -> 6.5x */
-	75,  /* 1101 -> 7.5x */
-	85,  /* 1110 -> 8.5x */
-	120, /* 1111 -> 12.0x */
-	100, /* 0000 -> 10.0x */
-	-1,  /* 0001 -> RESERVED */
-	120, /* 0010 -> 12.0x */
-	90,  /* 0011 -> 9.0x */
-	105, /* 0100 -> 10.5x */
-	115, /* 0101 -> 11.5x */
-	125, /* 0110 -> 12.5x */
-	135, /* 0111 -> 13.5x */
-	140, /* 1000 -> 14.0x */
-	150, /* 1001 -> 15.0x */
-	160, /* 1010 -> 16.0x */
-	130, /* 1011 -> 13.0x */
-	145, /* 1100 -> 14.5x */
-	155, /* 1101 -> 15.5x */
-	-1,  /* 1110 -> RESERVED (13.0x) */
-	120, /* 1111 -> 12.0x */
-};
-
-static int __initdata nehemiah_b_clock_ratio[32] = {
-	100, /* 0000 -> 10.0x */
-	160, /* 0001 -> 16.0x */
-	-1,  /* 0010 -> RESERVED */
-	90,  /* 0011 -> 9.0x */
-	95,  /* 0100 -> 9.5x */
-	-1,  /* 0101 -> RESERVED */
-	-1,  /* 0110 -> RESERVED */
-	55,  /* 0111 -> 5.5x */
-	60,  /* 1000 -> 6.0x */
-	70,  /* 1001 -> 7.0x */
-	80,  /* 1010 -> 8.0x */
-	50,  /* 1011 -> 5.0x */
-	65,  /* 1100 -> 6.5x */
-	75,  /* 1101 -> 7.5x */
-	85,  /* 1110 -> 8.5x */
-	120, /* 1111 -> 12.0x */
-	100, /* 0000 -> 10.0x */
-	110, /* 0001 -> 11.0x */
-	120, /* 0010 -> 12.0x */
-	90,  /* 0011 -> 9.0x */
-	105, /* 0100 -> 10.5x */
-	115, /* 0101 -> 11.5x */
-	125, /* 0110 -> 12.5x */
-	135, /* 0111 -> 13.5x */
-	140, /* 1000 -> 14.0x */
-	150, /* 1001 -> 15.0x */
-	160, /* 1010 -> 16.0x */
-	130, /* 1011 -> 13.0x */
-	145, /* 1100 -> 14.5x */
-	155, /* 1101 -> 15.5x */
-	-1,  /* 1110 -> RESERVED (13.0x) */
-	120, /* 1111 -> 12.0x */
-};
-
-static int __initdata nehemiah_c_clock_ratio[32] = {
-	100, /* 0000 -> 10.0x */
-	160, /* 0001 -> 16.0x */
-	40,  /* 0010 -> RESERVED */
+	40,  /* 0010 -> 4.0x */
 	90,  /* 0011 -> 9.0x */
 	95,  /* 0100 -> 9.5x */
 	-1,  /* 0101 -> RESERVED */
-	45,  /* 0110 -> RESERVED */
+	45,  /* 0110 -> 4.5x */
 	55,  /* 0111 -> 5.5x */
 	60,  /* 1000 -> 6.0x */
 	70,  /* 1001 -> 7.0x */
@@ -340,84 +270,14 @@ static int __initdata nehemiah_c_clock_ratio[32] = {
 	120, /* 1111 -> 12.0x */
 };
 
-static int __initdata nehemiah_a_eblcr[32] = {
-	50,  /* 0000 -> 5.0x */
-	160, /* 0001 -> 16.0x */
-	-1,  /* 0010 -> RESERVED */
-	100, /* 0011 -> 10.0x */
-	55,  /* 0100 -> 5.5x */
-	-1,  /* 0101 -> RESERVED */
-	-1,  /* 0110 -> RESERVED */
-	95,  /* 0111 -> 9.5x */
-	90,  /* 1000 -> 9.0x */
-	70,  /* 1001 -> 7.0x */
-	80,  /* 1010 -> 8.0x */
-	60,  /* 1011 -> 6.0x */
-	120, /* 1100 -> 12.0x */
-	75,  /* 1101 -> 7.5x */
-	85,  /* 1110 -> 8.5x */
-	65,  /* 1111 -> 6.5x */
-	90,  /* 0000 -> 9.0x */
-	-1,  /* 0001 -> RESERVED */
-	120, /* 0010 -> 12.0x */
-	100, /* 0011 -> 10.0x */
-	135, /* 0100 -> 13.5x */
-	115, /* 0101 -> 11.5x */
-	125, /* 0110 -> 12.5x */
-	105, /* 0111 -> 10.5x */
-	130, /* 1000 -> 13.0x */
-	150, /* 1001 -> 15.0x */
-	160, /* 1010 -> 16.0x */
-	140, /* 1011 -> 14.0x */
-	120, /* 1100 -> 12.0x */
-	155, /* 1101 -> 15.5x */
-	-1,  /* 1110 -> RESERVED (13.0x) */
-	145  /* 1111 -> 14.5x */
-	/* end of table */
-};
-static int __initdata nehemiah_b_eblcr[32] = {
-	50,  /* 0000 -> 5.0x */
-	160, /* 0001 -> 16.0x */
-	-1,  /* 0010 -> RESERVED */
-	100, /* 0011 -> 10.0x */
-	55,  /* 0100 -> 5.5x */
-	-1,  /* 0101 -> RESERVED */
-	-1,  /* 0110 -> RESERVED */
-	95,  /* 0111 -> 9.5x */
-	90,  /* 1000 -> 9.0x */
-	70,  /* 1001 -> 7.0x */
-	80,  /* 1010 -> 8.0x */
-	60,  /* 1011 -> 6.0x */
-	120, /* 1100 -> 12.0x */
-	75,  /* 1101 -> 7.5x */
-	85,  /* 1110 -> 8.5x */
-	65,  /* 1111 -> 6.5x */
-	90,  /* 0000 -> 9.0x */
-	110, /* 0001 -> 11.0x */
-	120, /* 0010 -> 12.0x */
-	100, /* 0011 -> 10.0x */
-	135, /* 0100 -> 13.5x */
-	115, /* 0101 -> 11.5x */
-	125, /* 0110 -> 12.5x */
-	105, /* 0111 -> 10.5x */
-	130, /* 1000 -> 13.0x */
-	150, /* 1001 -> 15.0x */
-	160, /* 1010 -> 16.0x */
-	140, /* 1011 -> 14.0x */
-	120, /* 1100 -> 12.0x */
-	155, /* 1101 -> 15.5x */
-	-1,  /* 1110 -> RESERVED (13.0x) */
-	145  /* 1111 -> 14.5x */
-	/* end of table */
-};
-static int __initdata nehemiah_c_eblcr[32] = {
+static int __initdata nehemiah_eblcr[32] = {
 	50,  /* 0000 -> 5.0x */
 	160, /* 0001 -> 16.0x */
-	40,  /* 0010 -> RESERVED */
+	40,  /* 0010 -> 4.0x */
 	100, /* 0011 -> 10.0x */
 	55,  /* 0100 -> 5.5x */
 	-1,  /* 0101 -> RESERVED */
-	45,  /* 0110 -> RESERVED */
+	45,  /* 0110 -> 4.5x */
 	95,  /* 0111 -> 9.5x */
 	90,  /* 1000 -> 9.0x */
 	70,  /* 1001 -> 7.0x */
@@ -443,7 +303,6 @@ static int __initdata nehemiah_c_eblcr[32] = {
 	155, /* 1101 -> 15.5x */
 	-1,  /* 1110 -> RESERVED (13.0x) */
 	145  /* 1111 -> 14.5x */
-	/* end of table */
 };
 
 /*
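For context: these tables map the CPU's 5-bit multiplier field to the bus ratio times ten, with -1 marking reserved encodings. Collapsing the three per-stepping variants into a single table works because the steppings only disagreed on slots that are now decoded as 4.0x and 4.5x. A hedged sketch of how such a table is typically consumed (ratio_to_khz() is hypothetical, not a driver function):

static int ratio_to_khz(const int *table, unsigned int field, int fsb_khz)
{
	int ratio10 = table[field & 0x1f];	/* 5-bit index */

	if (ratio10 < 0)
		return -1;			/* reserved encoding */
	return fsb_khz * ratio10 / 10;		/* e.g. 100 -> 10.0x */
}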
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 2d6491672559..fe3b67005ebb 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1289,7 +1289,11 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (query_current_values_with_pending_wait(data))
 		goto out;
 
-	khz = find_khz_freq_from_fid(data->currfid);
+	if (cpu_family == CPU_HW_PSTATE)
+		khz = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+	else
+		khz = find_khz_freq_from_fid(data->currfid);
+
 
 out:
 	set_cpus_allowed(current, oldmask);
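The split matters because fid alone no longer determines the frequency on hardware-P-state parts. The helpers below are hypothetical stand-ins for the driver's real macros in powernow-k8.h, written under the assumed encodings (100 MHz fid steps above an 800 MHz base on K8; fid plus a power-of-two divisor did on HW-pstate parts):

static unsigned int khz_from_fid(unsigned int fid)
{
	return (800 + fid * 100) * 1000;	/* assumed K8 encoding */
}

static unsigned int khz_from_fiddid(unsigned int fid, unsigned int did)
{
	return (100000 * (fid + 8)) >> did;	/* assumed HW-pstate encoding */
}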
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 0b29d41322a2..e1006b7acc9e 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -1,4 +1,5 @@
 #include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
@@ -6,17 +7,278 @@
 #include <asm/hpet.h>
 #include <asm/io.h>
 
+extern struct clock_event_device *global_clock_event;
+
 #define HPET_MASK	CLOCKSOURCE_MASK(32)
 #define HPET_SHIFT	22
 
 /* FSEC = 10^-15 NSEC = 10^-9 */
 #define FSEC_PER_NSEC	1000000
 
-static void __iomem *hpet_ptr;
+/*
+ * HPET address is set in acpi/boot.c, when an ACPI entry exists
+ */
+unsigned long hpet_address;
+static void __iomem * hpet_virt_address;
+
+static inline unsigned long hpet_readl(unsigned long a)
+{
+	return readl(hpet_virt_address + a);
+}
+
+static inline void hpet_writel(unsigned long d, unsigned long a)
+{
+	writel(d, hpet_virt_address + a);
+}
+
+/*
+ * HPET command line enable / disable
+ */
+static int boot_hpet_disable;
+
+static int __init hpet_setup(char* str)
+{
+	if (str) {
+		if (!strncmp("disable", str, 7))
+			boot_hpet_disable = 1;
+	}
+	return 1;
+}
+__setup("hpet=", hpet_setup);
+
+static inline int is_hpet_capable(void)
+{
+	return (!boot_hpet_disable && hpet_address);
+}
+
+/*
+ * HPET timer interrupt enable / disable
+ */
+static int hpet_legacy_int_enabled;
+
+/**
+ * is_hpet_enabled - check whether the hpet timer interrupt is enabled
+ */
+int is_hpet_enabled(void)
+{
+	return is_hpet_capable() && hpet_legacy_int_enabled;
+}
+
+/*
+ * When the hpet driver (/dev/hpet) is enabled, we need to reserve
+ * timer 0 and timer 1 in case of RTC emulation.
+ */
+#ifdef CONFIG_HPET
+static void hpet_reserve_platform_timers(unsigned long id)
+{
+	struct hpet __iomem *hpet = hpet_virt_address;
+	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
+	unsigned int nrtimers, i;
+	struct hpet_data hd;
+
+	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
+
+	memset(&hd, 0, sizeof (hd));
+	hd.hd_phys_address = hpet_address;
+	hd.hd_address = hpet_virt_address;
+	hd.hd_nirqs = nrtimers;
+	hd.hd_flags = HPET_DATA_PLATFORM;
+	hpet_reserve_timer(&hd, 0);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+	hpet_reserve_timer(&hd, 1);
+#endif
+
+	hd.hd_irq[0] = HPET_LEGACY_8254;
+	hd.hd_irq[1] = HPET_LEGACY_RTC;
+
+	for (i = 2; i < nrtimers; timer++, i++)
+		hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
+			Tn_INT_ROUTE_CNF_SHIFT;
+
+	hpet_alloc(&hd);
+
+}
+#else
+static void hpet_reserve_platform_timers(unsigned long id) { }
+#endif
+
+/*
+ * Common hpet info
+ */
+static unsigned long hpet_period;
+
+static void hpet_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt);
+static int hpet_next_event(unsigned long delta,
+			   struct clock_event_device *evt);
+
+/*
+ * The hpet clock event device
+ */
+static struct clock_event_device hpet_clockevent = {
+	.name		= "hpet",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= hpet_set_mode,
+	.set_next_event = hpet_next_event,
+	.shift		= 32,
+	.irq		= 0,
+};
+
+static void hpet_start_counter(void)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
+
+	cfg &= ~HPET_CFG_ENABLE;
+	hpet_writel(cfg, HPET_CFG);
+	hpet_writel(0, HPET_COUNTER);
+	hpet_writel(0, HPET_COUNTER + 4);
+	cfg |= HPET_CFG_ENABLE;
+	hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_enable_int(void)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
+
+	cfg |= HPET_CFG_LEGACY;
+	hpet_writel(cfg, HPET_CFG);
+	hpet_legacy_int_enabled = 1;
+}
+
+static void hpet_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
+{
+	unsigned long cfg, cmp, now;
+	uint64_t delta;
+
+	switch(mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
+		delta >>= hpet_clockevent.shift;
+		now = hpet_readl(HPET_COUNTER);
+		cmp = now + (unsigned long) delta;
+		cfg = hpet_readl(HPET_T0_CFG);
+		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
+		       HPET_TN_SETVAL | HPET_TN_32BIT;
+		hpet_writel(cfg, HPET_T0_CFG);
+		/*
+		 * The first write after writing TN_SETVAL to the
+		 * config register sets the counter value, the second
+		 * write sets the period.
+		 */
+		hpet_writel(cmp, HPET_T0_CMP);
+		udelay(1);
+		hpet_writel((unsigned long) delta, HPET_T0_CMP);
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		cfg = hpet_readl(HPET_T0_CFG);
+		cfg &= ~HPET_TN_PERIODIC;
+		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+		hpet_writel(cfg, HPET_T0_CFG);
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		cfg = hpet_readl(HPET_T0_CFG);
+		cfg &= ~HPET_TN_ENABLE;
+		hpet_writel(cfg, HPET_T0_CFG);
+		break;
+	}
+}
+
+static int hpet_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
+{
+	unsigned long cnt;
+
+	cnt = hpet_readl(HPET_COUNTER);
+	cnt += delta;
+	hpet_writel(cnt, HPET_T0_CMP);
+
+	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+}
+
+/*
+ * Try to setup the HPET timer
+ */
+int __init hpet_enable(void)
+{
+	unsigned long id;
+	uint64_t hpet_freq;
+
+	if (!is_hpet_capable())
+		return 0;
+
+	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+
+	/*
+	 * Read the period and check for a sane value:
+	 */
+	hpet_period = hpet_readl(HPET_PERIOD);
+	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
+		goto out_nohpet;
+
+	/*
+	 * The period is a femto seconds value. We need to calculate the
+	 * scaled math multiplication factor for nanosecond to hpet tick
+	 * conversion.
+	 */
+	hpet_freq = 1000000000000000ULL;
+	do_div(hpet_freq, hpet_period);
+	hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
+				      NSEC_PER_SEC, 32);
+	/* Calculate the min / max delta */
+	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
+							   &hpet_clockevent);
+	hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
+							   &hpet_clockevent);
+
+	/*
+	 * Read the HPET ID register to retrieve the IRQ routing
+	 * information and the number of channels
+	 */
+	id = hpet_readl(HPET_ID);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+	/*
+	 * The legacy routing mode needs at least two channels, tick timer
+	 * and the rtc emulation channel.
+	 */
+	if (!(id & HPET_ID_NUMBER))
+		goto out_nohpet;
+#endif
+
+	/* Start the counter */
+	hpet_start_counter();
+
+	if (id & HPET_ID_LEGSUP) {
+		hpet_enable_int();
+		hpet_reserve_platform_timers(id);
+		/*
+		 * Start hpet with the boot cpu mask and make it
+		 * global after the IO_APIC has been initialized.
+		 */
+		hpet_clockevent.cpumask = cpumask_of_cpu(0);
+		clockevents_register_device(&hpet_clockevent);
+		global_clock_event = &hpet_clockevent;
+		return 1;
+	}
+	return 0;
 
+out_nohpet:
+	iounmap(hpet_virt_address);
+	hpet_virt_address = NULL;
+	return 0;
+}
+
+/*
+ * Clock source related code
+ */
 static cycle_t read_hpet(void)
 {
-	return (cycle_t)readl(hpet_ptr);
+	return (cycle_t)hpet_readl(HPET_COUNTER);
 }
 
 static struct clocksource clocksource_hpet = {
@@ -24,28 +286,17 @@ static struct clocksource clocksource_hpet = {
 	.rating		= 250,
 	.read		= read_hpet,
 	.mask		= HPET_MASK,
-	.mult		= 0, /* set below */
 	.shift		= HPET_SHIFT,
-	.is_continuous	= 1,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static int __init init_hpet_clocksource(void)
 {
-	unsigned long hpet_period;
-	void __iomem* hpet_base;
 	u64 tmp;
-	int err;
 
-	if (!is_hpet_enabled())
+	if (!hpet_virt_address)
 		return -ENODEV;
 
-	/* calculate the hpet address: */
-	hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-	hpet_ptr = hpet_base + HPET_COUNTER;
-
-	/* calculate the frequency: */
-	hpet_period = readl(hpet_base + HPET_PERIOD);
-
 	/*
 	 * hpet period is in femto seconds per cycle
 	 * so we need to convert this to ns/cyc units
@@ -61,11 +312,218 @@ static int __init init_hpet_clocksource(void)
 	do_div(tmp, FSEC_PER_NSEC);
 	clocksource_hpet.mult = (u32)tmp;
 
-	err = clocksource_register(&clocksource_hpet);
-	if (err)
-		iounmap(hpet_base);
-
-	return err;
+	return clocksource_register(&clocksource_hpet);
 }
 
 module_init(init_hpet_clocksource);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+
+/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When
+ * HPET is enabled, we support RTC interrupt functionality in software.
+ * RTC has 3 kinds of interrupts:
+ * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
+ *    is updated
+ * 2) Alarm Interrupt - generate an interrupt at a specific time of day
+ * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
+ *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
+ * (1) and (2) above are implemented using polling at a frequency of
+ * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
+ * overhead. (DEFAULT_RTC_INT_FREQ)
+ * For (3), we use interrupts at 64 Hz or the user specified periodic
+ * frequency, whichever is higher.
+ */
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+
+#define DEFAULT_RTC_INT_FREQ	64
+#define DEFAULT_RTC_SHIFT	6
+#define RTC_NUM_INTS		1
+
+static unsigned long hpet_rtc_flags;
+static unsigned long hpet_prev_update_sec;
+static struct rtc_time hpet_alarm_time;
+static unsigned long hpet_pie_count;
+static unsigned long hpet_t1_cmp;
+static unsigned long hpet_default_delta;
+static unsigned long hpet_pie_delta;
+static unsigned long hpet_pie_limit;
+
+/*
+ * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
+ * is not supported by all HPET implementations for timer 1.
+ *
+ * hpet_rtc_timer_init() is called when the rtc is initialized.
+ */
+int hpet_rtc_timer_init(void)
+{
+	unsigned long cfg, cnt, delta, flags;
+
+	if (!is_hpet_enabled())
+		return 0;
+
+	if (!hpet_default_delta) {
+		uint64_t clc;
+
+		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
+		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
+		hpet_default_delta = (unsigned long) clc;
+	}
+
+	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
+		delta = hpet_default_delta;
+	else
+		delta = hpet_pie_delta;
+
+	local_irq_save(flags);
+
+	cnt = delta + hpet_readl(HPET_COUNTER);
+	hpet_writel(cnt, HPET_T1_CMP);
+	hpet_t1_cmp = cnt;
+
+	cfg = hpet_readl(HPET_T1_CFG);
+	cfg &= ~HPET_TN_PERIODIC;
+	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+	hpet_writel(cfg, HPET_T1_CFG);
+
+	local_irq_restore(flags);
+
+	return 1;
+}
+
+/*
+ * The functions below are called from rtc driver.
+ * Return 0 if HPET is not being used.
+ * Otherwise do the necessary changes and return 1.
+ */
+int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
+{
+	if (!is_hpet_enabled())
+		return 0;
+
+	hpet_rtc_flags &= ~bit_mask;
+	return 1;
+}
+
+int hpet_set_rtc_irq_bit(unsigned long bit_mask)
+{
+	unsigned long oldbits = hpet_rtc_flags;
+
+	if (!is_hpet_enabled())
+		return 0;
+
+	hpet_rtc_flags |= bit_mask;
+
+	if (!oldbits)
+		hpet_rtc_timer_init();
+
+	return 1;
+}
+
+int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+			unsigned char sec)
+{
+	if (!is_hpet_enabled())
+		return 0;
+
+	hpet_alarm_time.tm_hour = hrs;
+	hpet_alarm_time.tm_min = min;
+	hpet_alarm_time.tm_sec = sec;
+
+	return 1;
+}
+
+int hpet_set_periodic_freq(unsigned long freq)
+{
+	uint64_t clc;
+
+	if (!is_hpet_enabled())
+		return 0;
+
+	if (freq <= DEFAULT_RTC_INT_FREQ)
+		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
+	else {
+		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
+		do_div(clc, freq);
+		clc >>= hpet_clockevent.shift;
+		hpet_pie_delta = (unsigned long) clc;
+	}
+	return 1;
+}
+
+int hpet_rtc_dropped_irq(void)
+{
+	return is_hpet_enabled();
+}
+
+static void hpet_rtc_timer_reinit(void)
+{
+	unsigned long cfg, delta;
+	int lost_ints = -1;
+
+	if (unlikely(!hpet_rtc_flags)) {
+		cfg = hpet_readl(HPET_T1_CFG);
+		cfg &= ~HPET_TN_ENABLE;
+		hpet_writel(cfg, HPET_T1_CFG);
+		return;
+	}
+
+	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
+		delta = hpet_default_delta;
+	else
+		delta = hpet_pie_delta;
+
+	/*
+	 * Increment the comparator value until we are ahead of the
+	 * current count.
+	 */
+	do {
+		hpet_t1_cmp += delta;
+		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
+		lost_ints++;
+	} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+
+	if (lost_ints) {
+		if (hpet_rtc_flags & RTC_PIE)
+			hpet_pie_count += lost_ints;
+		if (printk_ratelimit())
+			printk(KERN_WARNING "rtc: lost %d interrupts\n",
+			       lost_ints);
+	}
+}
+
+irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+{
+	struct rtc_time curr_time;
+	unsigned long rtc_int_flag = 0;
+
+	hpet_rtc_timer_reinit();
+
+	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
+		rtc_get_rtc_time(&curr_time);
+
+	if (hpet_rtc_flags & RTC_UIE &&
+	    curr_time.tm_sec != hpet_prev_update_sec) {
+		rtc_int_flag = RTC_UF;
+		hpet_prev_update_sec = curr_time.tm_sec;
+	}
+
+	if (hpet_rtc_flags & RTC_PIE &&
+	    ++hpet_pie_count >= hpet_pie_limit) {
+		rtc_int_flag |= RTC_PF;
+		hpet_pie_count = 0;
+	}
+
+	if (hpet_rtc_flags & RTC_AIE &&
+	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
+	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
+	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
+		rtc_int_flag |= RTC_AF;
+
+	if (rtc_int_flag) {
+		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
+		rtc_interrupt(rtc_int_flag, dev_id);
+	}
+	return IRQ_HANDLED;
+}
+#endif
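The reinit path above is the heart of the emulation: timer 1 runs in one-shot mode, so every interrupt must push the comparator back ahead of the free-running counter, counting how many periods were missed along the way. A standalone sketch of that catch-up loop, with hypothetical read_counter()/write_cmp() helpers standing in for hpet_readl()/hpet_writel():

static unsigned long next_cmp;	/* mirrors hpet_t1_cmp above */

static int advance_comparator(unsigned long delta,
			      unsigned long (*read_counter)(void),
			      void (*write_cmp)(unsigned long))
{
	int lost = -1;	/* the first pass just re-arms; not a lost tick */

	do {
		next_cmp += delta;
		write_cmp(next_cmp);
		lost++;
		/* signed compare handles 32-bit counter wraparound */
	} while ((long)(read_counter() - next_cmp) > 0);

	return lost;	/* how far behind the handler fell */
}

The signed subtraction is the same wrap-safe idiom used by hpet_next_event() above; a plain "counter > cmp" comparison would misfire once the 32-bit counter rolls over.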
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 9a0060b92e32..a6bc7bb38834 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -2,7 +2,7 @@
  * i8253.c  8253/PIT functions
  *
  */
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/sysdev.h>
@@ -19,17 +19,97 @@
 DEFINE_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
-void setup_pit_timer(void)
+/*
+ * HPET replaces the PIT when enabled, so we need to know which of
+ * the two timers is in use.
+ */
+struct clock_event_device *global_clock_event;
+
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+			   struct clock_event_device *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+
+	switch(mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* binary, mode 2, LSB/MSB, ch 0 */
+		outb_p(0x34, PIT_MODE);
+		udelay(10);
+		outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
+		udelay(10);
+		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		/* One shot setup */
+		outb_p(0x38, PIT_MODE);
+		udelay(10);
+		break;
+	}
+	spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&i8253_lock, flags);
-	outb_p(0x34,PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
-	udelay(10);
-	outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
-	udelay(10);
-	outb(LATCH >> 8 , PIT_CH0);	/* MSB */
+	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
+	outb(delta >> 8 , PIT_CH0);	/* MSB */
 	spin_unlock_irqrestore(&i8253_lock, flags);
+
+	return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ *
+ * The profiling and update capabilities are switched off once the local apic
+ * is registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
+ * !using_apic_timer decisions in do_timer_interrupt_hook()
+ */
+struct clock_event_device pit_clockevent = {
+	.name		= "pit",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= init_pit_timer,
+	.set_next_event = pit_next_event,
+	.shift		= 32,
+	.irq		= 0,
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_pit_timer(void)
+{
+	/*
+	 * Start pit with the boot cpu mask and make it global after the
+	 * IO_APIC has been initialized.
+	 */
+	pit_clockevent.cpumask = cpumask_of_cpu(0);
+	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
+	pit_clockevent.max_delta_ns =
+		clockevent_delta2ns(0x7FFF, &pit_clockevent);
+	pit_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xF, &pit_clockevent);
+	clockevents_register_device(&pit_clockevent);
+	global_clock_event = &pit_clockevent;
 }
 
 /*
@@ -46,7 +126,7 @@ static cycle_t pit_read(void)
 	static u32 old_jifs;
 
 	spin_lock_irqsave(&i8253_lock, flags);
-	/*
+	/*
 	 * Although our caller may have the read side of xtime_lock,
 	 * this is now a seqlock, and we are cheating in this routine
 	 * by having side effects on state that we cannot undo if
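Both the PIT and HPET event devices above lean on the same scaled integer math: mult is precomputed as (freq << shift) / NSEC_PER_SEC, so converting nanoseconds to device ticks on the hot path costs one multiply and one shift. A self-contained sketch of that arithmetic (plain C, independent of the kernel's div_sc() and clockevent_delta2ns() helpers):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mult = (freq << shift) / NSEC_PER_SEC, as div_sc() computes it */
static uint32_t calc_mult(uint32_t freq, unsigned int shift)
{
	return (uint32_t)(((uint64_t)freq << shift) / NSEC_PER_SEC);
}

/* nanoseconds -> device ticks, the hot-path conversion */
static uint32_t ns_to_ticks(uint64_t ns, uint32_t mult, unsigned int shift)
{
	return (uint32_t)((ns * mult) >> shift);
}

/* e.g. for the PIT: calc_mult(1193182, 32), then
 * ns_to_ticks(NSEC_PER_SEC / HZ, mult, 32) yields roughly LATCH ticks. */

clockevent_delta2ns() is simply the inverse mapping, used once at setup time to express the device's 16-bit (PIT) or 32-bit (HPET) counter limits as min/max nanosecond deltas.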
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index c8d45821c788..03abfdb1a6e4 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -41,6 +41,7 @@ static void mask_and_ack_8259A(unsigned int);
 static struct irq_chip i8259A_chip = {
 	.name		= "XT-PIC",
 	.mask		= disable_8259A_irq,
+	.disable	= disable_8259A_irq,
 	.unmask		= enable_8259A_irq,
 	.mask_ack	= mask_and_ack_8259A,
 };
@@ -410,12 +411,6 @@ void __init native_init_IRQ(void)
 	intr_init_hook();
 
 	/*
-	 * Set the clock to HZ Hz, we already have a valid
-	 * vector now:
-	 */
-	setup_pit_timer();
-
-	/*
 	 * External FPU? Set up irq13 if so, for
 	 * original braindamaged IBM FERR coupling.
 	 */
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index e30ccedad0b9..4ccebd454e25 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -482,8 +482,8 @@ static void do_irq_balance(void)
 		package_index = CPU_TO_PACKAGEINDEX(i);
 		for (j = 0; j < NR_IRQS; j++) {
 			unsigned long value_now, delta;
-			/* Is this an active IRQ? */
-			if (!irq_desc[j].action)
+			/* Is this an active IRQ or balancing disabled ? */
+			if (!irq_desc[j].action || irq_balancing_disabled(j))
 				continue;
 			if ( package_index == i )
 				IRQ_DELTA(package_index,j) = 0;
@@ -1281,11 +1281,9 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 			trigger == IOAPIC_LEVEL)
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					 handle_fasteoi_irq, "fasteoi");
-	else {
-		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
+	else
 		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					 handle_edge_irq, "edge");
-	}
 	set_intr_gate(vector, interrupt[irq]);
 }
 
@@ -1588,7 +1586,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 	v = apic_read(APIC_LVR);
 	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
 	ver = GET_APIC_VERSION(v);
-	maxlvt = get_maxlvt();
+	maxlvt = lapic_get_maxlvt();
 
 	v = apic_read(APIC_TASKPRI);
 	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 5785d84103a6..0f2ca590bf23 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -10,7 +10,6 @@
  * io_apic.c.)
  */
 
-#include <asm/uaccess.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
@@ -21,19 +20,34 @@
 
 #include <asm/idle.h>
 
+#include <asm/apic.h>
+#include <asm/uaccess.h>
+
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
-#ifndef CONFIG_X86_LOCAL_APIC
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
  */
 void ack_bad_irq(unsigned int irq)
 {
-	printk("unexpected IRQ trap at vector %02x\n", irq);
-}
+	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 * But only ack when the APIC is enabled -AK
+	 */
+	if (cpu_has_apic)
+		ack_APIC_irq();
 #endif
+}
 
 #ifdef CONFIG_4KSTACKS
 /*
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 5d8a07c20281..821df34d2b3a 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -23,6 +23,7 @@
 #include <linux/dmi.h>
 #include <linux/kprobes.h>
 #include <linux/cpumask.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -973,9 +974,13 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		cpu_clear(cpu, backtrace_mask);
 	}
 
-	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
+	/*
+	 * Take the local apic timer and PIT/HPET into account. We don't
+	 * know which one is active when highres/dyntick is on.
+	 */
+	sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_irqs(0);
 
-	/* if the apic timer isn't firing, this cpu isn't doing much */
+	/* if none of the timers is firing, this cpu isn't doing much */
 	if (!touched && last_irq_sums[cpu] == sum) {
 		/*
 		 * Ayiee, looks like this CPU is stuck ...
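The detection itself is simple: sample a per-CPU count of timer interrupts on every NMI, and if it stops advancing for several consecutive samples the CPU is probably spinning with interrupts disabled. A minimal sketch of that logic with hypothetical state (the kernel keeps equivalent per-CPU last_irq_sums/alert counters):

#define STUCK_SAMPLES 5	/* NMIs with no progress before we warn */

struct wd_state {
	unsigned long last_sum;
	unsigned int alert_count;
};

static int watchdog_check(struct wd_state *wd, unsigned long timer_irqs)
{
	if (wd->last_sum == timer_irqs) {
		/* no timer interrupts since the last NMI: suspicious */
		if (++wd->alert_count >= STUCK_SAMPLES)
			return 1;	/* report a lockup */
	} else {
		wd->alert_count = 0;
		wd->last_sum = timer_irqs;
	}
	return 0;
}

Adding kstat_irqs(0) to the sum is what keeps this heuristic working once dyntick mode can leave the local APIC timer idle while the PIT/HPET on IRQ 0 carries the tick.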
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 7845d480c293..bea304d48cdb 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -38,6 +38,7 @@
 #include <linux/ptrace.h>
 #include <linux/random.h>
 #include <linux/personality.h>
+#include <linux/tick.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -211,6 +212,7 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
+		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
 			void (*idle)(void);
 
@@ -238,6 +240,7 @@ void cpu_idle(void)
 			idle();
 			__exit_idle();
 		}
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index f46a4d095e6c..48bfcaa13ecc 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -94,12 +94,6 @@ cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
-/* TSC's upper 32 bits can't be written in eariler CPU (before prescott), there
- * is no way to resync one AP against BP. TBD: for prescott and above, we
- * should use IA64's algorithm
- */
-static int __devinitdata tsc_sync_disabled;
-
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_data);
@@ -216,151 +210,6 @@ valid_k7:
 	;
 }
 
-/*
- * TSC synchronization.
- *
- * We first check whether all CPUs have their TSC's synchronized,
- * then we print a warning if not, and always resync.
- */
-
-static struct {
-	atomic_t start_flag;
-	atomic_t count_start;
-	atomic_t count_stop;
-	unsigned long long values[NR_CPUS];
-} tsc __cpuinitdata = {
-	.start_flag = ATOMIC_INIT(0),
-	.count_start = ATOMIC_INIT(0),
-	.count_stop = ATOMIC_INIT(0),
-};
-
-#define NR_LOOPS 5
-
-static void __init synchronize_tsc_bp(void)
-{
-	int i;
-	unsigned long long t0;
-	unsigned long long sum, avg;
-	long long delta;
-	unsigned int one_usec;
-	int buggy = 0;
-
-	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
-
-	/* convert from kcyc/sec to cyc/usec */
-	one_usec = cpu_khz / 1000;
-
-	atomic_set(&tsc.start_flag, 1);
-	wmb();
-
-	/*
-	 * We loop a few times to get a primed instruction cache,
-	 * then the last pass is more or less synchronized and
-	 * the BP and APs set their cycle counters to zero all at
-	 * once. This reduces the chance of having random offsets
-	 * between the processors, and guarantees that the maximum
-	 * delay between the cycle counters is never bigger than
-	 * the latency of information-passing (cachelines) between
-	 * two CPUs.
-	 */
-	for (i = 0; i < NR_LOOPS; i++) {
-		/*
-		 * all APs synchronize but they loop on '== num_cpus'
-		 */
-		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
-			cpu_relax();
-		atomic_set(&tsc.count_stop, 0);
-		wmb();
-		/*
-		 * this lets the APs save their current TSC:
-		 */
-		atomic_inc(&tsc.count_start);
-
-		rdtscll(tsc.values[smp_processor_id()]);
-		/*
-		 * We clear the TSC in the last loop:
-		 */
-		if (i == NR_LOOPS-1)
-			write_tsc(0, 0);
-
-		/*
-		 * Wait for all APs to leave the synchronization point:
-		 */
-		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
-			cpu_relax();
-		atomic_set(&tsc.count_start, 0);
-		wmb();
-		atomic_inc(&tsc.count_stop);
-	}
-
-	sum = 0;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_isset(i, cpu_callout_map)) {
-			t0 = tsc.values[i];
-			sum += t0;
-		}
-	}
-	avg = sum;
-	do_div(avg, num_booting_cpus());
-
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_isset(i, cpu_callout_map))
-			continue;
-		delta = tsc.values[i] - avg;
-		if (delta < 0)
-			delta = -delta;
-		/*
-		 * We report bigger than 2 microseconds clock differences.
-		 */
-		if (delta > 2*one_usec) {
-			long long realdelta;
-
-			if (!buggy) {
-				buggy = 1;
-				printk("\n");
-			}
-			realdelta = delta;
-			do_div(realdelta, one_usec);
-			if (tsc.values[i] < avg)
-				realdelta = -realdelta;
-
-			if (realdelta)
-				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
-					"skew, fixed it up.\n", i, realdelta);
-		}
-	}
-	if (!buggy)
-		printk("passed.\n");
-}
-
-static void __cpuinit synchronize_tsc_ap(void)
-{
-	int i;
-
-	/*
-	 * Not every cpu is online at the time
-	 * this gets called, so we first wait for the BP to
-	 * finish SMP initialization:
-	 */
-	while (!atomic_read(&tsc.start_flag))
-		cpu_relax();
-
-	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&tsc.count_start);
-		while (atomic_read(&tsc.count_start) != num_booting_cpus())
-			cpu_relax();
-
-		rdtscll(tsc.values[smp_processor_id()]);
-		if (i == NR_LOOPS-1)
-			write_tsc(0, 0);
-
-		atomic_inc(&tsc.count_stop);
-		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
-			cpu_relax();
-	}
-}
-#undef NR_LOOPS
-
 extern void calibrate_delay(void);
 
 static atomic_t init_deasserted;
@@ -438,20 +287,12 @@ static void __cpuinit smp_callin(void)
 	/*
 	 * Save our processor parameters
 	 */
 	smp_store_cpu_info(cpuid);
-
-	disable_APIC_timer();
 
 	/*
 	 * Allow the master to continue.
 	 */
 	cpu_set(cpuid, cpu_callin_map);
-
-	/*
-	 * Synchronize the TSC with the BP
-	 */
-	if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
-		synchronize_tsc_ap();
 }
 
 static int cpucount;
@@ -554,13 +395,17 @@ static void __cpuinit start_secondary(void *unused)
 	smp_callin();
 	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		rep_nop();
+	/*
+	 * Check TSC synchronization with the BP:
+	 */
+	check_tsc_sync_target();
+
 	setup_secondary_clock();
 	if (nmi_watchdog == NMI_IO_APIC) {
 		disable_8259A_irq(0);
 		enable_NMI_through_LVT0(NULL);
 		enable_8259A_irq(0);
 	}
-	enable_APIC_timer();
 	/*
 	 * low-memory mappings have been cleared, flush them from
 	 * the local TLBs too.
@@ -752,7 +597,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 	/*
 	 * Due to the Pentium erratum 3AP.
 	 */
-	maxlvt = get_maxlvt();
+	maxlvt = lapic_get_maxlvt();
 	if (maxlvt > 3) {
 		apic_read_around(APIC_SPIV);
 		apic_write(APIC_ESR, 0);
@@ -849,7 +694,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 	 */
 	Dprintk("#startup loops: %d.\n", num_starts);
 
-	maxlvt = get_maxlvt();
+	maxlvt = lapic_get_maxlvt();
 
 	for (j = 1; j <= num_starts; j++) {
 		Dprintk("Sending STARTUP #%d.\n",j);
@@ -1125,8 +970,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	info.cpu = cpu;
 	INIT_WORK(&info.task, do_warm_boot_cpu);
 
-	tsc_sync_disabled = 1;
-
 	/* init low mem mapping */
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
 			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
@@ -1134,7 +977,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 	schedule_work(&info.task);
 	wait_for_completion(&done);
 
-	tsc_sync_disabled = 0;
 	zap_low_mappings();
 	ret = 0;
 exit:
@@ -1331,12 +1173,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	smpboot_setup_io_apic();
 
 	setup_boot_clock();
-
-	/*
-	 * Synchronize the TSC with the AP
-	 */
-	if (cpu_has_tsc && cpucount && cpu_khz)
-		synchronize_tsc_bp();
 }
 
 /* These are wrappers to interface to the new boot process. Someone
@@ -1471,9 +1307,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	}
 
 	local_irq_enable();
+
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Unleash the CPU! */
 	cpu_set(cpu, smp_commenced_mask);
+
+	/*
+	 * Check TSC synchronization with the AP:
+	 */
+	check_tsc_sync_source(cpu);
+
 	while (!cpu_isset(cpu, cpu_online_map))
 		cpu_relax();
 
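check_tsc_sync_source()/check_tsc_sync_target() replace the removed write-and-resync loop with a measurement-only handshake, but the rendezvous idea is the same as in the deleted code: both CPUs spin on shared atomics so their TSC reads happen as close together in time as possible. An illustrative user-space sketch of such a two-CPU rendezvous (C11 atomics; not the kernel implementation, which is shared from x86_64's tsc_sync.c):

#include <stdatomic.h>

static atomic_int start_count;	/* CPUs that reached the barrier */

static void rendezvous(int ncpus)
{
	atomic_fetch_add(&start_count, 1);
	while (atomic_load(&start_count) != ncpus)
		;	/* cpu_relax() in the kernel */
}

/* Each side calls this, then the source compares the captured values;
 * rdtsc_fn is a stand-in for the architecture's TSC read. */
static unsigned long long capture_tsc(int ncpus,
				      unsigned long long (*rdtsc_fn)(void))
{
	rendezvous(ncpus);	/* align both CPUs in time */
	return rdtsc_fn();
}

The key behavioral change: instead of silently writing the TSC to paper over skew (impossible to do reliably on pre-Prescott parts, as the removed comment noted), the new code only detects skew and marks the TSC unstable so the clocksource layer falls back to PIT/HPET.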
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a4f67a6e6821..a5350059557a 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -159,15 +159,6 @@ EXPORT_SYMBOL(profile_pc);
  */
 irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
-
 #ifdef CONFIG_X86_IO_APIC
 	if (timer_ack) {
 		/*
@@ -186,7 +177,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 
 	do_timer_interrupt_hook();
 
-
 	if (MCA_bus) {
 		/* The PS/2 uses level-triggered interrupts. You can't
 		   turn them off, nor would you want to (any attempt to
@@ -201,18 +191,11 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 		outb_p( irq_v|0x80, 0x61 );	/* reset the IRQ */
 	}
 
-	write_sequnlock(&xtime_lock);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	if (using_apic_timer)
-		smp_send_timer_broadcast_ipi();
-#endif
-
 	return IRQ_HANDLED;
 }
 
 /* not static: needed by APM */
-unsigned long get_cmos_time(void)
+unsigned long read_persistent_clock(void)
 {
 	unsigned long retval;
 	unsigned long flags;
@@ -225,7 +208,6 @@ unsigned long get_cmos_time(void)
 
 	return retval;
 }
-EXPORT_SYMBOL(get_cmos_time);
 
 static void sync_cmos_clock(unsigned long dummy);
 
@@ -278,114 +260,16 @@ void notify_arch_cmos_timer(void)
 	mod_timer(&sync_cmos_timer, jiffies + 1);
 }
 
-static long clock_cmos_diff;
-static unsigned long sleep_start;
-
-static int timer_suspend(struct sys_device *dev, pm_message_t state)
-{
-	/*
-	 * Estimate time zone so that set_time can update the clock
-	 */
-	unsigned long ctime = get_cmos_time();
-
-	clock_cmos_diff = -ctime;
-	clock_cmos_diff += get_seconds();
-	sleep_start = ctime;
-	return 0;
-}
-
-static int timer_resume(struct sys_device *dev)
-{
-	unsigned long flags;
-	unsigned long sec;
-	unsigned long ctime = get_cmos_time();
-	long sleep_length = (ctime - sleep_start) * HZ;
-	struct timespec ts;
-
-	if (sleep_length < 0) {
-		printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
-		/* The time after the resume must not be earlier than the time
-		 * before the suspend or some nasty things will happen
-		 */
-		sleep_length = 0;
-		ctime = sleep_start;
-	}
-#ifdef CONFIG_HPET_TIMER
-	if (is_hpet_enabled())
-		hpet_reenable();
-#endif
-	setup_pit_timer();
-
-	sec = ctime + clock_cmos_diff;
-	ts.tv_sec = sec;
-	ts.tv_nsec = 0;
-	do_settimeofday(&ts);
-	write_seqlock_irqsave(&xtime_lock, flags);
-	jiffies_64 += sleep_length;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-	touch_softlockup_watchdog();
-	return 0;
-}
-
-static struct sysdev_class timer_sysclass = {
-	.resume = timer_resume,
-	.suspend = timer_suspend,
-	set_kset_name("timer"),
-};
-
-
-/* XXX this driverfs stuff should probably go elsewhere later -john */
-static struct sys_device device_timer = {
-	.id = 0,
-	.cls = &timer_sysclass,
-};
-
-static int time_init_device(void)
-{
-	int error = sysdev_class_register(&timer_sysclass);
-	if (!error)
-		error = sysdev_register(&device_timer);
-	return error;
-}
-
-device_initcall(time_init_device);
-
-#ifdef CONFIG_HPET_TIMER
 extern void (*late_time_init)(void);
 /* Duplicate of time_init() below, with hpet_enable part added */
 static void __init hpet_time_init(void)
 {
-	struct timespec ts;
-	ts.tv_sec = get_cmos_time();
-	ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-
-	do_settimeofday(&ts);
-
-	if ((hpet_enable() >= 0) && hpet_use_timer) {
-		printk("Using HPET for base-timer\n");
-	}
-
+	if (!hpet_enable())
+		setup_pit_timer();
 	do_time_init();
 }
-#endif
 
 void __init time_init(void)
 {
-	struct timespec ts;
-#ifdef CONFIG_HPET_TIMER
-	if (is_hpet_capable()) {
-		/*
-		 * HPET initialization needs to do memory-mapped io. So, let
-		 * us do a late initialization after mem_init().
-		 */
-		late_time_init = hpet_time_init;
-		return;
-	}
-#endif
-	ts.tv_sec = get_cmos_time();
-	ts.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-
-	do_settimeofday(&ts);
-
-	do_time_init();
+	late_time_init = hpet_time_init;
 }
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 46f752a8bbf3..3082a418635c 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -60,12 +60,6 @@ static inline int check_tsc_unstable(void)
 	return tsc_unstable;
 }
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /* Accellerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
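That "basic equation" reduces to ns = cycles * (10^6 / cpu_khz), kept in integer math by pre-scaling the per-cycle factor by a power of two. A hedged sketch of the arithmetic (the scale of 2^10 here is illustrative; it mirrors the CYC2NS_SCALE_FACTOR approach this file uses):

#include <stdint.h>

#define SC 10	/* scale factor: factor is premultiplied by 2^SC */

static uint32_t cyc2ns_scale;	/* ns per cycle, scaled by 2^SC */

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000UL << SC) / cpu_khz;
}

static uint64_t cycles_to_ns(uint64_t cyc)
{
	return (cyc * cyc2ns_scale) >> SC;	/* one mul, one shift */
}

Precomputing the scale once per frequency change keeps sched_clock() itself down to a single multiply and shift, which is why the cpufreq notifier below recomputes it rather than dividing on every call.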
@@ -222,34 +216,6 @@ out_no_tsc:
 
 #ifdef CONFIG_CPU_FREQ
 
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(struct work_struct *work)
-{
-	unsigned int cpu;
-
-	for_each_online_cpu(cpu)
-		cpufreq_get(cpu);
-
-	cpufreq_delayed_issched = 0;
-}
-
-/*
- * if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
-	if (cpufreq_init && !cpufreq_delayed_issched) {
-		cpufreq_delayed_issched = 1;
-		printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
-		schedule_work(&cpufreq_delayed_get_work);
-	}
-}
-
 /*
  * if the CPU frequency is scaled, TSC-based delays will need a different
  * loops_per_jiffy value to function properly.
@@ -313,17 +279,9 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-	int ret;
-
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
-	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-	if (!ret)
-		cpufreq_init = 1;
-
-	return ret;
+	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
+					 CPUFREQ_TRANSITION_NOTIFIER);
 }
-
 core_initcall(cpufreq_tsc);
 
 #endif
@@ -331,7 +289,6 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static int tsc_update_callback(void);
 
 static cycle_t read_tsc(void)
 {
@@ -349,37 +306,28 @@ static struct clocksource clocksource_tsc = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.mult			= 0, /* to be set */
 	.shift			= 22,
-	.update_callback	= tsc_update_callback,
-	.is_continuous		= 1,
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_MUST_VERIFY,
 };
 
-static int tsc_update_callback(void)
+void mark_tsc_unstable(void)
 {
-	int change = 0;
-
-	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
-		clocksource_tsc.rating = 0;
-		clocksource_reselect();
-		change = 1;
-	}
-
-	/* only update if tsc_khz has changed: */
-	if (current_tsc_khz != tsc_khz) {
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-							clocksource_tsc.shift);
-		change = 1;
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Can be called before registration */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
 	}
-
-	return change;
 }
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
 		d->ident);
-	mark_tsc_unstable();
+	tsc_unstable = 1;
 	return 0;
 }
 
@@ -396,65 +344,44 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
 	{}
 };
 
-#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
-static struct timer_list verify_tsc_freq_timer;
-
-/* XXX - Probably should add locking */
-static void verify_tsc_freq(unsigned long unused)
-{
-	static u64 last_tsc;
-	static unsigned long last_jiffies;
-
-	u64 now_tsc, interval_tsc;
-	unsigned long now_jiffies, interval_jiffies;
-
-
-	if (check_tsc_unstable())
-		return;
-
-	rdtscll(now_tsc);
-	now_jiffies = jiffies;
-
-	if (!last_jiffies) {
-		goto out;
-	}
-
-	interval_jiffies = now_jiffies - last_jiffies;
-	interval_tsc = now_tsc - last_tsc;
-	interval_tsc *= HZ;
-	do_div(interval_tsc, cpu_khz*1000);
-
-	if (interval_tsc < (interval_jiffies * 3 / 4)) {
-		printk("TSC appears to be running slowly. "
-			"Marking it as unstable\n");
-		mark_tsc_unstable();
-		return;
-	}
-
-out:
-	last_tsc = now_tsc;
-	last_jiffies = now_jiffies;
-	/* set us up to go off on the next interval: */
-	mod_timer(&verify_tsc_freq_timer,
-		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
-}
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
  */
-static __init int unsynchronized_tsc(void)
+__cpuinit int unsynchronized_tsc(void)
 {
+	if (!cpu_has_tsc || tsc_unstable)
+		return 1;
 	/*
 	 * Intel systems are normally all synchronized.
 	 * Exceptions must mark TSC as unstable:
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		return 0;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		/* assume multi socket systems are not synchronized: */
+		if (num_possible_cpus() > 1)
+			tsc_unstable = 1;
+	}
+	return tsc_unstable;
+}
+
+/*
+ * Geode_LX - the OLPC CPU has a possibly very reliable TSC
+ */
+#ifdef CONFIG_MGEODE_LX
+/* RTSC counts during suspend */
+#define RTSC_SUSP 0x100
+
+static void __init check_geode_tsc_reliable(void)
+{
+	unsigned long val;
 
-	/* assume multi socket systems are not synchronized: */
-	return num_possible_cpus() > 1;
+	rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
+	if ((val & RTSC_SUSP))
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
 }
+#else
+static inline void check_geode_tsc_reliable(void) { }
+#endif
 
@@ -463,20 +390,16 @@ static int __init init_tsc_clocksource(void)
 	/* check blacklist */
 	dmi_check_system(bad_tsc_dmi_table);
 
-	if (unsynchronized_tsc()) /* mark unstable if unsynced */
-		mark_tsc_unstable();
+	unsynchronized_tsc();
+	check_geode_tsc_reliable();
 	current_tsc_khz = tsc_khz;
 	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
 						clocksource_tsc.shift);
 	/* lower the rating if we already know its unstable: */
-	if (check_tsc_unstable())
+	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
-
-	init_timer(&verify_tsc_freq_timer);
-	verify_tsc_freq_timer.function = verify_tsc_freq;
-	verify_tsc_freq_timer.expires =
-		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
-	add_timer(&verify_tsc_freq_timer);
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
+	}
 
 	return clocksource_register(&clocksource_tsc);
 	}
diff --git a/arch/i386/kernel/tsc_sync.c b/arch/i386/kernel/tsc_sync.c
new file mode 100644
index 000000000000..12424629af87
--- /dev/null
+++ b/arch/i386/kernel/tsc_sync.c
@@ -0,0 +1 @@
+#include "../../x86_64/kernel/tsc_sync.c"
diff --git a/arch/i386/kernel/vmitime.c b/arch/i386/kernel/vmitime.c
index 2e2d8dbcbd68..76d2adcae5a3 100644
--- a/arch/i386/kernel/vmitime.c
+++ b/arch/i386/kernel/vmitime.c
@@ -115,7 +115,7 @@ static struct clocksource clocksource_vmi = {
115 .mask = CLOCKSOURCE_MASK(64), 115 .mask = CLOCKSOURCE_MASK(64),
116 .mult = 0, /* to be set */ 116 .mult = 0, /* to be set */
117 .shift = 22, 117 .shift = 22,
118 .is_continuous = 1, 118 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
119}; 119};
120 120
121 121
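
This vmitime.c hunk is one instance of a tree-wide conversion (the MIPS and s390 hunks below are the others visible in this patch): the boolean .is_continuous member of struct clocksource becomes a .flags bitmask, so several orthogonal properties can be carried at once. The pattern, with both flag names taken from hunks in this patch:

	/* before: one boolean property */
	.is_continuous	= 1,

	/* after: a bitmask -- continuity can now coexist with e.g. the
	 * CLOCK_SOURCE_MUST_VERIFY bit used by the Geode check above */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
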
diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c
index cc2f519b2f7f..c78816210706 100644
--- a/arch/i386/mach-default/setup.c
+++ b/arch/i386/mach-default/setup.c
@@ -79,7 +79,12 @@ void __init trap_init_hook(void)
79{ 79{
80} 80}
81 81
82static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL}; 82static struct irqaction irq0 = {
83 .handler = timer_interrupt,
84 .flags = IRQF_DISABLED | IRQF_NOBALANCING,
85 .mask = CPU_MASK_NONE,
86 .name = "timer"
87};
83 88
84/** 89/**
85 * time_init_hook - do any specific initialisations for the system timer. 90 * time_init_hook - do any specific initialisations for the system timer.
@@ -90,6 +95,7 @@ static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE,
90 **/ 95 **/
91void __init time_init_hook(void) 96void __init time_init_hook(void)
92{ 97{
98 irq0.mask = cpumask_of_cpu(0);
93 setup_irq(0, &irq0); 99 setup_irq(0, &irq0);
94} 100}
95 101
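
Two things happen in that hunk: the positional struct irqaction initializer becomes a designated one, so a future reordering of the struct's members cannot silently mis-assign fields, and the timer interrupt is pinned to the boot CPU with IRQF_NOBALANCING plus an explicit mask set just before registration. Shown whole for readability (this mirrors the hunk, it is not new code):

	static struct irqaction irq0 = {
		.handler = timer_interrupt,
		.flags   = IRQF_DISABLED | IRQF_NOBALANCING,
		.mask    = CPU_MASK_NONE,
		.name    = "timer"
	};

	void __init time_init_hook(void)
	{
		irq0.mask = cpumask_of_cpu(0);	/* route IRQ0 to CPU0 only */
		setup_irq(0, &irq0);
	}
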
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db185f34e341..d51f0f11f7f9 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,7 @@ config IA64
22 22
23config 64BIT 23config 64BIT
24 bool 24 bool
25 select ATA_NONSTANDARD if ATA
25 default y 26 default y
26 27
27config ZONE_DMA 28config ZONE_DMA
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9197d7b361b3..3549c94467b8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -651,7 +651,7 @@ int __init acpi_boot_init(void)
651 * information -- the successor to MPS tables. 651 * information -- the successor to MPS tables.
652 */ 652 */
653 653
654 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) { 654 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
655 printk(KERN_ERR PREFIX "Can't find MADT\n"); 655 printk(KERN_ERR PREFIX "Can't find MADT\n");
656 goto skip_madt; 656 goto skip_madt;
657 } 657 }
@@ -702,7 +702,7 @@ int __init acpi_boot_init(void)
702 * gets interrupts such as power and sleep buttons. If it's not 702 * gets interrupts such as power and sleep buttons. If it's not
703 * on a Legacy interrupt, it needs to be setup. 703 * on a Legacy interrupt, it needs to be setup.
704 */ 704 */
705 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1) 705 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
706 printk(KERN_ERR PREFIX "Can't find FADT\n"); 706 printk(KERN_ERR PREFIX "Can't find FADT\n");
707 707
708#ifdef CONFIG_SMP 708#ifdef CONFIG_SMP
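
The `< 1` comparisons become plain truth tests because the return convention of acpi_table_parse() changed: it used to report how many tables matched, and -- judging by this hunk -- now returns zero on success and non-zero on failure, so callers read as ordinary error checks:

	/* assumed new convention: 0 == table found and handler run */
	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
		printk(KERN_ERR PREFIX "Can't find FADT\n");
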
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 545fcbc8cea2..e5e56bd498db 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -307,7 +307,7 @@ static unsigned int __init calibrate_hpt(void)
307struct clocksource clocksource_mips = { 307struct clocksource clocksource_mips = {
308 .name = "MIPS", 308 .name = "MIPS",
309 .mask = 0xffffffff, 309 .mask = 0xffffffff,
310 .is_continuous = 1, 310 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
311}; 311};
312 312
313static void __init init_mips_clocksource(void) 313static void __init init_mips_clocksource(void)
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 39db12890214..5e5c0e4add91 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -305,8 +305,6 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
305 level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); 305 level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
306 if (level) 306 if (level)
307 desc->status |= IRQ_LEVEL; 307 desc->status |= IRQ_LEVEL;
308 else
309 desc->status |= IRQ_DELAYED_DISABLE;
310 set_irq_chip_and_handler(virq, &pmac_pic, level ? 308 set_irq_chip_and_handler(virq, &pmac_pic, level ?
311 handle_level_irq : handle_edge_irq); 309 handle_level_irq : handle_edge_irq);
312 return 0; 310 return 0;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 3b91f27ab202..ee9fd7b85928 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -312,7 +312,7 @@ static struct clocksource clocksource_tod = {
312 .mask = -1ULL, 312 .mask = -1ULL,
313 .mult = 1000, 313 .mult = 1000,
314 .shift = 12, 314 .shift = 12,
315 .is_continuous = 1, 315 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
316}; 316};
317 317
318 318
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4f3891215b87..4d16d8917074 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -135,12 +135,6 @@ config SH_HP6XX
135 More information (hardware only) at 135 More information (hardware only) at
136 <http://www.hp.com/jornada/>. 136 <http://www.hp.com/jornada/>.
137 137
138config SH_EC3104
139 bool "EC3104"
140 help
141 Select EC3104 if configuring for a system with an Eclipse
142 International EC3104 chip, e.g. the Harris AD2000.
143
144config SH_SATURN 138config SH_SATURN
145 bool "Saturn" 139 bool "Saturn"
146 select CPU_SUBTYPE_SH7604 140 select CPU_SUBTYPE_SH7604
@@ -156,9 +150,6 @@ config SH_DREAMCAST
156 <http://www.m17n.org/linux-sh/dreamcast/>. There is a 150 <http://www.m17n.org/linux-sh/dreamcast/>. There is a
157 Dreamcast project at <http://linuxdc.sourceforge.net/>. 151 Dreamcast project at <http://linuxdc.sourceforge.net/>.
158 152
159config SH_BIGSUR
160 bool "BigSur"
161
162config SH_MPC1211 153config SH_MPC1211
163 bool "Interface MPC1211" 154 bool "Interface MPC1211"
164 help 155 help
@@ -481,6 +472,7 @@ config SH_PCLK_FREQ
481 472
482config SH_CLK_MD 473config SH_CLK_MD
483 int "CPU Mode Pin Setting" 474 int "CPU Mode Pin Setting"
475 default 0
484 depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206 476 depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
485 help 477 help
486 MD2 - MD0 pin setting. 478 MD2 - MD0 pin setting.
@@ -510,8 +502,9 @@ source "arch/sh/cchips/Kconfig"
510config HEARTBEAT 502config HEARTBEAT
511 bool "Heartbeat LED" 503 bool "Heartbeat LED"
512 depends on SH_MPC1211 || SH_SH03 || \ 504 depends on SH_MPC1211 || SH_SH03 || \
513 SH_BIGSUR || SOLUTION_ENGINE || \ 505 SOLUTION_ENGINE || \
514 SH_RTS7751R2D || SH_SH4202_MICRODEV || SH_LANDISK 506 SH_RTS7751R2D || SH_SH4202_MICRODEV || SH_LANDISK || \
507 SH_R7780RP
515 help 508 help
516 Use the power-on LED on your machine as a load meter. The exact 509 Use the power-on LED on your machine as a load meter. The exact
517 behavior is platform-dependent, but normally the flash frequency is 510 behavior is platform-dependent, but normally the flash frequency is
@@ -596,6 +589,8 @@ menu "Boot options"
596config ZERO_PAGE_OFFSET 589config ZERO_PAGE_OFFSET
597 hex "Zero page offset" 590 hex "Zero page offset"
598 default "0x00004000" if SH_MPC1211 || SH_SH03 591 default "0x00004000" if SH_MPC1211 || SH_SH03
592 default "0x00010000" if PAGE_SIZE_64KB
593 default "0x00002000" if PAGE_SIZE_8KB
599 default "0x00001000" 594 default "0x00001000"
600 help 595 help
601 This sets the default offset of zero page. 596 This sets the default offset of zero page.
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index c1dbef212634..bd9b1729f8b8 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -35,6 +35,7 @@ endif
35endif 35endif
36 36
37cflags-$(CONFIG_CPU_SH2) := -m2 37cflags-$(CONFIG_CPU_SH2) := -m2
38cflags-$(CONFIG_CPU_SH2A) := -m2a $(call cc-option,-m2a-nofpu,)
38cflags-$(CONFIG_CPU_SH3) := -m3 39cflags-$(CONFIG_CPU_SH3) := -m3
39cflags-$(CONFIG_CPU_SH4) := -m4 \ 40cflags-$(CONFIG_CPU_SH4) := -m4 \
40 $(call cc-option,-mno-implicit-fp,-m4-nofpu) 41 $(call cc-option,-mno-implicit-fp,-m4-nofpu)
@@ -93,10 +94,8 @@ machdir-$(CONFIG_SH_7300_SOLUTION_ENGINE) := se/7300
93machdir-$(CONFIG_SH_7343_SOLUTION_ENGINE) := se/7343 94machdir-$(CONFIG_SH_7343_SOLUTION_ENGINE) := se/7343
94machdir-$(CONFIG_SH_73180_SOLUTION_ENGINE) := se/73180 95machdir-$(CONFIG_SH_73180_SOLUTION_ENGINE) := se/73180
95machdir-$(CONFIG_SH_HP6XX) := hp6xx 96machdir-$(CONFIG_SH_HP6XX) := hp6xx
96machdir-$(CONFIG_SH_EC3104) := ec3104
97machdir-$(CONFIG_SH_SATURN) := saturn 97machdir-$(CONFIG_SH_SATURN) := saturn
98machdir-$(CONFIG_SH_DREAMCAST) := dreamcast 98machdir-$(CONFIG_SH_DREAMCAST) := dreamcast
99machdir-$(CONFIG_SH_BIGSUR) := bigsur
100machdir-$(CONFIG_SH_MPC1211) := mpc1211 99machdir-$(CONFIG_SH_MPC1211) := mpc1211
101machdir-$(CONFIG_SH_SH03) := sh03 100machdir-$(CONFIG_SH_SH03) := sh03
102machdir-$(CONFIG_SH_SECUREEDGE5410) := snapgear 101machdir-$(CONFIG_SH_SECUREEDGE5410) := snapgear
diff --git a/arch/sh/boards/bigsur/Makefile b/arch/sh/boards/bigsur/Makefile
deleted file mode 100644
index 0ff9497ac58e..000000000000
--- a/arch/sh/boards/bigsur/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for the BigSur specific parts of the kernel
3#
4
5obj-y := setup.o io.o irq.o led.o
6
diff --git a/arch/sh/boards/bigsur/io.c b/arch/sh/boards/bigsur/io.c
deleted file mode 100644
index 23071f97eec3..000000000000
--- a/arch/sh/boards/bigsur/io.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * arch/sh/boards/bigsur/io.c
3 *
4 * By Dustin McIntire (dustin@sensoria.com) (c)2001
5 * Derived from io_hd64465.h, which bore the message:
6 * By Greg Banks <gbanks@pocketpenguins.com>
7 * (c) 2000 PocketPenguins Inc.
8 * and from io_hd64461.h, which bore the message:
9 * Copyright 2000 Stuart Menefy (stuart.menefy@st.com)
10 *
11 * May be copied or modified under the terms of the GNU General Public
12 * License. See linux/COPYING for more information.
13 *
14 * IO functions for a Hitachi Big Sur Evaluation Board.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <asm/machvec.h>
20#include <asm/io.h>
21#include <asm/bigsur/bigsur.h>
22
23/* Low iomap maps port 0-1K to addresses in 8byte chunks */
24#define BIGSUR_IOMAP_LO_THRESH 0x400
25#define BIGSUR_IOMAP_LO_SHIFT 3
26#define BIGSUR_IOMAP_LO_MASK ((1<<BIGSUR_IOMAP_LO_SHIFT)-1)
27#define BIGSUR_IOMAP_LO_NMAP (BIGSUR_IOMAP_LO_THRESH>>BIGSUR_IOMAP_LO_SHIFT)
28static u32 bigsur_iomap_lo[BIGSUR_IOMAP_LO_NMAP];
29static u8 bigsur_iomap_lo_shift[BIGSUR_IOMAP_LO_NMAP];
30
31/* High iomap maps port 1K-64K to addresses in 1K chunks */
32#define BIGSUR_IOMAP_HI_THRESH 0x10000
33#define BIGSUR_IOMAP_HI_SHIFT 10
34#define BIGSUR_IOMAP_HI_MASK ((1<<BIGSUR_IOMAP_HI_SHIFT)-1)
35#define BIGSUR_IOMAP_HI_NMAP (BIGSUR_IOMAP_HI_THRESH>>BIGSUR_IOMAP_HI_SHIFT)
36static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP];
37static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP];
38
39void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
40{
41 u32 port, endport = baseport + nports;
42
43 pr_debug("bigsur_port_map(base=0x%0x, n=0x%0x, addr=0x%08x)\n",
44 baseport, nports, addr);
45
46 for (port = baseport ;
47 port < endport && port < BIGSUR_IOMAP_LO_THRESH ;
48 port += (1<<BIGSUR_IOMAP_LO_SHIFT)) {
49 pr_debug(" maplo[0x%x] = 0x%08x\n", port, addr);
50 bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = addr;
51 bigsur_iomap_lo_shift[port>>BIGSUR_IOMAP_LO_SHIFT] = shift;
52 addr += (1<<(BIGSUR_IOMAP_LO_SHIFT));
53 }
54
55 for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
56 port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
57 port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
58 pr_debug(" maphi[0x%x] = 0x%08x\n", port, addr);
59 bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = addr;
60 bigsur_iomap_hi_shift[port>>BIGSUR_IOMAP_HI_SHIFT] = shift;
61 addr += (1<<(BIGSUR_IOMAP_HI_SHIFT));
62 }
63}
64EXPORT_SYMBOL(bigsur_port_map);
65
66void bigsur_port_unmap(u32 baseport, u32 nports)
67{
68 u32 port, endport = baseport + nports;
69
70 pr_debug("bigsur_port_unmap(base=0x%0x, n=0x%0x)\n", baseport, nports);
71
72 for (port = baseport ;
73 port < endport && port < BIGSUR_IOMAP_LO_THRESH ;
74 port += (1<<BIGSUR_IOMAP_LO_SHIFT)) {
75 bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0;
76 }
77
78 for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
79 port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
80 port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
81 bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0;
82 }
83}
84EXPORT_SYMBOL(bigsur_port_unmap);
85
86unsigned long bigsur_isa_port2addr(unsigned long port)
87{
88 unsigned long addr = 0;
89 unsigned char shift;
90
91 /* Physical address not in P0, do nothing */
92 if (PXSEG(port)) {
93 addr = port;
94 /* physical address in P0, map to P2 */
95 } else if (port >= 0x30000) {
96 addr = P2SEGADDR(port);
97 /* Big Sur I/O + HD64465 registers 0x10000-0x30000 */
98 } else if (port >= BIGSUR_IOMAP_HI_THRESH) {
99 addr = BIGSUR_INTERNAL_BASE + (port - BIGSUR_IOMAP_HI_THRESH);
100 /* Handle remapping of high IO/PCI IO ports */
101 } else if (port >= BIGSUR_IOMAP_LO_THRESH) {
102 addr = bigsur_iomap_hi[port >> BIGSUR_IOMAP_HI_SHIFT];
103 shift = bigsur_iomap_hi_shift[port >> BIGSUR_IOMAP_HI_SHIFT];
104
105 if (addr != 0)
106 addr += (port & BIGSUR_IOMAP_HI_MASK) << shift;
107 } else {
108 /* Handle remapping of low IO ports */
109 addr = bigsur_iomap_lo[port >> BIGSUR_IOMAP_LO_SHIFT];
110 shift = bigsur_iomap_lo_shift[port >> BIGSUR_IOMAP_LO_SHIFT];
111
112 if (addr != 0)
113 addr += (port & BIGSUR_IOMAP_LO_MASK) << shift;
114 }
115
116 pr_debug("%s(0x%08lx) = 0x%08lx\n", __FUNCTION__, port, addr);
117
118 return addr;
119}
120
diff --git a/arch/sh/boards/bigsur/irq.c b/arch/sh/boards/bigsur/irq.c
deleted file mode 100644
index 1ab04da36382..000000000000
--- a/arch/sh/boards/bigsur/irq.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 *
3 * By Dustin McIntire (dustin@sensoria.com) (c)2001
4 *
5 * Setup and IRQ handling code for the HD64465 companion chip.
6 * by Greg Banks <gbanks@pocketpenguins.com>
7 * Copyright (c) 2000 PocketPenguins Inc
8 *
9 * Derived from setup_hd64465.c which bore the message:
10 * Greg Banks <gbanks@pocketpenguins.com>
11 * Copyright (c) 2000 PocketPenguins Inc and
12 * Copyright (C) 2000 YAEGASHI Takeshi
13 * and setup_cqreek.c, which bore the message:
14 * Copyright (C) 2000 Niibe Yutaka
15 *
16 * May be copied or modified under the terms of the GNU General Public
17 * License. See linux/COPYING for more information.
18 *
19 * IRQ functions for a Hitachi Big Sur Evaluation Board.
20 *
21 */
22#undef DEBUG
23
24#include <linux/sched.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/param.h>
28#include <linux/ioport.h>
29#include <linux/interrupt.h>
30#include <linux/init.h>
31#include <linux/irq.h>
32#include <linux/bitops.h>
33
34#include <asm/io.h>
35#include <asm/irq.h>
36
37#include <asm/bigsur/io.h>
38#include <asm/hd64465/hd64465.h>
39#include <asm/bigsur/bigsur.h>
40
41//#define BIGSUR_DEBUG 3
42#undef BIGSUR_DEBUG
43
44#ifdef BIGSUR_DEBUG
45#define DIPRINTK(n, args...) if (BIGSUR_DEBUG>(n)) printk(args)
46#else
47#define DIPRINTK(n, args...)
48#endif /* BIGSUR_DEBUG */
49
50#ifdef CONFIG_HD64465
51extern int hd64465_irq_demux(int irq);
52#endif /* CONFIG_HD64465 */
53
54
55/*===========================================================*/
56// Big Sur CPLD IRQ Routines
57/*===========================================================*/
58
59/* Level 1 IRQ routines */
60static void disable_bigsur_l1irq(unsigned int irq)
61{
62 unsigned char mask;
63 unsigned int mask_port = ((irq - BIGSUR_IRQ_LOW)/8) ? BIGSUR_IRLMR1 : BIGSUR_IRLMR0;
64	unsigned char bit = (1 << ((irq - BIGSUR_IRQ_LOW)%8) );
65
66 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
67 pr_debug("Disable L1 IRQ %d\n", irq);
68 DIPRINTK(2,"disable_bigsur_l1irq: IMR=0x%08x mask=0x%x\n",
69 mask_port, bit);
70
71 /* Disable IRQ - set mask bit */
72 mask = inb(mask_port) | bit;
73 outb(mask, mask_port);
74 return;
75 }
76 pr_debug("disable_bigsur_l1irq: Invalid IRQ %d\n", irq);
77}
78
79static void enable_bigsur_l1irq(unsigned int irq)
80{
81 unsigned char mask;
82 unsigned int mask_port = ((irq - BIGSUR_IRQ_LOW)/8) ? BIGSUR_IRLMR1 : BIGSUR_IRLMR0;
83	unsigned char bit = (1 << ((irq - BIGSUR_IRQ_LOW)%8) );
84
85 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
86 pr_debug("Enable L1 IRQ %d\n", irq);
87 DIPRINTK(2,"enable_bigsur_l1irq: IMR=0x%08x mask=0x%x\n",
88 mask_port, bit);
89 /* Enable L1 IRQ - clear mask bit */
90 mask = inb(mask_port) & ~bit;
91 outb(mask, mask_port);
92 return;
93 }
94 pr_debug("enable_bigsur_l1irq: Invalid IRQ %d\n", irq);
95}
96
97
98/* Level 2 irq masks and registers for L2 decoding */
99/* Level2 bitmasks for each level 1 IRQ */
100const u32 bigsur_l2irq_mask[] =
101 {0x40,0x80,0x08,0x01,0x01,0x3C,0x3E,0xFF,0x40,0x80,0x06,0x03};
102/* Level2 to ISR[n] map for each level 1 IRQ */
103const u32 bigsur_l2irq_reg[] =
104 { 2, 2, 3, 3, 1, 2, 1, 0, 1, 1, 3, 2};
105/* Level2 to Level 1 IRQ map */
106const u32 bigsur_l2_l1_map[] =
107 {7,7,7,7,7,7,7,7, 4,6,6,6,6,6,8,9, 11,11,5,5,5,5,0,1, 3,10,10,2,-1,-1,-1,-1};
108/* IRQ inactive level (high or low) */
109const u32 bigsur_l2_inactv_state[] = {0x00, 0xBE, 0xFC, 0xF7};
110
111/* CPLD external status and mask registers base and offsets */
112static const u32 isr_base = BIGSUR_IRQ0;
113static const u32 isr_offset = BIGSUR_IRQ0 - BIGSUR_IRQ1;
114static const u32 imr_base = BIGSUR_IMR0;
115static const u32 imr_offset = BIGSUR_IMR0 - BIGSUR_IMR1;
116
117#define REG_NUM(irq) ((irq-BIGSUR_2NDLVL_IRQ_LOW)/8 )
118
119/* Level 2 IRQ routines */
120static void disable_bigsur_l2irq(unsigned int irq)
121{
122 unsigned char mask;
123 unsigned char bit = 1 << ((irq-BIGSUR_2NDLVL_IRQ_LOW)%8);
124 unsigned int mask_port = imr_base - REG_NUM(irq)*imr_offset;
125
126 if(irq >= BIGSUR_2NDLVL_IRQ_LOW && irq < BIGSUR_2NDLVL_IRQ_HIGH) {
127 pr_debug("Disable L2 IRQ %d\n", irq);
128 DIPRINTK(2,"disable_bigsur_l2irq: IMR=0x%08x mask=0x%x\n",
129 mask_port, bit);
130
131 /* Disable L2 IRQ - set mask bit */
132 mask = inb(mask_port) | bit;
133 outb(mask, mask_port);
134 return;
135 }
136 pr_debug("disable_bigsur_l2irq: Invalid IRQ %d\n", irq);
137}
138
139static void enable_bigsur_l2irq(unsigned int irq)
140{
141 unsigned char mask;
142 unsigned char bit = 1 << ((irq-BIGSUR_2NDLVL_IRQ_LOW)%8);
143 unsigned int mask_port = imr_base - REG_NUM(irq)*imr_offset;
144
145 if(irq >= BIGSUR_2NDLVL_IRQ_LOW && irq < BIGSUR_2NDLVL_IRQ_HIGH) {
146 pr_debug("Enable L2 IRQ %d\n", irq);
147 DIPRINTK(2,"enable_bigsur_l2irq: IMR=0x%08x mask=0x%x\n",
148 mask_port, bit);
149
150 /* Enable L2 IRQ - clear mask bit */
151 mask = inb(mask_port) & ~bit;
152 outb(mask, mask_port);
153 return;
154 }
155 pr_debug("enable_bigsur_l2irq: Invalid IRQ %d\n", irq);
156}
157
158static void mask_and_ack_bigsur(unsigned int irq)
159{
160 pr_debug("mask_and_ack_bigsur IRQ %d\n", irq);
161 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH)
162 disable_bigsur_l1irq(irq);
163 else
164 disable_bigsur_l2irq(irq);
165}
166
167static void end_bigsur_irq(unsigned int irq)
168{
169 pr_debug("end_bigsur_irq IRQ %d\n", irq);
170 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
171 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH)
172 enable_bigsur_l1irq(irq);
173 else
174 enable_bigsur_l2irq(irq);
175 }
176}
177
178static unsigned int startup_bigsur_irq(unsigned int irq)
179{
180 u8 mask;
181 u32 reg;
182
183 pr_debug("startup_bigsur_irq IRQ %d\n", irq);
184
185 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
186 /* Enable the L1 IRQ */
187 enable_bigsur_l1irq(irq);
188 /* Enable all L2 IRQs in this L1 IRQ */
189 mask = ~(bigsur_l2irq_mask[irq-BIGSUR_IRQ_LOW]);
190 reg = imr_base - bigsur_l2irq_reg[irq-BIGSUR_IRQ_LOW] * imr_offset;
191 mask &= inb(reg);
192 outb(mask,reg);
193 DIPRINTK(2,"startup_bigsur_irq: IMR=0x%08x mask=0x%x\n",reg,inb(reg));
194 }
195 else {
196 /* Enable the L2 IRQ - clear mask bit */
197 enable_bigsur_l2irq(irq);
198 /* Enable the L1 bit masking this L2 IRQ */
199 enable_bigsur_l1irq(bigsur_l2_l1_map[irq-BIGSUR_2NDLVL_IRQ_LOW]);
200 DIPRINTK(2,"startup_bigsur_irq: L1=%d L2=%d\n",
201 bigsur_l2_l1_map[irq-BIGSUR_2NDLVL_IRQ_LOW],irq);
202 }
203 return 0;
204}
205
206static void shutdown_bigsur_irq(unsigned int irq)
207{
208 pr_debug("shutdown_bigsur_irq IRQ %d\n", irq);
209 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH)
210 disable_bigsur_l1irq(irq);
211 else
212 disable_bigsur_l2irq(irq);
213}
214
215/* Define the IRQ structures for the L1 and L2 IRQ types */
216static struct hw_interrupt_type bigsur_l1irq_type = {
217 .typename = "BigSur-CPLD-Level1-IRQ",
218 .startup = startup_bigsur_irq,
219 .shutdown = shutdown_bigsur_irq,
220 .enable = enable_bigsur_l1irq,
221 .disable = disable_bigsur_l1irq,
222 .ack = mask_and_ack_bigsur,
223 .end = end_bigsur_irq
224};
225
226static struct hw_interrupt_type bigsur_l2irq_type = {
227 .typename = "BigSur-CPLD-Level2-IRQ",
228 .startup = startup_bigsur_irq,
229 .shutdown =shutdown_bigsur_irq,
230 .enable = enable_bigsur_l2irq,
231 .disable = disable_bigsur_l2irq,
232 .ack = mask_and_ack_bigsur,
233 .end = end_bigsur_irq
234};
235
236
237static void make_bigsur_l1isr(unsigned int irq) {
238
239 /* sanity check first */
240 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
241 /* save the handler in the main description table */
242 irq_desc[irq].chip = &bigsur_l1irq_type;
243 irq_desc[irq].status = IRQ_DISABLED;
244 irq_desc[irq].action = 0;
245 irq_desc[irq].depth = 1;
246
247 disable_bigsur_l1irq(irq);
248 return;
249 }
250 pr_debug("make_bigsur_l1isr: bad irq, %d\n", irq);
251 return;
252}
253
254static void make_bigsur_l2isr(unsigned int irq) {
255
256 /* sanity check first */
257 if(irq >= BIGSUR_2NDLVL_IRQ_LOW && irq < BIGSUR_2NDLVL_IRQ_HIGH) {
258 /* save the handler in the main description table */
259 irq_desc[irq].chip = &bigsur_l2irq_type;
260 irq_desc[irq].status = IRQ_DISABLED;
261 irq_desc[irq].action = 0;
262 irq_desc[irq].depth = 1;
263
264 disable_bigsur_l2irq(irq);
265 return;
266 }
267 pr_debug("make_bigsur_l2isr: bad irq, %d\n", irq);
268 return;
269}
270
271/* The IRQs will be decoded as follows:
272 * If a level 2 handler exists and there is an unmasked active
273 * IRQ, the 2nd level handler will be called.
274 * If a level 2 handler does not exist for the active IRQ
275 * the 1st level handler will be called.
276 */
277
278int bigsur_irq_demux(int irq)
279{
280 int dmux_irq = irq;
281 u8 mask, actv_irqs;
282 u32 reg_num;
283
284 DIPRINTK(3,"bigsur_irq_demux, irq=%d\n", irq);
285 /* decode the 1st level IRQ */
286 if(irq >= BIGSUR_IRQ_LOW && irq < BIGSUR_IRQ_HIGH) {
287 /* Get corresponding L2 ISR bitmask and ISR number */
288 mask = bigsur_l2irq_mask[irq-BIGSUR_IRQ_LOW];
289 reg_num = bigsur_l2irq_reg[irq-BIGSUR_IRQ_LOW];
290 /* find the active IRQ's (XOR with inactive level)*/
291 actv_irqs = inb(isr_base-reg_num*isr_offset) ^
292 bigsur_l2_inactv_state[reg_num];
293 /* decode active IRQ's */
294 actv_irqs = actv_irqs & mask & ~(inb(imr_base-reg_num*imr_offset));
295 /* if NEZ then we have an active L2 IRQ */
296 if(actv_irqs) dmux_irq = ffz(~actv_irqs) + reg_num*8+BIGSUR_2NDLVL_IRQ_LOW;
297 /* if no 2nd level IRQ action, but has 1st level, use 1st level handler */
298 if(!irq_desc[dmux_irq].action && irq_desc[irq].action)
299 dmux_irq = irq;
300 DIPRINTK(1,"bigsur_irq_demux: irq=%d dmux_irq=%d mask=0x%04x reg=%d\n",
301 irq, dmux_irq, mask, reg_num);
302 }
303#ifdef CONFIG_HD64465
304 dmux_irq = hd64465_irq_demux(dmux_irq);
305#endif /* CONFIG_HD64465 */
306 DIPRINTK(3,"bigsur_irq_demux, demux_irq=%d\n", dmux_irq);
307
308 return dmux_irq;
309}
310
311/*===========================================================*/
312// Big Sur Init Routines
313/*===========================================================*/
314void __init init_bigsur_IRQ(void)
315{
316 int i;
317
318 if (!MACH_BIGSUR) return;
319
320 /* Create ISR's for Big Sur CPLD IRQ's */
321 /*==============================================================*/
322 for(i=BIGSUR_IRQ_LOW;i<BIGSUR_IRQ_HIGH;i++)
323 make_bigsur_l1isr(i);
324
325 printk(KERN_INFO "Big Sur CPLD L1 interrupts %d to %d.\n",
326 BIGSUR_IRQ_LOW,BIGSUR_IRQ_HIGH);
327
328 for(i=BIGSUR_2NDLVL_IRQ_LOW;i<BIGSUR_2NDLVL_IRQ_HIGH;i++)
329 make_bigsur_l2isr(i);
330
331 printk(KERN_INFO "Big Sur CPLD L2 interrupts %d to %d.\n",
332 BIGSUR_2NDLVL_IRQ_LOW,BIGSUR_2NDLVL_IRQ_HIGH);
333
334}
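
The decode policy spelled out in the comment near the end of the deleted irq.c -- an unmasked, active level-2 source wins, otherwise fall back to the level-1 handler -- condenses to a few lines. A sketch with invented helpers (read_l2_status(), read_l2_mask(), l2_base() and first_set() stand in for the register reads and bit arithmetic the real bigsur_irq_demux() performed inline):

	static int demux_sketch(int l1_irq)
	{
		/* hypothetical helpers; compare bigsur_irq_demux() above */
		u8 active = read_l2_status(l1_irq) & ~read_l2_mask(l1_irq);

		if (active)	/* an unmasked L2 source is pending */
			return l2_base(l1_irq) + first_set(active);

		return l1_irq;	/* no L2 action: let the L1 handler run */
	}
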
diff --git a/arch/sh/boards/bigsur/led.c b/arch/sh/boards/bigsur/led.c
deleted file mode 100644
index d221439aafcc..000000000000
--- a/arch/sh/boards/bigsur/led.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * linux/arch/sh/boards/bigsur/led.c
3 *
4 * By Dustin McIntire (dustin@sensoria.com) (c)2001
5 * Derived from led_se.c and led.c, which bore the message:
6 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
7 *
8 * May be copied or modified under the terms of the GNU General Public
9 * License. See linux/COPYING for more information.
10 *
11 * This file contains Big Sur specific LED code.
12 */
13
14#include <asm/io.h>
15#include <asm/bigsur/bigsur.h>
16
17static void mach_led(int position, int value)
18{
19 int word;
20
21 word = bigsur_inl(BIGSUR_CSLR);
22 if (value) {
23 bigsur_outl(word & ~BIGSUR_LED, BIGSUR_CSLR);
24 } else {
25 bigsur_outl(word | BIGSUR_LED, BIGSUR_CSLR);
26 }
27}
28
29#ifdef CONFIG_HEARTBEAT
30
31#include <linux/sched.h>
32
33/* Cycle the LED on/off */
34void heartbeat_bigsur(void)
35{
36 static unsigned cnt = 0, period = 0, dist = 0;
37
38 if (cnt == 0 || cnt == dist)
39 mach_led( -1, 1);
40 else if (cnt == 7 || cnt == dist+7)
41 mach_led( -1, 0);
42
43 if (++cnt > period) {
44 cnt = 0;
45 /* The hyperbolic function below modifies the heartbeat period
46	 * length depending on the current (5 min) load. It goes
47 * through the points f(0)=126, f(1)=86, f(5)=51,
48 * f(inf)->30. */
49 period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
50 dist = period / 4;
51 }
52}
53#endif /* CONFIG_HEARTBEAT */
54
diff --git a/arch/sh/boards/bigsur/setup.c b/arch/sh/boards/bigsur/setup.c
deleted file mode 100644
index 9711c20fc9e4..000000000000
--- a/arch/sh/boards/bigsur/setup.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 *
3 * By Dustin McIntire (dustin@sensoria.com) (c)2001
4 *
5 * Setup and IRQ handling code for the HD64465 companion chip.
6 * by Greg Banks <gbanks@pocketpenguins.com>
7 * Copyright (c) 2000 PocketPenguins Inc
8 *
9 * Derived from setup_hd64465.c which bore the message:
10 * Greg Banks <gbanks@pocketpenguins.com>
11 * Copyright (c) 2000 PocketPenguins Inc and
12 * Copyright (C) 2000 YAEGASHI Takeshi
13 * and setup_cqreek.c, which bore the message:
14 * Copyright (C) 2000 Niibe Yutaka
15 *
16 * May be copied or modified under the terms of the GNU General Public
17 * License. See linux/COPYING for more information.
18 *
19 * Setup functions for a Hitachi Big Sur Evaluation Board.
20 *
21 */
22
23#include <linux/sched.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/param.h>
27#include <linux/ioport.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30#include <linux/irq.h>
31#include <linux/bitops.h>
32
33#include <asm/io.h>
34#include <asm/irq.h>
35#include <asm/machvec.h>
36#include <asm/bigsur/io.h>
37#include <asm/hd64465/hd64465.h>
38#include <asm/bigsur/bigsur.h>
39
40/*===========================================================*/
41// Big Sur Init Routines
42/*===========================================================*/
43
44static void __init bigsur_setup(char **cmdline_p)
45{
46 /* Mask all 2nd level IRQ's */
47 outb(-1,BIGSUR_IMR0);
48 outb(-1,BIGSUR_IMR1);
49 outb(-1,BIGSUR_IMR2);
50 outb(-1,BIGSUR_IMR3);
51
52 /* Mask 1st level interrupts */
53 outb(-1,BIGSUR_IRLMR0);
54 outb(-1,BIGSUR_IRLMR1);
55
56#if defined (CONFIG_HD64465) && defined (CONFIG_SERIAL)
57 /* remap IO ports for first ISA serial port to HD64465 UART */
58 bigsur_port_map(0x3f8, 8, CONFIG_HD64465_IOBASE + 0x8000, 1);
59#endif /* CONFIG_HD64465 && CONFIG_SERIAL */
60 /* TODO: setup IDE registers */
61 bigsur_port_map(BIGSUR_IDECTL_IOPORT, 2, BIGSUR_ICTL, 8);
62 /* Setup the Ethernet port to BIGSUR_ETHER_IOPORT */
63 bigsur_port_map(BIGSUR_ETHER_IOPORT, 16, BIGSUR_ETHR+BIGSUR_ETHER_IOPORT, 0);
64 /* set page to 1 */
65 outw(1, BIGSUR_ETHR+0xe);
66 /* set the IO port to BIGSUR_ETHER_IOPORT */
67 outw(BIGSUR_ETHER_IOPORT<<3, BIGSUR_ETHR+0x2);
68}
69
70/*
71 * The Machine Vector
72 */
73extern void heartbeat_bigsur(void);
74extern void init_bigsur_IRQ(void);
75
76struct sh_machine_vector mv_bigsur __initmv = {
77 .mv_name = "Big Sur",
78 .mv_setup = bigsur_setup,
79
80 .mv_isa_port2addr = bigsur_isa_port2addr,
81 .mv_irq_demux = bigsur_irq_demux,
82
83 .mv_init_irq = init_bigsur_IRQ,
84#ifdef CONFIG_HEARTBEAT
85 .mv_heartbeat = heartbeat_bigsur,
86#endif
87};
88ALIAS_MV(bigsur)
diff --git a/arch/sh/boards/ec3104/Makefile b/arch/sh/boards/ec3104/Makefile
deleted file mode 100644
index 178891534b67..000000000000
--- a/arch/sh/boards/ec3104/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for the EC3104 specific parts of the kernel
3#
4
5obj-y := setup.o io.o irq.o
6
diff --git a/arch/sh/boards/ec3104/io.c b/arch/sh/boards/ec3104/io.c
deleted file mode 100644
index 2f86394b280b..000000000000
--- a/arch/sh/boards/ec3104/io.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * linux/arch/sh/boards/ec3104/io.c
3 * EC3104 companion chip support
4 *
5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
6 *
7 */
8/* EC3104 note:
9 * This code was written without any documentation about the EC3104 chip. While
10 * I hope I got most of the basic functionality right, the register names I use
11 * are most likely completely different from those in the chip documentation.
12 *
13 * If you have any further information about the EC3104, please tell me
14 * (prumpf@tux.org).
15 */
16
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <asm/io.h>
20#include <asm/page.h>
21#include <asm/ec3104/ec3104.h>
22
23/*
24 * EC3104 has a real ISA bus which we redirect low port accesses to (the
25 * actual device on mine is a ESS 1868, and I don't want to hack the driver
26 * more than strictly necessary). I am not going to duplicate the
27 * hard coding of PC addresses (for the 16550s aso) here though; it's just
28 * too ugly.
29 */
30
31#define low_port(port) ((port) < 0x10000)
32
33static inline unsigned long port2addr(unsigned long port)
34{
35 switch(port >> 16) {
36 case 0:
37 return EC3104_ISA_BASE + port * 2;
38
39 /* XXX hack. it's unclear what to do about the serial ports */
40 case 1:
41 return EC3104_BASE + (port&0xffff) * 4;
42
43 default:
44 /* XXX PCMCIA */
45 return 0;
46 }
47}
48
49unsigned char ec3104_inb(unsigned long port)
50{
51 u8 ret;
52
53 ret = *(volatile u8 *)port2addr(port);
54
55 return ret;
56}
57
58unsigned short ec3104_inw(unsigned long port)
59{
60 BUG();
61}
62
63unsigned long ec3104_inl(unsigned long port)
64{
65 BUG();
66}
67
68void ec3104_outb(unsigned char data, unsigned long port)
69{
70 *(volatile u8 *)port2addr(port) = data;
71}
72
73void ec3104_outw(unsigned short data, unsigned long port)
74{
75 BUG();
76}
77
78void ec3104_outl(unsigned long data, unsigned long port)
79{
80 BUG();
81}
diff --git a/arch/sh/boards/ec3104/irq.c b/arch/sh/boards/ec3104/irq.c
deleted file mode 100644
index ffa4ff1f090f..000000000000
--- a/arch/sh/boards/ec3104/irq.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * linux/arch/sh/boards/ec3104/irq.c
3 * EC3104 companion chip support
4 *
5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
6 *
7 */
8
9#include <asm/io.h>
10#include <asm/irq.h>
11#include <asm/ec3104/ec3104.h>
12
13/* This is for debugging mostly; here's the table that I intend to keep
14 * in here:
15 *
16 * index function base addr power interrupt bit
17 * 0 power b0ec0000 --- 00000001 (unused)
18 * 1 irqs b0ec1000 --- 00000002 (unused)
19 * 2 ?? b0ec2000 b0ec0008 00000004
20 * 3 PS2 (1) b0ec3000 b0ec000c 00000008
21 * 4 PS2 (2) b0ec4000 b0ec0010 00000010
22 * 5 ?? b0ec5000 b0ec0014 00000020
23 * 6 I2C b0ec6000 b0ec0018 00000040
24 * 7 serial (1) b0ec7000 b0ec001c 00000080
25 * 8 serial (2) b0ec8000 b0ec0020 00000100
26 * 9 serial (3) b0ec9000 b0ec0024 00000200
27 * 10 serial (4) b0eca000 b0ec0028 00000400
28 * 12 GPIO (1) b0ecc000 b0ec0030
29 * 13 GPIO (2) b0ecc000 b0ec0030
30 * 16 pcmcia (1) b0ed0000 b0ec0040 00010000
31 * 17 pcmcia (2) b0ed1000 b0ec0044 00020000
32 */
33
34/* I used the register names from another interrupt controller I worked with,
35 * since it seems to be identical to the ec3104 except that all bits are
36 * inverted:
37 *
38 * IRR: Interrupt Request Register (pending and enabled interrupts)
39 * IMR: Interrupt Mask Register (which interrupts are enabled)
40 * IPR: Interrupt Pending Register (pending interrupts, even disabled ones)
41 *
42 * 0 bits mean pending or enabled, 1 bits mean not pending or disabled. all
43 * IRQs seem to be level-triggered.
44 */
45
46#define EC3104_IRR (EC3104_BASE + 0x1000)
47#define EC3104_IMR (EC3104_BASE + 0x1004)
48#define EC3104_IPR (EC3104_BASE + 0x1008)
49
50#define ctrl_readl(addr) (*(volatile u32 *)(addr))
51#define ctrl_writel(data,addr) (*(volatile u32 *)(addr) = (data))
52#define ctrl_readb(addr) (*(volatile u8 *)(addr))
53
54static char *ec3104_name(unsigned index)
55{
56 switch(index) {
57 case 0:
58 return "power management";
59 case 1:
60 return "interrupts";
61 case 3:
62 return "PS2 (1)";
63 case 4:
64 return "PS2 (2)";
65 case 5:
66 return "I2C (1)";
67 case 6:
68 return "I2C (2)";
69 case 7:
70 return "serial (1)";
71 case 8:
72 return "serial (2)";
73 case 9:
74 return "serial (3)";
75 case 10:
76 return "serial (4)";
77 case 16:
78 return "pcmcia (1)";
79 case 17:
80 return "pcmcia (2)";
81 default: {
82 static char buf[32];
83
84 sprintf(buf, "unknown (%d)", index);
85
86 return buf;
87 }
88 }
89}
90
91int get_pending_interrupts(char *buf)
92{
93 u32 ipr;
94 u32 bit;
95 char *p = buf;
96
97 p += sprintf(p, "pending: (");
98
99 ipr = ctrl_inl(EC3104_IPR);
100
101 for (bit = 1; bit < 32; bit++)
102 if (!(ipr & (1<<bit)))
103 p += sprintf(p, "%s ", ec3104_name(bit));
104
105 p += sprintf(p, ")\n");
106
107 return p - buf;
108}
109
110static inline u32 ec3104_irq2mask(unsigned int irq)
111{
112 return (1 << (irq - EC3104_IRQBASE));
113}
114
115static inline void mask_ec3104_irq(unsigned int irq)
116{
117 u32 mask;
118
119 mask = ctrl_readl(EC3104_IMR);
120
121 mask |= ec3104_irq2mask(irq);
122
123 ctrl_writel(mask, EC3104_IMR);
124}
125
126static inline void unmask_ec3104_irq(unsigned int irq)
127{
128 u32 mask;
129
130 mask = ctrl_readl(EC3104_IMR);
131
132 mask &= ~ec3104_irq2mask(irq);
133
134 ctrl_writel(mask, EC3104_IMR);
135}
136
137static void disable_ec3104_irq(unsigned int irq)
138{
139 mask_ec3104_irq(irq);
140}
141
142static void enable_ec3104_irq(unsigned int irq)
143{
144 unmask_ec3104_irq(irq);
145}
146
147static void mask_and_ack_ec3104_irq(unsigned int irq)
148{
149 mask_ec3104_irq(irq);
150}
151
152static void end_ec3104_irq(unsigned int irq)
153{
154 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
155 unmask_ec3104_irq(irq);
156}
157
158static unsigned int startup_ec3104_irq(unsigned int irq)
159{
160 unmask_ec3104_irq(irq);
161
162 return 0;
163}
164
165static void shutdown_ec3104_irq(unsigned int irq)
166{
167 mask_ec3104_irq(irq);
168
169}
170
171static struct hw_interrupt_type ec3104_int = {
172 .typename = "EC3104",
173 .enable = enable_ec3104_irq,
174 .disable = disable_ec3104_irq,
175 .ack = mask_and_ack_ec3104_irq,
176 .end = end_ec3104_irq,
177 .startup = startup_ec3104_irq,
178 .shutdown = shutdown_ec3104_irq,
179};
180
181/* Yuck. The _demux API is ugly. */
182int ec3104_irq_demux(int irq)
183{
184 if (irq == EC3104_IRQ) {
185 unsigned int mask;
186
187 mask = ctrl_readl(EC3104_IRR);
188
189 if (mask == 0xffffffff)
190 return EC3104_IRQ;
191 else
192 return EC3104_IRQBASE + ffz(mask);
193 }
194
195 return irq;
196}
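
Because the EC3104 registers use inverted logic (a 0 bit means pending or enabled, per the deleted comment block), an all-ones IRR means nothing is pending, and the demux uses ffz() -- find first zero -- rather than ffs() to locate the active source. The annotated core of the deleted ec3104_irq_demux():

	u32 irr = ctrl_readl(EC3104_IRR);

	if (irr == 0xffffffff)			/* every bit set: idle */
		return EC3104_IRQ;		/* keep the parent IRQ */
	return EC3104_IRQBASE + ffz(irr);	/* first zero = active bit */
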
diff --git a/arch/sh/boards/ec3104/setup.c b/arch/sh/boards/ec3104/setup.c
deleted file mode 100644
index 902bc975a13e..000000000000
--- a/arch/sh/boards/ec3104/setup.c
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * linux/arch/sh/boards/ec3104/setup.c
3 * EC3104 companion chip support
4 *
5 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
6 *
7 */
8/* EC3104 note:
9 * This code was written without any documentation about the EC3104 chip. While
10 * I hope I got most of the basic functionality right, the register names I use
11 * are most likely completely different from those in the chip documentation.
12 *
13 * If you have any further information about the EC3104, please tell me
14 * (prumpf@tux.org).
15 */
16
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/param.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/types.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/machvec.h>
27#include <asm/mach/ec3104.h>
28
29static void __init ec3104_setup(char **cmdline_p)
30{
31 char str[8];
32 int i;
33
34 for (i=0; i<8; i++)
35 str[i] = ctrl_readb(EC3104_BASE + i);
36
37 for (i = EC3104_IRQBASE; i < EC3104_IRQBASE + 32; i++)
38 irq_desc[i].handler = &ec3104_int;
39
40 printk("initializing EC3104 \"%.8s\" at %08x, IRQ %d, IRQ base %d\n",
41 str, EC3104_BASE, EC3104_IRQ, EC3104_IRQBASE);
42
43 /* mask all interrupts. this should have been done by the boot
44 * loader for us but we want to be sure ... */
45 ctrl_writel(0xffffffff, EC3104_IMR);
46}
47
48/*
49 * The Machine Vector
50 */
51struct sh_machine_vector mv_ec3104 __initmv = {
52 .mv_name = "EC3104",
53 .mv_setup = ec3104_setup,
54 .mv_nr_irqs = 96,
55
56 .mv_inb = ec3104_inb,
57 .mv_inw = ec3104_inw,
58 .mv_inl = ec3104_inl,
59 .mv_outb = ec3104_outb,
60 .mv_outw = ec3104_outw,
61 .mv_outl = ec3104_outl,
62
63 .mv_irq_demux = ec3104_irq_demux,
64};
65ALIAS_MV(ec3104)
diff --git a/arch/sh/boards/mpc1211/Makefile b/arch/sh/boards/mpc1211/Makefile
index 1644ebed78cb..8cd31b5d200b 100644
--- a/arch/sh/boards/mpc1211/Makefile
+++ b/arch/sh/boards/mpc1211/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the Interface (CTP/PCI/MPC-SH02) specific parts of the kernel 2# Makefile for the Interface (CTP/PCI/MPC-SH02) specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o rtc.o led.o 5obj-y := setup.o rtc.o
6 6
7obj-$(CONFIG_PCI) += pci.o 7obj-$(CONFIG_PCI) += pci.o
8 8
diff --git a/arch/sh/boards/mpc1211/led.c b/arch/sh/boards/mpc1211/led.c
deleted file mode 100644
index 8df1591823d6..000000000000
--- a/arch/sh/boards/mpc1211/led.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * linux/arch/sh/boards/mpc1211/led.c
3 *
4 * Copyright (C) 2001 Saito.K & Jeanne
5 *
6 * This file contains Interface MPC-1211 specific LED code.
7 */
8
9
10static void mach_led(int position, int value)
11{
12 volatile unsigned char* p = (volatile unsigned char*)0xa2000000;
13
14 if (value) {
15 *p |= 1;
16 } else {
17 *p &= ~1;
18 }
19}
20
21#ifdef CONFIG_HEARTBEAT
22
23#include <linux/sched.h>
24
25/* Cycle the LEDs in the classic Knightrider/Sun pattern */
26void heartbeat_mpc1211(void)
27{
28 static unsigned int cnt = 0, period = 0;
29 volatile unsigned char* p = (volatile unsigned char*)0xa2000000;
30 static unsigned bit = 0, up = 1;
31
32 cnt += 1;
33 if (cnt < period) {
34 return;
35 }
36
37 cnt = 0;
38
39 /* Go through the points (roughly!):
40	 * f(0)=10, f(1)=16, f(2)=20, f(5)=35, f(inf)->110
41 */
42 period = 110 - ( (300<<FSHIFT)/
43 ((avenrun[0]/5) + (3<<FSHIFT)) );
44
45 if (up) {
46 if (bit == 7) {
47 bit--;
48 up=0;
49 } else {
50 bit ++;
51 }
52 } else {
53 if (bit == 0) {
54 bit++;
55 up=1;
56 } else {
57 bit--;
58 }
59 }
60 *p = 1<<bit;
61
62}
63#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/mpc1211/setup.c b/arch/sh/boards/mpc1211/setup.c
index 7c3d1d304157..1a0604b23ce0 100644
--- a/arch/sh/boards/mpc1211/setup.c
+++ b/arch/sh/boards/mpc1211/setup.c
@@ -10,6 +10,7 @@
10#include <linux/hdreg.h> 10#include <linux/hdreg.h>
11#include <linux/ide.h> 11#include <linux/ide.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/platform_device.h>
13#include <asm/io.h> 14#include <asm/io.h>
14#include <asm/machvec.h> 15#include <asm/machvec.h>
15#include <asm/mpc1211/mpc1211.h> 16#include <asm/mpc1211/mpc1211.h>
@@ -281,6 +282,32 @@ static int put_smb_blk(unsigned char *p, int address, int command, int no)
281 return 0; 282 return 0;
282} 283}
283 284
285static struct resource heartbeat_resources[] = {
286 [0] = {
287 .start = 0xa2000000,
288 .end = 0xa2000000 + 8 - 1,
289 .flags = IORESOURCE_MEM,
290 },
291};
292
293static struct platform_device heartbeat_device = {
294 .name = "heartbeat",
295 .id = -1,
296 .num_resources = ARRAY_SIZE(heartbeat_resources),
297 .resource = heartbeat_resources,
298};
299
300static struct platform_device *mpc1211_devices[] __initdata = {
301 &heartbeat_device,
302};
303
304static int __init mpc1211_devices_setup(void)
305{
306 return platform_add_devices(mpc1211_devices,
307 ARRAY_SIZE(mpc1211_devices));
308}
309__initcall(mpc1211_devices_setup);
310
284/* arch/sh/boards/mpc1211/rtc.c */ 311/* arch/sh/boards/mpc1211/rtc.c */
285void mpc1211_time_init(void); 312void mpc1211_time_init(void);
286 313
@@ -317,9 +344,5 @@ struct sh_machine_vector mv_mpc1211 __initmv = {
317 .mv_nr_irqs = 48, 344 .mv_nr_irqs = 48,
318 .mv_irq_demux = mpc1211_irq_demux, 345 .mv_irq_demux = mpc1211_irq_demux,
319 .mv_init_irq = init_mpc1211_IRQ, 346 .mv_init_irq = init_mpc1211_IRQ,
320
321#ifdef CONFIG_HEARTBEAT
322 .mv_heartbeat = heartbeat_mpc1211,
323#endif
324}; 347};
325ALIAS_MV(mpc1211) 348ALIAS_MV(mpc1211)
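
This is the same refactor applied to the Highlander board further down: the board-specific mv_heartbeat hook goes away, and the LED block is instead described as a generic "heartbeat" platform device keyed by a memory resource, leaving the blink logic to a common driver. A hypothetical sketch of the driver side (only platform_get_resource() is assumed to exist; the body is elided):

	static int heartbeat_probe(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		/* res->start is the LED register (0xa2000000 on MPC-1211);
		 * a common driver would ioremap() it and toggle bits from
		 * a timer -- elided here */
		return 0;
	}
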
diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
index 574b0316ed56..3c93012e91a3 100644
--- a/arch/sh/boards/renesas/r7780rp/Makefile
+++ b/arch/sh/boards/renesas/r7780rp/Makefile
@@ -4,5 +4,4 @@
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6 6
7obj-$(CONFIG_HEARTBEAT) += led.o
8obj-$(CONFIG_PUSH_SWITCH) += psw.o 7obj-$(CONFIG_PUSH_SWITCH) += psw.o
diff --git a/arch/sh/boards/renesas/r7780rp/io.c b/arch/sh/boards/renesas/r7780rp/io.c
index 311ccccba718..f74d2ffb3851 100644
--- a/arch/sh/boards/renesas/r7780rp/io.c
+++ b/arch/sh/boards/renesas/r7780rp/io.c
@@ -11,22 +11,9 @@
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/io.h>
14#include <asm/r7780rp.h> 15#include <asm/r7780rp.h>
15#include <asm/addrspace.h> 16#include <asm/addrspace.h>
16#include <asm/io.h>
17
18static inline unsigned long port2adr(unsigned int port)
19{
20 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
21 if (port == 0x3f6)
22 return (PA_AREA5_IO + 0x80c);
23 else
24 return (PA_AREA5_IO + 0x1000 + ((port-0x1f0) << 1));
25 else
26 maybebadio((unsigned long)port);
27
28 return port;
29}
30 17
31static inline unsigned long port88796l(unsigned int port, int flag) 18static inline unsigned long port88796l(unsigned int port, int flag)
32{ 19{
@@ -40,18 +27,6 @@ static inline unsigned long port88796l(unsigned int port, int flag)
40 return addr; 27 return addr;
41} 28}
42 29
43/* The 7780 R7780RP-1 seems to have everything hooked */
44/* up pretty normally (nothing on high-bytes only...) so this */
45/* shouldn't be needed */
46static inline int shifted_port(unsigned long port)
47{
48 /* For IDE registers, value is not shifted */
49 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
50 return 0;
51 else
52 return 1;
53}
54
55#if defined(CONFIG_NE2000) || defined(CONFIG_NE2000_MODULE) 30#if defined(CONFIG_NE2000) || defined(CONFIG_NE2000_MODULE)
56#define CHECK_AX88796L_PORT(port) \ 31#define CHECK_AX88796L_PORT(port) \
57 ((port >= AX88796L_IO_BASE) && (port < (AX88796L_IO_BASE+0x20))) 32 ((port >= AX88796L_IO_BASE) && (port < (AX88796L_IO_BASE+0x20)))
@@ -70,12 +45,10 @@ u8 r7780rp_inb(unsigned long port)
70{ 45{
71 if (CHECK_AX88796L_PORT(port)) 46 if (CHECK_AX88796L_PORT(port))
72 return ctrl_inw(port88796l(port, 0)) & 0xff; 47 return ctrl_inw(port88796l(port, 0)) & 0xff;
73 else if (PXSEG(port)) 48 else if (is_pci_ioaddr(port))
74 return ctrl_inb(port);
75 else if (is_pci_ioaddr(port) || shifted_port(port))
76 return ctrl_inb(pci_ioaddr(port)); 49 return ctrl_inb(pci_ioaddr(port));
77 50
78 return ctrl_inw(port2adr(port)) & 0xff; 51 return ctrl_inw(port) & 0xff;
79} 52}
80 53
81u8 r7780rp_inb_p(unsigned long port) 54u8 r7780rp_inb_p(unsigned long port)
@@ -84,12 +57,10 @@ u8 r7780rp_inb_p(unsigned long port)
84 57
85 if (CHECK_AX88796L_PORT(port)) 58 if (CHECK_AX88796L_PORT(port))
86 v = ctrl_inw(port88796l(port, 0)) & 0xff; 59 v = ctrl_inw(port88796l(port, 0)) & 0xff;
87 else if (PXSEG(port)) 60 else if (is_pci_ioaddr(port))
88 v = ctrl_inb(port);
89 else if (is_pci_ioaddr(port) || shifted_port(port))
90 v = ctrl_inb(pci_ioaddr(port)); 61 v = ctrl_inb(pci_ioaddr(port));
91 else 62 else
92 v = ctrl_inw(port2adr(port)) & 0xff; 63 v = ctrl_inw(port) & 0xff;
93 64
94 ctrl_delay(); 65 ctrl_delay();
95 66
@@ -98,80 +69,56 @@ u8 r7780rp_inb_p(unsigned long port)
98 69
99u16 r7780rp_inw(unsigned long port) 70u16 r7780rp_inw(unsigned long port)
100{ 71{
101 if (CHECK_AX88796L_PORT(port)) 72 if (is_pci_ioaddr(port))
102 maybebadio(port);
103 else if (PXSEG(port))
104 return ctrl_inw(port);
105 else if (is_pci_ioaddr(port) || shifted_port(port))
106 return ctrl_inw(pci_ioaddr(port)); 73 return ctrl_inw(pci_ioaddr(port));
107 else
108 maybebadio(port);
109 74
110 return 0; 75 return ctrl_inw(port);
111} 76}
112 77
113u32 r7780rp_inl(unsigned long port) 78u32 r7780rp_inl(unsigned long port)
114{ 79{
115 if (CHECK_AX88796L_PORT(port)) 80 if (is_pci_ioaddr(port))
116 maybebadio(port);
117 else if (PXSEG(port))
118 return ctrl_inl(port);
119 else if (is_pci_ioaddr(port) || shifted_port(port))
120 return ctrl_inl(pci_ioaddr(port)); 81 return ctrl_inl(pci_ioaddr(port));
121 else
122 maybebadio(port);
123 82
124 return 0; 83 return ctrl_inl(port);
125} 84}
126 85
127void r7780rp_outb(u8 value, unsigned long port) 86void r7780rp_outb(u8 value, unsigned long port)
128{ 87{
129 if (CHECK_AX88796L_PORT(port)) 88 if (CHECK_AX88796L_PORT(port))
130 ctrl_outw(value, port88796l(port, 0)); 89 ctrl_outw(value, port88796l(port, 0));
131 else if (PXSEG(port)) 90 else if (is_pci_ioaddr(port))
132 ctrl_outb(value, port);
133 else if (is_pci_ioaddr(port) || shifted_port(port))
134 ctrl_outb(value, pci_ioaddr(port)); 91 ctrl_outb(value, pci_ioaddr(port));
135 else 92 else
136 ctrl_outw(value, port2adr(port)); 93 ctrl_outb(value, port);
137} 94}
138 95
139void r7780rp_outb_p(u8 value, unsigned long port) 96void r7780rp_outb_p(u8 value, unsigned long port)
140{ 97{
141 if (CHECK_AX88796L_PORT(port)) 98 if (CHECK_AX88796L_PORT(port))
142 ctrl_outw(value, port88796l(port, 0)); 99 ctrl_outw(value, port88796l(port, 0));
143 else if (PXSEG(port)) 100 else if (is_pci_ioaddr(port))
144 ctrl_outb(value, port);
145 else if (is_pci_ioaddr(port) || shifted_port(port))
146 ctrl_outb(value, pci_ioaddr(port)); 101 ctrl_outb(value, pci_ioaddr(port));
147 else 102 else
148 ctrl_outw(value, port2adr(port)); 103 ctrl_outb(value, port);
149 104
150 ctrl_delay(); 105 ctrl_delay();
151} 106}
152 107
153void r7780rp_outw(u16 value, unsigned long port) 108void r7780rp_outw(u16 value, unsigned long port)
154{ 109{
155 if (CHECK_AX88796L_PORT(port)) 110 if (is_pci_ioaddr(port))
156 maybebadio(port);
157 else if (PXSEG(port))
158 ctrl_outw(value, port);
159 else if (is_pci_ioaddr(port) || shifted_port(port))
160 ctrl_outw(value, pci_ioaddr(port)); 111 ctrl_outw(value, pci_ioaddr(port));
161 else 112 else
162 maybebadio(port); 113 ctrl_outw(value, port);
163} 114}
164 115
165void r7780rp_outl(u32 value, unsigned long port) 116void r7780rp_outl(u32 value, unsigned long port)
166{ 117{
167 if (CHECK_AX88796L_PORT(port)) 118 if (is_pci_ioaddr(port))
168 maybebadio(port);
169 else if (PXSEG(port))
170 ctrl_outl(value, port);
171 else if (is_pci_ioaddr(port) || shifted_port(port))
172 ctrl_outl(value, pci_ioaddr(port)); 119 ctrl_outl(value, pci_ioaddr(port));
173 else 120 else
174 maybebadio(port); 121 ctrl_outl(value, port);
175} 122}
176 123
177void r7780rp_insb(unsigned long port, void *dst, unsigned long count) 124void r7780rp_insb(unsigned long port, void *dst, unsigned long count)
@@ -183,16 +130,13 @@ void r7780rp_insb(unsigned long port, void *dst, unsigned long count)
183 p = (volatile u16 *)port88796l(port, 0); 130 p = (volatile u16 *)port88796l(port, 0);
184 while (count--) 131 while (count--)
185 *buf++ = *p & 0xff; 132 *buf++ = *p & 0xff;
186 } else if (PXSEG(port)) { 133 } else if (is_pci_ioaddr(port)) {
187 while (count--)
188 *buf++ = *(volatile u8 *)port;
189 } else if (is_pci_ioaddr(port) || shifted_port(port)) {
190 volatile u8 *bp = (volatile u8 *)pci_ioaddr(port); 134 volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
191 135
192 while (count--) 136 while (count--)
193 *buf++ = *bp; 137 *buf++ = *bp;
194 } else { 138 } else {
195 p = (volatile u16 *)port2adr(port); 139 p = (volatile u16 *)port;
196 while (count--) 140 while (count--)
197 *buf++ = *p & 0xff; 141 *buf++ = *p & 0xff;
198 } 142 }
@@ -205,30 +149,26 @@ void r7780rp_insw(unsigned long port, void *dst, unsigned long count)
205 149
206 if (CHECK_AX88796L_PORT(port)) 150 if (CHECK_AX88796L_PORT(port))
207 p = (volatile u16 *)port88796l(port, 1); 151 p = (volatile u16 *)port88796l(port, 1);
208 else if (PXSEG(port)) 152 else if (is_pci_ioaddr(port))
209 p = (volatile u16 *)port;
210 else if (is_pci_ioaddr(port) || shifted_port(port))
211 p = (volatile u16 *)pci_ioaddr(port); 153 p = (volatile u16 *)pci_ioaddr(port);
212 else 154 else
213 p = (volatile u16 *)port2adr(port); 155 p = (volatile u16 *)port;
214 156
215 while (count--) 157 while (count--)
216 *buf++ = *p; 158 *buf++ = *p;
159
160 flush_dcache_all();
217} 161}
218 162
219void r7780rp_insl(unsigned long port, void *dst, unsigned long count) 163void r7780rp_insl(unsigned long port, void *dst, unsigned long count)
220{ 164{
221 u32 *buf = dst; 165 if (is_pci_ioaddr(port)) {
222
223 if (CHECK_AX88796L_PORT(port))
224 maybebadio(port);
225 else if (is_pci_ioaddr(port) || shifted_port(port)) {
226 volatile u32 *p = (volatile u32 *)pci_ioaddr(port); 166 volatile u32 *p = (volatile u32 *)pci_ioaddr(port);
167 u32 *buf = dst;
227 168
228 while (count--) 169 while (count--)
229 *buf++ = *p; 170 *buf++ = *p;
230 } else 171 }
231 maybebadio(port);
232} 172}
233 173
234void r7780rp_outsb(unsigned long port, const void *src, unsigned long count) 174void r7780rp_outsb(unsigned long port, const void *src, unsigned long count)
@@ -240,19 +180,14 @@ void r7780rp_outsb(unsigned long port, const void *src, unsigned long count)
240 p = (volatile u16 *)port88796l(port, 0); 180 p = (volatile u16 *)port88796l(port, 0);
241 while (count--) 181 while (count--)
242 *p = *buf++; 182 *p = *buf++;
243 } else if (PXSEG(port)) 183 } else if (is_pci_ioaddr(port)) {
244 while (count--)
245 ctrl_outb(*buf++, port);
246 else if (is_pci_ioaddr(port) || shifted_port(port)) {
247 volatile u8 *bp = (volatile u8 *)pci_ioaddr(port); 184 volatile u8 *bp = (volatile u8 *)pci_ioaddr(port);
248 185
249 while (count--) 186 while (count--)
250 *bp = *buf++; 187 *bp = *buf++;
251 } else { 188 } else
252 p = (volatile u16 *)port2adr(port);
253 while (count--) 189 while (count--)
254 *p = *buf++; 190 ctrl_outb(*buf++, port);
255 }
256} 191}
257 192
258void r7780rp_outsw(unsigned long port, const void *src, unsigned long count) 193void r7780rp_outsw(unsigned long port, const void *src, unsigned long count)
@@ -262,40 +197,37 @@ void r7780rp_outsw(unsigned long port, const void *src, unsigned long count)
262 197
263 if (CHECK_AX88796L_PORT(port)) 198 if (CHECK_AX88796L_PORT(port))
264 p = (volatile u16 *)port88796l(port, 1); 199 p = (volatile u16 *)port88796l(port, 1);
265 else if (PXSEG(port)) 200 else if (is_pci_ioaddr(port))
266 p = (volatile u16 *)port;
267 else if (is_pci_ioaddr(port) || shifted_port(port))
268 p = (volatile u16 *)pci_ioaddr(port); 201 p = (volatile u16 *)pci_ioaddr(port);
269 else 202 else
270 p = (volatile u16 *)port2adr(port); 203 p = (volatile u16 *)port;
271 204
272 while (count--) 205 while (count--)
273 *p = *buf++; 206 *p = *buf++;
207
208 flush_dcache_all();
274} 209}
275 210
276void r7780rp_outsl(unsigned long port, const void *src, unsigned long count) 211void r7780rp_outsl(unsigned long port, const void *src, unsigned long count)
277{ 212{
278 const u32 *buf = src; 213 const u32 *buf = src;
214 u32 *p;
279 215
280 if (CHECK_AX88796L_PORT(port)) 216 if (is_pci_ioaddr(port))
281 maybebadio(port); 217 p = (u32 *)pci_ioaddr(port);
282 else if (is_pci_ioaddr(port) || shifted_port(port)) { 218 else
283 volatile u32 *p = (volatile u32 *)pci_ioaddr(port); 219 p = (u32 *)port;
284 220
285 while (count--) 221 while (count--)
286 *p = *buf++; 222 ctrl_outl(*buf++, (unsigned long)p);
287 } else
288 maybebadio(port);
289} 223}
290 224
291void __iomem *r7780rp_ioport_map(unsigned long port, unsigned int size) 225void __iomem *r7780rp_ioport_map(unsigned long port, unsigned int size)
292{ 226{
293 if (CHECK_AX88796L_PORT(port)) 227 if (CHECK_AX88796L_PORT(port))
294 return (void __iomem *)port88796l(port, size > 1); 228 return (void __iomem *)port88796l(port, size > 1);
295 else if (PXSEG(port)) 229 else if (is_pci_ioaddr(port))
296 return (void __iomem *)port;
297 else if (is_pci_ioaddr(port) || shifted_port(port))
298 return (void __iomem *)pci_ioaddr(port); 230 return (void __iomem *)pci_ioaddr(port);
299 231
300 return (void __iomem *)port2adr(port); 232 return (void __iomem *)port;
301} 233}
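
The io.c rewrite above collapses the old four-way dispatch (AX88796L window, PXSEG, PCI, port2adr() translation) to three cases, on the reasoning recorded in the removed comment that the R7780RP-1 has everything hooked up normally: a port is either the AX88796L Ethernet window, a PCI I/O address, or already a usable memory-mapped address. The surviving shape, as in r7780rp_inb() above:

	u8 r7780rp_inb(unsigned long port)
	{
		if (CHECK_AX88796L_PORT(port))		/* NE2000-clone window */
			return ctrl_inw(port88796l(port, 0)) & 0xff;
		else if (is_pci_ioaddr(port))		/* PCI I/O space */
			return ctrl_inb(pci_ioaddr(port));

		return ctrl_inw(port) & 0xff;		/* direct MMIO */
	}
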
diff --git a/arch/sh/boards/renesas/r7780rp/led.c b/arch/sh/boards/renesas/r7780rp/led.c
deleted file mode 100644
index 6a00a257afd2..000000000000
--- a/arch/sh/boards/renesas/r7780rp/led.c
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (C) Atom Create Engineering Co., Ltd.
3 *
4 * May be copied or modified under the terms of GNU General Public
5 * License. See linux/COPYING for more information.
6 *
7 * This file contains Renesas Solutions HIGHLANDER R7780RP-1 specific LED code.
8 */
9#include <linux/sched.h>
10#include <asm/io.h>
11#include <asm/r7780rp/r7780rp.h>
12
13/* Cycle the LEDs in the classic Knightrider/Sun pattern */
14void heartbeat_r7780rp(void)
15{
16 static unsigned int cnt = 0, period = 0;
17 volatile unsigned short *p = (volatile unsigned short *)PA_OBLED;
18 static unsigned bit = 0, up = 1;
19 unsigned bit_pos[] = {2, 1, 0, 3, 6, 5, 4, 7};
20
21 cnt += 1;
22 if (cnt < period)
23 return;
24
25 cnt = 0;
26
27 /* Go through the points (roughly!):
28	 * f(0)=10, f(1)=16, f(2)=20, f(5)=35, f(inf)->110
29 */
30 period = 110 - ((300 << FSHIFT)/((avenrun[0]/5) + (3<<FSHIFT)));
31
32 *p = 1 << bit_pos[bit];
33 if (up)
34 if (bit == 7) {
35 bit--;
36 up = 0;
37 } else
38 bit++;
39 else if (bit == 0)
40 up = 1;
41 else
42 bit--;
43}
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index 9f89c8de9db9..0d74db9f1792 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -2,7 +2,7 @@
2 * arch/sh/boards/renesas/r7780rp/setup.c 2 * arch/sh/boards/renesas/r7780rp/setup.c
3 * 3 *
4 * Copyright (C) 2002 Atom Create Engineering Co., Ltd. 4 * Copyright (C) 2002 Atom Create Engineering Co., Ltd.
5 * Copyright (C) 2005, 2006 Paul Mundt 5 * Copyright (C) 2005 - 2007 Paul Mundt
6 * 6 *
7 * Renesas Solutions Highlander R7780RP-1 Support. 7 * Renesas Solutions Highlander R7780RP-1 Support.
8 * 8 *
@@ -12,12 +12,12 @@
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/pata_platform.h>
15#include <asm/machvec.h> 16#include <asm/machvec.h>
16#include <asm/r7780rp.h> 17#include <asm/r7780rp.h>
17#include <asm/clock.h> 18#include <asm/clock.h>
18#include <asm/io.h> 19#include <asm/io.h>
19 20
20extern void heartbeat_r7780rp(void);
21extern void init_r7780rp_IRQ(void); 21extern void init_r7780rp_IRQ(void);
22 22
23static struct resource m66596_usb_host_resources[] = { 23static struct resource m66596_usb_host_resources[] = {
@@ -46,14 +46,14 @@ static struct platform_device m66596_usb_host_device = {
46 46
47static struct resource cf_ide_resources[] = { 47static struct resource cf_ide_resources[] = {
48 [0] = { 48 [0] = {
49 .start = 0x1f0, 49 .start = PA_AREA5_IO + 0x1000,
50 .end = 0x1f0 + 8, 50 .end = PA_AREA5_IO + 0x1000 + 0x08 - 1,
51 .flags = IORESOURCE_IO, 51 .flags = IORESOURCE_MEM,
52 }, 52 },
53 [1] = { 53 [1] = {
54 .start = 0x1f0 + 0x206, 54 .start = PA_AREA5_IO + 0x80c,
55 .end = 0x1f0 + 8 + 0x206 + 8, 55 .end = PA_AREA5_IO + 0x80c + 0x16 - 1,
56 .flags = IORESOURCE_IO, 56 .flags = IORESOURCE_MEM,
57 }, 57 },
58 [2] = { 58 [2] = {
59#ifdef CONFIG_SH_R7780MP 59#ifdef CONFIG_SH_R7780MP
@@ -65,16 +65,44 @@ static struct resource cf_ide_resources[] = {
65 }, 65 },
66}; 66};
67 67
68static struct pata_platform_info pata_info = {
69 .ioport_shift = 1,
70};
71
68static struct platform_device cf_ide_device = { 72static struct platform_device cf_ide_device = {
69 .name = "pata_platform", 73 .name = "pata_platform",
70 .id = -1, 74 .id = -1,
71 .num_resources = ARRAY_SIZE(cf_ide_resources), 75 .num_resources = ARRAY_SIZE(cf_ide_resources),
72 .resource = cf_ide_resources, 76 .resource = cf_ide_resources,
77 .dev = {
78 .platform_data = &pata_info,
79 },
80};
81
82static unsigned char heartbeat_bit_pos[] = { 2, 1, 0, 3, 6, 5, 4, 7 };
83
84static struct resource heartbeat_resources[] = {
85 [0] = {
86 .start = PA_OBLED,
87 .end = PA_OBLED + ARRAY_SIZE(heartbeat_bit_pos) - 1,
88 .flags = IORESOURCE_MEM,
89 },
90};
91
92static struct platform_device heartbeat_device = {
93 .name = "heartbeat",
94 .id = -1,
95 .dev = {
96 .platform_data = heartbeat_bit_pos,
97 },
98 .num_resources = ARRAY_SIZE(heartbeat_resources),
99 .resource = heartbeat_resources,
73}; 100};
74 101
75static struct platform_device *r7780rp_devices[] __initdata = { 102static struct platform_device *r7780rp_devices[] __initdata = {
76 &m66596_usb_host_device, 103 &m66596_usb_host_device,
77 &cf_ide_device, 104 &cf_ide_device,
105 &heartbeat_device,
78}; 106};
79 107
80static int __init r7780rp_devices_setup(void) 108static int __init r7780rp_devices_setup(void)
@@ -148,7 +176,7 @@ static void __init r7780rp_setup(char **cmdline_p)
148#ifndef CONFIG_SH_R7780MP 176#ifndef CONFIG_SH_R7780MP
149 ctrl_outw(0x0001, PA_SDPOW); /* SD Power ON */ 177 ctrl_outw(0x0001, PA_SDPOW); /* SD Power ON */
150#endif 178#endif
151 ctrl_outw(ctrl_inw(PA_IVDRCTL) | 0x0100, PA_IVDRCTL); /* Si13112 */ 179 ctrl_outw(ctrl_inw(PA_IVDRCTL) | 0x01, PA_IVDRCTL); /* Si13112 */
152 180
153 pm_power_off = r7780rp_power_off; 181 pm_power_off = r7780rp_power_off;
154} 182}
@@ -185,8 +213,5 @@ struct sh_machine_vector mv_r7780rp __initmv = {
185 213
186 .mv_ioport_map = r7780rp_ioport_map, 214 .mv_ioport_map = r7780rp_ioport_map,
187 .mv_init_irq = init_r7780rp_IRQ, 215 .mv_init_irq = init_r7780rp_IRQ,
188#ifdef CONFIG_HEARTBEAT
189 .mv_heartbeat = heartbeat_r7780rp,
190#endif
191}; 216};
192ALIAS_MV(r7780rp) 217ALIAS_MV(r7780rp)
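This heartbeat conversion is the template for every board in this series: the board-local led.c and its mv_heartbeat hook go away, and the board instead registers a generic "heartbeat" platform device whose MEM resource points at the LED port and whose optional platform_data supplies the LED bit order. A driver elsewhere in the SH tree binds to that name and blinks the LEDs from a timer; the sketch below shows one plausible shape for such a driver, assuming the usual platform-driver pattern (all struct and function names here are illustrative, not the actual generic driver):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/io.h>

struct hb_data {
	void __iomem *base;		/* mapped LED port (resource 0) */
	unsigned char *bit_pos;		/* bit order from platform_data */
	unsigned int nr_bits;
	unsigned int cur;
	struct timer_list timer;
};

static void hb_tick(unsigned long arg)
{
	struct hb_data *hb = (struct hb_data *)arg;

	/* Light one LED per tick, walking the supplied bit order. */
	writew(1 << hb->bit_pos[hb->cur], hb->base);
	hb->cur = (hb->cur + 1) % hb->nr_bits;
	mod_timer(&hb->timer, jiffies + msecs_to_jiffies(125));
}

static int hb_probe(struct platform_device *pdev)
{
	static unsigned char default_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct hb_data *hb;

	if (!res)
		return -ENODEV;
	hb = kzalloc(sizeof(*hb), GFP_KERNEL);
	if (!hb)
		return -ENOMEM;

	hb->base = ioremap(res->start, res->end - res->start + 1);
	if (!hb->base) {
		kfree(hb);
		return -ENOMEM;
	}
	hb->bit_pos = pdev->dev.platform_data ? : default_pos;
	hb->nr_bits = 8;		/* assumed; the boards here pass 8 bits */
	platform_set_drvdata(pdev, hb);

	setup_timer(&hb->timer, hb_tick, (unsigned long)hb);
	mod_timer(&hb->timer, jiffies + 1);
	return 0;
}

static struct platform_driver hb_driver = {
	.probe	= hb_probe,
	.driver	= { .name = "heartbeat" },
};

static int __init hb_init(void)
{
	return platform_driver_register(&hb_driver);
}
module_init(hb_init);

The bit_pos indirection is what lets a board like this keep its old 2,1,0,3,6,5,4,7 scan order without a board-specific blink routine.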
diff --git a/arch/sh/boards/renesas/rts7751r2d/Makefile b/arch/sh/boards/renesas/rts7751r2d/Makefile
index 686fc9ea5989..0d4c75a72be0 100644
--- a/arch/sh/boards/renesas/rts7751r2d/Makefile
+++ b/arch/sh/boards/renesas/rts7751r2d/Makefile
@@ -2,5 +2,4 @@
2# Makefile for the RTS7751R2D specific parts of the kernel 2# Makefile for the RTS7751R2D specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o irq.o
6obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/renesas/rts7751r2d/io.c b/arch/sh/boards/renesas/rts7751r2d/io.c
deleted file mode 100644
index f2507a804979..000000000000
--- a/arch/sh/boards/renesas/rts7751r2d/io.c
+++ /dev/null
@@ -1,302 +0,0 @@
1/*
2 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
3 * Based largely on io_se.c.
4 *
5 * I/O routine for Renesas Technology sales RTS7751R2D.
6 *
7 * Initial version only to support LAN access; some
8 * placeholder code from io_rts7751r2d.c left in with the
9 * expectation of later SuperIO and PCMCIA access.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/pci.h>
14#include <linux/io.h>
15#include <asm/rts7751r2d.h>
16#include <asm/addrspace.h>
17
18/*
19 * The 7751R RTS7751R2D uses the built-in PCI controller (PCIC)
20 * of the 7751R processor, and has a SuperIO accessible via the PCI.
21 * The board also includes a PCMCIA controller on its memory bus,
22 * like the other Solution Engine boards.
23 */
24
25static inline unsigned long port2adr(unsigned int port)
26{
27 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
28 if (port == 0x3f6)
29 return (PA_AREA5_IO + 0x80c);
30 else
31 return (PA_AREA5_IO + 0x1000 + ((port-0x1f0) << 1));
32 else
33 maybebadio((unsigned long)port);
34
35 return port;
36}
37
38static inline unsigned long port88796l(unsigned int port, int flag)
39{
40 unsigned long addr;
41
42 if (flag)
43 addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1);
44 else
45 addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1) + 0x1000;
46
47 return addr;
48}
49
50/* The 7751R RTS7751R2D seems to have everything hooked */
51/* up pretty normally (nothing on high-bytes only...) so this */
52/* shouldn't be needed */
53static inline int shifted_port(unsigned long port)
54{
55 /* For IDE registers, value is not shifted */
56 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
57 return 0;
58 else
59 return 1;
60}
61
62#if defined(CONFIG_NE2000) || defined(CONFIG_NE2000_MODULE)
63#define CHECK_AX88796L_PORT(port) \
64 ((port >= AX88796L_IO_BASE) && (port < (AX88796L_IO_BASE+0x20)))
65#else
66#define CHECK_AX88796L_PORT(port) (0)
67#endif
68
69/*
70 * General outline: remap really low stuff [eventually] to SuperIO,
71 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
72 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
73 * should be way beyond the window, and is used w/o translation for
74 * compatibility.
75 */
76unsigned char rts7751r2d_inb(unsigned long port)
77{
78 if (CHECK_AX88796L_PORT(port))
79 return (*(volatile unsigned short *)port88796l(port, 0)) & 0xff;
80 else if (PXSEG(port))
81 return *(volatile unsigned char *)port;
82 else if (is_pci_ioaddr(port) || shifted_port(port))
83 return *(volatile unsigned char *)pci_ioaddr(port);
84 else
85 return (*(volatile unsigned short *)port2adr(port) & 0xff);
86}
87
88unsigned char rts7751r2d_inb_p(unsigned long port)
89{
90 unsigned char v;
91
92 if (CHECK_AX88796L_PORT(port))
93 v = (*(volatile unsigned short *)port88796l(port, 0)) & 0xff;
94 else if (PXSEG(port))
95 v = *(volatile unsigned char *)port;
96 else if (is_pci_ioaddr(port) || shifted_port(port))
97 v = *(volatile unsigned char *)pci_ioaddr(port);
98 else
99 v = (*(volatile unsigned short *)port2adr(port) & 0xff);
100
101 ctrl_delay();
102
103 return v;
104}
105
106unsigned short rts7751r2d_inw(unsigned long port)
107{
108 if (CHECK_AX88796L_PORT(port))
109 maybebadio(port);
110 else if (PXSEG(port))
111 return *(volatile unsigned short *)port;
112 else if (is_pci_ioaddr(port) || shifted_port(port))
113 return *(volatile unsigned short *)pci_ioaddr(port);
114 else
115 maybebadio(port);
116
117 return 0;
118}
119
120unsigned int rts7751r2d_inl(unsigned long port)
121{
122 if (CHECK_AX88796L_PORT(port))
123 maybebadio(port);
124 else if (PXSEG(port))
125 return *(volatile unsigned long *)port;
126 else if (is_pci_ioaddr(port) || shifted_port(port))
127 return *(volatile unsigned long *)pci_ioaddr(port);
128 else
129 maybebadio(port);
130
131 return 0;
132}
133
134void rts7751r2d_outb(unsigned char value, unsigned long port)
135{
136 if (CHECK_AX88796L_PORT(port))
137 *((volatile unsigned short *)port88796l(port, 0)) = value;
138 else if (PXSEG(port))
139 *(volatile unsigned char *)port = value;
140 else if (is_pci_ioaddr(port) || shifted_port(port))
141 *(volatile unsigned char *)pci_ioaddr(port) = value;
142 else
143 *(volatile unsigned short *)port2adr(port) = value;
144}
145
146void rts7751r2d_outb_p(unsigned char value, unsigned long port)
147{
148 if (CHECK_AX88796L_PORT(port))
149 *((volatile unsigned short *)port88796l(port, 0)) = value;
150 else if (PXSEG(port))
151 *(volatile unsigned char *)port = value;
152 else if (is_pci_ioaddr(port) || shifted_port(port))
153 *(volatile unsigned char *)pci_ioaddr(port) = value;
154 else
155 *(volatile unsigned short *)port2adr(port) = value;
156
157 ctrl_delay();
158}
159
160void rts7751r2d_outw(unsigned short value, unsigned long port)
161{
162 if (CHECK_AX88796L_PORT(port))
163 maybebadio(port);
164 else if (PXSEG(port))
165 *(volatile unsigned short *)port = value;
166 else if (is_pci_ioaddr(port) || shifted_port(port))
167 *(volatile unsigned short *)pci_ioaddr(port) = value;
168 else
169 maybebadio(port);
170}
171
172void rts7751r2d_outl(unsigned int value, unsigned long port)
173{
174 if (CHECK_AX88796L_PORT(port))
175 maybebadio(port);
176 else if (PXSEG(port))
177 *(volatile unsigned long *)port = value;
178 else if (is_pci_ioaddr(port) || shifted_port(port))
179 *(volatile unsigned long *)pci_ioaddr(port) = value;
180 else
181 maybebadio(port);
182}
183
184void rts7751r2d_insb(unsigned long port, void *addr, unsigned long count)
185{
186 unsigned long a = (unsigned long)addr;
187 volatile __u8 *bp;
188 volatile __u16 *p;
189
190 if (CHECK_AX88796L_PORT(port)) {
191 p = (volatile unsigned short *)port88796l(port, 0);
192 while (count--)
193 ctrl_outb(*p & 0xff, a++);
194 } else if (PXSEG(port))
195 while (count--)
196 ctrl_outb(ctrl_inb(port), a++);
197 else if (is_pci_ioaddr(port) || shifted_port(port)) {
198 bp = (__u8 *)pci_ioaddr(port);
199 while (count--)
200 ctrl_outb(*bp, a++);
201 } else {
202 p = (volatile unsigned short *)port2adr(port);
203 while (count--)
204 ctrl_outb(*p & 0xff, a++);
205 }
206}
207
208void rts7751r2d_insw(unsigned long port, void *addr, unsigned long count)
209{
210 unsigned long a = (unsigned long)addr;
211 volatile __u16 *p;
212
213 if (CHECK_AX88796L_PORT(port))
214 p = (volatile unsigned short *)port88796l(port, 1);
215 else if (PXSEG(port))
216 p = (volatile unsigned short *)port;
217 else if (is_pci_ioaddr(port) || shifted_port(port))
218 p = (volatile unsigned short *)pci_ioaddr(port);
219 else
220 p = (volatile unsigned short *)port2adr(port);
221 while (count--)
222 ctrl_outw(*p, a++);
223}
224
225void rts7751r2d_insl(unsigned long port, void *addr, unsigned long count)
226{
227 if (CHECK_AX88796L_PORT(port))
228 maybebadio(port);
229 else if (is_pci_ioaddr(port) || shifted_port(port)) {
230 unsigned long a = (unsigned long)addr;
231
232 while (count--) {
233 ctrl_outl(ctrl_inl(pci_ioaddr(port)), a);
234 a += 4;
235 }
236 } else
237 maybebadio(port);
238}
239
240void rts7751r2d_outsb(unsigned long port, const void *addr, unsigned long count)
241{
242 unsigned long a = (unsigned long)addr;
243 volatile __u8 *bp;
244 volatile __u16 *p;
245
246 if (CHECK_AX88796L_PORT(port)) {
247 p = (volatile unsigned short *)port88796l(port, 0);
248 while (count--)
249 *p = ctrl_inb(a++);
250 } else if (PXSEG(port))
251 while (count--)
252 ctrl_outb(a++, port);
253 else if (is_pci_ioaddr(port) || shifted_port(port)) {
254 bp = (__u8 *)pci_ioaddr(port);
255 while (count--)
256 *bp = ctrl_inb(a++);
257 } else {
258 p = (volatile unsigned short *)port2adr(port);
259 while (count--)
260 *p = ctrl_inb(a++);
261 }
262}
263
264void rts7751r2d_outsw(unsigned long port, const void *addr, unsigned long count)
265{
266 unsigned long a = (unsigned long)addr;
267 volatile __u16 *p;
268
269 if (CHECK_AX88796L_PORT(port))
270 p = (volatile unsigned short *)port88796l(port, 1);
271 else if (PXSEG(port))
272 p = (volatile unsigned short *)port;
273 else if (is_pci_ioaddr(port) || shifted_port(port))
274 p = (volatile unsigned short *)pci_ioaddr(port);
275 else
276 p = (volatile unsigned short *)port2adr(port);
277
278 while (count--) {
279 ctrl_outw(*p, a);
280 a += 2;
281 }
282}
283
284void rts7751r2d_outsl(unsigned long port, const void *addr, unsigned long count)
285{
286 if (CHECK_AX88796L_PORT(port))
287 maybebadio(port);
288 else if (is_pci_ioaddr(port) || shifted_port(port)) {
289 unsigned long a = (unsigned long)addr;
290
291 while (count--) {
292 ctrl_outl(ctrl_inl(a), pci_ioaddr(port));
293 a += 4;
294 }
295 } else
296 maybebadio(port);
297}
298
299unsigned long rts7751r2d_isa_port2addr(unsigned long offset)
300{
301 return port2adr(offset);
302}
diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c
index cb0eb20d1b43..0bae9041aceb 100644
--- a/arch/sh/boards/renesas/rts7751r2d/irq.c
+++ b/arch/sh/boards/renesas/rts7751r2d/irq.c
@@ -9,7 +9,9 @@
9 * Atom Create Engineering Co., Ltd. 2002. 9 * Atom Create Engineering Co., Ltd. 2002.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/interrupt.h>
12#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/interrupt.h>
13#include <linux/io.h> 15#include <linux/io.h>
14#include <asm/rts7751r2d.h> 16#include <asm/rts7751r2d.h>
15 17
@@ -22,79 +24,31 @@ static int mask_pos[] = {6, 11, 9, 8, 12, 10, 5, 4, 7, 14, 13, 0, 0, 0, 0};
22extern int voyagergx_irq_demux(int irq); 24extern int voyagergx_irq_demux(int irq);
23extern void setup_voyagergx_irq(void); 25extern void setup_voyagergx_irq(void);
24 26
25static void enable_rts7751r2d_irq(unsigned int irq); 27static void enable_rts7751r2d_irq(unsigned int irq)
26static void disable_rts7751r2d_irq(unsigned int irq);
27
28/* shutdown is same as "disable" */
29#define shutdown_rts7751r2d_irq disable_rts7751r2d_irq
30
31static void ack_rts7751r2d_irq(unsigned int irq);
32static void end_rts7751r2d_irq(unsigned int irq);
33
34static unsigned int startup_rts7751r2d_irq(unsigned int irq)
35{ 28{
36 enable_rts7751r2d_irq(irq); 29 /* Set priority in IPR back to original value */
37 return 0; /* never anything pending */ 30 ctrl_outw(ctrl_inw(IRLCNTR1) | (1 << mask_pos[irq]), IRLCNTR1);
38} 31}
39 32
40static void disable_rts7751r2d_irq(unsigned int irq) 33static void disable_rts7751r2d_irq(unsigned int irq)
41{ 34{
42 unsigned short val;
43 unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]);
44
45 /* Set the priority in IPR to 0 */ 35 /* Set the priority in IPR to 0 */
46 val = ctrl_inw(IRLCNTR1); 36 ctrl_outw(ctrl_inw(IRLCNTR1) & (0xffff ^ (1 << mask_pos[irq])),
47 val &= mask; 37 IRLCNTR1);
48 ctrl_outw(val, IRLCNTR1);
49}
50
51static void enable_rts7751r2d_irq(unsigned int irq)
52{
53 unsigned short val;
54 unsigned short value = (0x0001 << mask_pos[irq]);
55
56 /* Set priority in IPR back to original value */
57 val = ctrl_inw(IRLCNTR1);
58 val |= value;
59 ctrl_outw(val, IRLCNTR1);
60} 38}
61 39
62int rts7751r2d_irq_demux(int irq) 40int rts7751r2d_irq_demux(int irq)
63{ 41{
64 int demux_irq; 42 return voyagergx_irq_demux(irq);
65
66 demux_irq = voyagergx_irq_demux(irq);
67 return demux_irq;
68}
69
70static void ack_rts7751r2d_irq(unsigned int irq)
71{
72 disable_rts7751r2d_irq(irq);
73} 43}
74 44
75static void end_rts7751r2d_irq(unsigned int irq) 45static struct irq_chip rts7751r2d_irq_chip __read_mostly = {
76{ 46 .name = "rts7751r2d",
77 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) 47 .mask = disable_rts7751r2d_irq,
78 enable_rts7751r2d_irq(irq); 48 .unmask = enable_rts7751r2d_irq,
79} 49 .mask_ack = disable_rts7751r2d_irq,
80
81static struct hw_interrupt_type rts7751r2d_irq_type = {
82 .typename = "RTS7751R2D IRQ",
83 .startup = startup_rts7751r2d_irq,
84 .shutdown = shutdown_rts7751r2d_irq,
85 .enable = enable_rts7751r2d_irq,
86 .disable = disable_rts7751r2d_irq,
87 .ack = ack_rts7751r2d_irq,
88 .end = end_rts7751r2d_irq,
89}; 50};
90 51
91static void make_rts7751r2d_irq(unsigned int irq)
92{
93 disable_irq_nosync(irq);
94 irq_desc[irq].chip = &rts7751r2d_irq_type;
95 disable_rts7751r2d_irq(irq);
96}
97
98/* 52/*
99 * Initialize IRQ setting 53 * Initialize IRQ setting
100 */ 54 */
@@ -119,8 +73,12 @@ void __init init_rts7751r2d_IRQ(void)
 119 * IRL14=Extension #3 73 * IRL14=Extension #3
120 */ 74 */
121 75
122 for (i=0; i<15; i++) 76 for (i=0; i<15; i++) {
123 make_rts7751r2d_irq(i); 77 disable_irq_nosync(i);
78 set_irq_chip_and_handler_name(i, &rts7751r2d_irq_chip,
79 handle_level_irq, "level");
80 enable_rts7751r2d_irq(i);
81 }
124 82
125 setup_voyagergx_irq(); 83 setup_voyagergx_irq();
126} 84}
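The rewrite above is the stock conversion from the nine-hook hw_interrupt_type to an irq_chip that supplies only mask/unmask/mask_ack, delegating flow control to the generic handle_level_irq(). For orientation, the level flow amounts to roughly the following; this is a simplified sketch only, and the real code in kernel/irq/chip.c additionally takes the descriptor lock and tracks pending/in-progress state:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Roughly what handle_level_irq() does per interrupt: */
static void level_flow_sketch(unsigned int irq, struct irq_desc *desc)
{
	desc->chip->mask_ack(irq);	/* silence and acknowledge the line */
	if (desc->action && !(desc->status & IRQ_DISABLED))
		handle_IRQ_event(irq, desc->action);	/* run the handlers */
	desc->chip->unmask(irq);	/* let the line fire again */
}

Since startup/shutdown/ack/end fall out of the generic flow, the board only has to express how its IRLCNTR1 bits mask and unmask a line, which is what shrinks this file so much.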
diff --git a/arch/sh/boards/renesas/rts7751r2d/led.c b/arch/sh/boards/renesas/rts7751r2d/led.c
deleted file mode 100644
index 509f548bdce0..000000000000
--- a/arch/sh/boards/renesas/rts7751r2d/led.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/rts7751r2d/led.c
3 *
4 * Copyright (C) Atom Create Engineering Co., Ltd.
5 *
6 * May be copied or modified under the terms of GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * This file contains Renesas Technology Sales RTS7751R2D specific LED code.
10 */
11#include <linux/io.h>
12#include <linux/sched.h>
13#include <asm/rts7751r2d.h>
14
 15/* Cycle the LEDs in the classic Knightrider/Sun pattern */
16void heartbeat_rts7751r2d(void)
17{
18 static unsigned int cnt = 0, period = 0;
19 volatile unsigned short *p = (volatile unsigned short *)PA_OUTPORT;
20 static unsigned bit = 0, up = 1;
21
22 cnt += 1;
23 if (cnt < period)
24 return;
25
26 cnt = 0;
27
28 /* Go through the points (roughly!):
 29 * f(0)=10, f(1)=16, f(2)=20, f(5)=35, f(inf)->110
30 */
31 period = 110 - ((300 << FSHIFT)/((avenrun[0]/5) + (3<<FSHIFT)));
32
33 *p = 1 << bit;
34 if (up)
35 if (bit == 7) {
36 bit--;
37 up = 0;
38 } else
39 bit++;
40 else if (bit == 0)
41 up = 1;
42 else
43 bit--;
44}
diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
index 5c042d35ec91..44b42082a0af 100644
--- a/arch/sh/boards/renesas/rts7751r2d/setup.c
+++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Renesas Technology Sales RTS7751R2D Support. 2 * Renesas Technology Sales RTS7751R2D Support.
3 * 3 *
4 * Copyright (C) 2002 Atom Create Engineering Co., Ltd. 4 * Copyright (C) 2002 - 2006 Atom Create Engineering Co., Ltd.
5 * Copyright (C) 2004 - 2006 Paul Mundt 5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
@@ -10,33 +10,13 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/pata_platform.h>
13#include <linux/serial_8250.h> 14#include <linux/serial_8250.h>
14#include <linux/pm.h> 15#include <linux/pm.h>
15#include <asm/machvec.h> 16#include <asm/machvec.h>
16#include <asm/mach/rts7751r2d.h> 17#include <asm/rts7751r2d.h>
17#include <asm/io.h>
18#include <asm/voyagergx.h> 18#include <asm/voyagergx.h>
19 19#include <asm/io.h>
20extern void heartbeat_rts7751r2d(void);
21extern void init_rts7751r2d_IRQ(void);
22extern int rts7751r2d_irq_demux(int irq);
23
24extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
25extern int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t);
26
27static struct plat_serial8250_port uart_platform_data[] = {
28 {
29 .membase = (void *)VOYAGER_UART_BASE,
30 .mapbase = VOYAGER_UART_BASE,
31 .iotype = UPIO_MEM,
32 .irq = VOYAGER_UART0_IRQ,
33 .flags = UPF_BOOT_AUTOCONF,
34 .regshift = 2,
35 .uartclk = (9600 * 16),
36 }, {
37 .flags = 0,
38 },
39};
40 20
41static void __init voyagergx_serial_init(void) 21static void __init voyagergx_serial_init(void)
42{ 22{
@@ -45,32 +25,96 @@ static void __init voyagergx_serial_init(void)
45 /* 25 /*
46 * GPIO Control 26 * GPIO Control
47 */ 27 */
48 val = inl(GPIO_MUX_HIGH); 28 val = readl((void __iomem *)GPIO_MUX_HIGH);
49 val |= 0x00001fe0; 29 val |= 0x00001fe0;
50 outl(val, GPIO_MUX_HIGH); 30 writel(val, (void __iomem *)GPIO_MUX_HIGH);
51 31
52 /* 32 /*
53 * Power Mode Gate 33 * Power Mode Gate
54 */ 34 */
55 val = inl(POWER_MODE0_GATE); 35 val = readl((void __iomem *)POWER_MODE0_GATE);
56 val |= (POWER_MODE0_GATE_U0 | POWER_MODE0_GATE_U1); 36 val |= (POWER_MODE0_GATE_U0 | POWER_MODE0_GATE_U1);
57 outl(val, POWER_MODE0_GATE); 37 writel(val, (void __iomem *)POWER_MODE0_GATE);
58 38
59 val = inl(POWER_MODE1_GATE); 39 val = readl((void __iomem *)POWER_MODE1_GATE);
60 val |= (POWER_MODE1_GATE_U0 | POWER_MODE1_GATE_U1); 40 val |= (POWER_MODE1_GATE_U0 | POWER_MODE1_GATE_U1);
61 outl(val, POWER_MODE1_GATE); 41 writel(val, (void __iomem *)POWER_MODE1_GATE);
62} 42}
63 43
44static struct resource cf_ide_resources[] = {
45 [0] = {
46 .start = PA_AREA5_IO + 0x1000,
47 .end = PA_AREA5_IO + 0x1000 + 0x08 - 1,
48 .flags = IORESOURCE_MEM,
49 },
50 [1] = {
51 .start = PA_AREA5_IO + 0x80c,
52 .end = PA_AREA5_IO + 0x80c + 0x16 - 1,
53 .flags = IORESOURCE_MEM,
54 },
55 [2] = {
56#ifdef CONFIG_RTS7751R2D_REV11
57 .start = 1,
58#else
59 .start = 2,
60#endif
61 .flags = IORESOURCE_IRQ,
62 },
63};
64
65static struct pata_platform_info pata_info = {
66 .ioport_shift = 1,
67};
68
69static struct platform_device cf_ide_device = {
70 .name = "pata_platform",
71 .id = -1,
72 .num_resources = ARRAY_SIZE(cf_ide_resources),
73 .resource = cf_ide_resources,
74 .dev = {
75 .platform_data = &pata_info,
76 },
77};
78
79static struct plat_serial8250_port uart_platform_data[] = {
80 {
81 .membase = (void __iomem *)VOYAGER_UART_BASE,
82 .mapbase = VOYAGER_UART_BASE,
83 .iotype = UPIO_MEM,
84 .irq = VOYAGER_UART0_IRQ,
85 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
86 .regshift = 2,
87 .uartclk = (9600 * 16),
88 }
89};
90
64static struct platform_device uart_device = { 91static struct platform_device uart_device = {
65 .name = "serial8250", 92 .name = "serial8250",
66 .id = -1, 93 .id = PLAT8250_DEV_PLATFORM,
67 .dev = { 94 .dev = {
68 .platform_data = uart_platform_data, 95 .platform_data = uart_platform_data,
69 }, 96 },
70}; 97};
71 98
99static struct resource heartbeat_resources[] = {
100 [0] = {
101 .start = PA_OUTPORT,
102 .end = PA_OUTPORT + 8 - 1,
103 .flags = IORESOURCE_MEM,
104 },
105};
106
107static struct platform_device heartbeat_device = {
108 .name = "heartbeat",
109 .id = -1,
110 .num_resources = ARRAY_SIZE(heartbeat_resources),
111 .resource = heartbeat_resources,
112};
113
72static struct platform_device *rts7751r2d_devices[] __initdata = { 114static struct platform_device *rts7751r2d_devices[] __initdata = {
73 &uart_device, 115 &uart_device,
116 &heartbeat_device,
117 &cf_ide_device,
74}; 118};
75 119
76static int __init rts7751r2d_devices_setup(void) 120static int __init rts7751r2d_devices_setup(void)
@@ -78,6 +122,7 @@ static int __init rts7751r2d_devices_setup(void)
78 return platform_add_devices(rts7751r2d_devices, 122 return platform_add_devices(rts7751r2d_devices,
79 ARRAY_SIZE(rts7751r2d_devices)); 123 ARRAY_SIZE(rts7751r2d_devices));
80} 124}
125__initcall(rts7751r2d_devices_setup);
81 126
82static void rts7751r2d_power_off(void) 127static void rts7751r2d_power_off(void)
83{ 128{
@@ -89,14 +134,17 @@ static void rts7751r2d_power_off(void)
89 */ 134 */
90static void __init rts7751r2d_setup(char **cmdline_p) 135static void __init rts7751r2d_setup(char **cmdline_p)
91{ 136{
92 device_initcall(rts7751r2d_devices_setup); 137 u16 ver = ctrl_inw(PA_VERREG);
138
139 printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n");
140
141 printk(KERN_INFO "FPGA version:%d (revision:%d)\n",
142 (ver >> 4) & 0xf, ver & 0xf);
93 143
94 ctrl_outw(0x0000, PA_OUTPORT); 144 ctrl_outw(0x0000, PA_OUTPORT);
95 pm_power_off = rts7751r2d_power_off; 145 pm_power_off = rts7751r2d_power_off;
96 146
97 voyagergx_serial_init(); 147 voyagergx_serial_init();
98
99 printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n");
100} 148}
101 149
102/* 150/*
@@ -107,31 +155,7 @@ struct sh_machine_vector mv_rts7751r2d __initmv = {
107 .mv_setup = rts7751r2d_setup, 155 .mv_setup = rts7751r2d_setup,
108 .mv_nr_irqs = 72, 156 .mv_nr_irqs = 72,
109 157
110 .mv_inb = rts7751r2d_inb,
111 .mv_inw = rts7751r2d_inw,
112 .mv_inl = rts7751r2d_inl,
113 .mv_outb = rts7751r2d_outb,
114 .mv_outw = rts7751r2d_outw,
115 .mv_outl = rts7751r2d_outl,
116
117 .mv_inb_p = rts7751r2d_inb_p,
118 .mv_inw_p = rts7751r2d_inw,
119 .mv_inl_p = rts7751r2d_inl,
120 .mv_outb_p = rts7751r2d_outb_p,
121 .mv_outw_p = rts7751r2d_outw,
122 .mv_outl_p = rts7751r2d_outl,
123
124 .mv_insb = rts7751r2d_insb,
125 .mv_insw = rts7751r2d_insw,
126 .mv_insl = rts7751r2d_insl,
127 .mv_outsb = rts7751r2d_outsb,
128 .mv_outsw = rts7751r2d_outsw,
129 .mv_outsl = rts7751r2d_outsl,
130
131 .mv_init_irq = init_rts7751r2d_IRQ, 158 .mv_init_irq = init_rts7751r2d_IRQ,
132#ifdef CONFIG_HEARTBEAT
133 .mv_heartbeat = heartbeat_rts7751r2d,
134#endif
135 .mv_irq_demux = rts7751r2d_irq_demux, 159 .mv_irq_demux = rts7751r2d_irq_demux,
136 160
137#ifdef CONFIG_USB_SM501 161#ifdef CONFIG_USB_SM501
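Both this board and the Highlander earlier replace open-coded CF/IDE port arithmetic, ((port - 0x1f0) << 1) on the 16-bit area-5 bus, with a declarative pata_platform device and ioport_shift = 1. A sketch of the resulting register addressing, assuming the standard pata_platform shift behaviour (cf_taskfile_reg is a hypothetical helper, not driver code):

#include <linux/io.h>

/*
 * With pata_platform_info.ioport_shift == 1, ATA taskfile register N
 * of the block starting at 'base' is accessed at base + (N << 1);
 * e.g. the data register (N = 0) lands at PA_AREA5_IO + 0x1000 and
 * the device/head register (N = 6) at PA_AREA5_IO + 0x1000 + 0x0c,
 * matching what port2adr() used to compute from legacy port 0x1f0+N.
 */
static void __iomem *cf_taskfile_reg(void __iomem *base,
				     unsigned int reg, unsigned int shift)
{
	return base + (reg << shift);
}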
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile
index 63950f4f2453..63e7ed699f39 100644
--- a/arch/sh/boards/se/7206/Makefile
+++ b/arch/sh/boards/se/7206/Makefile
@@ -3,5 +3,3 @@
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6obj-$(CONFIG_HEARTBEAT) += led.o
7
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c
deleted file mode 100644
index ef794601ab86..000000000000
--- a/arch/sh/boards/se/7206/led.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * linux/arch/sh/kernel/led_se.c
3 *
4 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * This file contains Solution Engine specific LED code.
10 */
11
12#include <linux/config.h>
13#include <asm/se7206.h>
14
15#ifdef CONFIG_HEARTBEAT
16
17#include <linux/sched.h>
18
 19/* Cycle the LEDs in the classic Knightrider/Sun pattern */
20void heartbeat_se(void)
21{
22 static unsigned int cnt = 0, period = 0;
23 volatile unsigned short* p = (volatile unsigned short*)PA_LED;
24 static unsigned bit = 0, up = 1;
25
26 cnt += 1;
27 if (cnt < period) {
28 return;
29 }
30
31 cnt = 0;
32
33 /* Go through the points (roughly!):
34 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
35 */
36 period = 110 - ( (300<<FSHIFT)/
37 ((avenrun[0]/5) + (3<<FSHIFT)) );
38
39 if (up) {
40 if (bit == 7) {
41 bit--;
42 up=0;
43 } else {
44 bit ++;
45 }
46 } else {
47 if (bit == 0) {
48 bit++;
49 up=1;
50 } else {
51 bit--;
52 }
53 }
54 *p = 1<<(bit+8);
55
56}
57#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c
index 0f42e91a3238..ca714879f559 100644
--- a/arch/sh/boards/se/7206/setup.c
+++ b/arch/sh/boards/se/7206/setup.c
@@ -3,6 +3,7 @@
3 * linux/arch/sh/boards/se/7206/setup.c 3 * linux/arch/sh/boards/se/7206/setup.c
4 * 4 *
5 * Copyright (C) 2006 Yoshinori Sato 5 * Copyright (C) 2006 Yoshinori Sato
6 * Copyright (C) 2007 Paul Mundt
6 * 7 *
7 * Hitachi 7206 SolutionEngine Support. 8 * Hitachi 7206 SolutionEngine Support.
8 * 9 *
@@ -34,15 +35,37 @@ static struct platform_device smc91x_device = {
34 .resource = smc91x_resources, 35 .resource = smc91x_resources,
35}; 36};
36 37
38static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 };
39
40static struct resource heartbeat_resources[] = {
41 [0] = {
42 .start = PA_LED,
43 .end = PA_LED + ARRAY_SIZE(heartbeat_bit_pos) - 1,
44 .flags = IORESOURCE_MEM,
45 },
46};
47
48static struct platform_device heartbeat_device = {
49 .name = "heartbeat",
50 .id = -1,
51 .dev = {
52 .platform_data = heartbeat_bit_pos,
53 },
54 .num_resources = ARRAY_SIZE(heartbeat_resources),
55 .resource = heartbeat_resources,
56};
57
58static struct platform_device *se7206_devices[] __initdata = {
59 &smc91x_device,
60 &heartbeat_device,
61};
62
37static int __init se7206_devices_setup(void) 63static int __init se7206_devices_setup(void)
38{ 64{
39 return platform_device_register(&smc91x_device); 65 return platform_add_devices(se7206_devices, ARRAY_SIZE(se7206_devices));
40} 66}
41
42__initcall(se7206_devices_setup); 67__initcall(se7206_devices_setup);
43 68
44void heartbeat_se(void);
45
46/* 69/*
47 * The Machine Vector 70 * The Machine Vector
48 */ 71 */
@@ -72,8 +95,5 @@ struct sh_machine_vector mv_se __initmv = {
72 .mv_outsl = se7206_outsl, 95 .mv_outsl = se7206_outsl,
73 96
74 .mv_init_irq = init_se7206_IRQ, 97 .mv_init_irq = init_se7206_IRQ,
75#ifdef CONFIG_HEARTBEAT
76 .mv_heartbeat = heartbeat_se,
77#endif
78}; 98};
79ALIAS_MV(se) 99ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7300/Makefile b/arch/sh/boards/se/7300/Makefile
index 0fbd4f47815c..46247368f14b 100644
--- a/arch/sh/boards/se/7300/Makefile
+++ b/arch/sh/boards/se/7300/Makefile
@@ -3,5 +3,3 @@
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6
7obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/se/7300/led.c b/arch/sh/boards/se/7300/led.c
deleted file mode 100644
index 4d03bb7774be..000000000000
--- a/arch/sh/boards/se/7300/led.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * linux/arch/sh/boards/se/7300/led.c
3 *
4 * Derived from linux/arch/sh/boards/se/770x/led.c
5 *
6 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
7 *
8 * May be copied or modified under the terms of the GNU General Public
9 * License. See linux/COPYING for more information.
10 *
11 * This file contains Solution Engine specific LED code.
12 */
13
14#include <linux/sched.h>
15#include <asm/se7300.h>
16
 17/* Cycle the LEDs in the classic Knightrider/Sun pattern */
18void heartbeat_7300se(void)
19{
20 static unsigned int cnt = 0, period = 0;
21 volatile unsigned short *p = (volatile unsigned short *) PA_LED;
22 static unsigned bit = 0, up = 1;
23
24 cnt += 1;
25 if (cnt < period) {
26 return;
27 }
28
29 cnt = 0;
30
31 /* Go through the points (roughly!):
32 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
33 */
34 period = 110 - ((300 << FSHIFT) / ((avenrun[0] / 5) + (3 << FSHIFT)));
35
36 if (up) {
37 if (bit == 7) {
38 bit--;
39 up = 0;
40 } else {
41 bit++;
42 }
43 } else {
44 if (bit == 0) {
45 bit++;
46 up = 1;
47 } else {
48 bit--;
49 }
50 }
51 *p = 1 << (bit + 8);
52
53}
54
diff --git a/arch/sh/boards/se/7300/setup.c b/arch/sh/boards/se/7300/setup.c
index 6f082a722d42..f1960956bad0 100644
--- a/arch/sh/boards/se/7300/setup.c
+++ b/arch/sh/boards/se/7300/setup.c
@@ -6,14 +6,43 @@
6 * SH-Mobile SolutionEngine 7300 Support. 6 * SH-Mobile SolutionEngine 7300 Support.
7 * 7 *
8 */ 8 */
9
10#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <asm/machvec.h> 11#include <asm/machvec.h>
12#include <asm/se7300.h> 12#include <asm/se7300.h>
13 13
14void heartbeat_7300se(void);
15void init_7300se_IRQ(void); 14void init_7300se_IRQ(void);
16 15
16static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 };
17
18static struct resource heartbeat_resources[] = {
19 [0] = {
20 .start = PA_LED,
21 .end = PA_LED + ARRAY_SIZE(heartbeat_bit_pos) - 1,
22 .flags = IORESOURCE_MEM,
23 },
24};
25
26static struct platform_device heartbeat_device = {
27 .name = "heartbeat",
28 .id = -1,
29 .dev = {
30 .platform_data = heartbeat_bit_pos,
31 },
32 .num_resources = ARRAY_SIZE(heartbeat_resources),
33 .resource = heartbeat_resources,
34};
35
36static struct platform_device *se7300_devices[] __initdata = {
37 &heartbeat_device,
38};
39
40static int __init se7300_devices_setup(void)
41{
42 return platform_add_devices(se7300_devices, ARRAY_SIZE(se7300_devices));
43}
44__initcall(se7300_devices_setup);
45
17/* 46/*
18 * The Machine Vector 47 * The Machine Vector
19 */ 48 */
@@ -42,8 +71,5 @@ struct sh_machine_vector mv_7300se __initmv = {
42 .mv_outsl = sh7300se_outsl, 71 .mv_outsl = sh7300se_outsl,
43 72
44 .mv_init_irq = init_7300se_IRQ, 73 .mv_init_irq = init_7300se_IRQ,
45#ifdef CONFIG_HEARTBEAT
46 .mv_heartbeat = heartbeat_7300se,
47#endif
48}; 74};
49ALIAS_MV(7300se) 75ALIAS_MV(7300se)
diff --git a/arch/sh/boards/se/73180/Makefile b/arch/sh/boards/se/73180/Makefile
index 8f63886a0f3f..e7c09967c529 100644
--- a/arch/sh/boards/se/73180/Makefile
+++ b/arch/sh/boards/se/73180/Makefile
@@ -3,5 +3,3 @@
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6
7obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/se/73180/led.c b/arch/sh/boards/se/73180/led.c
deleted file mode 100644
index 4b72e9a3ead9..000000000000
--- a/arch/sh/boards/se/73180/led.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * arch/sh/boards/se/73180/led.c
3 *
4 * Derived from arch/sh/boards/se/770x/led.c
5 *
6 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
7 *
8 * May be copied or modified under the terms of the GNU General Public
9 * License. See linux/COPYING for more information.
10 *
11 * This file contains Solution Engine specific LED code.
12 */
13
14#include <linux/sched.h>
15#include <asm/mach/se73180.h>
16
 17/* Cycle the LEDs in the classic Knightrider/Sun pattern */
18void heartbeat_73180se(void)
19{
20 static unsigned int cnt = 0, period = 0;
21 volatile unsigned short *p = (volatile unsigned short *) PA_LED;
22 static unsigned bit = 0, up = 1;
23
24 cnt += 1;
25 if (cnt < period) {
26 return;
27 }
28
29 cnt = 0;
30
31 /* Go through the points (roughly!):
32 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
33 */
34 period = 110 - ((300 << FSHIFT) / ((avenrun[0] / 5) + (3 << FSHIFT)));
35
36 if (up) {
37 if (bit == 7) {
38 bit--;
39 up = 0;
40 } else {
41 bit++;
42 }
43 } else {
44 if (bit == 0) {
45 bit++;
46 up = 1;
47 } else {
48 bit--;
49 }
50 }
51 *p = 1 << (bit + LED_SHIFT);
52
53}
diff --git a/arch/sh/boards/se/73180/setup.c b/arch/sh/boards/se/73180/setup.c
index b38ef50a160a..911ce1cdbd7f 100644
--- a/arch/sh/boards/se/73180/setup.c
+++ b/arch/sh/boards/se/73180/setup.c
@@ -10,13 +10,39 @@
10 */ 10 */
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/platform_device.h>
13#include <asm/machvec.h> 14#include <asm/machvec.h>
14#include <asm/se73180.h> 15#include <asm/se73180.h>
15#include <asm/irq.h> 16#include <asm/irq.h>
16 17
17void heartbeat_73180se(void);
18void init_73180se_IRQ(void); 18void init_73180se_IRQ(void);
19 19
20static struct resource heartbeat_resources[] = {
21 [0] = {
22 .start = PA_LED,
23 .end = PA_LED + 8 - 1,
24 .flags = IORESOURCE_MEM,
25 },
26};
27
28static struct platform_device heartbeat_device = {
29 .name = "heartbeat",
30 .id = -1,
31 .num_resources = ARRAY_SIZE(heartbeat_resources),
32 .resource = heartbeat_resources,
33};
34
35static struct platform_device *se73180_devices[] __initdata = {
36 &heartbeat_device,
37};
38
39static int __init se73180_devices_setup(void)
40{
 41 return platform_add_devices(se73180_devices,
 42 ARRAY_SIZE(se73180_devices));
43}
44__initcall(se73180_devices_setup);
45
20/* 46/*
21 * The Machine Vector 47 * The Machine Vector
22 */ 48 */
@@ -46,8 +72,5 @@ struct sh_machine_vector mv_73180se __initmv = {
46 72
47 .mv_init_irq = init_73180se_IRQ, 73 .mv_init_irq = init_73180se_IRQ,
48 .mv_irq_demux = shmse_irq_demux, 74 .mv_irq_demux = shmse_irq_demux,
49#ifdef CONFIG_HEARTBEAT
50 .mv_heartbeat = heartbeat_73180se,
51#endif
52}; 75};
53ALIAS_MV(73180se) 76ALIAS_MV(73180se)
diff --git a/arch/sh/boards/se/7343/Makefile b/arch/sh/boards/se/7343/Makefile
index 4291069c0b4f..3024796c6203 100644
--- a/arch/sh/boards/se/7343/Makefile
+++ b/arch/sh/boards/se/7343/Makefile
@@ -3,5 +3,3 @@
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6
7obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/se/7343/led.c b/arch/sh/boards/se/7343/led.c
deleted file mode 100644
index 6b39e191c420..000000000000
--- a/arch/sh/boards/se/7343/led.c
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * arch/sh/boards/se/7343/led.c
3 *
4 */
5#include <linux/sched.h>
6#include <asm/mach/se7343.h>
7
 8/* Cycle the LEDs in the classic Knightrider/Sun pattern */
9void heartbeat_7343se(void)
10{
11 static unsigned int cnt = 0, period = 0;
12 volatile unsigned short *p = (volatile unsigned short *) PA_LED;
13 static unsigned bit = 0, up = 1;
14
15 cnt += 1;
16 if (cnt < period) {
17 return;
18 }
19
20 cnt = 0;
21
22 /* Go through the points (roughly!):
23 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
24 */
25 period = 110 - ((300 << FSHIFT) / ((avenrun[0] / 5) + (3 << FSHIFT)));
26
27 if (up) {
28 if (bit == 7) {
29 bit--;
30 up = 0;
31 } else {
32 bit++;
33 }
34 } else {
35 if (bit == 0) {
36 bit++;
37 up = 1;
38 } else {
39 bit--;
40 }
41 }
42 *p = 1 << (bit + LED_SHIFT);
43
44}
diff --git a/arch/sh/boards/se/7343/setup.c b/arch/sh/boards/se/7343/setup.c
index c7d17fe7764e..3fdb16f2cef1 100644
--- a/arch/sh/boards/se/7343/setup.c
+++ b/arch/sh/boards/se/7343/setup.c
@@ -4,7 +4,6 @@
4#include <asm/mach/se7343.h> 4#include <asm/mach/se7343.h>
5#include <asm/irq.h> 5#include <asm/irq.h>
6 6
7void heartbeat_7343se(void);
8void init_7343se_IRQ(void); 7void init_7343se_IRQ(void);
9 8
10static struct resource smc91x_resources[] = { 9static struct resource smc91x_resources[] = {
@@ -31,14 +30,30 @@ static struct platform_device smc91x_device = {
31 .resource = smc91x_resources, 30 .resource = smc91x_resources,
32}; 31};
33 32
34static struct platform_device *smc91x_platform_devices[] __initdata = { 33static struct resource heartbeat_resources[] = {
34 [0] = {
35 .start = PA_LED,
36 .end = PA_LED + 8 - 1,
37 .flags = IORESOURCE_MEM,
38 },
39};
40
41static struct platform_device heartbeat_device = {
42 .name = "heartbeat",
43 .id = -1,
44 .num_resources = ARRAY_SIZE(heartbeat_resources),
45 .resource = heartbeat_resources,
46};
47
48static struct platform_device *sh7343se_platform_devices[] __initdata = {
35 &smc91x_device, 49 &smc91x_device,
50 &heartbeat_device,
36}; 51};
37 52
38static int __init sh7343se_devices_setup(void) 53static int __init sh7343se_devices_setup(void)
39{ 54{
40 return platform_add_devices(smc91x_platform_devices, 55 return platform_add_devices(sh7343se_platform_devices,
41 ARRAY_SIZE(smc91x_platform_devices)); 56 ARRAY_SIZE(sh7343se_platform_devices));
42} 57}
43 58
44static void __init sh7343se_setup(char **cmdline_p) 59static void __init sh7343se_setup(char **cmdline_p)
@@ -76,8 +91,5 @@ struct sh_machine_vector mv_7343se __initmv = {
76 91
77 .mv_init_irq = init_7343se_IRQ, 92 .mv_init_irq = init_7343se_IRQ,
78 .mv_irq_demux = shmse_irq_demux, 93 .mv_irq_demux = shmse_irq_demux,
79#ifdef CONFIG_HEARTBEAT
80 .mv_heartbeat = heartbeat_7343se,
81#endif
82}; 94};
83ALIAS_MV(7343se) 95ALIAS_MV(7343se)
diff --git a/arch/sh/boards/se/770x/Makefile b/arch/sh/boards/se/770x/Makefile
index 9a5035f80ec0..8e624b06d5ea 100644
--- a/arch/sh/boards/se/770x/Makefile
+++ b/arch/sh/boards/se/770x/Makefile
@@ -3,4 +3,3 @@
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/se/770x/irq.c b/arch/sh/boards/se/770x/irq.c
index fcd7cd7fa05f..307ca5da6232 100644
--- a/arch/sh/boards/se/770x/irq.c
+++ b/arch/sh/boards/se/770x/irq.c
@@ -2,56 +2,96 @@
2 * linux/arch/sh/boards/se/770x/irq.c 2 * linux/arch/sh/boards/se/770x/irq.c
3 * 3 *
4 * Copyright (C) 2000 Kazumoto Kojima 4 * Copyright (C) 2000 Kazumoto Kojima
5 * Copyright (C) 2006 Nobuhiro Iwamatsu
5 * 6 *
6 * Hitachi SolutionEngine Support. 7 * Hitachi SolutionEngine Support.
7 * 8 *
8 */ 9 */
9 10
10#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/interrupt.h>
11#include <linux/irq.h> 13#include <linux/irq.h>
12#include <asm/irq.h> 14#include <asm/irq.h>
13#include <asm/io.h> 15#include <asm/io.h>
14#include <asm/se.h> 16#include <asm/se.h>
15 17
18/*
19 * If the problem of make_ipr_irq is solved,
20 * this code will become unnecessary. :-)
21 */
22static void se770x_disable_ipr_irq(unsigned int irq)
23{
24 struct ipr_data *p = get_irq_chip_data(irq);
25
26 ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
27}
28
29static void se770x_enable_ipr_irq(unsigned int irq)
30{
31 struct ipr_data *p = get_irq_chip_data(irq);
32
33 ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
34}
35
36static struct irq_chip se770x_irq_chip = {
37 .name = "MS770xSE-FPGA",
38 .mask = se770x_disable_ipr_irq,
39 .unmask = se770x_enable_ipr_irq,
40 .mask_ack = se770x_disable_ipr_irq,
41};
42
43void make_se770x_irq(struct ipr_data *table, unsigned int nr_irqs)
44{
45 int i;
46
47 for (i = 0; i < nr_irqs; i++) {
48 unsigned int irq = table[i].irq;
49 disable_irq_nosync(irq);
50 set_irq_chip_and_handler_name(irq, &se770x_irq_chip,
51 handle_level_irq, "level");
52 set_irq_chip_data(irq, &table[i]);
53 se770x_enable_ipr_irq(irq);
54 }
55}
56
16static struct ipr_data se770x_ipr_map[] = { 57static struct ipr_data se770x_ipr_map[] = {
17#if defined(CONFIG_CPU_SUBTYPE_SH7705) 58#if defined(CONFIG_CPU_SUBTYPE_SH7705)
18 /* This is default value */ 59 /* This is default value */
19 { 0xf-0x2, BCR_ILCRA, 2, 0x2 }, 60 { 0xf-0x2, 0, 8, 0x2 , BCR_ILCRA},
20 { 0xf-0xa, BCR_ILCRA, 1, 0xa }, 61 { 0xf-0xa, 0, 4, 0xa , BCR_ILCRA},
21 { 0xf-0x5, BCR_ILCRB, 0, 0x5 }, 62 { 0xf-0x5, 0, 0, 0x5 , BCR_ILCRB},
22 { 0xf-0x8, BCR_ILCRC, 1, 0x8 }, 63 { 0xf-0x8, 0, 4, 0x8 , BCR_ILCRC},
23 { 0xf-0xc, BCR_ILCRC, 0, 0xc }, 64 { 0xf-0xc, 0, 0, 0xc , BCR_ILCRC},
24 { 0xf-0xe, BCR_ILCRD, 3, 0xe }, 65 { 0xf-0xe, 0, 12, 0xe , BCR_ILCRD},
25 { 0xf-0x3, BCR_ILCRD, 1, 0x3 }, /* LAN */ 66 { 0xf-0x3, 0, 4, 0x3 , BCR_ILCRD}, /* LAN */
26 { 0xf-0xd, BCR_ILCRE, 2, 0xd }, 67 { 0xf-0xd, 0, 8, 0xd , BCR_ILCRE},
27 { 0xf-0x9, BCR_ILCRE, 1, 0x9 }, 68 { 0xf-0x9, 0, 4, 0x9 , BCR_ILCRE},
28 { 0xf-0x1, BCR_ILCRE, 0, 0x1 }, 69 { 0xf-0x1, 0, 0, 0x1 , BCR_ILCRE},
29 { 0xf-0xf, BCR_ILCRF, 3, 0xf }, 70 { 0xf-0xf, 0, 12, 0xf , BCR_ILCRF},
30 { 0xf-0xb, BCR_ILCRF, 1, 0xb }, 71 { 0xf-0xb, 0, 4, 0xb , BCR_ILCRF},
31 { 0xf-0x7, BCR_ILCRG, 3, 0x7 }, 72 { 0xf-0x7, 0, 12, 0x7 , BCR_ILCRG},
32 { 0xf-0x6, BCR_ILCRG, 2, 0x6 }, 73 { 0xf-0x6, 0, 8, 0x6 , BCR_ILCRG},
33 { 0xf-0x4, BCR_ILCRG, 1, 0x4 }, 74 { 0xf-0x4, 0, 4, 0x4 , BCR_ILCRG},
34#else 75#else
35 { 14, BCR_ILCRA, 2, 0x0f-14 }, 76 { 14, 0, 8, 0x0f-14 ,BCR_ILCRA},
36 { 12, BCR_ILCRA, 1, 0x0f-12 }, 77 { 12, 0, 4, 0x0f-12 ,BCR_ILCRA},
37 { 8, BCR_ILCRB, 1, 0x0f- 8 }, 78 { 8, 0, 4, 0x0f- 8 ,BCR_ILCRB},
38 { 6, BCR_ILCRC, 3, 0x0f- 6 }, 79 { 6, 0, 12, 0x0f- 6 ,BCR_ILCRC},
39 { 5, BCR_ILCRC, 2, 0x0f- 5 }, 80 { 5, 0, 8, 0x0f- 5 ,BCR_ILCRC},
40 { 4, BCR_ILCRC, 1, 0x0f- 4 }, 81 { 4, 0, 4, 0x0f- 4 ,BCR_ILCRC},
41 { 3, BCR_ILCRC, 0, 0x0f- 3 }, 82 { 3, 0, 0, 0x0f- 3 ,BCR_ILCRC},
42 { 1, BCR_ILCRD, 3, 0x0f- 1 }, 83 { 1, 0, 12, 0x0f- 1 ,BCR_ILCRD},
43 84 /* ST NIC */
44 { 10, BCR_ILCRD, 1, 0x0f-10 }, /* LAN */ 85 { 10, 0, 4, 0x0f-10 ,BCR_ILCRD}, /* LAN */
45 86 /* MRSHPC IRQs setting */
46 { 0, BCR_ILCRE, 3, 0x0f- 0 }, /* PCIRQ3 */ 87 { 0, 0, 12, 0x0f- 0 ,BCR_ILCRE}, /* PCIRQ3 */
47 { 11, BCR_ILCRE, 2, 0x0f-11 }, /* PCIRQ2 */ 88 { 11, 0, 8, 0x0f-11 ,BCR_ILCRE}, /* PCIRQ2 */
48 { 9, BCR_ILCRE, 1, 0x0f- 9 }, /* PCIRQ1 */ 89 { 9, 0, 4, 0x0f- 9 ,BCR_ILCRE}, /* PCIRQ1 */
49 { 7, BCR_ILCRE, 0, 0x0f- 7 }, /* PCIRQ0 */ 90 { 7, 0, 0, 0x0f- 7 ,BCR_ILCRE}, /* PCIRQ0 */
50
51 /* #2, #13 are allocated for SLOT IRQ #1 and #2 (for now) */ 91 /* #2, #13 are allocated for SLOT IRQ #1 and #2 (for now) */
52 /* NOTE: #2 and #13 are not used on PC */ 92 /* NOTE: #2 and #13 are not used on PC */
53 { 13, BCR_ILCRG, 1, 0x0f-13 }, /* SLOTIRQ2 */ 93 { 13, 0, 4, 0x0f-13 ,BCR_ILCRG}, /* SLOTIRQ2 */
54 { 2, BCR_ILCRG, 0, 0x0f- 2 }, /* SLOTIRQ1 */ 94 { 2, 0, 0, 0x0f- 2 ,BCR_ILCRG}, /* SLOTIRQ1 */
55#endif 95#endif
56}; 96};
57 97
@@ -81,5 +121,5 @@ void __init init_se_IRQ(void)
81 ctrl_outw(0, BCR_ILCRF); 121 ctrl_outw(0, BCR_ILCRF);
82 ctrl_outw(0, BCR_ILCRG); 122 ctrl_outw(0, BCR_ILCRG);
83#endif 123#endif
84 make_ipr_irq(se770x_ipr_map, ARRAY_SIZE(se770x_ipr_map)); 124 make_se770x_irq(se770x_ipr_map, ARRAY_SIZE(se770x_ipr_map));
85} 125}
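The reworked table keeps the ILCR register address in each ipr_data entry and stores the shift in bits rather than as a nibble index (old position 1 becomes shift 4, position 2 becomes shift 8, and so on), so mask/unmask reduce to clearing or restoring one 4-bit priority field. Worked numbers for the SH7705 LAN entry { 0xf-0x3, 0, 4, 0x3, BCR_ILCRD } above, i.e. IRQ 12 with priority 0x3 at shift 4 (ilcr_mask/ilcr_unmask are illustrative helpers, not patch code):

/*
 * Unmask: restore the priority nibble at its shift.
 *   ilcr | (0x3 << 4)            -> bits 7:4 of BCR_ILCRD = 0x3
 * Mask: clear just that nibble.
 *   ilcr & (0xffff ^ (0xf << 4)) -> bits 7:4 of BCR_ILCRD = 0
 */
static unsigned short ilcr_unmask(unsigned short ilcr)
{
	return ilcr | (0x3 << 4);
}

static unsigned short ilcr_mask(unsigned short ilcr)
{
	return ilcr & (0xffff ^ (0xf << 4));
}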
diff --git a/arch/sh/boards/se/770x/led.c b/arch/sh/boards/se/770x/led.c
deleted file mode 100644
index d93dd831b2ad..000000000000
--- a/arch/sh/boards/se/770x/led.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * linux/arch/sh/boards/se/770x/led.c
3 *
4 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * This file contains Solution Engine specific LED code.
10 */
11
12#include <linux/sched.h>
13#include <asm/se.h>
14
 15/* Cycle the LEDs in the classic Knightrider/Sun pattern */
16void heartbeat_se(void)
17{
18 static unsigned int cnt = 0, period = 0;
19 volatile unsigned short* p = (volatile unsigned short*)PA_LED;
20 static unsigned bit = 0, up = 1;
21
22 cnt += 1;
23 if (cnt < period) {
24 return;
25 }
26
27 cnt = 0;
28
29 /* Go through the points (roughly!):
30 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
31 */
32 period = 110 - ( (300<<FSHIFT)/
33 ((avenrun[0]/5) + (3<<FSHIFT)) );
34
35 if (up) {
36 if (bit == 7) {
37 bit--;
38 up=0;
39 } else {
40 bit ++;
41 }
42 } else {
43 if (bit == 0) {
44 bit++;
45 up=1;
46 } else {
47 bit--;
48 }
49 }
50 *p = 1<<(bit+8);
51
52}
diff --git a/arch/sh/boards/se/770x/setup.c b/arch/sh/boards/se/770x/setup.c
index a1d51d5fa925..45cbc36b9fb7 100644
--- a/arch/sh/boards/se/770x/setup.c
+++ b/arch/sh/boards/se/770x/setup.c
@@ -1,5 +1,4 @@
1/* $Id: setup.c,v 1.1.2.4 2002/03/02 21:57:07 lethal Exp $ 1/*
2 *
3 * linux/arch/sh/boards/se/770x/setup.c 2 * linux/arch/sh/boards/se/770x/setup.c
4 * 3 *
5 * Copyright (C) 2000 Kazumoto Kojima 4 * Copyright (C) 2000 Kazumoto Kojima
@@ -8,12 +7,12 @@
8 * 7 *
9 */ 8 */
10#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <asm/machvec.h> 11#include <asm/machvec.h>
12#include <asm/se.h> 12#include <asm/se.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/smc37c93x.h> 14#include <asm/smc37c93x.h>
15 15
16void heartbeat_se(void);
17void init_se_IRQ(void); 16void init_se_IRQ(void);
18 17
19/* 18/*
@@ -36,11 +35,6 @@ static void __init smsc_setup(char **cmdline_p)
36 smsc_config(ACTIVATE_INDEX, 0x01); 35 smsc_config(ACTIVATE_INDEX, 0x01);
37 smsc_config(IRQ_SELECT_INDEX, 6); /* IRQ6 */ 36 smsc_config(IRQ_SELECT_INDEX, 6); /* IRQ6 */
38 37
39 /* IDE1 */
40 smsc_config(CURRENT_LDN_INDEX, LDN_IDE1);
41 smsc_config(ACTIVATE_INDEX, 0x01);
42 smsc_config(IRQ_SELECT_INDEX, 14); /* IRQ14 */
43
44 /* AUXIO (GPIO): to use IDE1 */ 38 /* AUXIO (GPIO): to use IDE1 */
45 smsc_config(CURRENT_LDN_INDEX, LDN_AUXIO); 39 smsc_config(CURRENT_LDN_INDEX, LDN_AUXIO);
46 smsc_config(GPIO46_INDEX, 0x00); /* nIOROP */ 40 smsc_config(GPIO46_INDEX, 0x00); /* nIOROP */
@@ -69,6 +63,36 @@ static void __init smsc_setup(char **cmdline_p)
69 outb_p(CONFIG_EXIT, CONFIG_PORT); 63 outb_p(CONFIG_EXIT, CONFIG_PORT);
70} 64}
71 65
66static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 };
67
68static struct resource heartbeat_resources[] = {
69 [0] = {
70 .start = PA_LED,
71 .end = PA_LED + ARRAY_SIZE(heartbeat_bit_pos) - 1,
72 .flags = IORESOURCE_MEM,
73 },
74};
75
76static struct platform_device heartbeat_device = {
77 .name = "heartbeat",
78 .id = -1,
79 .dev = {
80 .platform_data = heartbeat_bit_pos,
81 },
82 .num_resources = ARRAY_SIZE(heartbeat_resources),
83 .resource = heartbeat_resources,
84};
85
86static struct platform_device *se_devices[] __initdata = {
87 &heartbeat_device,
88};
89
90static int __init se_devices_setup(void)
91{
92 return platform_add_devices(se_devices, ARRAY_SIZE(se_devices));
93}
94__initcall(se_devices_setup);
95
72/* 96/*
73 * The Machine Vector 97 * The Machine Vector
74 */ 98 */
@@ -107,8 +131,5 @@ struct sh_machine_vector mv_se __initmv = {
107 .mv_outsl = se_outsl, 131 .mv_outsl = se_outsl,
108 132
109 .mv_init_irq = init_se_IRQ, 133 .mv_init_irq = init_se_IRQ,
110#ifdef CONFIG_HEARTBEAT
111 .mv_heartbeat = heartbeat_se,
112#endif
113}; 134};
114ALIAS_MV(se) 135ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7751/Makefile b/arch/sh/boards/se/7751/Makefile
index 188900c48321..dbc29f3a9de5 100644
--- a/arch/sh/boards/se/7751/Makefile
+++ b/arch/sh/boards/se/7751/Makefile
@@ -5,4 +5,3 @@
5obj-y := setup.o io.o irq.o 5obj-y := setup.o io.o irq.o
6 6
7obj-$(CONFIG_PCI) += pci.o 7obj-$(CONFIG_PCI) += pci.o
8obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/se/7751/led.c b/arch/sh/boards/se/7751/led.c
deleted file mode 100644
index de4194d97c88..000000000000
--- a/arch/sh/boards/se/7751/led.c
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * linux/arch/sh/boards/se/7751/led.c
3 *
4 * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * This file contains Solution Engine specific LED code.
10 */
11#include <linux/sched.h>
12#include <asm/se7751.h>
13
 14/* Cycle the LEDs in the classic Knightrider/Sun pattern */
15void heartbeat_7751se(void)
16{
17 static unsigned int cnt = 0, period = 0;
18 volatile unsigned short* p = (volatile unsigned short*)PA_LED;
19 static unsigned bit = 0, up = 1;
20
21 cnt += 1;
22 if (cnt < period) {
23 return;
24 }
25
26 cnt = 0;
27
28 /* Go through the points (roughly!):
29 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
30 */
31 period = 110 - ( (300<<FSHIFT)/
32 ((avenrun[0]/5) + (3<<FSHIFT)) );
33
34 if (up) {
35 if (bit == 7) {
36 bit--;
37 up=0;
38 } else {
39 bit ++;
40 }
41 } else {
42 if (bit == 0) {
43 bit++;
44 up=1;
45 } else {
46 bit--;
47 }
48 }
49 *p = 1<<(bit+8);
50
51}
diff --git a/arch/sh/boards/se/7751/setup.c b/arch/sh/boards/se/7751/setup.c
index f7e1dd39c836..e3feae6ec0bf 100644
--- a/arch/sh/boards/se/7751/setup.c
+++ b/arch/sh/boards/se/7751/setup.c
@@ -9,11 +9,11 @@
9 * Ian da Silva and Jeremy Siegel, 2001. 9 * Ian da Silva and Jeremy Siegel, 2001.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/platform_device.h>
12#include <asm/machvec.h> 13#include <asm/machvec.h>
13#include <asm/se7751.h> 14#include <asm/se7751.h>
14#include <asm/io.h> 15#include <asm/io.h>
15 16
16void heartbeat_7751se(void);
17void init_7751se_IRQ(void); 17void init_7751se_IRQ(void);
18 18
19#ifdef CONFIG_SH_KGDB 19#ifdef CONFIG_SH_KGDB
@@ -161,11 +161,40 @@ static int kgdb_uart_setup(void)
161} 161}
162#endif /* CONFIG_SH_KGDB */ 162#endif /* CONFIG_SH_KGDB */
163 163
164static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 };
165
166static struct resource heartbeat_resources[] = {
167 [0] = {
168 .start = PA_LED,
169 .end = PA_LED + ARRAY_SIZE(heartbeat_bit_pos) - 1,
170 .flags = IORESOURCE_MEM,
171 },
172};
173
174static struct platform_device heartbeat_device = {
175 .name = "heartbeat",
176 .id = -1,
177 .dev = {
178 .platform_data = heartbeat_bit_pos,
179 },
180 .num_resources = ARRAY_SIZE(heartbeat_resources),
181 .resource = heartbeat_resources,
182};
183
184static struct platform_device *se7751_devices[] __initdata = {
185 &smc91x_device,
186 &heartbeat_device,
187};
188
189static int __init se7751_devices_setup(void)
190{
191 return platform_add_devices(se7751_devices, ARRAY_SIZE(se7751_devices));
192}
193__initcall(se7751_devices_setup);
164 194
165/* 195/*
166 * The Machine Vector 196 * The Machine Vector
167 */ 197 */
168
169struct sh_machine_vector mv_7751se __initmv = { 198struct sh_machine_vector mv_7751se __initmv = {
170 .mv_name = "7751 SolutionEngine", 199 .mv_name = "7751 SolutionEngine",
171 .mv_setup = sh7751se_setup, 200 .mv_setup = sh7751se_setup,
@@ -189,8 +218,5 @@ struct sh_machine_vector mv_7751se __initmv = {
189 .mv_outsl = sh7751se_outsl, 218 .mv_outsl = sh7751se_outsl,
190 219
191 .mv_init_irq = init_7751se_IRQ, 220 .mv_init_irq = init_7751se_IRQ,
192#ifdef CONFIG_HEARTBEAT
193 .mv_heartbeat = heartbeat_7751se,
194#endif
195}; 221};
196ALIAS_MV(7751se) 222ALIAS_MV(7751se)
diff --git a/arch/sh/boards/sh03/Makefile b/arch/sh/boards/sh03/Makefile
index 321be50e36a5..400306a796ec 100644
--- a/arch/sh/boards/sh03/Makefile
+++ b/arch/sh/boards/sh03/Makefile
@@ -3,4 +3,3 @@
3# 3#
4 4
5obj-y := setup.o rtc.o 5obj-y := setup.o rtc.o
6obj-$(CONFIG_HEARTBEAT) += led.o
diff --git a/arch/sh/boards/sh03/led.c b/arch/sh/boards/sh03/led.c
deleted file mode 100644
index d38562ad6be8..000000000000
--- a/arch/sh/boards/sh03/led.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * linux/arch/sh/boards/sh03/led.c
3 *
4 * Copyright (C) 2004 Saito.K Interface Corporation.
5 *
6 * This file contains Interface CTP/PCI-SH03 specific LED code.
7 */
8
9#include <linux/sched.h>
10
 11/* Cycle the LEDs in the classic Knightrider/Sun pattern */
12void heartbeat_sh03(void)
13{
14 static unsigned int cnt = 0, period = 0;
15 volatile unsigned char* p = (volatile unsigned char*)0xa0800000;
16 static unsigned bit = 0, up = 1;
17
18 cnt += 1;
19 if (cnt < period) {
20 return;
21 }
22
23 cnt = 0;
24
25 /* Go through the points (roughly!):
26 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
27 */
28 period = 110 - ( (300<<FSHIFT)/
29 ((avenrun[0]/5) + (3<<FSHIFT)) );
30
31 if (up) {
32 if (bit == 7) {
33 bit--;
34 up=0;
35 } else {
36 bit ++;
37 }
38 } else {
39 if (bit == 0) {
40 bit++;
41 up=1;
42 } else {
43 bit--;
44 }
45 }
46 *p = 1<<bit;
47
48}
diff --git a/arch/sh/boards/sh03/setup.c b/arch/sh/boards/sh03/setup.c
index 5ad1e19771be..c069c444b4ec 100644
--- a/arch/sh/boards/sh03/setup.c
+++ b/arch/sh/boards/sh03/setup.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/platform_device.h>
11#include <asm/io.h> 12#include <asm/io.h>
12#include <asm/rtc.h> 13#include <asm/rtc.h>
13#include <asm/sh03/io.h> 14#include <asm/sh03/io.h>
@@ -48,15 +49,36 @@ static void __init sh03_setup(char **cmdline_p)
48 board_time_init = sh03_time_init; 49 board_time_init = sh03_time_init;
49} 50}
50 51
52static struct resource heartbeat_resources[] = {
53 [0] = {
54 .start = 0xa0800000,
55 .end = 0xa0800000 + 8 - 1,
56 .flags = IORESOURCE_MEM,
57 },
58};
59
60static struct platform_device heartbeat_device = {
61 .name = "heartbeat",
62 .id = -1,
63 .num_resources = ARRAY_SIZE(heartbeat_resources),
64 .resource = heartbeat_resources,
65};
66
67static struct platform_device *sh03_devices[] __initdata = {
68 &heartbeat_device,
69};
70
71static int __init sh03_devices_setup(void)
72{
73 return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices));
74}
75__initcall(sh03_devices_setup);
76
51struct sh_machine_vector mv_sh03 __initmv = { 77struct sh_machine_vector mv_sh03 __initmv = {
52 .mv_name = "Interface (CTP/PCI-SH03)", 78 .mv_name = "Interface (CTP/PCI-SH03)",
53 .mv_setup = sh03_setup, 79 .mv_setup = sh03_setup,
54 .mv_nr_irqs = 48, 80 .mv_nr_irqs = 48,
55 .mv_ioport_map = sh03_ioport_map, 81 .mv_ioport_map = sh03_ioport_map,
56 .mv_init_irq = init_sh03_IRQ, 82 .mv_init_irq = init_sh03_IRQ,
57
58#ifdef CONFIG_HEARTBEAT
59 .mv_heartbeat = heartbeat_sh03,
60#endif
61}; 83};
62ALIAS_MV(sh03) 84ALIAS_MV(sh03)
diff --git a/arch/sh/boards/shmin/setup.c b/arch/sh/boards/shmin/setup.c
index a31a1d1e2681..4a9df4a6b034 100644
--- a/arch/sh/boards/shmin/setup.c
+++ b/arch/sh/boards/shmin/setup.c
@@ -12,12 +12,22 @@
12#include <asm/irq.h> 12#include <asm/irq.h>
13#include <asm/io.h> 13#include <asm/io.h>
14 14
15#define PFC_PHCR 0xa400010e 15#define PFC_PHCR 0xa400010eUL
16#define INTC_ICR1 0xa4000010UL
17#define INTC_IPRC 0xa4000016UL
18
19static struct ipr_data shmin_ipr_map[] = {
20 { .irq=32, .addr=INTC_IPRC, .shift= 0, .priority=0 },
21 { .irq=33, .addr=INTC_IPRC, .shift= 4, .priority=0 },
22 { .irq=34, .addr=INTC_IPRC, .shift= 8, .priority=8 },
23 { .irq=35, .addr=INTC_IPRC, .shift=12, .priority=0 },
24};
16 25
17static void __init init_shmin_irq(void) 26static void __init init_shmin_irq(void)
18{ 27{
19 ctrl_outw(0x2a00, PFC_PHCR); // IRQ0-3=IRQ 28 ctrl_outw(0x2a00, PFC_PHCR); // IRQ0-3=IRQ
20 ctrl_outw(0x0aaa, INTC_ICR1); // IRQ0-3=IRQ-mode,Low-active. 29 ctrl_outw(0x0aaa, INTC_ICR1); // IRQ0-3=IRQ-mode,Low-active.
30 make_ipr_irq(shmin_ipr_map, ARRAY_SIZE(shmin_ipr_map));
21} 31}
22 32
23static void __iomem *shmin_ioport_map(unsigned long port, unsigned int size) 33static void __iomem *shmin_ioport_map(unsigned long port, unsigned int size)
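shmin now hands make_ipr_irq() an explicit table: each entry places a 4-bit priority at .shift within INTC_IPRC, and the .priority = 0 entries leave those lines masked until something raises them. For the one line given a real priority (IRQ 34: priority 8 at shift 8), enabling amounts to setting bits 11:8 of IPRC, sketched here with the ctrl_inw()/ctrl_outw() accessors used throughout this patch (shmin_unmask_irq34 is an illustrative helper, not patch code):

#include <asm/io.h>

#define INTC_IPRC	0xa4000016UL	/* as defined in the hunk above */

/* Set bits 11:8 of INTC_IPRC to priority 8, enabling IRQ 34. */
static void shmin_unmask_irq34(void)
{
	ctrl_outw(ctrl_inw(INTC_IPRC) | (8 << 8), INTC_IPRC);
}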
diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c
index f7ea700d05ae..70f12907647f 100644
--- a/arch/sh/cchips/voyagergx/irq.c
+++ b/arch/sh/cchips/voyagergx/irq.c
@@ -28,21 +28,21 @@ static void disable_voyagergx_irq(unsigned int irq)
28 unsigned long val; 28 unsigned long val;
29 unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE); 29 unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE);
30 30
31 pr_debug("disable_voyagergx_irq(%d): mask=%lx\n", irq, mask); 31 pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask);
32 val = inl(VOYAGER_INT_MASK); 32 val = readl((void __iomem *)VOYAGER_INT_MASK);
33 val &= ~mask; 33 val &= ~mask;
34 outl(val, VOYAGER_INT_MASK); 34 writel(val, (void __iomem *)VOYAGER_INT_MASK);
35} 35}
36 36
37static void enable_voyagergx_irq(unsigned int irq) 37static void enable_voyagergx_irq(unsigned int irq)
38{ 38{
39 unsigned long val; 39 unsigned long val;
40 unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE); 40 unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE);
41 41
42 pr_debug("disable_voyagergx_irq(%d): mask=%lx\n", irq, mask); 42 pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask);
43 val = inl(VOYAGER_INT_MASK); 43 val = readl((void __iomem *)VOYAGER_INT_MASK);
44 val |= mask; 44 val |= mask;
45 outl(val, VOYAGER_INT_MASK); 45 writel(val, (void __iomem *)VOYAGER_INT_MASK);
46} 46}
47 47
48static void mask_and_ack_voyagergx(unsigned int irq) 48static void mask_and_ack_voyagergx(unsigned int irq)
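The inl()/outl() to readl()/writel() switch in this hunk reflects that the VoyagerGX registers are memory-mapped rather than I/O ports; readl()/writel() take a void __iomem pointer, hence the casts from the plain integer register defines. An illustrative pair of wrappers (not part of the patch) would keep those casts in one place:

#include <linux/io.h>

static inline unsigned long voyager_reg_read(unsigned long reg)
{
	return readl((void __iomem *)reg);
}

static inline void voyager_reg_write(unsigned long val, unsigned long reg)
{
	writel(val, (void __iomem *)reg);
}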
@@ -68,20 +68,20 @@ static void shutdown_voyagergx_irq(unsigned int irq)
68} 68}
69 69
70static struct hw_interrupt_type voyagergx_irq_type = { 70static struct hw_interrupt_type voyagergx_irq_type = {
71 .typename = "VOYAGERGX-IRQ", 71 .typename = "VOYAGERGX-IRQ",
72 .startup = startup_voyagergx_irq, 72 .startup = startup_voyagergx_irq,
73 .shutdown = shutdown_voyagergx_irq, 73 .shutdown = shutdown_voyagergx_irq,
74 .enable = enable_voyagergx_irq, 74 .enable = enable_voyagergx_irq,
75 .disable = disable_voyagergx_irq, 75 .disable = disable_voyagergx_irq,
76 .ack = mask_and_ack_voyagergx, 76 .ack = mask_and_ack_voyagergx,
77 .end = end_voyagergx_irq, 77 .end = end_voyagergx_irq,
78}; 78};
79 79
80static irqreturn_t voyagergx_interrupt(int irq, void *dev_id) 80static irqreturn_t voyagergx_interrupt(int irq, void *dev_id)
81{ 81{
82 printk(KERN_INFO 82 printk(KERN_INFO
83 "VoyagerGX: spurious interrupt, status: 0x%x\n", 83 "VoyagerGX: spurious interrupt, status: 0x%x\n",
84 inl(INT_STATUS)); 84 (unsigned int)readl((void __iomem *)INT_STATUS));
85 return IRQ_HANDLED; 85 return IRQ_HANDLED;
86} 86}
87 87
@@ -93,13 +93,13 @@ static struct {
93void voyagergx_register_irq_demux(int irq, 93void voyagergx_register_irq_demux(int irq,
94 int (*demux)(int irq, void *dev), void *dev) 94 int (*demux)(int irq, void *dev), void *dev)
95{ 95{
96 voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = demux; 96 voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = demux;
97 voyagergx_demux[irq - VOYAGER_IRQ_BASE].dev = dev; 97 voyagergx_demux[irq - VOYAGER_IRQ_BASE].dev = dev;
98} 98}
99 99
100void voyagergx_unregister_irq_demux(int irq) 100void voyagergx_unregister_irq_demux(int irq)
101{ 101{
102 voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = 0; 102 voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = 0;
103} 103}
104 104
105int voyagergx_irq_demux(int irq) 105int voyagergx_irq_demux(int irq)
@@ -107,31 +107,25 @@ int voyagergx_irq_demux(int irq)
107 107
108 if (irq == IRQ_VOYAGER ) { 108 if (irq == IRQ_VOYAGER ) {
109 unsigned long i = 0, bit __attribute__ ((unused)); 109 unsigned long i = 0, bit __attribute__ ((unused));
110 unsigned long val = inl(INT_STATUS); 110 unsigned long val = readl((void __iomem *)INT_STATUS);
111#if 1 111
112 if ( val & ( 1 << 1 )){ 112 if (val & (1 << 1))
113 i = 1; 113 i = 1;
114 } else if ( val & ( 1 << 2 )){ 114 else if (val & (1 << 2))
115 i = 2; 115 i = 2;
116 } else if ( val & ( 1 << 6 )){ 116 else if (val & (1 << 6))
117 i = 6; 117 i = 6;
118 } else if( val & ( 1 << 10 )){ 118 else if (val & (1 << 10))
119 i = 10; 119 i = 10;
120 } else if( val & ( 1 << 11 )){ 120 else if (val & (1 << 11))
121 i = 11; 121 i = 11;
122 } else if( val & ( 1 << 12 )){ 122 else if (val & (1 << 12))
123 i = 12; 123 i = 12;
124 } else if( val & ( 1 << 17 )){ 124 else if (val & (1 << 17))
125 i = 17; 125 i = 17;
126 } else { 126 else
127 printk("Unexpected IRQ irq = %d status = 0x%08lx\n", irq, val); 127 printk("Unexpected IRQ irq = %d status = 0x%08lx\n", irq, val);
128 } 128 pr_debug("voyagergx_irq_demux %d \n", i);
129 pr_debug("voyagergx_irq_demux %ld\n", i);
130#else
131 for (bit = 1, i = 0 ; i < VOYAGER_IRQ_NUM ; bit <<= 1, i++)
132 if (val & bit)
133 break;
134#endif
135 if (i < VOYAGER_IRQ_NUM) { 129 if (i < VOYAGER_IRQ_NUM) {
136 irq = VOYAGER_IRQ_BASE + i; 130 irq = VOYAGER_IRQ_BASE + i;
137 if (voyagergx_demux[i].func != 0) 131 if (voyagergx_demux[i].func != 0)
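
Every hunk in this file repeats one conversion: inl()/outl() on a bare register address becomes readl()/writel() through an explicit (void __iomem *) cast, with the surrounding read-modify-write left intact. Factored out, the idiom is just the helper below (the helper name is mine; the casts are needed because the VOYAGER_* macros in this tree are plain unsigned long addresses):

static inline void voyagergx_rmw(unsigned long reg, unsigned long clear,
				 unsigned long set)
{
	unsigned long val = readl((void __iomem *)reg);

	val &= ~clear;
	val |= set;
	writel(val, (void __iomem *)reg);
}

/* disable: voyagergx_rmw(VOYAGER_INT_MASK, mask, 0);
 * enable:  voyagergx_rmw(VOYAGER_INT_MASK, 0, mask); */
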
diff --git a/arch/sh/cchips/voyagergx/setup.c b/arch/sh/cchips/voyagergx/setup.c
index 66b2fedd7ad9..33f03027c193 100644
--- a/arch/sh/cchips/voyagergx/setup.c
+++ b/arch/sh/cchips/voyagergx/setup.c
@@ -19,7 +19,7 @@ static int __init setup_voyagergx(void)
19{ 19{
20 unsigned long val; 20 unsigned long val;
21 21
22 val = inl(DRAM_CTRL); 22 val = readl((void __iomem *)DRAM_CTRL);
23 val |= (DRAM_CTRL_CPU_COLUMN_SIZE_256 | 23 val |= (DRAM_CTRL_CPU_COLUMN_SIZE_256 |
24 DRAM_CTRL_CPU_ACTIVE_PRECHARGE | 24 DRAM_CTRL_CPU_ACTIVE_PRECHARGE |
25 DRAM_CTRL_CPU_RESET | 25 DRAM_CTRL_CPU_RESET |
@@ -29,7 +29,7 @@ static int __init setup_voyagergx(void)
29 DRAM_CTRL_ACTIVE_PRECHARGE | 29 DRAM_CTRL_ACTIVE_PRECHARGE |
30 DRAM_CTRL_RESET | 30 DRAM_CTRL_RESET |
31 DRAM_CTRL_REMAIN_ACTIVE); 31 DRAM_CTRL_REMAIN_ACTIVE);
32 outl(val, DRAM_CTRL); 32 writel(val, (void __iomem *)DRAM_CTRL);
33 33
34 return 0; 34 return 0;
35} 35}
diff --git a/arch/sh/configs/rts7751r2d_defconfig b/arch/sh/configs/rts7751r2d_defconfig
index 099e98f14729..db6a02df5af6 100644
--- a/arch/sh/configs/rts7751r2d_defconfig
+++ b/arch/sh/configs/rts7751r2d_defconfig
@@ -1,15 +1,21 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18 3# Linux kernel version: 2.6.20
4# Tue Oct 3 11:38:36 2006 4# Thu Feb 15 17:17:29 2007
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
8CONFIG_GENERIC_FIND_NEXT_BIT=y 8CONFIG_GENERIC_FIND_NEXT_BIT=y
9CONFIG_GENERIC_HWEIGHT=y 9CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_HARDIRQS=y 10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
11CONFIG_GENERIC_IRQ_PROBE=y 12CONFIG_GENERIC_IRQ_PROBE=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 13CONFIG_GENERIC_CALIBRATE_DELAY=y
14# CONFIG_GENERIC_TIME is not set
15CONFIG_STACKTRACE_SUPPORT=y
16CONFIG_LOCKDEP_SUPPORT=y
17# CONFIG_ARCH_HAS_ILOG2_U32 is not set
18# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
14 20
15# 21#
@@ -33,8 +39,8 @@ CONFIG_SYSVIPC=y
33# CONFIG_UTS_NS is not set 39# CONFIG_UTS_NS is not set
34# CONFIG_AUDIT is not set 40# CONFIG_AUDIT is not set
35# CONFIG_IKCONFIG is not set 41# CONFIG_IKCONFIG is not set
42CONFIG_SYSFS_DEPRECATED=y
36# CONFIG_RELAY is not set 43# CONFIG_RELAY is not set
37CONFIG_INITRAMFS_SOURCE=""
38# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 44# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
39CONFIG_SYSCTL=y 45CONFIG_SYSCTL=y
40CONFIG_EMBEDDED=y 46CONFIG_EMBEDDED=y
@@ -97,10 +103,8 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
97# CONFIG_SH_73180_SOLUTION_ENGINE is not set 103# CONFIG_SH_73180_SOLUTION_ENGINE is not set
98# CONFIG_SH_7751_SYSTEMH is not set 104# CONFIG_SH_7751_SYSTEMH is not set
99# CONFIG_SH_HP6XX is not set 105# CONFIG_SH_HP6XX is not set
100# CONFIG_SH_EC3104 is not set
101# CONFIG_SH_SATURN is not set 106# CONFIG_SH_SATURN is not set
102# CONFIG_SH_DREAMCAST is not set 107# CONFIG_SH_DREAMCAST is not set
103# CONFIG_SH_BIGSUR is not set
104# CONFIG_SH_MPC1211 is not set 108# CONFIG_SH_MPC1211 is not set
105# CONFIG_SH_SH03 is not set 109# CONFIG_SH_SH03 is not set
106# CONFIG_SH_SECUREEDGE5410 is not set 110# CONFIG_SH_SECUREEDGE5410 is not set
@@ -113,6 +117,9 @@ CONFIG_SH_RTS7751R2D=y
113# CONFIG_SH_LANDISK is not set 117# CONFIG_SH_LANDISK is not set
114# CONFIG_SH_TITAN is not set 118# CONFIG_SH_TITAN is not set
115# CONFIG_SH_SHMIN is not set 119# CONFIG_SH_SHMIN is not set
120# CONFIG_SH_7206_SOLUTION_ENGINE is not set
121# CONFIG_SH_7619_SOLUTION_ENGINE is not set
122# CONFIG_SH_ASDAP310 is not set
116# CONFIG_SH_UNKNOWN is not set 123# CONFIG_SH_UNKNOWN is not set
117 124
118# 125#
@@ -124,6 +131,12 @@ CONFIG_CPU_SH4=y
124# SH-2 Processor Support 131# SH-2 Processor Support
125# 132#
126# CONFIG_CPU_SUBTYPE_SH7604 is not set 133# CONFIG_CPU_SUBTYPE_SH7604 is not set
134# CONFIG_CPU_SUBTYPE_SH7619 is not set
135
136#
137# SH-2A Processor Support
138#
139# CONFIG_CPU_SUBTYPE_SH7206 is not set
127 140
128# 141#
129# SH-3 Processor Support 142# SH-3 Processor Support
@@ -159,12 +172,14 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
159# 172#
160# CONFIG_CPU_SUBTYPE_SH7770 is not set 173# CONFIG_CPU_SUBTYPE_SH7770 is not set
161# CONFIG_CPU_SUBTYPE_SH7780 is not set 174# CONFIG_CPU_SUBTYPE_SH7780 is not set
175# CONFIG_CPU_SUBTYPE_SH7785 is not set
162 176
163# 177#
164# SH4AL-DSP Processor Support 178# SH4AL-DSP Processor Support
165# 179#
166# CONFIG_CPU_SUBTYPE_SH73180 is not set 180# CONFIG_CPU_SUBTYPE_SH73180 is not set
167# CONFIG_CPU_SUBTYPE_SH7343 is not set 181# CONFIG_CPU_SUBTYPE_SH7343 is not set
182# CONFIG_CPU_SUBTYPE_SH7722 is not set
168 183
169# 184#
170# Memory management options 185# Memory management options
@@ -174,6 +189,9 @@ CONFIG_PAGE_OFFSET=0x80000000
174CONFIG_MEMORY_START=0x0c000000 189CONFIG_MEMORY_START=0x0c000000
175CONFIG_MEMORY_SIZE=0x04000000 190CONFIG_MEMORY_SIZE=0x04000000
176CONFIG_VSYSCALL=y 191CONFIG_VSYSCALL=y
192CONFIG_PAGE_SIZE_4KB=y
193# CONFIG_PAGE_SIZE_8KB is not set
194# CONFIG_PAGE_SIZE_64KB is not set
177CONFIG_SELECT_MEMORY_MODEL=y 195CONFIG_SELECT_MEMORY_MODEL=y
178CONFIG_FLATMEM_MANUAL=y 196CONFIG_FLATMEM_MANUAL=y
179# CONFIG_DISCONTIGMEM_MANUAL is not set 197# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -183,6 +201,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
183# CONFIG_SPARSEMEM_STATIC is not set 201# CONFIG_SPARSEMEM_STATIC is not set
184CONFIG_SPLIT_PTLOCK_CPUS=4 202CONFIG_SPLIT_PTLOCK_CPUS=4
185# CONFIG_RESOURCES_64BIT is not set 203# CONFIG_RESOURCES_64BIT is not set
204CONFIG_ZONE_DMA_FLAG=0
186 205
187# 206#
188# Cache configuration 207# Cache configuration
@@ -195,11 +214,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
195# Processor features 214# Processor features
196# 215#
197CONFIG_CPU_LITTLE_ENDIAN=y 216CONFIG_CPU_LITTLE_ENDIAN=y
217# CONFIG_CPU_BIG_ENDIAN is not set
198CONFIG_SH_FPU=y 218CONFIG_SH_FPU=y
199# CONFIG_SH_DSP is not set 219# CONFIG_SH_DSP is not set
200# CONFIG_SH_STORE_QUEUES is not set 220# CONFIG_SH_STORE_QUEUES is not set
201CONFIG_CPU_HAS_INTEVT=y 221CONFIG_CPU_HAS_INTEVT=y
222CONFIG_CPU_HAS_IPR_IRQ=y
202CONFIG_CPU_HAS_SR_RB=y 223CONFIG_CPU_HAS_SR_RB=y
224CONFIG_CPU_HAS_PTEA=y
203 225
204# 226#
205# Timer support 227# Timer support
@@ -210,6 +232,8 @@ CONFIG_SH_TMU=y
210# RTS7751R2D options 232# RTS7751R2D options
211# 233#
212CONFIG_RTS7751R2D_REV11=y 234CONFIG_RTS7751R2D_REV11=y
235CONFIG_SH_TIMER_IRQ=16
236# CONFIG_NO_IDLE_HZ is not set
213CONFIG_SH_PCLK_FREQ=60000000 237CONFIG_SH_PCLK_FREQ=60000000
214 238
215# 239#
@@ -232,10 +256,16 @@ CONFIG_VOYAGERGX=y
232CONFIG_HEARTBEAT=y 256CONFIG_HEARTBEAT=y
233 257
234# 258#
259# Additional SuperH Device Drivers
260#
261# CONFIG_PUSH_SWITCH is not set
262
263#
235# Kernel features 264# Kernel features
236# 265#
237# CONFIG_HZ_100 is not set 266# CONFIG_HZ_100 is not set
238CONFIG_HZ_250=y 267CONFIG_HZ_250=y
268# CONFIG_HZ_300 is not set
239# CONFIG_HZ_1000 is not set 269# CONFIG_HZ_1000 is not set
240CONFIG_HZ=250 270CONFIG_HZ=250
241# CONFIG_KEXEC is not set 271# CONFIG_KEXEC is not set
@@ -251,7 +281,7 @@ CONFIG_ZERO_PAGE_OFFSET=0x00010000
251CONFIG_BOOT_LINK_OFFSET=0x00800000 281CONFIG_BOOT_LINK_OFFSET=0x00800000
252# CONFIG_UBC_WAKEUP is not set 282# CONFIG_UBC_WAKEUP is not set
253CONFIG_CMDLINE_BOOL=y 283CONFIG_CMDLINE_BOOL=y
254CONFIG_CMDLINE="mem=64M console=ttySC0,115200 root=/dev/hda1" 284CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
255 285
256# 286#
257# Bus options 287# Bus options
@@ -260,7 +290,6 @@ CONFIG_PCI=y
260CONFIG_SH_PCIDMA_NONCOHERENT=y 290CONFIG_SH_PCIDMA_NONCOHERENT=y
261CONFIG_PCI_AUTO=y 291CONFIG_PCI_AUTO=y
262CONFIG_PCI_AUTO_UPDATE_RESOURCES=y 292CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
263# CONFIG_PCI_MULTITHREAD_PROBE is not set
264 293
265# 294#
266# PCCARD (PCMCIA/CardBus) support 295# PCCARD (PCMCIA/CardBus) support
@@ -302,6 +331,7 @@ CONFIG_UNIX=y
302CONFIG_XFRM=y 331CONFIG_XFRM=y
303# CONFIG_XFRM_USER is not set 332# CONFIG_XFRM_USER is not set
304# CONFIG_XFRM_SUB_POLICY is not set 333# CONFIG_XFRM_SUB_POLICY is not set
334# CONFIG_XFRM_MIGRATE is not set
305# CONFIG_NET_KEY is not set 335# CONFIG_NET_KEY is not set
306CONFIG_INET=y 336CONFIG_INET=y
307# CONFIG_IP_MULTICAST is not set 337# CONFIG_IP_MULTICAST is not set
@@ -319,11 +349,13 @@ CONFIG_IP_FIB_HASH=y
319# CONFIG_INET_TUNNEL is not set 349# CONFIG_INET_TUNNEL is not set
320CONFIG_INET_XFRM_MODE_TRANSPORT=y 350CONFIG_INET_XFRM_MODE_TRANSPORT=y
321CONFIG_INET_XFRM_MODE_TUNNEL=y 351CONFIG_INET_XFRM_MODE_TUNNEL=y
352CONFIG_INET_XFRM_MODE_BEET=y
322CONFIG_INET_DIAG=y 353CONFIG_INET_DIAG=y
323CONFIG_INET_TCP_DIAG=y 354CONFIG_INET_TCP_DIAG=y
324# CONFIG_TCP_CONG_ADVANCED is not set 355# CONFIG_TCP_CONG_ADVANCED is not set
325CONFIG_TCP_CONG_CUBIC=y 356CONFIG_TCP_CONG_CUBIC=y
326CONFIG_DEFAULT_TCP_CONG="cubic" 357CONFIG_DEFAULT_TCP_CONG="cubic"
358# CONFIG_TCP_MD5SIG is not set
327# CONFIG_IPV6 is not set 359# CONFIG_IPV6 is not set
328# CONFIG_INET6_XFRM_TUNNEL is not set 360# CONFIG_INET6_XFRM_TUNNEL is not set
329# CONFIG_INET6_TUNNEL is not set 361# CONFIG_INET6_TUNNEL is not set
@@ -380,7 +412,7 @@ CONFIG_WIRELESS_EXT=y
380# 412#
381CONFIG_STANDALONE=y 413CONFIG_STANDALONE=y
382CONFIG_PREVENT_FIRMWARE_BUILD=y 414CONFIG_PREVENT_FIRMWARE_BUILD=y
383# CONFIG_FW_LOADER is not set 415CONFIG_FW_LOADER=m
384# CONFIG_SYS_HYPERVISOR is not set 416# CONFIG_SYS_HYPERVISOR is not set
385 417
386# 418#
@@ -422,44 +454,145 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
422# CONFIG_ATA_OVER_ETH is not set 454# CONFIG_ATA_OVER_ETH is not set
423 455
424# 456#
425# ATA/ATAPI/MFM/RLL support 457# Misc devices
426#
427CONFIG_IDE=y
428CONFIG_IDE_MAX_HWIFS=4
429CONFIG_BLK_DEV_IDE=y
430
431#
432# Please see Documentation/ide.txt for help/info on IDE drives
433# 458#
434# CONFIG_BLK_DEV_IDE_SATA is not set 459# CONFIG_SGI_IOC4 is not set
435CONFIG_BLK_DEV_IDEDISK=y 460# CONFIG_TIFM_CORE is not set
436# CONFIG_IDEDISK_MULTI_MODE is not set
437# CONFIG_BLK_DEV_IDECD is not set
438# CONFIG_BLK_DEV_IDETAPE is not set
439# CONFIG_BLK_DEV_IDEFLOPPY is not set
440# CONFIG_IDE_TASK_IOCTL is not set
441 461
442# 462#
443# IDE chipset support/bugfixes 463# ATA/ATAPI/MFM/RLL support
444# 464#
445CONFIG_IDE_GENERIC=y 465# CONFIG_IDE is not set
446# CONFIG_BLK_DEV_IDEPCI is not set
447# CONFIG_IDE_ARM is not set
448# CONFIG_BLK_DEV_IDEDMA is not set
449# CONFIG_IDEDMA_AUTO is not set
450# CONFIG_BLK_DEV_HD is not set
451 466
452# 467#
453# SCSI device support 468# SCSI device support
454# 469#
455# CONFIG_RAID_ATTRS is not set 470# CONFIG_RAID_ATTRS is not set
456# CONFIG_SCSI is not set 471CONFIG_SCSI=y
472# CONFIG_SCSI_TGT is not set
457# CONFIG_SCSI_NETLINK is not set 473# CONFIG_SCSI_NETLINK is not set
474CONFIG_SCSI_PROC_FS=y
475
476#
477# SCSI support type (disk, tape, CD-ROM)
478#
479CONFIG_BLK_DEV_SD=y
480# CONFIG_CHR_DEV_ST is not set
481# CONFIG_CHR_DEV_OSST is not set
482# CONFIG_BLK_DEV_SR is not set
483# CONFIG_CHR_DEV_SG is not set
484# CONFIG_CHR_DEV_SCH is not set
485
486#
487# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
488#
489# CONFIG_SCSI_MULTI_LUN is not set
490# CONFIG_SCSI_CONSTANTS is not set
491# CONFIG_SCSI_LOGGING is not set
492# CONFIG_SCSI_SCAN_ASYNC is not set
493
494#
495# SCSI Transports
496#
497# CONFIG_SCSI_SPI_ATTRS is not set
498# CONFIG_SCSI_FC_ATTRS is not set
499# CONFIG_SCSI_ISCSI_ATTRS is not set
500# CONFIG_SCSI_SAS_ATTRS is not set
501# CONFIG_SCSI_SAS_LIBSAS is not set
502
503#
504# SCSI low-level drivers
505#
506# CONFIG_ISCSI_TCP is not set
507# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
508# CONFIG_SCSI_3W_9XXX is not set
509# CONFIG_SCSI_ACARD is not set
510# CONFIG_SCSI_AACRAID is not set
511# CONFIG_SCSI_AIC7XXX is not set
512# CONFIG_SCSI_AIC7XXX_OLD is not set
513# CONFIG_SCSI_AIC79XX is not set
514# CONFIG_SCSI_AIC94XX is not set
515# CONFIG_SCSI_DPT_I2O is not set
516# CONFIG_SCSI_ARCMSR is not set
517# CONFIG_MEGARAID_NEWGEN is not set
518# CONFIG_MEGARAID_LEGACY is not set
519# CONFIG_MEGARAID_SAS is not set
520# CONFIG_SCSI_HPTIOP is not set
521# CONFIG_SCSI_DMX3191D is not set
522# CONFIG_SCSI_FUTURE_DOMAIN is not set
523# CONFIG_SCSI_IPS is not set
524# CONFIG_SCSI_INITIO is not set
525# CONFIG_SCSI_INIA100 is not set
526# CONFIG_SCSI_STEX is not set
527# CONFIG_SCSI_SYM53C8XX_2 is not set
528# CONFIG_SCSI_IPR is not set
529# CONFIG_SCSI_QLOGIC_1280 is not set
530# CONFIG_SCSI_QLA_FC is not set
531# CONFIG_SCSI_QLA_ISCSI is not set
532# CONFIG_SCSI_LPFC is not set
533# CONFIG_SCSI_DC395x is not set
534# CONFIG_SCSI_DC390T is not set
535# CONFIG_SCSI_NSP32 is not set
536# CONFIG_SCSI_DEBUG is not set
537# CONFIG_SCSI_SRP is not set
458 538
459# 539#
460# Serial ATA (prod) and Parallel ATA (experimental) drivers 540# Serial ATA (prod) and Parallel ATA (experimental) drivers
461# 541#
462# CONFIG_ATA is not set 542CONFIG_ATA=y
543# CONFIG_ATA_NONSTANDARD is not set
544# CONFIG_SATA_AHCI is not set
545# CONFIG_SATA_SVW is not set
546# CONFIG_ATA_PIIX is not set
547# CONFIG_SATA_MV is not set
548# CONFIG_SATA_NV is not set
549# CONFIG_PDC_ADMA is not set
550# CONFIG_SATA_QSTOR is not set
551# CONFIG_SATA_PROMISE is not set
552# CONFIG_SATA_SX4 is not set
553# CONFIG_SATA_SIL is not set
554# CONFIG_SATA_SIL24 is not set
555# CONFIG_SATA_SIS is not set
556# CONFIG_SATA_ULI is not set
557# CONFIG_SATA_VIA is not set
558# CONFIG_SATA_VITESSE is not set
559# CONFIG_SATA_INIC162X is not set
560# CONFIG_PATA_ALI is not set
561# CONFIG_PATA_AMD is not set
562# CONFIG_PATA_ARTOP is not set
563# CONFIG_PATA_ATIIXP is not set
564# CONFIG_PATA_CMD64X is not set
565# CONFIG_PATA_CS5520 is not set
566# CONFIG_PATA_CS5530 is not set
567# CONFIG_PATA_CYPRESS is not set
568# CONFIG_PATA_EFAR is not set
569# CONFIG_ATA_GENERIC is not set
570# CONFIG_PATA_HPT366 is not set
571# CONFIG_PATA_HPT37X is not set
572# CONFIG_PATA_HPT3X2N is not set
573# CONFIG_PATA_HPT3X3 is not set
574# CONFIG_PATA_IT821X is not set
575# CONFIG_PATA_IT8213 is not set
576# CONFIG_PATA_JMICRON is not set
577# CONFIG_PATA_TRIFLEX is not set
578# CONFIG_PATA_MARVELL is not set
579# CONFIG_PATA_MPIIX is not set
580# CONFIG_PATA_OLDPIIX is not set
581# CONFIG_PATA_NETCELL is not set
582# CONFIG_PATA_NS87410 is not set
583# CONFIG_PATA_OPTI is not set
584# CONFIG_PATA_OPTIDMA is not set
585# CONFIG_PATA_PDC_OLD is not set
586# CONFIG_PATA_RADISYS is not set
587# CONFIG_PATA_RZ1000 is not set
588# CONFIG_PATA_SC1200 is not set
589# CONFIG_PATA_SERVERWORKS is not set
590# CONFIG_PATA_PDC2027X is not set
591# CONFIG_PATA_SIL680 is not set
592# CONFIG_PATA_SIS is not set
593# CONFIG_PATA_VIA is not set
594# CONFIG_PATA_WINBOND is not set
595CONFIG_PATA_PLATFORM=y
463 596
464# 597#
465# Multi-device support (RAID and LVM) 598# Multi-device support (RAID and LVM)
@@ -470,6 +603,9 @@ CONFIG_IDE_GENERIC=y
470# Fusion MPT device support 603# Fusion MPT device support
471# 604#
472# CONFIG_FUSION is not set 605# CONFIG_FUSION is not set
606# CONFIG_FUSION_SPI is not set
607# CONFIG_FUSION_FC is not set
608# CONFIG_FUSION_SAS is not set
473 609
474# 610#
475# IEEE 1394 (FireWire) support 611# IEEE 1394 (FireWire) support
@@ -540,6 +676,7 @@ CONFIG_8139TOO=y
540# CONFIG_SUNDANCE is not set 676# CONFIG_SUNDANCE is not set
541# CONFIG_TLAN is not set 677# CONFIG_TLAN is not set
542# CONFIG_VIA_RHINE is not set 678# CONFIG_VIA_RHINE is not set
679# CONFIG_SC92031 is not set
543 680
544# 681#
545# Ethernet (1000 Mbit) 682# Ethernet (1000 Mbit)
@@ -559,14 +696,17 @@ CONFIG_8139TOO=y
559# CONFIG_TIGON3 is not set 696# CONFIG_TIGON3 is not set
560# CONFIG_BNX2 is not set 697# CONFIG_BNX2 is not set
561# CONFIG_QLA3XXX is not set 698# CONFIG_QLA3XXX is not set
699# CONFIG_ATL1 is not set
562 700
563# 701#
564# Ethernet (10000 Mbit) 702# Ethernet (10000 Mbit)
565# 703#
566# CONFIG_CHELSIO_T1 is not set 704# CONFIG_CHELSIO_T1 is not set
705# CONFIG_CHELSIO_T3 is not set
567# CONFIG_IXGB is not set 706# CONFIG_IXGB is not set
568# CONFIG_S2IO is not set 707# CONFIG_S2IO is not set
569# CONFIG_MYRI10GE is not set 708# CONFIG_MYRI10GE is not set
709# CONFIG_NETXEN_NIC is not set
570 710
571# 711#
572# Token Ring devices 712# Token Ring devices
@@ -611,6 +751,7 @@ CONFIG_NET_WIRELESS=y
611# CONFIG_HIPPI is not set 751# CONFIG_HIPPI is not set
612# CONFIG_PPP is not set 752# CONFIG_PPP is not set
613# CONFIG_SLIP is not set 753# CONFIG_SLIP is not set
754# CONFIG_NET_FC is not set
614# CONFIG_SHAPER is not set 755# CONFIG_SHAPER is not set
615# CONFIG_NETCONSOLE is not set 756# CONFIG_NETCONSOLE is not set
616# CONFIG_NETPOLL is not set 757# CONFIG_NETPOLL is not set
@@ -646,14 +787,23 @@ CONFIG_NET_WIRELESS=y
646# 787#
647# Serial drivers 788# Serial drivers
648# 789#
649# CONFIG_SERIAL_8250 is not set 790CONFIG_SERIAL_8250=y
791# CONFIG_SERIAL_8250_CONSOLE is not set
792CONFIG_SERIAL_8250_PCI=y
793CONFIG_SERIAL_8250_NR_UARTS=4
794CONFIG_SERIAL_8250_RUNTIME_UARTS=4
795# CONFIG_SERIAL_8250_EXTENDED is not set
650 796
651# 797#
652# Non-8250 serial port support 798# Non-8250 serial port support
653# 799#
654# CONFIG_SERIAL_SH_SCI is not set 800CONFIG_SERIAL_SH_SCI=y
801CONFIG_SERIAL_SH_SCI_NR_UARTS=1
802CONFIG_SERIAL_SH_SCI_CONSOLE=y
803CONFIG_SERIAL_CORE=y
804CONFIG_SERIAL_CORE_CONSOLE=y
655# CONFIG_SERIAL_JSM is not set 805# CONFIG_SERIAL_JSM is not set
656# CONFIG_UNIX98_PTYS is not set 806CONFIG_UNIX98_PTYS=y
657CONFIG_LEGACY_PTYS=y 807CONFIG_LEGACY_PTYS=y
658CONFIG_LEGACY_PTY_COUNT=256 808CONFIG_LEGACY_PTY_COUNT=256
659 809
@@ -671,10 +821,6 @@ CONFIG_HW_RANDOM=y
671# CONFIG_DTLK is not set 821# CONFIG_DTLK is not set
672# CONFIG_R3964 is not set 822# CONFIG_R3964 is not set
673# CONFIG_APPLICOM is not set 823# CONFIG_APPLICOM is not set
674
675#
676# Ftape, the floppy tape device driver
677#
678# CONFIG_DRM is not set 824# CONFIG_DRM is not set
679# CONFIG_RAW_DRIVER is not set 825# CONFIG_RAW_DRIVER is not set
680 826
@@ -682,7 +828,6 @@ CONFIG_HW_RANDOM=y
682# TPM devices 828# TPM devices
683# 829#
684# CONFIG_TCG_TPM is not set 830# CONFIG_TCG_TPM is not set
685# CONFIG_TELCLOCK is not set
686 831
687# 832#
688# I2C support 833# I2C support
@@ -698,6 +843,7 @@ CONFIG_HW_RANDOM=y
698# 843#
699# Dallas's 1-wire bus 844# Dallas's 1-wire bus
700# 845#
846# CONFIG_W1 is not set
701 847
702# 848#
703# Hardware Monitoring support 849# Hardware Monitoring support
@@ -706,18 +852,14 @@ CONFIG_HWMON=y
706# CONFIG_HWMON_VID is not set 852# CONFIG_HWMON_VID is not set
707# CONFIG_SENSORS_ABITUGURU is not set 853# CONFIG_SENSORS_ABITUGURU is not set
708# CONFIG_SENSORS_F71805F is not set 854# CONFIG_SENSORS_F71805F is not set
855# CONFIG_SENSORS_PC87427 is not set
709# CONFIG_SENSORS_VT1211 is not set 856# CONFIG_SENSORS_VT1211 is not set
710# CONFIG_HWMON_DEBUG_CHIP is not set 857# CONFIG_HWMON_DEBUG_CHIP is not set
711 858
712# 859#
713# Misc devices
714#
715
716#
717# Multimedia devices 860# Multimedia devices
718# 861#
719# CONFIG_VIDEO_DEV is not set 862# CONFIG_VIDEO_DEV is not set
720CONFIG_VIDEO_V4L2=y
721 863
722# 864#
723# Digital Video Broadcasting Devices 865# Digital Video Broadcasting Devices
@@ -759,7 +901,6 @@ CONFIG_SND_VERBOSE_PROCFS=y
759CONFIG_SND_MPU401_UART=m 901CONFIG_SND_MPU401_UART=m
760CONFIG_SND_OPL3_LIB=m 902CONFIG_SND_OPL3_LIB=m
761CONFIG_SND_AC97_CODEC=m 903CONFIG_SND_AC97_CODEC=m
762CONFIG_SND_AC97_BUS=m
763# CONFIG_SND_DUMMY is not set 904# CONFIG_SND_DUMMY is not set
764# CONFIG_SND_MTPAV is not set 905# CONFIG_SND_MTPAV is not set
765# CONFIG_SND_SERIAL_U16550 is not set 906# CONFIG_SND_SERIAL_U16550 is not set
@@ -782,6 +923,18 @@ CONFIG_SND_AC97_BUS=m
782# CONFIG_SND_CMIPCI is not set 923# CONFIG_SND_CMIPCI is not set
783# CONFIG_SND_CS4281 is not set 924# CONFIG_SND_CS4281 is not set
784# CONFIG_SND_CS46XX is not set 925# CONFIG_SND_CS46XX is not set
926# CONFIG_SND_DARLA20 is not set
927# CONFIG_SND_GINA20 is not set
928# CONFIG_SND_LAYLA20 is not set
929# CONFIG_SND_DARLA24 is not set
930# CONFIG_SND_GINA24 is not set
931# CONFIG_SND_LAYLA24 is not set
932# CONFIG_SND_MONA is not set
933# CONFIG_SND_MIA is not set
934# CONFIG_SND_ECHO3G is not set
935# CONFIG_SND_INDIGO is not set
936# CONFIG_SND_INDIGOIO is not set
937# CONFIG_SND_INDIGODJ is not set
785# CONFIG_SND_EMU10K1 is not set 938# CONFIG_SND_EMU10K1 is not set
786# CONFIG_SND_EMU10K1X is not set 939# CONFIG_SND_EMU10K1X is not set
787# CONFIG_SND_ENS1370 is not set 940# CONFIG_SND_ENS1370 is not set
@@ -801,6 +954,7 @@ CONFIG_SND_AC97_BUS=m
801# CONFIG_SND_MIXART is not set 954# CONFIG_SND_MIXART is not set
802# CONFIG_SND_NM256 is not set 955# CONFIG_SND_NM256 is not set
803# CONFIG_SND_PCXHR is not set 956# CONFIG_SND_PCXHR is not set
957# CONFIG_SND_RIPTIDE is not set
804# CONFIG_SND_RME32 is not set 958# CONFIG_SND_RME32 is not set
805# CONFIG_SND_RME96 is not set 959# CONFIG_SND_RME96 is not set
806# CONFIG_SND_RME9652 is not set 960# CONFIG_SND_RME9652 is not set
@@ -813,17 +967,22 @@ CONFIG_SND_YMFPCI=m
813# CONFIG_SND_AC97_POWER_SAVE is not set 967# CONFIG_SND_AC97_POWER_SAVE is not set
814 968
815# 969#
970# SoC audio support
971#
972# CONFIG_SND_SOC is not set
973
974#
816# Open Sound System 975# Open Sound System
817# 976#
818CONFIG_SOUND_PRIME=m 977CONFIG_SOUND_PRIME=m
819# CONFIG_OSS_OBSOLETE_DRIVER is not set 978# CONFIG_OBSOLETE_OSS is not set
820# CONFIG_SOUND_BT878 is not set 979# CONFIG_SOUND_BT878 is not set
821# CONFIG_SOUND_ES1371 is not set
822# CONFIG_SOUND_ICH is not set 980# CONFIG_SOUND_ICH is not set
823# CONFIG_SOUND_TRIDENT is not set 981# CONFIG_SOUND_TRIDENT is not set
824# CONFIG_SOUND_MSNDCLAS is not set 982# CONFIG_SOUND_MSNDCLAS is not set
825# CONFIG_SOUND_MSNDPIN is not set 983# CONFIG_SOUND_MSNDPIN is not set
826# CONFIG_SOUND_VIA82CXXX is not set 984# CONFIG_SOUND_VIA82CXXX is not set
985CONFIG_AC97_BUS=m
827 986
828# 987#
829# USB support 988# USB support
@@ -872,7 +1031,29 @@ CONFIG_USB_ARCH_HAS_EHCI=y
872# 1031#
873# Real Time Clock 1032# Real Time Clock
874# 1033#
875# CONFIG_RTC_CLASS is not set 1034CONFIG_RTC_LIB=y
1035CONFIG_RTC_CLASS=y
1036CONFIG_RTC_HCTOSYS=y
1037CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1038# CONFIG_RTC_DEBUG is not set
1039
1040#
1041# RTC interfaces
1042#
1043CONFIG_RTC_INTF_SYSFS=y
1044CONFIG_RTC_INTF_PROC=y
1045CONFIG_RTC_INTF_DEV=y
1046# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1047
1048#
1049# RTC drivers
1050#
1051# CONFIG_RTC_DRV_DS1553 is not set
1052# CONFIG_RTC_DRV_DS1742 is not set
1053# CONFIG_RTC_DRV_M48T86 is not set
1054CONFIG_RTC_DRV_SH=y
1055# CONFIG_RTC_DRV_TEST is not set
1056# CONFIG_RTC_DRV_V3020 is not set
876 1057
877# 1058#
878# DMA Engine support 1059# DMA Engine support
@@ -888,16 +1069,26 @@ CONFIG_USB_ARCH_HAS_EHCI=y
888# 1069#
889 1070
890# 1071#
1072# Auxiliary Display support
1073#
1074
1075#
1076# Virtualization
1077#
1078
1079#
891# File systems 1080# File systems
892# 1081#
893CONFIG_EXT2_FS=y 1082CONFIG_EXT2_FS=y
894# CONFIG_EXT2_FS_XATTR is not set 1083# CONFIG_EXT2_FS_XATTR is not set
895# CONFIG_EXT2_FS_XIP is not set 1084# CONFIG_EXT2_FS_XIP is not set
896# CONFIG_EXT3_FS is not set 1085# CONFIG_EXT3_FS is not set
1086# CONFIG_EXT4DEV_FS is not set
897# CONFIG_REISERFS_FS is not set 1087# CONFIG_REISERFS_FS is not set
898# CONFIG_JFS_FS is not set 1088# CONFIG_JFS_FS is not set
899# CONFIG_FS_POSIX_ACL is not set 1089# CONFIG_FS_POSIX_ACL is not set
900# CONFIG_XFS_FS is not set 1090# CONFIG_XFS_FS is not set
1091# CONFIG_GFS2_FS is not set
901# CONFIG_OCFS2_FS is not set 1092# CONFIG_OCFS2_FS is not set
902CONFIG_MINIX_FS=y 1093CONFIG_MINIX_FS=y
903# CONFIG_ROMFS_FS is not set 1094# CONFIG_ROMFS_FS is not set
@@ -932,7 +1123,8 @@ CONFIG_PROC_FS=y
932CONFIG_PROC_KCORE=y 1123CONFIG_PROC_KCORE=y
933CONFIG_PROC_SYSCTL=y 1124CONFIG_PROC_SYSCTL=y
934CONFIG_SYSFS=y 1125CONFIG_SYSFS=y
935# CONFIG_TMPFS is not set 1126CONFIG_TMPFS=y
1127# CONFIG_TMPFS_POSIX_ACL is not set
936# CONFIG_HUGETLBFS is not set 1128# CONFIG_HUGETLBFS is not set
937# CONFIG_HUGETLB_PAGE is not set 1129# CONFIG_HUGETLB_PAGE is not set
938CONFIG_RAMFS=y 1130CONFIG_RAMFS=y
@@ -1018,6 +1210,11 @@ CONFIG_NLS_CODEPAGE_932=y
1018# CONFIG_NLS_UTF8 is not set 1210# CONFIG_NLS_UTF8 is not set
1019 1211
1020# 1212#
1213# Distributed Lock Manager
1214#
1215# CONFIG_DLM is not set
1216
1217#
1021# Profiling support 1218# Profiling support
1022# 1219#
1023CONFIG_PROFILING=y 1220CONFIG_PROFILING=y
@@ -1026,16 +1223,20 @@ CONFIG_OPROFILE=y
1026# 1223#
1027# Kernel hacking 1224# Kernel hacking
1028# 1225#
1226CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1029# CONFIG_PRINTK_TIME is not set 1227# CONFIG_PRINTK_TIME is not set
1030CONFIG_ENABLE_MUST_CHECK=y 1228CONFIG_ENABLE_MUST_CHECK=y
1031# CONFIG_MAGIC_SYSRQ is not set 1229# CONFIG_MAGIC_SYSRQ is not set
1032# CONFIG_UNUSED_SYMBOLS is not set 1230# CONFIG_UNUSED_SYMBOLS is not set
1231# CONFIG_DEBUG_FS is not set
1232# CONFIG_HEADERS_CHECK is not set
1033# CONFIG_DEBUG_KERNEL is not set 1233# CONFIG_DEBUG_KERNEL is not set
1034CONFIG_LOG_BUF_SHIFT=14 1234CONFIG_LOG_BUF_SHIFT=14
1035# CONFIG_DEBUG_BUGVERBOSE is not set 1235# CONFIG_DEBUG_BUGVERBOSE is not set
1036# CONFIG_DEBUG_FS is not set
1037# CONFIG_SH_STANDARD_BIOS is not set 1236# CONFIG_SH_STANDARD_BIOS is not set
1038# CONFIG_EARLY_SCIF_CONSOLE is not set 1237CONFIG_EARLY_SCIF_CONSOLE=y
1238CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
1239CONFIG_EARLY_PRINTK=y
1039# CONFIG_KGDB is not set 1240# CONFIG_KGDB is not set
1040 1241
1041# 1242#
@@ -1052,8 +1253,11 @@ CONFIG_LOG_BUF_SHIFT=14
1052# 1253#
1053# Library routines 1254# Library routines
1054# 1255#
1256CONFIG_BITREVERSE=y
1055# CONFIG_CRC_CCITT is not set 1257# CONFIG_CRC_CCITT is not set
1056# CONFIG_CRC16 is not set 1258# CONFIG_CRC16 is not set
1057CONFIG_CRC32=y 1259CONFIG_CRC32=y
1058# CONFIG_LIBCRC32C is not set 1260# CONFIG_LIBCRC32C is not set
1059CONFIG_PLIST=y 1261CONFIG_PLIST=y
1262CONFIG_HAS_IOMEM=y
1263CONFIG_HAS_IOPORT=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 5d357d68b234..4e6e77fa4ce7 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18 3# Linux kernel version: 2.6.20-rc2
4# Tue Oct 3 11:49:01 2006 4# Thu Dec 28 23:15:49 2006
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -10,6 +10,11 @@ CONFIG_GENERIC_HWEIGHT=y
10CONFIG_GENERIC_HARDIRQS=y 10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_GENERIC_IRQ_PROBE=y 11CONFIG_GENERIC_IRQ_PROBE=y
12CONFIG_GENERIC_CALIBRATE_DELAY=y 12CONFIG_GENERIC_CALIBRATE_DELAY=y
13# CONFIG_GENERIC_TIME is not set
14CONFIG_STACKTRACE_SUPPORT=y
15CONFIG_LOCKDEP_SUPPORT=y
16# CONFIG_ARCH_HAS_ILOG2_U32 is not set
17# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
14 19
15# 20#
@@ -35,6 +40,7 @@ CONFIG_BSD_PROCESS_ACCT=y
35# CONFIG_AUDIT is not set 40# CONFIG_AUDIT is not set
36CONFIG_IKCONFIG=y 41CONFIG_IKCONFIG=y
37CONFIG_IKCONFIG_PROC=y 42CONFIG_IKCONFIG_PROC=y
43CONFIG_SYSFS_DEPRECATED=y
38# CONFIG_RELAY is not set 44# CONFIG_RELAY is not set
39CONFIG_INITRAMFS_SOURCE="" 45CONFIG_INITRAMFS_SOURCE=""
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 46# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -116,6 +122,8 @@ CONFIG_SH_SOLUTION_ENGINE=y
116# CONFIG_SH_LANDISK is not set 122# CONFIG_SH_LANDISK is not set
117# CONFIG_SH_TITAN is not set 123# CONFIG_SH_TITAN is not set
118# CONFIG_SH_SHMIN is not set 124# CONFIG_SH_SHMIN is not set
125# CONFIG_SH_7206_SOLUTION_ENGINE is not set
126# CONFIG_SH_7619_SOLUTION_ENGINE is not set
119# CONFIG_SH_UNKNOWN is not set 127# CONFIG_SH_UNKNOWN is not set
120 128
121# 129#
@@ -127,6 +135,12 @@ CONFIG_CPU_SH4=y
127# SH-2 Processor Support 135# SH-2 Processor Support
128# 136#
129# CONFIG_CPU_SUBTYPE_SH7604 is not set 137# CONFIG_CPU_SUBTYPE_SH7604 is not set
138# CONFIG_CPU_SUBTYPE_SH7619 is not set
139
140#
141# SH-2A Processor Support
142#
143# CONFIG_CPU_SUBTYPE_SH7206 is not set
130 144
131# 145#
132# SH-3 Processor Support 146# SH-3 Processor Support
@@ -162,12 +176,14 @@ CONFIG_CPU_SUBTYPE_SH7750=y
162# 176#
163# CONFIG_CPU_SUBTYPE_SH7770 is not set 177# CONFIG_CPU_SUBTYPE_SH7770 is not set
164# CONFIG_CPU_SUBTYPE_SH7780 is not set 178# CONFIG_CPU_SUBTYPE_SH7780 is not set
179# CONFIG_CPU_SUBTYPE_SH7785 is not set
165 180
166# 181#
167# SH4AL-DSP Processor Support 182# SH4AL-DSP Processor Support
168# 183#
169# CONFIG_CPU_SUBTYPE_SH73180 is not set 184# CONFIG_CPU_SUBTYPE_SH73180 is not set
170# CONFIG_CPU_SUBTYPE_SH7343 is not set 185# CONFIG_CPU_SUBTYPE_SH7343 is not set
186# CONFIG_CPU_SUBTYPE_SH7722 is not set
171 187
172# 188#
173# Memory management options 189# Memory management options
@@ -177,6 +193,9 @@ CONFIG_PAGE_OFFSET=0x80000000
177CONFIG_MEMORY_START=0x0c000000 193CONFIG_MEMORY_START=0x0c000000
178CONFIG_MEMORY_SIZE=0x02000000 194CONFIG_MEMORY_SIZE=0x02000000
179CONFIG_VSYSCALL=y 195CONFIG_VSYSCALL=y
196CONFIG_PAGE_SIZE_4KB=y
197# CONFIG_PAGE_SIZE_8KB is not set
198# CONFIG_PAGE_SIZE_64KB is not set
180CONFIG_SELECT_MEMORY_MODEL=y 199CONFIG_SELECT_MEMORY_MODEL=y
181CONFIG_FLATMEM_MANUAL=y 200CONFIG_FLATMEM_MANUAL=y
182# CONFIG_DISCONTIGMEM_MANUAL is not set 201# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -202,17 +221,22 @@ CONFIG_CF_BASE_ADDR=0xb8000000
202# Processor features 221# Processor features
203# 222#
204CONFIG_CPU_LITTLE_ENDIAN=y 223CONFIG_CPU_LITTLE_ENDIAN=y
224# CONFIG_CPU_BIG_ENDIAN is not set
205CONFIG_SH_FPU=y 225CONFIG_SH_FPU=y
206# CONFIG_SH_DSP is not set 226# CONFIG_SH_DSP is not set
207# CONFIG_SH_STORE_QUEUES is not set 227# CONFIG_SH_STORE_QUEUES is not set
208CONFIG_CPU_HAS_INTEVT=y 228CONFIG_CPU_HAS_INTEVT=y
229CONFIG_CPU_HAS_IPR_IRQ=y
209CONFIG_CPU_HAS_SR_RB=y 230CONFIG_CPU_HAS_SR_RB=y
231CONFIG_CPU_HAS_PTEA=y
210 232
211# 233#
212# Timer support 234# Timer support
213# 235#
214CONFIG_SH_TMU=y 236CONFIG_SH_TMU=y
215CONFIG_SH_PCLK_FREQ=50000000 237CONFIG_SH_TIMER_IRQ=16
238# CONFIG_NO_IDLE_HZ is not set
239CONFIG_SH_PCLK_FREQ=33333333
216 240
217# 241#
218# CPU Frequency scaling 242# CPU Frequency scaling
@@ -231,10 +255,16 @@ CONFIG_SH_PCLK_FREQ=50000000
231CONFIG_HEARTBEAT=y 255CONFIG_HEARTBEAT=y
232 256
233# 257#
258# Additional SuperH Device Drivers
259#
260# CONFIG_PUSH_SWITCH is not set
261
262#
234# Kernel features 263# Kernel features
235# 264#
236# CONFIG_HZ_100 is not set 265# CONFIG_HZ_100 is not set
237CONFIG_HZ_250=y 266CONFIG_HZ_250=y
267# CONFIG_HZ_300 is not set
238# CONFIG_HZ_1000 is not set 268# CONFIG_HZ_1000 is not set
239CONFIG_HZ=250 269CONFIG_HZ=250
240# CONFIG_KEXEC is not set 270# CONFIG_KEXEC is not set
@@ -249,8 +279,7 @@ CONFIG_PREEMPT_NONE=y
249CONFIG_ZERO_PAGE_OFFSET=0x00001000 279CONFIG_ZERO_PAGE_OFFSET=0x00001000
250CONFIG_BOOT_LINK_OFFSET=0x00800000 280CONFIG_BOOT_LINK_OFFSET=0x00800000
251# CONFIG_UBC_WAKEUP is not set 281# CONFIG_UBC_WAKEUP is not set
252CONFIG_CMDLINE_BOOL=y 282# CONFIG_CMDLINE_BOOL is not set
253CONFIG_CMDLINE="console=ttySC1,38400 root=/dev/nfs ip=bootp"
254 283
255# 284#
256# Bus options 285# Bus options
@@ -313,11 +342,13 @@ CONFIG_IP_PNP_BOOTP=y
313# CONFIG_INET_TUNNEL is not set 342# CONFIG_INET_TUNNEL is not set
314CONFIG_INET_XFRM_MODE_TRANSPORT=y 343CONFIG_INET_XFRM_MODE_TRANSPORT=y
315CONFIG_INET_XFRM_MODE_TUNNEL=y 344CONFIG_INET_XFRM_MODE_TUNNEL=y
345CONFIG_INET_XFRM_MODE_BEET=y
316CONFIG_INET_DIAG=y 346CONFIG_INET_DIAG=y
317CONFIG_INET_TCP_DIAG=y 347CONFIG_INET_TCP_DIAG=y
318# CONFIG_TCP_CONG_ADVANCED is not set 348# CONFIG_TCP_CONG_ADVANCED is not set
319CONFIG_TCP_CONG_CUBIC=y 349CONFIG_TCP_CONG_CUBIC=y
320CONFIG_DEFAULT_TCP_CONG="cubic" 350CONFIG_DEFAULT_TCP_CONG="cubic"
351# CONFIG_TCP_MD5SIG is not set
321# CONFIG_IPV6 is not set 352# CONFIG_IPV6 is not set
322# CONFIG_INET6_XFRM_TUNNEL is not set 353# CONFIG_INET6_XFRM_TUNNEL is not set
323# CONFIG_INET6_TUNNEL is not set 354# CONFIG_INET6_TUNNEL is not set
@@ -480,16 +511,79 @@ CONFIG_MTD_ROM=y
480# CONFIG_ATA_OVER_ETH is not set 511# CONFIG_ATA_OVER_ETH is not set
481 512
482# 513#
514# Misc devices
515#
516# CONFIG_TIFM_CORE is not set
517
518#
483# ATA/ATAPI/MFM/RLL support 519# ATA/ATAPI/MFM/RLL support
484# 520#
485# CONFIG_IDE is not set 521CONFIG_IDE=y
522CONFIG_IDE_MAX_HWIFS=4
523CONFIG_BLK_DEV_IDE=y
524
525#
526# Please see Documentation/ide.txt for help/info on IDE drives
527#
528# CONFIG_BLK_DEV_IDE_SATA is not set
529CONFIG_BLK_DEV_IDEDISK=y
530# CONFIG_IDEDISK_MULTI_MODE is not set
531# CONFIG_BLK_DEV_IDECD is not set
532# CONFIG_BLK_DEV_IDETAPE is not set
533# CONFIG_BLK_DEV_IDEFLOPPY is not set
534# CONFIG_BLK_DEV_IDESCSI is not set
535# CONFIG_IDE_TASK_IOCTL is not set
536
537#
538# IDE chipset support/bugfixes
539#
540# CONFIG_IDE_GENERIC is not set
541# CONFIG_IDE_ARM is not set
542# CONFIG_BLK_DEV_IDEDMA is not set
543# CONFIG_IDEDMA_AUTO is not set
544# CONFIG_BLK_DEV_HD is not set
486 545
487# 546#
488# SCSI device support 547# SCSI device support
489# 548#
490# CONFIG_RAID_ATTRS is not set 549# CONFIG_RAID_ATTRS is not set
491# CONFIG_SCSI is not set 550CONFIG_SCSI=y
551# CONFIG_SCSI_TGT is not set
492# CONFIG_SCSI_NETLINK is not set 552# CONFIG_SCSI_NETLINK is not set
553CONFIG_SCSI_PROC_FS=y
554
555#
556# SCSI support type (disk, tape, CD-ROM)
557#
558# CONFIG_BLK_DEV_SD is not set
559# CONFIG_CHR_DEV_ST is not set
560# CONFIG_CHR_DEV_OSST is not set
561# CONFIG_BLK_DEV_SR is not set
562# CONFIG_CHR_DEV_SG is not set
563# CONFIG_CHR_DEV_SCH is not set
564
565#
566# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
567#
568# CONFIG_SCSI_MULTI_LUN is not set
569# CONFIG_SCSI_CONSTANTS is not set
570# CONFIG_SCSI_LOGGING is not set
571# CONFIG_SCSI_SCAN_ASYNC is not set
572
573#
574# SCSI Transports
575#
576# CONFIG_SCSI_SPI_ATTRS is not set
577# CONFIG_SCSI_FC_ATTRS is not set
578# CONFIG_SCSI_ISCSI_ATTRS is not set
579# CONFIG_SCSI_SAS_ATTRS is not set
580# CONFIG_SCSI_SAS_LIBSAS is not set
581
582#
583# SCSI low-level drivers
584#
585# CONFIG_ISCSI_TCP is not set
586# CONFIG_SCSI_DEBUG is not set
493 587
494# 588#
495# Serial ATA (prod) and Parallel ATA (experimental) drivers 589# Serial ATA (prod) and Parallel ATA (experimental) drivers
@@ -633,17 +727,12 @@ CONFIG_HW_RANDOM=y
633# CONFIG_GEN_RTC is not set 727# CONFIG_GEN_RTC is not set
634# CONFIG_DTLK is not set 728# CONFIG_DTLK is not set
635# CONFIG_R3964 is not set 729# CONFIG_R3964 is not set
636
637#
638# Ftape, the floppy tape device driver
639#
640# CONFIG_RAW_DRIVER is not set 730# CONFIG_RAW_DRIVER is not set
641 731
642# 732#
643# TPM devices 733# TPM devices
644# 734#
645# CONFIG_TCG_TPM is not set 735# CONFIG_TCG_TPM is not set
646# CONFIG_TELCLOCK is not set
647 736
648# 737#
649# I2C support 738# I2C support
@@ -659,6 +748,7 @@ CONFIG_HW_RANDOM=y
659# 748#
660# Dallas's 1-wire bus 749# Dallas's 1-wire bus
661# 750#
751# CONFIG_W1 is not set
662 752
663# 753#
664# Hardware Monitoring support 754# Hardware Monitoring support
@@ -667,18 +757,14 @@ CONFIG_HWMON=y
667# CONFIG_HWMON_VID is not set 757# CONFIG_HWMON_VID is not set
668# CONFIG_SENSORS_ABITUGURU is not set 758# CONFIG_SENSORS_ABITUGURU is not set
669# CONFIG_SENSORS_F71805F is not set 759# CONFIG_SENSORS_F71805F is not set
760# CONFIG_SENSORS_PC87427 is not set
670# CONFIG_SENSORS_VT1211 is not set 761# CONFIG_SENSORS_VT1211 is not set
671# CONFIG_HWMON_DEBUG_CHIP is not set 762# CONFIG_HWMON_DEBUG_CHIP is not set
672 763
673# 764#
674# Misc devices
675#
676
677#
678# Multimedia devices 765# Multimedia devices
679# 766#
680# CONFIG_VIDEO_DEV is not set 767# CONFIG_VIDEO_DEV is not set
681CONFIG_VIDEO_V4L2=y
682 768
683# 769#
684# Digital Video Broadcasting Devices 770# Digital Video Broadcasting Devices
@@ -758,14 +844,20 @@ CONFIG_FIRMWARE_EDID=y
758# 844#
759 845
760# 846#
847# Virtualization
848#
849
850#
761# File systems 851# File systems
762# 852#
763# CONFIG_EXT2_FS is not set 853# CONFIG_EXT2_FS is not set
764# CONFIG_EXT3_FS is not set 854# CONFIG_EXT3_FS is not set
855# CONFIG_EXT4DEV_FS is not set
765# CONFIG_REISERFS_FS is not set 856# CONFIG_REISERFS_FS is not set
766# CONFIG_JFS_FS is not set 857# CONFIG_JFS_FS is not set
767# CONFIG_FS_POSIX_ACL is not set 858# CONFIG_FS_POSIX_ACL is not set
768# CONFIG_XFS_FS is not set 859# CONFIG_XFS_FS is not set
860# CONFIG_GFS2_FS is not set
769# CONFIG_OCFS2_FS is not set 861# CONFIG_OCFS2_FS is not set
770# CONFIG_MINIX_FS is not set 862# CONFIG_MINIX_FS is not set
771# CONFIG_ROMFS_FS is not set 863# CONFIG_ROMFS_FS is not set
@@ -814,7 +906,6 @@ CONFIG_RAMFS=y
814# CONFIG_BEFS_FS is not set 906# CONFIG_BEFS_FS is not set
815# CONFIG_BFS_FS is not set 907# CONFIG_BFS_FS is not set
816# CONFIG_EFS_FS is not set 908# CONFIG_EFS_FS is not set
817# CONFIG_JFFS_FS is not set
818CONFIG_JFFS2_FS=y 909CONFIG_JFFS2_FS=y
819CONFIG_JFFS2_FS_DEBUG=0 910CONFIG_JFFS2_FS_DEBUG=0
820CONFIG_JFFS2_FS_WRITEBUFFER=y 911CONFIG_JFFS2_FS_WRITEBUFFER=y
@@ -875,6 +966,11 @@ CONFIG_PARTITION_ADVANCED=y
875# CONFIG_NLS is not set 966# CONFIG_NLS is not set
876 967
877# 968#
969# Distributed Lock Manager
970#
971# CONFIG_DLM is not set
972
973#
878# Profiling support 974# Profiling support
879# 975#
880# CONFIG_PROFILING is not set 976# CONFIG_PROFILING is not set
@@ -882,14 +978,16 @@ CONFIG_PARTITION_ADVANCED=y
882# 978#
883# Kernel hacking 979# Kernel hacking
884# 980#
981CONFIG_TRACE_IRQFLAGS_SUPPORT=y
885# CONFIG_PRINTK_TIME is not set 982# CONFIG_PRINTK_TIME is not set
886CONFIG_ENABLE_MUST_CHECK=y 983# CONFIG_ENABLE_MUST_CHECK is not set
887# CONFIG_MAGIC_SYSRQ is not set 984# CONFIG_MAGIC_SYSRQ is not set
888# CONFIG_UNUSED_SYMBOLS is not set 985# CONFIG_UNUSED_SYMBOLS is not set
986# CONFIG_DEBUG_FS is not set
987# CONFIG_HEADERS_CHECK is not set
889# CONFIG_DEBUG_KERNEL is not set 988# CONFIG_DEBUG_KERNEL is not set
890CONFIG_LOG_BUF_SHIFT=14 989CONFIG_LOG_BUF_SHIFT=14
891# CONFIG_DEBUG_BUGVERBOSE is not set 990# CONFIG_DEBUG_BUGVERBOSE is not set
892# CONFIG_DEBUG_FS is not set
893# CONFIG_SH_STANDARD_BIOS is not set 991# CONFIG_SH_STANDARD_BIOS is not set
894# CONFIG_EARLY_SCIF_CONSOLE is not set 992# CONFIG_EARLY_SCIF_CONSOLE is not set
895# CONFIG_KGDB is not set 993# CONFIG_KGDB is not set
@@ -908,6 +1006,7 @@ CONFIG_LOG_BUF_SHIFT=14
908# 1006#
909# Library routines 1007# Library routines
910# 1008#
1009CONFIG_BITREVERSE=y
911# CONFIG_CRC_CCITT is not set 1010# CONFIG_CRC_CCITT is not set
912# CONFIG_CRC16 is not set 1011# CONFIG_CRC16 is not set
913CONFIG_CRC32=y 1012CONFIG_CRC32=y
@@ -915,3 +1014,4 @@ CONFIG_CRC32=y
915CONFIG_ZLIB_INFLATE=y 1014CONFIG_ZLIB_INFLATE=y
916CONFIG_ZLIB_DEFLATE=y 1015CONFIG_ZLIB_DEFLATE=y
917CONFIG_PLIST=y 1016CONFIG_PLIST=y
1017CONFIG_IOMAP_COPY=y
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index bf18dbfb6787..6cb92676c5fc 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PCI) += pci/
6obj-$(CONFIG_SH_DMA) += dma/ 6obj-$(CONFIG_SH_DMA) += dma/
7obj-$(CONFIG_SUPERHYWAY) += superhyway/ 7obj-$(CONFIG_SUPERHYWAY) += superhyway/
8obj-$(CONFIG_PUSH_SWITCH) += push-switch.o 8obj-$(CONFIG_PUSH_SWITCH) += push-switch.o
9obj-$(CONFIG_HEARTBEAT) += heartbeat.o
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index f63721ed86c2..06ed0609a95d 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -19,34 +19,26 @@
19#include <asm/io.h> 19#include <asm/io.h>
20#include "dma-sh.h" 20#include "dma-sh.h"
21 21
22 22static int dmte_irq_map[] = {
23 23 DMTE0_IRQ,
24#ifdef CONFIG_CPU_SH4 24 DMTE1_IRQ,
25static struct ipr_data dmae_ipr_map[] = { 25 DMTE2_IRQ,
26 { DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY }, 26 DMTE3_IRQ,
27}; 27#if defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
28 defined(CONFIG_CPU_SUBTYPE_SH7760) || \
29 defined(CONFIG_CPU_SUBTYPE_SH7780)
30 DMTE4_IRQ,
31 DMTE5_IRQ,
32 DMTE6_IRQ,
33 DMTE7_IRQ,
28#endif 34#endif
29static struct ipr_data dmte_ipr_map[] = {
30 /*
31 * Normally we could just do DMTE0_IRQ + chan outright, though in the
32 * case of the 7751R, the DMTE IRQs for channels > 4 start right above
33 * the SCIF
34 */
35 { DMTE0_IRQ + 0, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
36 { DMTE0_IRQ + 1, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
37 { DMTE0_IRQ + 2, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
38 { DMTE0_IRQ + 3, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
39 { DMTE4_IRQ + 0, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
40 { DMTE4_IRQ + 1, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
41 { DMTE4_IRQ + 2, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
42 { DMTE4_IRQ + 3, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY },
43}; 35};
44 36
45static inline unsigned int get_dmte_irq(unsigned int chan) 37static inline unsigned int get_dmte_irq(unsigned int chan)
46{ 38{
47 unsigned int irq = 0; 39 unsigned int irq = 0;
48 if (chan < ARRAY_SIZE(dmte_ipr_map)) 40 if (chan < ARRAY_SIZE(dmte_irq_map))
49 irq = dmte_ipr_map[chan].irq; 41 irq = dmte_irq_map[chan];
50 return irq; 42 return irq;
51} 43}
52 44
@@ -103,7 +95,7 @@ static void sh_dmac_free_dma(struct dma_channel *chan)
103 free_irq(get_dmte_irq(chan->chan), chan); 95 free_irq(get_dmte_irq(chan->chan), chan);
104} 96}
105 97
106static void 98static int
107sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr) 99sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
108{ 100{
109 if (!chcr) 101 if (!chcr)
@@ -119,6 +111,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
119 ctrl_outl(chcr, CHCR[chan->chan]); 111 ctrl_outl(chcr, CHCR[chan->chan]);
120 112
121 chan->flags |= DMA_CONFIGURED; 113 chan->flags |= DMA_CONFIGURED;
114 return 0;
122} 115}
123 116
124static void sh_dmac_enable_dma(struct dma_channel *chan) 117static void sh_dmac_enable_dma(struct dma_channel *chan)
@@ -262,17 +255,11 @@ static int __init sh_dmac_init(void)
262 int i; 255 int i;
263 256
264#ifdef CONFIG_CPU_SH4 257#ifdef CONFIG_CPU_SH4
265 make_ipr_irq(dmae_ipr_map, ARRAY_SIZE(dmae_ipr_map));
266 i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0); 258 i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED, "DMAC Address Error", 0);
267 if (unlikely(i < 0)) 259 if (unlikely(i < 0))
268 return i; 260 return i;
269#endif 261#endif
270 262
271 i = info->nr_channels;
272 if (i > ARRAY_SIZE(dmte_ipr_map))
273 i = ARRAY_SIZE(dmte_ipr_map);
274 make_ipr_irq(dmte_ipr_map, i);
275
276 /* 263 /*
277 * Initialize DMAOR, and clean up any error flags that may have 264 * Initialize DMAOR, and clean up any error flags that may have
278 * been set. 265 * been set.
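
With IPR registration now owned by the CPU setup code, dmte_irq_map collapses into a plain channel-to-vector lookup, and get_dmte_irq() simply bounds-checks the channel before indexing it. The request side that pairs with the free_irq() call visible in sh_dmac_free_dma() would plausibly read as below; dma_tei is assumed to be the usual SH DMAC transfer-end handler, which this hunk does not show:

static int example_request_dma(struct dma_channel *chan)
{
	return request_irq(get_dmte_irq(chan->chan), dma_tei,
			   IRQF_DISABLED, "DMAC Transfer End", chan);
}
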
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
new file mode 100644
index 000000000000..bc59cb6cd78b
--- /dev/null
+++ b/arch/sh/drivers/heartbeat.c
@@ -0,0 +1,132 @@
1/*
2 * Generic heartbeat driver for regular LED banks
3 *
4 * Copyright (C) 2007 Paul Mundt
5 *
6 * Most SH reference boards include a number of individual LEDs that can
7 * be independently controlled (either via a pre-defined hardware
8 * function or via the LED class, if desired -- the hardware tends to
9 * encapsulate some of the same "triggers" that the LED class supports,
10 * so there's not too much value in it).
11 *
12 * Additionally, most of these boards also have a LED bank that we've
13 * traditionally used for strobing the load average. This use case is
14 * handled by this driver, rather than giving each LED bit position its
15 * own struct device.
16 *
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
19 * for more details.
20 */
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/sched.h>
25#include <linux/timer.h>
26#include <linux/io.h>
27
28#define DRV_NAME "heartbeat"
29#define DRV_VERSION "0.1.0"
30
31struct heartbeat_data {
32 void __iomem *base;
33 unsigned char bit_pos[8];
34 struct timer_list timer;
35};
36
37static void heartbeat_timer(unsigned long data)
38{
39 struct heartbeat_data *hd = (struct heartbeat_data *)data;
40 static unsigned bit = 0, up = 1;
41
42 ctrl_outw(1 << hd->bit_pos[bit], (unsigned long)hd->base);
43 if (up)
44 if (bit == (ARRAY_SIZE(hd->bit_pos) - 1)) {
45 bit--;
46 up = 0;
47 } else
48 bit++;
49 else if (bit == 0)
50 up = 1;
51 else
52 bit--;
53
54 mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) /
55 ((avenrun[0] / 5) + (3 << FSHIFT)))));
56}
57
58static int heartbeat_drv_probe(struct platform_device *pdev)
59{
60 struct resource *res;
61 struct heartbeat_data *hd;
62
63 if (unlikely(pdev->num_resources != 1)) {
64 dev_err(&pdev->dev, "invalid number of resources\n");
65 return -EINVAL;
66 }
67
68 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
69 if (unlikely(res == NULL)) {
70 dev_err(&pdev->dev, "invalid resource\n");
71 return -EINVAL;
72 }
73
74 hd = kmalloc(sizeof(struct heartbeat_data), GFP_KERNEL);
75 if (unlikely(!hd))
76 return -ENOMEM;
77
78 if (pdev->dev.platform_data) {
79 memcpy(hd->bit_pos, pdev->dev.platform_data,
80 ARRAY_SIZE(hd->bit_pos));
81 } else {
82 int i;
83
84 for (i = 0; i < ARRAY_SIZE(hd->bit_pos); i++)
85 hd->bit_pos[i] = i;
86 }
87
88 hd->base = (void __iomem *)res->start;
89
90 setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
91 platform_set_drvdata(pdev, hd);
92
93 return mod_timer(&hd->timer, jiffies + 1);
94}
95
96static int heartbeat_drv_remove(struct platform_device *pdev)
97{
98 struct heartbeat_data *hd = platform_get_drvdata(pdev);
99
100 del_timer_sync(&hd->timer);
101
102 platform_set_drvdata(pdev, NULL);
103
104 kfree(hd);
105
106 return 0;
107}
108
109static struct platform_driver heartbeat_driver = {
110 .probe = heartbeat_drv_probe,
111 .remove = heartbeat_drv_remove,
112 .driver = {
113 .name = DRV_NAME,
114 },
115};
116
117static int __init heartbeat_init(void)
118{
119 printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
120 return platform_driver_register(&heartbeat_driver);
121}
122
123static void __exit heartbeat_exit(void)
124{
125 platform_driver_unregister(&heartbeat_driver);
126}
127module_init(heartbeat_init);
128module_exit(heartbeat_exit);
129
130MODULE_VERSION(DRV_VERSION);
131MODULE_AUTHOR("Paul Mundt");
132MODULE_LICENSE("GPLv2");
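
Two notes on the new driver. The pacing expression in heartbeat_timer() is fixed-point arithmetic: avenrun[] carries FSHIFT (= 11) fractional bits, so at zero load the next tick lands at 110 - (300 << 11) / (3 << 11) = 110 - 100 = 10 jiffies, and as the load average rises the quotient shrinks, pushing the interval toward its 110-jiffy ceiling -- the LED sweep is fast on an idle box and slows under load. Second, per the probe path, a board whose LEDs are wired out of order can override the default 0..7 bit layout through platform_data (illustrative values):

static unsigned char example_bit_pos[] = { 4, 5, 6, 7, 0, 1, 2, 3 };
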
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 9e00cb8a39e9..cc8d0d0b1427 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
12obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \ 12obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \
13 dma-dreamcast.o 13 dma-dreamcast.o
14obj-$(CONFIG_SH_SECUREEDGE5410) += ops-snapgear.o 14obj-$(CONFIG_SH_SECUREEDGE5410) += ops-snapgear.o
15obj-$(CONFIG_SH_BIGSUR) += ops-bigsur.o
16obj-$(CONFIG_SH_RTS7751R2D) += ops-rts7751r2d.o fixups-rts7751r2d.o 15obj-$(CONFIG_SH_RTS7751R2D) += ops-rts7751r2d.o fixups-rts7751r2d.o
17obj-$(CONFIG_SH_SH03) += ops-sh03.o fixups-sh03.o 16obj-$(CONFIG_SH_SH03) += ops-sh03.o fixups-sh03.o
18obj-$(CONFIG_SH_R7780RP) += ops-r7780rp.o fixups-r7780rp.o 17obj-$(CONFIG_SH_R7780RP) += ops-r7780rp.o fixups-r7780rp.o
diff --git a/arch/sh/drivers/pci/ops-bigsur.c b/arch/sh/drivers/pci/ops-bigsur.c
deleted file mode 100644
index eb31be751524..000000000000
--- a/arch/sh/drivers/pci/ops-bigsur.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * linux/arch/sh/drivers/pci/ops-bigsur.c
3 *
4 * By Dustin McIntire (dustin@sensoria.com) (c)2001
5 *
6 * Ported to new API by Paul Mundt <lethal@linux-sh.org>.
7 *
8 * May be copied or modified under the terms of the GNU General Public
9 * License. See linux/COPYING for more information.
10 *
11 * PCI initialization for the Hitachi Big Sur Evaluation Board
12 */
13#include <linux/kernel.h>
14#include <linux/types.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <asm/io.h>
18#include "pci-sh4.h"
19#include <asm/bigsur/bigsur.h>
20
21#define BIGSUR_PCI_IO 0x4000
22#define BIGSUR_PCI_MEM 0xfd000000
23
24static struct resource sh7751_io_resource = {
25 .name = "SH7751 IO",
26 .start = BIGSUR_PCI_IO,
27 .end = BIGSUR_PCI_IO + (64*1024) - 1,
28 .flags = IORESOURCE_IO,
29};
30
31static struct resource sh7751_mem_resource = {
32 .name = "SH7751 mem",
33 .start = BIGSUR_PCI_MEM,
34 .end = BIGSUR_PCI_MEM + (64*1024*1024) - 1,
35 .flags = IORESOURCE_MEM,
36};
37
38extern struct pci_ops sh7751_pci_ops;
39
40struct pci_channel board_pci_channels[] = {
41 { &sh4_pci_ops, &sh7751_io_resource, &sh7751_mem_resource, 0, 0xff },
42 { 0, }
43};
44
45static struct sh4_pci_address_map sh7751_pci_map = {
46 .window0 = {
47 .base = SH7751_CS3_BASE_ADDR,
48 .size = BIGSUR_LSR0_SIZE,
49 },
50
51 .window1 = {
52 .base = SH7751_CS3_BASE_ADDR,
53 .size = BIGSUR_LSR1_SIZE,
54 },
55};
56
57/*
58 * Initialize the Big Sur PCI interface
59 * Set up hardware to be Central Function
60 * Copy the BSR regs to the PCI interface
61 * Set up PCI windows into local RAM
62 */
63int __init pcibios_init_platform(void)
64{
65 return sh7751_pcic_init(&sh7751_pci_map);
66}
67
68int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
69{
70 /*
71 * The Big Sur can be used in a CPCI chassis, but the SH7751 PCI
72 * interface is on the wrong end of the board so that it can also
73 * support a V320 CPI interface chip... Therefore the IRQ mapping is
74 * somewhat use-dependent... I'll assume a linear map for now, i.e.
75 * INTA=slot0,pin0... INTD=slot3,pin0...
76 */
77 int irq = (slot + pin-1) % 4 + BIGSUR_SH7751_PCI_IRQ_BASE;
78
79 PCIDBG(2, "PCI: Mapping Big Sur IRQ for slot %d, pin %c to irq %d\n",
80 slot, pin-1+'A', irq);
81
82 return irq;
83}
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c
index 85e1ee2e2e7b..9ddff760d3c6 100644
--- a/arch/sh/drivers/pci/pci-sh7751.c
+++ b/arch/sh/drivers/pci/pci-sh7751.c
@@ -157,15 +157,6 @@ int __init sh7751_pcic_init(struct sh4_pci_address_map *map)
157 PCIBIOS_MIN_IO, (64 << 10), 157 PCIBIOS_MIN_IO, (64 << 10),
158 SH7751_PCI_IO_BASE + PCIBIOS_MIN_IO); 158 SH7751_PCI_IO_BASE + PCIBIOS_MIN_IO);
159 159
160 /*
161 * XXX: For now, leave this board-specific. In the event we have other
162 * boards that need to do similar work, this can be wrapped.
163 */
164#ifdef CONFIG_SH_BIGSUR
165 bigsur_port_map(PCIBIOS_MIN_IO, (64 << 10),
166 SH7751_PCI_IO_BASE + PCIBIOS_MIN_IO, 0);
167#endif
168
169 /* Make sure the MSB's of IO window are set to access PCI space 160 /* Make sure the MSB's of IO window are set to access PCI space
170 * correctly */ 161 * correctly */
171 word = PCIBIOS_MIN_IO & SH4_PCIIOBR_MASK; 162 word = PCIBIOS_MIN_IO & SH4_PCIIOBR_MASK;
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 2f6d2bcb1c93..ff30d7f58043 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -6,7 +6,8 @@ extra-y := head.o init_task.o vmlinux.lds
6 6
7obj-y := process.o signal.o traps.o irq.o \ 7obj-y := process.o signal.o traps.o irq.o \
8 ptrace.o setup.o time.o sys_sh.o semaphore.o \ 8 ptrace.o setup.o time.o sys_sh.o semaphore.o \
9 io.o io_generic.o sh_ksyms.o syscalls.o 9 io.o io_generic.o sh_ksyms.o syscalls.o \
10 debugtraps.o
10 11
11obj-y += cpu/ timers/ 12obj-y += cpu/ timers/
12obj-$(CONFIG_VSYSCALL) += vsyscall/ 13obj-$(CONFIG_VSYSCALL) += vsyscall/
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 48121766e8d2..4b339a640b13 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * CPU init code 4 * CPU init code
5 * 5 *
6 * Copyright (C) 2002, 2003 Paul Mundt 6 * Copyright (C) 2002 - 2006 Paul Mundt
7 * Copyright (C) 2003 Richard Curnow 7 * Copyright (C) 2003 Richard Curnow
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +12,8 @@
12 */ 12 */
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <asm/mmu_context.h>
15#include <asm/processor.h> 17#include <asm/processor.h>
16#include <asm/uaccess.h> 18#include <asm/uaccess.h>
17#include <asm/page.h> 19#include <asm/page.h>
@@ -46,7 +48,7 @@ static void __init cache_init(void)
46{ 48{
47 unsigned long ccr, flags; 49 unsigned long ccr, flags;
48 50
49 if (cpu_data->type == CPU_SH_NONE) 51 if (current_cpu_data.type == CPU_SH_NONE)
50 panic("Unknown CPU"); 52 panic("Unknown CPU");
51 53
52 jump_to_P2(); 54 jump_to_P2();
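
The cpu_data-> to current_cpu_data rewrite that starts here runs through the rest of the file (and pairs with the NO_CONTEXT ASID seeding further down), readying the init path for per-CPU cpuinfo. The definition itself is outside this hunk; the assumption, mirroring how other architectures spell it, is an array indexed by the running processor:

extern struct sh_cpuinfo cpu_data[NR_CPUS];
#define current_cpu_data cpu_data[smp_processor_id()]
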
@@ -66,7 +68,7 @@ static void __init cache_init(void)
66 if (ccr & CCR_CACHE_ENABLE) { 68 if (ccr & CCR_CACHE_ENABLE) {
67 unsigned long ways, waysize, addrstart; 69 unsigned long ways, waysize, addrstart;
68 70
69 waysize = cpu_data->dcache.sets; 71 waysize = current_cpu_data.dcache.sets;
70 72
71#ifdef CCR_CACHE_ORA 73#ifdef CCR_CACHE_ORA
72 /* 74 /*
@@ -77,7 +79,7 @@ static void __init cache_init(void)
77 waysize >>= 1; 79 waysize >>= 1;
78#endif 80#endif
79 81
80 waysize <<= cpu_data->dcache.entry_shift; 82 waysize <<= current_cpu_data.dcache.entry_shift;
81 83
82#ifdef CCR_CACHE_EMODE 84#ifdef CCR_CACHE_EMODE
83 /* If EMODE is not set, we only have 1 way to flush. */ 85 /* If EMODE is not set, we only have 1 way to flush. */
@@ -85,7 +87,7 @@ static void __init cache_init(void)
85 ways = 1; 87 ways = 1;
86 else 88 else
87#endif 89#endif
88 ways = cpu_data->dcache.ways; 90 ways = current_cpu_data.dcache.ways;
89 91
90 addrstart = CACHE_OC_ADDRESS_ARRAY; 92 addrstart = CACHE_OC_ADDRESS_ARRAY;
91 do { 93 do {
@@ -93,10 +95,10 @@ static void __init cache_init(void)
93 95
94 for (addr = addrstart; 96 for (addr = addrstart;
95 addr < addrstart + waysize; 97 addr < addrstart + waysize;
96 addr += cpu_data->dcache.linesz) 98 addr += current_cpu_data.dcache.linesz)
97 ctrl_outl(0, addr); 99 ctrl_outl(0, addr);
98 100
99 addrstart += cpu_data->dcache.way_incr; 101 addrstart += current_cpu_data.dcache.way_incr;
100 } while (--ways); 102 } while (--ways);
101 } 103 }
102 104
@@ -108,7 +110,7 @@ static void __init cache_init(void)
108 110
109#ifdef CCR_CACHE_EMODE 111#ifdef CCR_CACHE_EMODE
110 /* Force EMODE if possible */ 112 /* Force EMODE if possible */
111 if (cpu_data->dcache.ways > 1) 113 if (current_cpu_data.dcache.ways > 1)
112 flags |= CCR_CACHE_EMODE; 114 flags |= CCR_CACHE_EMODE;
113 else 115 else
114 flags &= ~CCR_CACHE_EMODE; 116 flags &= ~CCR_CACHE_EMODE;
@@ -125,10 +127,10 @@ static void __init cache_init(void)
125#ifdef CONFIG_SH_OCRAM 127#ifdef CONFIG_SH_OCRAM
126 /* Turn on OCRAM -- halve the OC */ 128 /* Turn on OCRAM -- halve the OC */
127 flags |= CCR_CACHE_ORA; 129 flags |= CCR_CACHE_ORA;
128 cpu_data->dcache.sets >>= 1; 130 current_cpu_data.dcache.sets >>= 1;
129 131
130 cpu_data->dcache.way_size = cpu_data->dcache.sets * 132 current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
131 cpu_data->dcache.linesz; 133 current_cpu_data.dcache.linesz;
132#endif 134#endif
133 135
134 ctrl_outl(flags, CCR); 136 ctrl_outl(flags, CCR);
@@ -170,7 +172,7 @@ static void __init dsp_init(void)
170 172
171 /* If the DSP bit is still set, this CPU has a DSP */ 173 /* If the DSP bit is still set, this CPU has a DSP */
172 if (sr & SR_DSP) 174 if (sr & SR_DSP)
173 cpu_data->flags |= CPU_HAS_DSP; 175 current_cpu_data.flags |= CPU_HAS_DSP;
174 176
175 /* Now that we've determined the DSP status, clear the DSP bit. */ 177 /* Now that we've determined the DSP status, clear the DSP bit. */
176 release_dsp(); 178 release_dsp();
@@ -202,22 +204,28 @@ asmlinkage void __init sh_cpu_init(void)
202 cache_init(); 204 cache_init();
203 205
204 shm_align_mask = max_t(unsigned long, 206 shm_align_mask = max_t(unsigned long,
205 cpu_data->dcache.way_size - 1, 207 current_cpu_data.dcache.way_size - 1,
206 PAGE_SIZE - 1); 208 PAGE_SIZE - 1);
207 209
208 /* Disable the FPU */ 210 /* Disable the FPU */
209 if (fpu_disabled) { 211 if (fpu_disabled) {
210 printk("FPU Disabled\n"); 212 printk("FPU Disabled\n");
211 cpu_data->flags &= ~CPU_HAS_FPU; 213 current_cpu_data.flags &= ~CPU_HAS_FPU;
212 disable_fpu(); 214 disable_fpu();
213 } 215 }
214 216
215 /* FPU initialization */ 217 /* FPU initialization */
216 if ((cpu_data->flags & CPU_HAS_FPU)) { 218 if ((current_cpu_data.flags & CPU_HAS_FPU)) {
217 clear_thread_flag(TIF_USEDFPU); 219 clear_thread_flag(TIF_USEDFPU);
218 clear_used_math(); 220 clear_used_math();
219 } 221 }
220 222
223 /*
224 * Initialize the per-CPU ASID cache very early, since the
225 * TLB flushing routines depend on this being setup.
226 */
227 current_cpu_data.asid_cache = NO_CONTEXT;
228
221#ifdef CONFIG_SH_DSP 229#ifdef CONFIG_SH_DSP
222 /* Probe for DSP */ 230 /* Probe for DSP */
223 dsp_init(); 231 dsp_init();
@@ -225,7 +233,7 @@ asmlinkage void __init sh_cpu_init(void)
225 /* Disable the DSP */ 233 /* Disable the DSP */
226 if (dsp_disabled) { 234 if (dsp_disabled) {
227 printk("DSP Disabled\n"); 235 printk("DSP Disabled\n");
228 cpu_data->flags &= ~CPU_HAS_DSP; 236 current_cpu_data.flags &= ~CPU_HAS_DSP;
229 release_dsp(); 237 release_dsp();
230 } 238 }
231#endif 239#endif
@@ -240,4 +248,3 @@ asmlinkage void __init sh_cpu_init(void)
240 ubc_wakeup(); 248 ubc_wakeup();
241#endif 249#endif
242} 250}
243
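
The cpu_data-> to current_cpu_data conversion running through this and the following probe/init hunks reads naturally if current_cpu_data selects the running CPU's slot in a per-CPU array. A minimal sketch of the assumed accessor (the real definitions live in the SH <asm/processor.h>, which is not part of this excerpt; sh_cpuinfo and the macro names are taken on faith from the call sites):

/*
 * Assumed shape of the accessor behind the rename: cpu_data becomes
 * a per-CPU array indexed by the running processor instead of a
 * pointer to a single boot-CPU record.
 */
extern struct sh_cpuinfo cpu_data[];		/* one entry per CPU */

#define boot_cpu_data		cpu_data[0]
#define current_cpu_data	cpu_data[smp_processor_id()]
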
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 35eb5751a3aa..210280b6fddf 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -43,16 +43,29 @@ static struct irq_chip ipr_irq_chip = {
43 .mask_ack = disable_ipr_irq, 43 .mask_ack = disable_ipr_irq,
44}; 44};
45 45
46unsigned int map_ipridx_to_addr(int idx) __attribute__ ((weak));
47unsigned int map_ipridx_to_addr(int idx)
48{
49 return 0;
50}
51
46void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) 52void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs)
47{ 53{
48 int i; 54 int i;
49 55
50 for (i = 0; i < nr_irqs; i++) { 56 for (i = 0; i < nr_irqs; i++) {
51 unsigned int irq = table[i].irq; 57 unsigned int irq = table[i].irq;
52 table[i].addr = map_ipridx_to_addr(table[i].ipr_idx); 58
59 if (!irq)
60 irq = table[i].irq = i;
61
53 /* could the IPR index be mapped, if not we ignore this */ 62 /* could the IPR index be mapped, if not we ignore this */
54 if (table[i].addr == 0) 63 if (!table[i].addr) {
55 continue; 64 table[i].addr = map_ipridx_to_addr(table[i].ipr_idx);
65 if (!table[i].addr)
66 continue;
67 }
68
56 disable_irq_nosync(irq); 69 disable_irq_nosync(irq);
57 set_irq_chip_and_handler_name(irq, &ipr_irq_chip, 70 set_irq_chip_and_handler_name(irq, &ipr_irq_chip,
58 handle_level_irq, "level"); 71 handle_level_irq, "level");
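
The hunk above turns map_ipridx_to_addr() into a weak stub so that CPU subtype setup files can supply the real IPR register layout; the linker picks the strong definition when one exists, and a table entry is simply skipped when the stub's 0 comes back. A condensed sketch of the pattern, with the two fragments living in separate translation units (names as in this series):

/* arch/sh/kernel/cpu/irq/ipr.c: weak default, "no mapping known" */
unsigned int map_ipridx_to_addr(int idx) __attribute__ ((weak));
unsigned int map_ipridx_to_addr(int idx)
{
	return 0;
}

/* CPU subtype setup file: the strong definition wins at link time */
static unsigned int ipr_offsets[] = {
	0xf8080000,	/* IPRC */
	0xf8080002,	/* IPRD */
};

unsigned int map_ipridx_to_addr(int idx)
{
	if (idx >= ARRAY_SIZE(ipr_offsets))
		return 0;
	return ipr_offsets[idx];
}
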
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index d51fa5e9904a..7f7d292f36ec 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -178,12 +178,10 @@ interrupt_entry:
1788: .long do_exception_error 1788: .long do_exception_error
179 179
180trap_entry: 180trap_entry:
181 /* verbose BUG trapa entry check */ 181 mov #0x30,r8
 182 mov #0x3e,r8 182 cmp/ge r8,r9 ! vector 0x20-0x2f is system call
183 cmp/ge r8,r9 183 bt 1f
184 bf/s 1f 184 add #-0x10,r9 ! convert SH2 to SH3/4 ABI
185 add #-0x10,r9
186 add #0x10,r9
1871: 1851:
188 shll2 r9 ! TRA 186 shll2 r9 ! TRA
189 mov #OFF_TRA,r8 187 mov #OFF_TRA,r8
@@ -206,7 +204,7 @@ trap_entry:
206 204
207#if defined(CONFIG_SH_STANDARD_BIOS) 205#if defined(CONFIG_SH_STANDARD_BIOS)
208 /* Unwind the stack and jmp to the debug entry */ 206 /* Unwind the stack and jmp to the debug entry */
209debug_kernel_fw: 207ENTRY(sh_bios_handler)
210 mov r15,r0 208 mov r15,r0
211 add #(22-4)*4-4,r0 209 add #(22-4)*4-4,r0
212 ldc.l @r0+,gbr 210 ldc.l @r0+,gbr
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index ba527d9b5024..108e81b682ed 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -18,27 +18,27 @@
18int __init detect_cpu_and_cache_system(void) 18int __init detect_cpu_and_cache_system(void)
19{ 19{
20#if defined(CONFIG_CPU_SUBTYPE_SH7604) 20#if defined(CONFIG_CPU_SUBTYPE_SH7604)
21 cpu_data->type = CPU_SH7604; 21 current_cpu_data.type = CPU_SH7604;
22 cpu_data->dcache.ways = 4; 22 current_cpu_data.dcache.ways = 4;
23 cpu_data->dcache.way_incr = (1<<10); 23 current_cpu_data.dcache.way_incr = (1<<10);
24 cpu_data->dcache.sets = 64; 24 current_cpu_data.dcache.sets = 64;
25 cpu_data->dcache.entry_shift = 4; 25 current_cpu_data.dcache.entry_shift = 4;
26 cpu_data->dcache.linesz = L1_CACHE_BYTES; 26 current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
27 cpu_data->dcache.flags = 0; 27 current_cpu_data.dcache.flags = 0;
28#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 28#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
29 cpu_data->type = CPU_SH7619; 29 current_cpu_data.type = CPU_SH7619;
30 cpu_data->dcache.ways = 4; 30 current_cpu_data.dcache.ways = 4;
31 cpu_data->dcache.way_incr = (1<<12); 31 current_cpu_data.dcache.way_incr = (1<<12);
32 cpu_data->dcache.sets = 256; 32 current_cpu_data.dcache.sets = 256;
33 cpu_data->dcache.entry_shift = 4; 33 current_cpu_data.dcache.entry_shift = 4;
34 cpu_data->dcache.linesz = L1_CACHE_BYTES; 34 current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
35 cpu_data->dcache.flags = 0; 35 current_cpu_data.dcache.flags = 0;
36#endif 36#endif
37 /* 37 /*
38 * SH-2 doesn't have separate caches 38 * SH-2 doesn't have separate caches
39 */ 39 */
40 cpu_data->dcache.flags |= SH_CACHE_COMBINED; 40 current_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
41 cpu_data->icache = cpu_data->dcache; 41 current_cpu_data.icache = current_cpu_data.dcache;
42 42
43 return 0; 43 return 0;
44} 44}
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 79283e6c1d8f..f83ff8a68f35 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -52,42 +52,38 @@ static int __init sh7619_devices_setup(void)
52} 52}
53__initcall(sh7619_devices_setup); 53__initcall(sh7619_devices_setup);
54 54
55#define INTC_IPRC 0xf8080000UL
56#define INTC_IPRD 0xf8080002UL
57
58#define CMI0_IRQ 86
59
60#define SCIF0_ERI_IRQ 88
61#define SCIF0_RXI_IRQ 89
62#define SCIF0_BRI_IRQ 90
63#define SCIF0_TXI_IRQ 91
64
65#define SCIF1_ERI_IRQ 92
66#define SCIF1_RXI_IRQ 93
67#define SCIF1_BRI_IRQ 94
68#define SCIF1_TXI_IRQ 95
69
70#define SCIF2_BRI_IRQ 96
71#define SCIF2_ERI_IRQ 97
72#define SCIF2_RXI_IRQ 98
73#define SCIF2_TXI_IRQ 99
74
75static struct ipr_data sh7619_ipr_map[] = { 55static struct ipr_data sh7619_ipr_map[] = {
76 { CMI0_IRQ, INTC_IPRC, 1, 2 }, 56 { 86, 0, 4, 2 }, /* CMI0 */
77 { SCIF0_ERI_IRQ, INTC_IPRD, 3, 3 }, 57 { 88, 1, 12, 3 }, /* SCIF0_ERI */
78 { SCIF0_RXI_IRQ, INTC_IPRD, 3, 3 }, 58 { 89, 1, 12, 3 }, /* SCIF0_RXI */
79 { SCIF0_BRI_IRQ, INTC_IPRD, 3, 3 }, 59 { 90, 1, 12, 3 }, /* SCIF0_BRI */
80 { SCIF0_TXI_IRQ, INTC_IPRD, 3, 3 }, 60 { 91, 1, 12, 3 }, /* SCIF0_TXI */
81 { SCIF1_ERI_IRQ, INTC_IPRD, 2, 3 }, 61 { 92, 1, 8, 3 }, /* SCIF1_ERI */
82 { SCIF1_RXI_IRQ, INTC_IPRD, 2, 3 }, 62 { 93, 1, 8, 3 }, /* SCIF1_RXI */
83 { SCIF1_BRI_IRQ, INTC_IPRD, 2, 3 }, 63 { 94, 1, 8, 3 }, /* SCIF1_BRI */
84 { SCIF1_TXI_IRQ, INTC_IPRD, 2, 3 }, 64 { 95, 1, 8, 3 }, /* SCIF1_TXI */
85 { SCIF2_ERI_IRQ, INTC_IPRD, 1, 3 }, 65 { 96, 1, 4, 3 }, /* SCIF2_ERI */
86 { SCIF2_RXI_IRQ, INTC_IPRD, 1, 3 }, 66 { 97, 1, 4, 3 }, /* SCIF2_RXI */
87 { SCIF2_BRI_IRQ, INTC_IPRD, 1, 3 }, 67 { 98, 1, 4, 3 }, /* SCIF2_BRI */
88 { SCIF2_TXI_IRQ, INTC_IPRD, 1, 3 }, 68 { 99, 1, 4, 3 }, /* SCIF2_TXI */
89}; 69};
90 70
71static unsigned int ipr_offsets[] = {
72 0xf8080000, /* IPRC */
73 0xf8080002, /* IPRD */
74 0xf8080004, /* IPRE */
75 0xf8080006, /* IPRF */
76 0xf8080008, /* IPRG */
77};
78
79/* given the IPR index return the address of the IPR register */
80unsigned int map_ipridx_to_addr(int idx)
81{
82 if (unlikely(idx >= ARRAY_SIZE(ipr_offsets)))
83 return 0;
84 return ipr_offsets[idx];
85}
86
91void __init init_IRQ_ipr(void) 87void __init init_IRQ_ipr(void)
92{ 88{
93 make_ipr_irq(sh7619_ipr_map, ARRAY_SIZE(sh7619_ipr_map)); 89 make_ipr_irq(sh7619_ipr_map, ARRAY_SIZE(sh7619_ipr_map));
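
The new table format drops the per-board register #defines in favour of an IPR index that make_ipr_irq() resolves through map_ipridx_to_addr(). Judging from the initializers here and the "/* IRQ, IPR-idx, shift, priority */" comment in the sh7760 hunk later in this series, the descriptor presumably looks roughly like this (field names are assumptions; the real struct is in <asm/irq.h>):

struct ipr_data {
	unsigned int irq;	/* vector number */
	int ipr_idx;		/* index into the CPU's ipr_offsets[] */
	int shift;		/* bit position of the priority field */
	int priority;		/* priority value to program */
	unsigned int addr;	/* IPR register address; 0 = resolve via idx */
};
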
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 87c6c0542089..426f6db01fc6 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -17,14 +17,14 @@
17int __init detect_cpu_and_cache_system(void) 17int __init detect_cpu_and_cache_system(void)
18{ 18{
19 /* Just SH7206 for now .. */ 19 /* Just SH7206 for now .. */
20 cpu_data->type = CPU_SH7206; 20 current_cpu_data.type = CPU_SH7206;
21 21
22 cpu_data->dcache.ways = 4; 22 current_cpu_data.dcache.ways = 4;
23 cpu_data->dcache.way_incr = (1 << 11); 23 current_cpu_data.dcache.way_incr = (1 << 11);
24 cpu_data->dcache.sets = 128; 24 current_cpu_data.dcache.sets = 128;
25 cpu_data->dcache.entry_shift = 4; 25 current_cpu_data.dcache.entry_shift = 4;
26 cpu_data->dcache.linesz = L1_CACHE_BYTES; 26 current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
27 cpu_data->dcache.flags = 0; 27 current_cpu_data.dcache.flags = 0;
28 28
29 /* 29 /*
30 * The icache is the same as the dcache as far as this setup is 30 * The icache is the same as the dcache as far as this setup is
@@ -32,7 +32,7 @@ int __init detect_cpu_and_cache_system(void)
32 * lacks the U bit that the dcache has, none of this has any bearing 32 * lacks the U bit that the dcache has, none of this has any bearing
33 * on the cache info. 33 * on the cache info.
34 */ 34 */
35 cpu_data->icache = cpu_data->dcache; 35 current_cpu_data.icache = current_cpu_data.dcache;
36 36
37 return 0; 37 return 0;
38} 38}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 4b60fcc7d667..4ed9110632bc 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -57,55 +57,52 @@ static int __init sh7206_devices_setup(void)
57} 57}
58__initcall(sh7206_devices_setup); 58__initcall(sh7206_devices_setup);
59 59
60#define INTC_IPR08 0xfffe0c04UL
61#define INTC_IPR09 0xfffe0c06UL
62#define INTC_IPR14 0xfffe0c10UL
63
64#define CMI0_IRQ 140
65
66#define MTU1_TGI1A 164
67
68#define SCIF0_BRI_IRQ 240
69#define SCIF0_ERI_IRQ 241
70#define SCIF0_RXI_IRQ 242
71#define SCIF0_TXI_IRQ 243
72
73#define SCIF1_BRI_IRQ 244
74#define SCIF1_ERI_IRQ 245
75#define SCIF1_RXI_IRQ 246
76#define SCIF1_TXI_IRQ 247
77
78#define SCIF2_BRI_IRQ 248
79#define SCIF2_ERI_IRQ 249
80#define SCIF2_RXI_IRQ 250
81#define SCIF2_TXI_IRQ 251
82
83#define SCIF3_BRI_IRQ 252
84#define SCIF3_ERI_IRQ 253
85#define SCIF3_RXI_IRQ 254
86#define SCIF3_TXI_IRQ 255
87
88static struct ipr_data sh7206_ipr_map[] = { 60static struct ipr_data sh7206_ipr_map[] = {
89 { CMI0_IRQ, INTC_IPR08, 3, 2 }, 61 { 140, 7, 12, 2 }, /* CMI0 */
90 { MTU2_TGI1A, INTC_IPR09, 1, 2 }, 62 { 164, 8, 4, 2 }, /* MTU2_TGI1A */
91 { SCIF0_ERI_IRQ, INTC_IPR14, 3, 3 }, 63 { 240, 13, 12, 3 }, /* SCIF0_BRI */
92 { SCIF0_RXI_IRQ, INTC_IPR14, 3, 3 }, 64 { 241, 13, 12, 3 }, /* SCIF0_ERI */
93 { SCIF0_BRI_IRQ, INTC_IPR14, 3, 3 }, 65 { 242, 13, 12, 3 }, /* SCIF0_RXI */
94 { SCIF0_TXI_IRQ, INTC_IPR14, 3, 3 }, 66 { 243, 13, 12, 3 }, /* SCIF0_TXI */
95 { SCIF1_ERI_IRQ, INTC_IPR14, 2, 3 }, 67 { 244, 13, 8, 3 }, /* SCIF1_BRI */
96 { SCIF1_RXI_IRQ, INTC_IPR14, 2, 3 }, 68 { 245, 13, 8, 3 }, /* SCIF1_ERI */
97 { SCIF1_BRI_IRQ, INTC_IPR14, 2, 3 }, 69 { 246, 13, 8, 3 }, /* SCIF1_RXI */
98 { SCIF1_TXI_IRQ, INTC_IPR14, 2, 3 }, 70 { 247, 13, 8, 3 }, /* SCIF1_TXI */
99 { SCIF2_ERI_IRQ, INTC_IPR14, 1, 3 }, 71 { 248, 13, 4, 3 }, /* SCIF2_BRI */
100 { SCIF2_RXI_IRQ, INTC_IPR14, 1, 3 }, 72 { 249, 13, 4, 3 }, /* SCIF2_ERI */
101 { SCIF2_BRI_IRQ, INTC_IPR14, 1, 3 }, 73 { 250, 13, 4, 3 }, /* SCIF2_RXI */
102 { SCIF2_TXI_IRQ, INTC_IPR14, 1, 3 }, 74 { 251, 13, 4, 3 }, /* SCIF2_TXI */
103 { SCIF3_ERI_IRQ, INTC_IPR14, 0, 3 }, 75 { 252, 13, 0, 3 }, /* SCIF3_BRI */
104 { SCIF3_RXI_IRQ, INTC_IPR14, 0, 3 }, 76 { 253, 13, 0, 3 }, /* SCIF3_ERI */
105 { SCIF3_BRI_IRQ, INTC_IPR14, 0, 3 }, 77 { 254, 13, 0, 3 }, /* SCIF3_RXI */
106 { SCIF3_TXI_IRQ, INTC_IPR14, 0, 3 }, 78 { 255, 13, 0, 3 }, /* SCIF3_TXI */
79};
80
81static unsigned int ipr_offsets[] = {
82 0xfffe0818, /* IPR01 */
83 0xfffe081a, /* IPR02 */
84 0, /* unused */
85 0, /* unused */
86 0xfffe0820, /* IPR05 */
87 0xfffe0c00, /* IPR06 */
88 0xfffe0c02, /* IPR07 */
89 0xfffe0c04, /* IPR08 */
90 0xfffe0c06, /* IPR09 */
91 0xfffe0c08, /* IPR10 */
92 0xfffe0c0a, /* IPR11 */
93 0xfffe0c0c, /* IPR12 */
94 0xfffe0c0e, /* IPR13 */
95 0xfffe0c10, /* IPR14 */
107}; 96};
108 97
98/* given the IPR index return the address of the IPR register */
99unsigned int map_ipridx_to_addr(int idx)
100{
101 if (unlikely(idx >= ARRAY_SIZE(ipr_offsets)))
102 return 0;
103 return ipr_offsets[idx];
104}
105
109void __init init_IRQ_ipr(void) 106void __init init_IRQ_ipr(void)
110{ 107{
111 make_ipr_irq(sh7206_ipr_map, ARRAY_SIZE(sh7206_ipr_map)); 108 make_ipr_irq(sh7206_ipr_map, ARRAY_SIZE(sh7206_ipr_map));
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 8c0dc2700c69..c19205b0f2c0 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,10 +13,8 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h> 15#include <asm/thread_info.h>
16#include <asm/unistd.h>
17#include <asm/cpu/mmu_context.h> 16#include <asm/cpu/mmu_context.h>
18#include <asm/pgtable.h> 17#include <asm/unistd.h>
19#include <asm/page.h>
20 18
21! NOTE: 19! NOTE:
22! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address 20! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -138,14 +136,29 @@ ENTRY(tlb_protection_violation_store)
138 136
139call_dpf: 137call_dpf:
140 mov.l 1f, r0 138 mov.l 1f, r0
141 mov.l @r0, r6 ! address 139 mov r5, r8
140 mov.l @r0, r6
141 mov r6, r9
142 mov.l 2f, r0
143 sts pr, r10
144 jsr @r0
145 mov r15, r4
146 !
147 tst r0, r0
148 bf/s 0f
149 lds r10, pr
150 rts
151 nop
1520: sti
142 mov.l 3f, r0 153 mov.l 3f, r0
143 154 mov r9, r6
155 mov r8, r5
144 jmp @r0 156 jmp @r0
145 mov r15, r4 ! regs 157 mov r15, r4
146 158
147 .align 2 159 .align 2
1481: .long MMU_TEA 1601: .long MMU_TEA
1612: .long __do_page_fault
1493: .long do_page_fault 1623: .long do_page_fault
150 163
151 .align 2 164 .align 2
@@ -173,7 +186,7 @@ call_dae:
173 186
174#if defined(CONFIG_SH_STANDARD_BIOS) 187#if defined(CONFIG_SH_STANDARD_BIOS)
175 /* Unwind the stack and jmp to the debug entry */ 188 /* Unwind the stack and jmp to the debug entry */
176debug_kernel_fw: 189ENTRY(sh_bios_handler)
177 mov.l @r15+, r0 190 mov.l @r15+, r0
178 mov.l @r15+, r1 191 mov.l @r15+, r1
179 mov.l @r15+, r2 192 mov.l @r15+, r2
@@ -332,175 +345,9 @@ general_exception:
332! 345!
333! 346!
334 347
335/* This code makes some assumptions to improve performance.
336 * Make sure they are still true. */
337#if PTRS_PER_PGD != PTRS_PER_PTE
338#error PGD and PTE sizes don't match
339#endif
340
341/* gas doesn't flag impossible values for mov #immediate as an error */
342#if (_PAGE_PRESENT >> 2) > 0x7f
343#error cannot load PAGE_PRESENT as an immediate
344#endif
345#if _PAGE_DIRTY > 0x7f
346#error cannot load PAGE_DIRTY as an immediate
347#endif
348#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
349#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
350#endif
351
352#if defined(CONFIG_CPU_SH4)
353#define ldmmupteh(r) mov.l 8f, r
354#else
355#define ldmmupteh(r) mov #MMU_PTEH, r
356#endif
357
358 .balign 1024,0,1024 348 .balign 1024,0,1024
359tlb_miss: 349tlb_miss:
360#ifdef COUNT_EXCEPTIONS 350 mov.l 1f, k2
361 ! Increment the counts
362 mov.l 9f, k1
363 mov.l @k1, k2
364 add #1, k2
365 mov.l k2, @k1
366#endif
367
368 ! k0 scratch
369 ! k1 pgd and pte pointers
370 ! k2 faulting address
371 ! k3 pgd and pte index masks
372 ! k4 shift
373
374 ! Load up the pgd entry (k1)
375
376 ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
377
378 mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
379 mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
380
381 mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
382
383 mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
384
385 mov k2, k0 ! 5 MT (latency=0)
386 shld k4, k0 ! 99 EX
387
388 and k3, k0 ! 78 EX
389
390 mov.l @(k0, k1), k1 ! 21 LS (latency=2)
391 mov #-(PAGE_SHIFT-2), k4 ! 6 EX
392
393 ! Load up the pte entry (k2)
394
395 mov k2, k0 ! 5 MT (latency=0)
396 shld k4, k0 ! 99 EX
397
398 tst k1, k1 ! 86 MT
399
400 bt 20f ! 110 BR
401
402 and k3, k0 ! 78 EX
403 mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
404
405 mov.l @(k0, k1), k2 ! 21 LS (latency=2)
406 add k0, k1 ! 49 EX
407
408#ifdef CONFIG_CPU_HAS_PTEA
409 ! Test the entry for present and _PAGE_ACCESSED
410
411 mov #-28, k3 ! 6 EX
412 mov k2, k0 ! 5 MT (latency=0)
413
414 tst k4, k2 ! 68 MT
415 shld k3, k0 ! 99 EX
416
417 bt 20f ! 110 BR
418
419 ! Set PTEA register
420 ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
421 !
422 ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
423
424 and #0xe, k0 ! 79 EX
425
426 mov k0, k3 ! 5 MT (latency=0)
427 mov k2, k0 ! 5 MT (latency=0)
428
429 and #1, k0 ! 79 EX
430
431 or k0, k3 ! 82 EX
432
433 ldmmupteh(k0) ! 9 LS (latency=2)
434 shll2 k4 ! 101 EX _PAGE_ACCESSED
435
436 tst k4, k2 ! 68 MT
437
438 mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
439
440 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
441
442 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
443#else
444
445 ! Test the entry for present and _PAGE_ACCESSED
446
447 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
448 tst k4, k2 ! 68 MT
449
450 shll2 k4 ! 101 EX _PAGE_ACCESSED
451 ldmmupteh(k0) ! 9 LS (latency=2)
452
453 bt 20f ! 110 BR
454 tst k4, k2 ! 68 MT
455
456 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
457
458#endif
459
460 ! Set up the entry
461
462 and k2, k3 ! 78 EX
463 bt/s 10f ! 108 BR
464
465 mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
466
467 ldtlb ! 128 CO
468
469 ! At least one instruction between ldtlb and rte
470 nop ! 119 NOP
471
472 rte ! 126 CO
473
474 nop ! 119 NOP
475
476
47710: or k4, k2 ! 82 EX
478
479 ldtlb ! 128 CO
480
481 ! At least one instruction between ldtlb and rte
482 mov.l k2, @k1 ! 27 LS
483
484 rte ! 126 CO
485
486 ! Note we cannot execute mov here, because it is executed after
487 ! restoring SSR, so would be executed in user space.
488 nop ! 119 NOP
489
490
491 .align 5
492 ! One cache line if possible...
4931: .long swapper_pg_dir
4944: .short (PTRS_PER_PGD-1) << 2
4955: .short _PAGE_PRESENT
4967: .long _PAGE_FLAGS_HARDWARE_MASK
4978: .long MMU_PTEH
498#ifdef COUNT_EXCEPTIONS
4999: .long exception_count_miss
500#endif
501
502 ! Either pgd or pte not present
50320: mov.l 1f, k2
504 mov.l 4f, k3 351 mov.l 4f, k3
505 bra handle_exception 352 bra handle_exception
506 mov.l @k2, k2 353 mov.l @k2, k2
@@ -651,15 +498,6 @@ skip_save:
651 bf interrupt_exception 498 bf interrupt_exception
652 shlr2 r8 499 shlr2 r8
653 shlr r8 500 shlr r8
654
655#ifdef COUNT_EXCEPTIONS
656 mov.l 5f, r9
657 add r8, r9
658 mov.l @r9, r10
659 add #1, r10
660 mov.l r10, @r9
661#endif
662
663 mov.l 4f, r9 501 mov.l 4f, r9
664 add r8, r9 502 add r8, r9
665 mov.l @r9, r9 503 mov.l @r9, r9
@@ -673,9 +511,6 @@ skip_save:
6732: .long 0x000080f0 ! FD=1, IMASK=15 5112: .long 0x000080f0 ! FD=1, IMASK=15
6743: .long 0xcfffffff ! RB=0, BL=0 5123: .long 0xcfffffff ! RB=0, BL=0
6754: .long exception_handling_table 5134: .long exception_handling_table
676#ifdef COUNT_EXCEPTIONS
6775: .long exception_count_table
678#endif
679 514
680interrupt_exception: 515interrupt_exception:
681 mov.l 1f, r9 516 mov.l 1f, r9
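
The rewritten call_dpf path tries __do_page_fault() first with interrupts still blocked, and only re-enables them ("sti") for the full handler when the fast path fails. Rendered as C for clarity — a sketch: the signatures and the r4=regs, r5=writeaccess, r6=address register assignment are inferred from the assembly above:

extern int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			   unsigned long address);
extern void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			  unsigned long address);

static void call_dpf_in_c(struct pt_regs *regs, unsigned long writeaccess,
			  unsigned long address)
{
	if (__do_page_fault(regs, writeaccess, address) == 0)
		return;			/* fast path: TLB entry refilled */

	local_irq_enable();		/* the "sti" before the slow path */
	do_page_fault(regs, writeaccess, address);
}
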
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index e67098836290..821b0ab7b528 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -50,41 +50,41 @@ int __init detect_cpu_and_cache_system(void)
50 50
51 back_to_P1(); 51 back_to_P1();
52 52
53 cpu_data->dcache.ways = 4; 53 current_cpu_data.dcache.ways = 4;
54 cpu_data->dcache.entry_shift = 4; 54 current_cpu_data.dcache.entry_shift = 4;
55 cpu_data->dcache.linesz = L1_CACHE_BYTES; 55 current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
56 cpu_data->dcache.flags = 0; 56 current_cpu_data.dcache.flags = 0;
57 57
58 /* 58 /*
59 * 7709A/7729 has 16K cache (256-entry), while 7702 has only 59 * 7709A/7729 has 16K cache (256-entry), while 7702 has only
60 * 2K(direct) 7702 is not supported (yet) 60 * 2K(direct) 7702 is not supported (yet)
61 */ 61 */
62 if (data0 == data1 && data2 == data3) { /* Shadow */ 62 if (data0 == data1 && data2 == data3) { /* Shadow */
63 cpu_data->dcache.way_incr = (1 << 11); 63 current_cpu_data.dcache.way_incr = (1 << 11);
64 cpu_data->dcache.entry_mask = 0x7f0; 64 current_cpu_data.dcache.entry_mask = 0x7f0;
65 cpu_data->dcache.sets = 128; 65 current_cpu_data.dcache.sets = 128;
66 cpu_data->type = CPU_SH7708; 66 current_cpu_data.type = CPU_SH7708;
67 67
68 cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC; 68 current_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
69 } else { /* 7709A or 7729 */ 69 } else { /* 7709A or 7729 */
70 cpu_data->dcache.way_incr = (1 << 12); 70 current_cpu_data.dcache.way_incr = (1 << 12);
71 cpu_data->dcache.entry_mask = 0xff0; 71 current_cpu_data.dcache.entry_mask = 0xff0;
72 cpu_data->dcache.sets = 256; 72 current_cpu_data.dcache.sets = 256;
73 cpu_data->type = CPU_SH7729; 73 current_cpu_data.type = CPU_SH7729;
74 74
75#if defined(CONFIG_CPU_SUBTYPE_SH7706) 75#if defined(CONFIG_CPU_SUBTYPE_SH7706)
76 cpu_data->type = CPU_SH7706; 76 current_cpu_data.type = CPU_SH7706;
77#endif 77#endif
78#if defined(CONFIG_CPU_SUBTYPE_SH7710) 78#if defined(CONFIG_CPU_SUBTYPE_SH7710)
79 cpu_data->type = CPU_SH7710; 79 current_cpu_data.type = CPU_SH7710;
80#endif 80#endif
81#if defined(CONFIG_CPU_SUBTYPE_SH7705) 81#if defined(CONFIG_CPU_SUBTYPE_SH7705)
82 cpu_data->type = CPU_SH7705; 82 current_cpu_data.type = CPU_SH7705;
83 83
84#if defined(CONFIG_SH7705_CACHE_32KB) 84#if defined(CONFIG_SH7705_CACHE_32KB)
85 cpu_data->dcache.way_incr = (1 << 13); 85 current_cpu_data.dcache.way_incr = (1 << 13);
86 cpu_data->dcache.entry_mask = 0x1ff0; 86 current_cpu_data.dcache.entry_mask = 0x1ff0;
87 cpu_data->dcache.sets = 512; 87 current_cpu_data.dcache.sets = 512;
88 ctrl_outl(CCR_CACHE_32KB, CCR3); 88 ctrl_outl(CCR_CACHE_32KB, CCR3);
89#else 89#else
90 ctrl_outl(CCR_CACHE_16KB, CCR3); 90 ctrl_outl(CCR_CACHE_16KB, CCR3);
@@ -95,8 +95,8 @@ int __init detect_cpu_and_cache_system(void)
95 /* 95 /*
96 * SH-3 doesn't have separate caches 96 * SH-3 doesn't have separate caches
97 */ 97 */
98 cpu_data->dcache.flags |= SH_CACHE_COMBINED; 98 current_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
99 cpu_data->icache = cpu_data->dcache; 99 current_cpu_data.icache = current_cpu_data.dcache;
100 100
101 return 0; 101 return 0;
102} 102}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7709.c b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
index ff43ef2a1f0c..dc9b211cf87f 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
@@ -51,3 +51,24 @@ static int __init sh7709_devices_setup(void)
51 ARRAY_SIZE(sh7709_devices)); 51 ARRAY_SIZE(sh7709_devices));
52} 52}
53__initcall(sh7709_devices_setup); 53__initcall(sh7709_devices_setup);
54
55#define IPRx(A,N) .addr=A, .shift=0*N*-1
56#define IPRA(N) IPRx(0xfffffee2UL,N)
57#define IPRB(N) IPRx(0xfffffee4UL,N)
58#define IPRE(N) IPRx(0xa400001aUL,N)
59
60static struct ipr_data sh7709_ipr_map[] = {
61 [16] = { IPRA(15-12), 2 }, /* TMU TUNI0 */
62 [17] = { IPRA(11-8), 4 }, /* TMU TUNI1 */
63 [22] = { IPRA(3-0), 2 }, /* RTC CUI */
64 [23 ... 26] = { IPRB(7-4), 3 }, /* SCI */
65 [27] = { IPRB(15-12), 2 }, /* WDT ITI */
66 [48 ... 51] = { IPRE(15-12), 7 }, /* DMA */
67 [52 ... 55] = { IPRE(11-8), 3 }, /* IRDA */
68 [56 ... 59] = { IPRE(7-4), 3 }, /* SCIF */
69};
70
71void __init init_IRQ_ipr()
72{
73 make_ipr_irq(sh7709_ipr_map, ARRAY_SIZE(sh7709_ipr_map));
74}
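
The sh7709 table above leans on GCC's designated array initializers, including the [first ... last] range extension, so entries are keyed directly by IRQ number and any slot not named stays zero-initialized (and is then skipped by make_ipr_irq()). A toy example of the idiom:

static int prio[8] = {
	[1] = 5,		/* single designated entry */
	[3 ... 6] = 2,		/* four entries sharing one value (GCC) */
};
/* prio is { 0, 5, 0, 2, 2, 2, 2, 0 } */
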
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 9031a22a2ce7..9d28c88d2f9d 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -10,11 +10,10 @@
10 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details. 11 * for more details.
12 */ 12 */
13
14#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/io.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/io.h>
18 17
19int __init detect_cpu_and_cache_system(void) 18int __init detect_cpu_and_cache_system(void)
20{ 19{
@@ -36,20 +35,20 @@ int __init detect_cpu_and_cache_system(void)
36 /* 35 /*
37 * Setup some sane SH-4 defaults for the icache 36 * Setup some sane SH-4 defaults for the icache
38 */ 37 */
39 cpu_data->icache.way_incr = (1 << 13); 38 current_cpu_data.icache.way_incr = (1 << 13);
40 cpu_data->icache.entry_shift = 5; 39 current_cpu_data.icache.entry_shift = 5;
41 cpu_data->icache.sets = 256; 40 current_cpu_data.icache.sets = 256;
42 cpu_data->icache.ways = 1; 41 current_cpu_data.icache.ways = 1;
43 cpu_data->icache.linesz = L1_CACHE_BYTES; 42 current_cpu_data.icache.linesz = L1_CACHE_BYTES;
44 43
45 /* 44 /*
46 * And again for the dcache .. 45 * And again for the dcache ..
47 */ 46 */
48 cpu_data->dcache.way_incr = (1 << 14); 47 current_cpu_data.dcache.way_incr = (1 << 14);
49 cpu_data->dcache.entry_shift = 5; 48 current_cpu_data.dcache.entry_shift = 5;
50 cpu_data->dcache.sets = 512; 49 current_cpu_data.dcache.sets = 512;
51 cpu_data->dcache.ways = 1; 50 current_cpu_data.dcache.ways = 1;
52 cpu_data->dcache.linesz = L1_CACHE_BYTES; 51 current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
53 52
54 /* 53 /*
55 * Setup some generic flags we can probe 54 * Setup some generic flags we can probe
@@ -57,16 +56,16 @@ int __init detect_cpu_and_cache_system(void)
57 */ 56 */
58 if (((pvr >> 16) & 0xff) == 0x10) { 57 if (((pvr >> 16) & 0xff) == 0x10) {
59 if ((cvr & 0x02000000) == 0) 58 if ((cvr & 0x02000000) == 0)
60 cpu_data->flags |= CPU_HAS_L2_CACHE; 59 current_cpu_data.flags |= CPU_HAS_L2_CACHE;
61 if ((cvr & 0x10000000) == 0) 60 if ((cvr & 0x10000000) == 0)
62 cpu_data->flags |= CPU_HAS_DSP; 61 current_cpu_data.flags |= CPU_HAS_DSP;
63 62
64 cpu_data->flags |= CPU_HAS_LLSC; 63 current_cpu_data.flags |= CPU_HAS_LLSC;
65 } 64 }
66 65
67 /* FPU detection works for everyone */ 66 /* FPU detection works for everyone */
68 if ((cvr & 0x20000000) == 1) 67 if ((cvr & 0x20000000) == 1)
69 cpu_data->flags |= CPU_HAS_FPU; 68 current_cpu_data.flags |= CPU_HAS_FPU;
70 69
71 /* Mask off the upper chip ID */ 70 /* Mask off the upper chip ID */
72 pvr &= 0xffff; 71 pvr &= 0xffff;
@@ -77,151 +76,151 @@ int __init detect_cpu_and_cache_system(void)
77 */ 76 */
78 switch (pvr) { 77 switch (pvr) {
79 case 0x205: 78 case 0x205:
80 cpu_data->type = CPU_SH7750; 79 current_cpu_data.type = CPU_SH7750;
81 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | 80 current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
82 CPU_HAS_PERF_COUNTER; 81 CPU_HAS_PERF_COUNTER;
83 break; 82 break;
84 case 0x206: 83 case 0x206:
85 cpu_data->type = CPU_SH7750S; 84 current_cpu_data.type = CPU_SH7750S;
86 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | 85 current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
87 CPU_HAS_PERF_COUNTER; 86 CPU_HAS_PERF_COUNTER;
88 break; 87 break;
89 case 0x1100: 88 case 0x1100:
90 cpu_data->type = CPU_SH7751; 89 current_cpu_data.type = CPU_SH7751;
91 cpu_data->flags |= CPU_HAS_FPU; 90 current_cpu_data.flags |= CPU_HAS_FPU;
92 break; 91 break;
93 case 0x2000: 92 case 0x2000:
94 cpu_data->type = CPU_SH73180; 93 current_cpu_data.type = CPU_SH73180;
95 cpu_data->icache.ways = 4; 94 current_cpu_data.icache.ways = 4;
96 cpu_data->dcache.ways = 4; 95 current_cpu_data.dcache.ways = 4;
97 cpu_data->flags |= CPU_HAS_LLSC; 96 current_cpu_data.flags |= CPU_HAS_LLSC;
98 break; 97 break;
99 case 0x2001: 98 case 0x2001:
100 case 0x2004: 99 case 0x2004:
101 cpu_data->type = CPU_SH7770; 100 current_cpu_data.type = CPU_SH7770;
102 cpu_data->icache.ways = 4; 101 current_cpu_data.icache.ways = 4;
103 cpu_data->dcache.ways = 4; 102 current_cpu_data.dcache.ways = 4;
104 103
105 cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_LLSC; 104 current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_LLSC;
106 break; 105 break;
107 case 0x2006: 106 case 0x2006:
108 case 0x200A: 107 case 0x200A:
109 if (prr == 0x61) 108 if (prr == 0x61)
110 cpu_data->type = CPU_SH7781; 109 current_cpu_data.type = CPU_SH7781;
111 else 110 else
112 cpu_data->type = CPU_SH7780; 111 current_cpu_data.type = CPU_SH7780;
113 112
114 cpu_data->icache.ways = 4; 113 current_cpu_data.icache.ways = 4;
115 cpu_data->dcache.ways = 4; 114 current_cpu_data.dcache.ways = 4;
116 115
117 cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | 116 current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
118 CPU_HAS_LLSC; 117 CPU_HAS_LLSC;
119 break; 118 break;
120 case 0x3000: 119 case 0x3000:
121 case 0x3003: 120 case 0x3003:
122 case 0x3009: 121 case 0x3009:
123 cpu_data->type = CPU_SH7343; 122 current_cpu_data.type = CPU_SH7343;
124 cpu_data->icache.ways = 4; 123 current_cpu_data.icache.ways = 4;
125 cpu_data->dcache.ways = 4; 124 current_cpu_data.dcache.ways = 4;
126 cpu_data->flags |= CPU_HAS_LLSC; 125 current_cpu_data.flags |= CPU_HAS_LLSC;
127 break; 126 break;
128 case 0x3008: 127 case 0x3008:
129 if (prr == 0xa0) { 128 if (prr == 0xa0) {
130 cpu_data->type = CPU_SH7722; 129 current_cpu_data.type = CPU_SH7722;
131 cpu_data->icache.ways = 4; 130 current_cpu_data.icache.ways = 4;
132 cpu_data->dcache.ways = 4; 131 current_cpu_data.dcache.ways = 4;
133 cpu_data->flags |= CPU_HAS_LLSC; 132 current_cpu_data.flags |= CPU_HAS_LLSC;
134 } 133 }
135 break; 134 break;
136 case 0x8000: 135 case 0x8000:
137 cpu_data->type = CPU_ST40RA; 136 current_cpu_data.type = CPU_ST40RA;
138 cpu_data->flags |= CPU_HAS_FPU; 137 current_cpu_data.flags |= CPU_HAS_FPU;
139 break; 138 break;
140 case 0x8100: 139 case 0x8100:
141 cpu_data->type = CPU_ST40GX1; 140 current_cpu_data.type = CPU_ST40GX1;
142 cpu_data->flags |= CPU_HAS_FPU; 141 current_cpu_data.flags |= CPU_HAS_FPU;
143 break; 142 break;
144 case 0x700: 143 case 0x700:
145 cpu_data->type = CPU_SH4_501; 144 current_cpu_data.type = CPU_SH4_501;
146 cpu_data->icache.ways = 2; 145 current_cpu_data.icache.ways = 2;
147 cpu_data->dcache.ways = 2; 146 current_cpu_data.dcache.ways = 2;
148 break; 147 break;
149 case 0x600: 148 case 0x600:
150 cpu_data->type = CPU_SH4_202; 149 current_cpu_data.type = CPU_SH4_202;
151 cpu_data->icache.ways = 2; 150 current_cpu_data.icache.ways = 2;
152 cpu_data->dcache.ways = 2; 151 current_cpu_data.dcache.ways = 2;
153 cpu_data->flags |= CPU_HAS_FPU; 152 current_cpu_data.flags |= CPU_HAS_FPU;
154 break; 153 break;
155 case 0x500 ... 0x501: 154 case 0x500 ... 0x501:
156 switch (prr) { 155 switch (prr) {
157 case 0x10: 156 case 0x10:
158 cpu_data->type = CPU_SH7750R; 157 current_cpu_data.type = CPU_SH7750R;
159 break; 158 break;
160 case 0x11: 159 case 0x11:
161 cpu_data->type = CPU_SH7751R; 160 current_cpu_data.type = CPU_SH7751R;
162 break; 161 break;
163 case 0x50 ... 0x5f: 162 case 0x50 ... 0x5f:
164 cpu_data->type = CPU_SH7760; 163 current_cpu_data.type = CPU_SH7760;
165 break; 164 break;
166 } 165 }
167 166
168 cpu_data->icache.ways = 2; 167 current_cpu_data.icache.ways = 2;
169 cpu_data->dcache.ways = 2; 168 current_cpu_data.dcache.ways = 2;
170 169
171 cpu_data->flags |= CPU_HAS_FPU; 170 current_cpu_data.flags |= CPU_HAS_FPU;
172 171
173 break; 172 break;
174 default: 173 default:
175 cpu_data->type = CPU_SH_NONE; 174 current_cpu_data.type = CPU_SH_NONE;
176 break; 175 break;
177 } 176 }
178 177
179#ifdef CONFIG_SH_DIRECT_MAPPED 178#ifdef CONFIG_SH_DIRECT_MAPPED
180 cpu_data->icache.ways = 1; 179 current_cpu_data.icache.ways = 1;
181 cpu_data->dcache.ways = 1; 180 current_cpu_data.dcache.ways = 1;
182#endif 181#endif
183 182
184#ifdef CONFIG_CPU_HAS_PTEA 183#ifdef CONFIG_CPU_HAS_PTEA
185 cpu_data->flags |= CPU_HAS_PTEA; 184 current_cpu_data.flags |= CPU_HAS_PTEA;
186#endif 185#endif
187 186
188 /* 187 /*
189 * On anything that's not a direct-mapped cache, look to the CVR 188 * On anything that's not a direct-mapped cache, look to the CVR
190 * for I/D-cache specifics. 189 * for I/D-cache specifics.
191 */ 190 */
192 if (cpu_data->icache.ways > 1) { 191 if (current_cpu_data.icache.ways > 1) {
193 size = sizes[(cvr >> 20) & 0xf]; 192 size = sizes[(cvr >> 20) & 0xf];
194 cpu_data->icache.way_incr = (size >> 1); 193 current_cpu_data.icache.way_incr = (size >> 1);
195 cpu_data->icache.sets = (size >> 6); 194 current_cpu_data.icache.sets = (size >> 6);
196 195
197 } 196 }
198 197
199 /* Setup the rest of the I-cache info */ 198 /* Setup the rest of the I-cache info */
200 cpu_data->icache.entry_mask = cpu_data->icache.way_incr - 199 current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
201 cpu_data->icache.linesz; 200 current_cpu_data.icache.linesz;
202 201
203 cpu_data->icache.way_size = cpu_data->icache.sets * 202 current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
204 cpu_data->icache.linesz; 203 current_cpu_data.icache.linesz;
205 204
206 /* And the rest of the D-cache */ 205 /* And the rest of the D-cache */
207 if (cpu_data->dcache.ways > 1) { 206 if (current_cpu_data.dcache.ways > 1) {
208 size = sizes[(cvr >> 16) & 0xf]; 207 size = sizes[(cvr >> 16) & 0xf];
209 cpu_data->dcache.way_incr = (size >> 1); 208 current_cpu_data.dcache.way_incr = (size >> 1);
210 cpu_data->dcache.sets = (size >> 6); 209 current_cpu_data.dcache.sets = (size >> 6);
211 } 210 }
212 211
213 cpu_data->dcache.entry_mask = cpu_data->dcache.way_incr - 212 current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
214 cpu_data->dcache.linesz; 213 current_cpu_data.dcache.linesz;
215 214
216 cpu_data->dcache.way_size = cpu_data->dcache.sets * 215 current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
217 cpu_data->dcache.linesz; 216 current_cpu_data.dcache.linesz;
218 217
219 /* 218 /*
220 * Setup the L2 cache desc 219 * Setup the L2 cache desc
221 * 220 *
222 * SH-4A's have an optional PIPT L2. 221 * SH-4A's have an optional PIPT L2.
223 */ 222 */
224 if (cpu_data->flags & CPU_HAS_L2_CACHE) { 223 if (current_cpu_data.flags & CPU_HAS_L2_CACHE) {
225 /* 224 /*
226 * Size calculation is much more sensible 225 * Size calculation is much more sensible
227 * than it is for the L1. 226 * than it is for the L1.
@@ -232,16 +231,22 @@ int __init detect_cpu_and_cache_system(void)
232 231
233 BUG_ON(!size); 232 BUG_ON(!size);
234 233
235 cpu_data->scache.way_incr = (1 << 16); 234 current_cpu_data.scache.way_incr = (1 << 16);
236 cpu_data->scache.entry_shift = 5; 235 current_cpu_data.scache.entry_shift = 5;
237 cpu_data->scache.ways = 4; 236 current_cpu_data.scache.ways = 4;
238 cpu_data->scache.linesz = L1_CACHE_BYTES; 237 current_cpu_data.scache.linesz = L1_CACHE_BYTES;
239 cpu_data->scache.entry_mask = 238
240 (cpu_data->scache.way_incr - cpu_data->scache.linesz); 239 current_cpu_data.scache.entry_mask =
241 cpu_data->scache.sets = size / 240 (current_cpu_data.scache.way_incr -
242 (cpu_data->scache.linesz * cpu_data->scache.ways); 241 current_cpu_data.scache.linesz);
243 cpu_data->scache.way_size = 242
244 (cpu_data->scache.sets * cpu_data->scache.linesz); 243 current_cpu_data.scache.sets = size /
244 (current_cpu_data.scache.linesz *
245 current_cpu_data.scache.ways);
246
247 current_cpu_data.scache.way_size =
248 (current_cpu_data.scache.sets *
249 current_cpu_data.scache.linesz);
245 } 250 }
246 251
247 return 0; 252 return 0;
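
The CVR-derived geometry above is consistent with a 2-way cache with 32-byte lines: size is the total cache size, so one way spans size >> 1 bytes and size >> 6 equals size / (2 ways * 32-byte lines). Worked through for a 16 KiB cache (values hypothetical, chosen only to show the arithmetic):

void cvr_geometry_example(void)
{
	unsigned int size       = 16384;	 /* sizes[(cvr >> 16) & 0xf] */
	unsigned int way_incr   = size >> 1;	 /* 8192: one way's span */
	unsigned int sets       = size >> 6;	 /* 256 = 16384 / (2 * 32) */
	unsigned int way_size   = sets * 32;	 /* 8192 bytes per way */
	unsigned int entry_mask = way_incr - 32; /* 0x1fe0 */
}
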
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index cbac27634c0b..6f8f458912c7 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -46,11 +46,13 @@ static struct platform_device rtc_device = {
46 46
47static struct plat_sci_port sci_platform_data[] = { 47static struct plat_sci_port sci_platform_data[] = {
48 { 48 {
49#ifndef CONFIG_SH_RTS7751R2D
49 .mapbase = 0xffe00000, 50 .mapbase = 0xffe00000,
50 .flags = UPF_BOOT_AUTOCONF, 51 .flags = UPF_BOOT_AUTOCONF,
51 .type = PORT_SCI, 52 .type = PORT_SCI,
52 .irqs = { 23, 24, 25, 0 }, 53 .irqs = { 23, 24, 25, 0 },
53 }, { 54 }, {
55#endif
54 .mapbase = 0xffe80000, 56 .mapbase = 0xffe80000,
55 .flags = UPF_BOOT_AUTOCONF, 57 .flags = UPF_BOOT_AUTOCONF,
56 .type = PORT_SCIF, 58 .type = PORT_SCIF,
@@ -101,7 +103,7 @@ static struct ipr_data sh7750_ipr_map[] = {
101 { 35, 2, 8, 7 }, /* DMAC DMTE1 */ 103 { 35, 2, 8, 7 }, /* DMAC DMTE1 */
102 { 36, 2, 8, 7 }, /* DMAC DMTE2 */ 104 { 36, 2, 8, 7 }, /* DMAC DMTE2 */
103 { 37, 2, 8, 7 }, /* DMAC DMTE3 */ 105 { 37, 2, 8, 7 }, /* DMAC DMTE3 */
104 { 28, 2, 8, 7 }, /* DMAC DMAE */ 106 { 38, 2, 8, 7 }, /* DMAC DMAE */
105}; 107};
106 108
107static struct ipr_data sh7751_ipr_map[] = { 109static struct ipr_data sh7751_ipr_map[] = {
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 07e5377bf550..b7c702821e6f 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -52,17 +52,11 @@ static int __init sh7760_devices_setup(void)
52} 52}
53__initcall(sh7760_devices_setup); 53__initcall(sh7760_devices_setup);
54 54
55/*
56 * SH7760 INTC2-Style interrupts, vectors IRQ48-111 INTEVT 0x800-0xFE0
57 */
58static struct intc2_data intc2_irq_table[] = { 55static struct intc2_data intc2_irq_table[] = {
59 /* INTPRIO0 | INTMSK0 */
60 {48, 0, 28, 0, 31, 3}, /* IRQ 4 */ 56 {48, 0, 28, 0, 31, 3}, /* IRQ 4 */
61 {49, 0, 24, 0, 30, 3}, /* IRQ 3 */ 57 {49, 0, 24, 0, 30, 3}, /* IRQ 3 */
62 {50, 0, 20, 0, 29, 3}, /* IRQ 2 */ 58 {50, 0, 20, 0, 29, 3}, /* IRQ 2 */
63 {51, 0, 16, 0, 28, 3}, /* IRQ 1 */ 59 {51, 0, 16, 0, 28, 3}, /* IRQ 1 */
64 /* 52-55 (INTEVT 0x880-0x8E0) unused/reserved */
65 /* INTPRIO4 | INTMSK0 */
66 {56, 4, 28, 0, 25, 3}, /* HCAN2_CHAN0 */ 60 {56, 4, 28, 0, 25, 3}, /* HCAN2_CHAN0 */
67 {57, 4, 24, 0, 24, 3}, /* HCAN2_CHAN1 */ 61 {57, 4, 24, 0, 24, 3}, /* HCAN2_CHAN1 */
68 {58, 4, 20, 0, 23, 3}, /* I2S_CHAN0 */ 62 {58, 4, 20, 0, 23, 3}, /* I2S_CHAN0 */
@@ -71,18 +65,15 @@ static struct intc2_data intc2_irq_table[] = {
71 {61, 4, 8, 0, 20, 3}, /* AC97_CHAN1 */ 65 {61, 4, 8, 0, 20, 3}, /* AC97_CHAN1 */
72 {62, 4, 4, 0, 19, 3}, /* I2C_CHAN0 */ 66 {62, 4, 4, 0, 19, 3}, /* I2C_CHAN0 */
73 {63, 4, 0, 0, 18, 3}, /* I2C_CHAN1 */ 67 {63, 4, 0, 0, 18, 3}, /* I2C_CHAN1 */
74 /* INTPRIO8 | INTMSK0 */
75 {52, 8, 16, 0, 11, 3}, /* SCIF0_ERI_IRQ */ 68 {52, 8, 16, 0, 11, 3}, /* SCIF0_ERI_IRQ */
76 {53, 8, 16, 0, 10, 3}, /* SCIF0_RXI_IRQ */ 69 {53, 8, 16, 0, 10, 3}, /* SCIF0_RXI_IRQ */
77 {54, 8, 16, 0, 9, 3}, /* SCIF0_BRI_IRQ */ 70 {54, 8, 16, 0, 9, 3}, /* SCIF0_BRI_IRQ */
78 {55, 8, 16, 0, 8, 3}, /* SCIF0_TXI_IRQ */ 71 {55, 8, 16, 0, 8, 3}, /* SCIF0_TXI_IRQ */
79 {64, 8, 28, 0, 17, 3}, /* USBHI_IRQ */ 72 {64, 8, 28, 0, 17, 3}, /* USBHI_IRQ */
80 {65, 8, 24, 0, 16, 3}, /* LCDC */ 73 {65, 8, 24, 0, 16, 3}, /* LCDC */
81 /* 66, 67 unused */
82 {68, 8, 20, 0, 14, 13}, /* DMABRGI0_IRQ */ 74 {68, 8, 20, 0, 14, 13}, /* DMABRGI0_IRQ */
83 {69, 8, 20, 0, 13, 13}, /* DMABRGI1_IRQ */ 75 {69, 8, 20, 0, 13, 13}, /* DMABRGI1_IRQ */
84 {70, 8, 20, 0, 12, 13}, /* DMABRGI2_IRQ */ 76 {70, 8, 20, 0, 12, 13}, /* DMABRGI2_IRQ */
85 /* 71 unused */
86 {72, 8, 12, 0, 7, 3}, /* SCIF1_ERI_IRQ */ 77 {72, 8, 12, 0, 7, 3}, /* SCIF1_ERI_IRQ */
87 {73, 8, 12, 0, 6, 3}, /* SCIF1_RXI_IRQ */ 78 {73, 8, 12, 0, 6, 3}, /* SCIF1_RXI_IRQ */
88 {74, 8, 12, 0, 5, 3}, /* SCIF1_BRI_IRQ */ 79 {74, 8, 12, 0, 5, 3}, /* SCIF1_BRI_IRQ */
@@ -91,26 +82,71 @@ static struct intc2_data intc2_irq_table[] = {
91 {77, 8, 8, 0, 2, 3}, /* SCIF2_RXI_IRQ */ 82 {77, 8, 8, 0, 2, 3}, /* SCIF2_RXI_IRQ */
92 {78, 8, 8, 0, 1, 3}, /* SCIF2_BRI_IRQ */ 83 {78, 8, 8, 0, 1, 3}, /* SCIF2_BRI_IRQ */
93 {79, 8, 8, 0, 0, 3}, /* SCIF2_TXI_IRQ */ 84 {79, 8, 8, 0, 0, 3}, /* SCIF2_TXI_IRQ */
94 /* | INTMSK4 */
95 {80, 8, 4, 4, 23, 3}, /* SIM_ERI */ 85 {80, 8, 4, 4, 23, 3}, /* SIM_ERI */
96 {81, 8, 4, 4, 22, 3}, /* SIM_RXI */ 86 {81, 8, 4, 4, 22, 3}, /* SIM_RXI */
97 {82, 8, 4, 4, 21, 3}, /* SIM_TXI */ 87 {82, 8, 4, 4, 21, 3}, /* SIM_TXI */
98 {83, 8, 4, 4, 20, 3}, /* SIM_TEI */ 88 {83, 8, 4, 4, 20, 3}, /* SIM_TEI */
99 {84, 8, 0, 4, 19, 3}, /* HSPII */ 89 {84, 8, 0, 4, 19, 3}, /* HSPII */
100 /* INTPRIOC | INTMSK4 */
101 /* 85-87 unused/reserved */
102 {88, 12, 20, 4, 18, 3}, /* MMCI0 */ 90 {88, 12, 20, 4, 18, 3}, /* MMCI0 */
103 {89, 12, 20, 4, 17, 3}, /* MMCI1 */ 91 {89, 12, 20, 4, 17, 3}, /* MMCI1 */
104 {90, 12, 20, 4, 16, 3}, /* MMCI2 */ 92 {90, 12, 20, 4, 16, 3}, /* MMCI2 */
105 {91, 12, 20, 4, 15, 3}, /* MMCI3 */ 93 {91, 12, 20, 4, 15, 3}, /* MMCI3 */
106 {92, 12, 12, 4, 6, 3}, /* MFI (unsure, bug? in my 7760 manual*/ 94 {92, 12, 12, 4, 6, 3}, /* MFI */
107 /* 93-107 reserved/undocumented */
108 {108,12, 4, 4, 1, 3}, /* ADC */ 95 {108,12, 4, 4, 1, 3}, /* ADC */
109 {109,12, 0, 4, 0, 3}, /* CMTI */ 96 {109,12, 0, 4, 0, 3}, /* CMTI */
110 /* 110-111 reserved/unused */
111}; 97};
112 98
99static struct ipr_data sh7760_ipr_map[] = {
100 /* IRQ, IPR-idx, shift, priority */
101 { 16, 0, 12, 2 }, /* TMU0 TUNI*/
102 { 17, 0, 8, 2 }, /* TMU1 TUNI */
103 { 18, 0, 4, 2 }, /* TMU2 TUNI */
104 { 19, 0, 4, 2 }, /* TMU2 TIPCI */
105 { 27, 1, 12, 2 }, /* WDT ITI */
106 { 28, 1, 8, 2 }, /* REF RCMI */
107 { 29, 1, 8, 2 }, /* REF ROVI */
108 { 32, 2, 0, 7 }, /* HUDI */
109 { 33, 2, 12, 7 }, /* GPIOI */
110 { 34, 2, 8, 7 }, /* DMAC DMTE0 */
111 { 35, 2, 8, 7 }, /* DMAC DMTE1 */
112 { 36, 2, 8, 7 }, /* DMAC DMTE2 */
113 { 37, 2, 8, 7 }, /* DMAC DMTE3 */
114 { 38, 2, 8, 7 }, /* DMAC DMAE */
115 { 44, 2, 8, 7 }, /* DMAC DMTE4 */
116 { 45, 2, 8, 7 }, /* DMAC DMTE5 */
117 { 46, 2, 8, 7 }, /* DMAC DMTE6 */
118 { 47, 2, 8, 7 }, /* DMAC DMTE7 */
119/* these here are only valid if INTC_ICR bit 7 is set to 1!
120 * XXX: maybe CONFIG_SH_IRLMODE symbol? SH7751 could use it too */
121#if 0
122 { 2, 3, 12, 3 }, /* IRL0 */
123 { 5, 3, 8, 3 }, /* IRL1 */
124 { 8, 3, 4, 3 }, /* IRL2 */
125 { 11, 3, 0, 3 }, /* IRL3 */
126#endif
127};
128
129static unsigned long ipr_offsets[] = {
130 0xffd00004UL, /* 0: IPRA */
131 0xffd00008UL, /* 1: IPRB */
132 0xffd0000cUL, /* 2: IPRC */
133 0xffd00010UL, /* 3: IPRD */
134};
135
136/* given the IPR index return the address of the IPR register */
137unsigned int map_ipridx_to_addr(int idx)
138{
139 if (idx >= ARRAY_SIZE(ipr_offsets))
140 return 0;
141 return ipr_offsets[idx];
142}
143
113void __init init_IRQ_intc2(void) 144void __init init_IRQ_intc2(void)
114{ 145{
115 make_intc2_irq(intc2_irq_table, ARRAY_SIZE(intc2_irq_table)); 146 make_intc2_irq(intc2_irq_table, ARRAY_SIZE(intc2_irq_table));
116} 147}
148
149void __init init_IRQ_ipr(void)
150{
151 make_ipr_irq(sh7760_ipr_map, ARRAY_SIZE(sh7760_ipr_map));
152}
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
new file mode 100644
index 000000000000..13b66746410a
--- /dev/null
+++ b/arch/sh/kernel/debugtraps.S
@@ -0,0 +1,41 @@
1/*
2 * arch/sh/kernel/debugtraps.S
3 *
4 * Debug trap jump tables for SuperH
5 *
6 * Copyright (C) 2006 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/sys.h>
13#include <linux/linkage.h>
14
15#if !defined(CONFIG_SH_KGDB)
16#define kgdb_handle_exception debug_trap_handler
17#endif
18
19#if !defined(CONFIG_SH_STANDARD_BIOS)
20#define sh_bios_handler debug_trap_handler
21#endif
22
23 .data
24
25ENTRY(debug_trap_table)
26 .long debug_trap_handler /* 0x30 */
27 .long debug_trap_handler /* 0x31 */
28 .long debug_trap_handler /* 0x32 */
29 .long debug_trap_handler /* 0x33 */
30 .long debug_trap_handler /* 0x34 */
31 .long debug_trap_handler /* 0x35 */
32 .long debug_trap_handler /* 0x36 */
33 .long debug_trap_handler /* 0x37 */
34 .long debug_trap_handler /* 0x38 */
35 .long debug_trap_handler /* 0x39 */
36 .long debug_trap_handler /* 0x3a */
37 .long debug_trap_handler /* 0x3b */
38 .long kgdb_handle_exception /* 0x3c */
39 .long debug_trap_handler /* 0x3d */
40 .long bug_trap_handler /* 0x3e */
41 .long sh_bios_handler /* 0x3f */
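
The preprocessor fallbacks at the top of debugtraps.S keep the 16-entry table fully populated whether or not kgdb and the standard BIOS are configured in: a compiled-out handler simply degrades to debug_trap_handler instead of leaving an unresolved symbol. The same trick in C (a sketch; debug_trap_handler's signature is assumed):

extern void debug_trap_handler(void);

#ifndef CONFIG_SH_KGDB
#define kgdb_handle_exception debug_trap_handler
#else
extern void kgdb_handle_exception(void);
#endif

static void (*const demo_trap_table[])(void) = {
	debug_trap_handler,	/* trapa #0x30 */
	kgdb_handle_exception,	/* slot that may alias the default */
};
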
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 560b91cdd15c..9048c0326d87 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -106,12 +106,32 @@ static struct console scif_console = {
106}; 106};
107 107
108#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) 108#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
109#define DEFAULT_BAUD 115200
109/* 110/*
110 * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 111 * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
111 * devices that aren't using sh-ipl+g. 112 * devices that aren't using sh-ipl+g.
112 */ 113 */
113static void scif_sercon_init(int baud) 114static void scif_sercon_init(char *s)
114{ 115{
116 unsigned baud = DEFAULT_BAUD;
117 char *e;
118
119 if (*s == ',')
120 ++s;
121
122 if (*s) {
123 /* ignore ioport/device name */
124 s += strcspn(s, ",");
125 if (*s == ',')
126 s++;
127 }
128
129 if (*s) {
130 baud = simple_strtoul(s, &e, 0);
131 if (baud == 0 || s == e)
132 baud = DEFAULT_BAUD;
133 }
134
115 ctrl_outw(0, scif_port.mapbase + 8); 135 ctrl_outw(0, scif_port.mapbase + 8);
116 ctrl_outw(0, scif_port.mapbase); 136 ctrl_outw(0, scif_port.mapbase);
117 137
@@ -167,7 +187,7 @@ int __init setup_early_printk(char *buf)
167 early_console = &scif_console; 187 early_console = &scif_console;
168 188
169#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) 189#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
170 scif_sercon_init(115200); 190 scif_sercon_init(buf + 6);
171#endif 191#endif
172 } 192 }
173#endif 193#endif
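
scif_sercon_init() now scans the tail of the earlyprintk= option instead of hardcoding 115200; setup_early_printk() hands it buf + 6, i.e. whatever follows the "serial" keyword, so "earlyprintk=serial,ttySC1,19200" arrives as ",ttySC1,19200". A standalone userspace sketch of the same scan (strtoul standing in for the kernel's simple_strtoul; the device name is ignored, just as above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int parse_baud(const char *s, unsigned int def)
{
	unsigned int baud = def;
	char *e;

	if (*s == ',')
		s++;
	if (*s) {			/* skip the port/device name */
		s += strcspn(s, ",");
		if (*s == ',')
			s++;
	}
	if (*s) {
		baud = strtoul(s, &e, 0);
		if (baud == 0 || s == e)
			baud = def;
	}
	return baud;
}

int main(void)
{
	printf("%u\n", parse_baud(",ttySC1,19200", 115200));	/* 19200 */
	printf("%u\n", parse_baud("", 115200));			/* 115200 */
	return 0;
}
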
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index fc279aeb73ab..ab4ebb856c2a 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -54,79 +54,24 @@
54# define resume_kernel __restore_all 54# define resume_kernel __restore_all
55#endif 55#endif
56 56
57#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
58! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
59! If both are configured, handle the debug traps (breakpoints) in SW,
60! but still allow BIOS traps to FW.
61
62 .align 2
63debug_kernel:
64#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
65 /* Force BIOS call to FW (debug_trap put TRA in r8) */
66 mov r8,r0
67 shlr2 r0
68 cmp/eq #0x3f,r0
69 bt debug_kernel_fw
70#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
71
72debug_enter:
73#if defined(CONFIG_SH_KGDB)
74 /* Jump to kgdb, pass stacked regs as arg */
75debug_kernel_sw:
76 mov.l 3f, r0
77 jmp @r0
78 mov r15, r4
79 .align 2
803: .long kgdb_handle_exception
81#endif /* CONFIG_SH_KGDB */
82#ifdef CONFIG_SH_STANDARD_BIOS
83 bra debug_kernel_fw
84 nop
85#endif
86#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
87
88 .align 2
89debug_trap:
90#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
91 mov r8, r0
92 shlr2 r0
93 cmp/eq #0x3f, r0 ! sh_bios() trap
94 bf 1f
95#ifdef CONFIG_SH_KGDB
96 cmp/eq #0xff, r0 ! XXX: KGDB trap, fix for SH-2.
97 bf 1f
98#endif
99 mov #OFF_SR, r0
100 mov.l @(r0,r15), r0 ! get status register
101 shll r0
102 shll r0 ! kernel space?
103 bt/s debug_kernel
1041:
105#endif
106 mov.l @r15, r0 ! Restore R0 value
107 mov.l 1f, r8
108 jmp @r8
109 nop
110 57
111 .align 2 58 .align 2
112ENTRY(exception_error) 59ENTRY(exception_error)
113 ! 60 !
114#ifdef CONFIG_TRACE_IRQFLAGS 61#ifdef CONFIG_TRACE_IRQFLAGS
115 mov.l 3f, r0 62 mov.l 2f, r0
116 jsr @r0 63 jsr @r0
117 nop 64 nop
118#endif 65#endif
119 sti 66 sti
120 mov.l 2f, r0 67 mov.l 1f, r0
121 jmp @r0 68 jmp @r0
122 nop 69 nop
123 70
124!
125 .align 2 71 .align 2
1261: .long break_point_trap_software 721: .long do_exception_error
1272: .long do_exception_error
128#ifdef CONFIG_TRACE_IRQFLAGS 73#ifdef CONFIG_TRACE_IRQFLAGS
1293: .long trace_hardirqs_on 742: .long trace_hardirqs_on
130#endif 75#endif
131 76
132 .align 2 77 .align 2
@@ -331,16 +276,31 @@ __restore_all:
3311: .long restore_all 2761: .long restore_all
332 277
333 .align 2 278 .align 2
334not_syscall_tra:
335 bra debug_trap
336 nop
337
338 .align 2
339syscall_badsys: ! Bad syscall number 279syscall_badsys: ! Bad syscall number
340 mov #-ENOSYS, r0 280 mov #-ENOSYS, r0
341 bra resume_userspace 281 bra resume_userspace
342 mov.l r0, @(OFF_R0,r15) ! Return value 282 mov.l r0, @(OFF_R0,r15) ! Return value
343 283
284/*
285 * The main debug trap handler.
286 *
287 * r8=TRA (not the trap number!)
288 *
289 * Note: This assumes that the trapa value is left in its original
290 * form (without the shlr2 shift) so the calculation for the jump
291 * call table offset remains a simple in place mask.
292 */
293debug_trap:
294 mov r8, r0
295 and #(0xf << 2), r0
296 mov.l 1f, r8
297 add r0, r8
298 mov.l @r8, r8
299 jmp @r8
300 nop
301
302 .align 2
3031: .long debug_trap_table
344 304
345/* 305/*
346 * Syscall interface: 306 * Syscall interface:
@@ -348,17 +308,19 @@ syscall_badsys: ! Bad syscall number
348 * Syscall #: R3 308 * Syscall #: R3
349 * Arguments #0 to #3: R4--R7 309 * Arguments #0 to #3: R4--R7
350 * Arguments #4 to #6: R0, R1, R2 310 * Arguments #4 to #6: R0, R1, R2
351 * TRA: (number of arguments + 0x10) x 4 311 * TRA: (number of arguments + ABI revision) x 4
352 * 312 *
353 * This code also handles delegating other traps to the BIOS/gdb stub 313 * This code also handles delegating other traps to the BIOS/gdb stub
354 * according to: 314 * according to:
355 * 315 *
356 * Trap number 316 * Trap number
357 * (TRA>>2) Purpose 317 * (TRA>>2) Purpose
358 * -------- ------- 318 * -------- -------
359 * 0x0-0xf old syscall ABI 319 * 0x00-0x0f original SH-3/4 syscall ABI (not in general use).
360 * 0x10-0x1f new syscall ABI 320 * 0x10-0x1f general SH-3/4 syscall ABI.
361 * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. 321 * 0x20-0x2f syscall ABI for SH-2 parts.
322 * 0x30-0x3f debug traps used by the kernel.
323 * 0x40-0xff Not supported by all parts, so left unhandled.
362 * 324 *
363 * Note: When we're first called, the TRA value must be shifted 325 * Note: When we're first called, the TRA value must be shifted
364 * right 2 bits in order to get the value that was used as the "trapa" 326 * right 2 bits in order to get the value that was used as the "trapa"
@@ -375,17 +337,22 @@ ret_from_fork:
375 nop 337 nop
376 .align 2 338 .align 2
3771: .long schedule_tail 3391: .long schedule_tail
378 ! 340
341/*
342 * The poorly named main trapa decode and dispatch routine, for
343 * system calls and debug traps through their respective jump tables.
344 */
379ENTRY(system_call) 345ENTRY(system_call)
380#if !defined(CONFIG_CPU_SH2) 346#if !defined(CONFIG_CPU_SH2)
381 mov.l 1f, r9 347 mov.l 1f, r9
382 mov.l @r9, r8 ! Read from TRA (Trap Address) Register 348 mov.l @r9, r8 ! Read from TRA (Trap Address) Register
383#endif 349#endif
384 ! 350 /*
385 ! Is the trap argument >= 0x20? (TRA will be >= 0x80) 351 * Check the trap type
386 mov #0x7f, r9 352 */
353 mov #((0x20 << 2) - 1), r9
387 cmp/hi r9, r8 354 cmp/hi r9, r8
388 bt/s not_syscall_tra 355 bt/s debug_trap ! it's a debug trap..
389 mov #OFF_TRA, r9 356 mov #OFF_TRA, r9
390 add r15, r9 357 add r15, r9
391 mov.l r8, @r9 ! set TRA value to tra 358 mov.l r8, @r9 ! set TRA value to tra
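
With TRA holding the trapa operand already shifted left by 2, the dispatch constants fall out neatly: (0x20 << 2) - 1 = 0x7f is the largest syscall TRA, and masking with 0xf << 2 yields, with no further shifting in the assembly, the byte offset of a 4-byte slot in debug_trap_table. The same arithmetic in C (a sketch; names follow the assembly above):

extern void (*debug_trap_table[16])(void);	/* from debugtraps.S */

static void trapa_dispatch(unsigned long tra)	/* tra = operand << 2 */
{
	if (tra > ((0x20 << 2) - 1)) {
		/* operand >= 0x20; the defined debug range is 0x30-0x3f,
		 * and tra & 0x3c is already the table byte offset */
		debug_trap_table[(tra & (0xf << 2)) >> 2]();
	} else {
		/* syscall: e.g. trapa #0x13 -> TRA 0x4c, a 3-argument
		 * call in the 0x10-0x1f SH-3/4 ABI range */
	}
}
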
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 28ec7487de8c..66626c03e1ee 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -1,9 +1,8 @@
1/* $Id: io_generic.c,v 1.2 2003/05/04 19:29:53 lethal Exp $ 1/*
2 * 2 * arch/sh/kernel/io_generic.c
3 * linux/arch/sh/kernel/io_generic.c
4 * 3 *
5 * Copyright (C) 2000 Niibe Yutaka 4 * Copyright (C) 2000 Niibe Yutaka
6 * Copyright (C) 2005 Paul Mundt 5 * Copyright (C) 2005 - 2007 Paul Mundt
7 * 6 *
8 * Generic I/O routine. These can be used where a machine specific version 7 * Generic I/O routine. These can be used where a machine specific version
9 * is not required. 8 * is not required.
@@ -13,8 +12,9 @@
13 * for more details. 12 * for more details.
14 */ 13 */
15#include <linux/module.h> 14#include <linux/module.h>
16#include <asm/io.h> 15#include <linux/io.h>
17#include <asm/machvec.h> 16#include <asm/machvec.h>
17#include <asm/cacheflush.h>
18 18
19#ifdef CONFIG_CPU_SH3 19#ifdef CONFIG_CPU_SH3
20/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a 20/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
@@ -96,6 +96,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
96 while (count--) 96 while (count--)
97 *buf++ = *port_addr; 97 *buf++ = *port_addr;
98 98
99 flush_dcache_all();
99 dummy_read(); 100 dummy_read();
100} 101}
101 102
@@ -170,6 +171,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count)
170 while (count--) 171 while (count--)
171 *port_addr = *buf++; 172 *port_addr = *buf++;
172 173
174 flush_dcache_all();
173 dummy_read(); 175 dummy_read();
174} 176}
175 177
diff --git a/arch/sh/kernel/kgdb_stub.c b/arch/sh/kernel/kgdb_stub.c
index 9c6315f0335d..d8927d85492e 100644
--- a/arch/sh/kernel/kgdb_stub.c
+++ b/arch/sh/kernel/kgdb_stub.c
@@ -1323,8 +1323,11 @@ static void kgdb_command_loop(const int excep_code, const int trapa_value)
1323} 1323}
1324 1324
1325/* There has been an exception, most likely a breakpoint. */ 1325/* There has been an exception, most likely a breakpoint. */
1326void kgdb_handle_exception(struct pt_regs *regs) 1326asmlinkage void kgdb_handle_exception(unsigned long r4, unsigned long r5,
1327 unsigned long r6, unsigned long r7,
1328 struct pt_regs __regs)
1327{ 1329{
1330 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
1328 int excep_code, vbr_val; 1331 int excep_code, vbr_val;
1329 int count; 1332 int count;
1330 int trapa_value = ctrl_inl(TRA); 1333 int trapa_value = ctrl_inl(TRA);
@@ -1368,8 +1371,6 @@ void kgdb_handle_exception(struct pt_regs *regs)
1368 1371
1369 vbr_val = trap_registers.vbr; 1372 vbr_val = trap_registers.vbr;
1370 asm("ldc %0, vbr": :"r"(vbr_val)); 1373 asm("ldc %0, vbr": :"r"(vbr_val));
1371
1372 return;
1373} 1374}
1374 1375
1375/* Trigger a breakpoint by function */ 1376/* Trigger a breakpoint by function */
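
kgdb_handle_exception() now uses the calling convention the SH trap handlers share: r4-r7 arrive in registers per the ABI while the rest of struct pt_regs overlays the stacked argument area, so taking the address of the by-value __regs parameter recovers the saved register frame. RELOC_HIDE(&__regs, 0) keeps GCC from optimizing on the assumption that a local argument cannot alias that frame. A minimal sketch of the convention (hypothetical handler name):

asmlinkage void demo_handler(unsigned long r4, unsigned long r5,
			     unsigned long r6, unsigned long r7,
			     struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	/* inspect or modify the trapping context via regs->pc, etc. */
}
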
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 486c06e18033..9d6a438b3eaf 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -1,42 +1,30 @@
1/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $ 1/*
2 * arch/sh/kernel/process.c
2 * 3 *
3 * linux/arch/sh/kernel/process.c 4 * This file handles the architecture-dependent parts of process handling..
4 * 5 *
5 * Copyright (C) 1995 Linus Torvalds 6 * Copyright (C) 1995 Linus Torvalds
6 * 7 *
7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 8 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
8 * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC 9 * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
10 * Copyright (C) 2002 - 2006 Paul Mundt
9 */ 11 */
10
11/*
12 * This file handles the architecture-dependent parts of process handling..
13 */
14
15#include <linux/module.h> 12#include <linux/module.h>
16#include <linux/unistd.h>
17#include <linux/mm.h> 13#include <linux/mm.h>
18#include <linux/elfcore.h> 14#include <linux/elfcore.h>
19#include <linux/a.out.h>
20#include <linux/slab.h>
21#include <linux/pm.h> 15#include <linux/pm.h>
22#include <linux/ptrace.h>
23#include <linux/kallsyms.h> 16#include <linux/kallsyms.h>
24#include <linux/kexec.h> 17#include <linux/kexec.h>
25
26#include <asm/io.h>
27#include <asm/uaccess.h> 18#include <asm/uaccess.h>
28#include <asm/mmu_context.h> 19#include <asm/mmu_context.h>
29#include <asm/elf.h>
30#include <asm/ubc.h> 20#include <asm/ubc.h>
31 21
32static int hlt_counter=0; 22static int hlt_counter;
33
34int ubc_usercnt = 0; 23int ubc_usercnt = 0;
35 24
36#define HARD_IDLE_TIMEOUT (HZ / 3) 25#define HARD_IDLE_TIMEOUT (HZ / 3)
37 26
38void (*pm_idle)(void); 27void (*pm_idle)(void);
39
40void (*pm_power_off)(void); 28void (*pm_power_off)(void);
41EXPORT_SYMBOL(pm_power_off); 29EXPORT_SYMBOL(pm_power_off);
42 30
@@ -44,14 +32,12 @@ void disable_hlt(void)
44{ 32{
45 hlt_counter++; 33 hlt_counter++;
46} 34}
47
48EXPORT_SYMBOL(disable_hlt); 35EXPORT_SYMBOL(disable_hlt);
49 36
50void enable_hlt(void) 37void enable_hlt(void)
51{ 38{
52 hlt_counter--; 39 hlt_counter--;
53} 40}
54
55EXPORT_SYMBOL(enable_hlt); 41EXPORT_SYMBOL(enable_hlt);
56 42
57void default_idle(void) 43void default_idle(void)
@@ -152,19 +138,21 @@ __asm__(".align 5\n"
152 ".align 2\n\t" 138 ".align 2\n\t"
153 "1:.long do_exit"); 139 "1:.long do_exit");
154 140
141/* Don't use this in BL=1(cli). Or else, CPU resets! */
155int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 142int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
156{ /* Don't use this in BL=1(cli). Or else, CPU resets! */ 143{
157 struct pt_regs regs; 144 struct pt_regs regs;
158 145
159 memset(&regs, 0, sizeof(regs)); 146 memset(&regs, 0, sizeof(regs));
160 regs.regs[4] = (unsigned long) arg; 147 regs.regs[4] = (unsigned long)arg;
161 regs.regs[5] = (unsigned long) fn; 148 regs.regs[5] = (unsigned long)fn;
162 149
163 regs.pc = (unsigned long) kernel_thread_helper; 150 regs.pc = (unsigned long)kernel_thread_helper;
164 regs.sr = (1 << 30); 151 regs.sr = (1 << 30);
165 152
166 /* Ok, create the new process.. */ 153 /* Ok, create the new process.. */
167 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); 154 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
155 &regs, 0, NULL, NULL);
168} 156}
169 157
170/* 158/*
@@ -211,21 +199,20 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
211 return fpvalid; 199 return fpvalid;
212} 200}
213 201
214/* 202/*
215 * Capture the user space registers if the task is not running (in user space) 203 * Capture the user space registers if the task is not running (in user space)
216 */ 204 */
217int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) 205int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
218{ 206{
219 struct pt_regs ptregs; 207 struct pt_regs ptregs;
220 208
221 ptregs = *task_pt_regs(tsk); 209 ptregs = *task_pt_regs(tsk);
222 elf_core_copy_regs(regs, &ptregs); 210 elf_core_copy_regs(regs, &ptregs);
223 211
224 return 1; 212 return 1;
225} 213}
226 214
227int 215int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
228dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
229{ 216{
230 int fpvalid = 0; 217 int fpvalid = 0;
231 218
@@ -263,12 +250,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
263 childregs->regs[15] = usp; 250 childregs->regs[15] = usp;
264 ti->addr_limit = USER_DS; 251 ti->addr_limit = USER_DS;
265 } else { 252 } else {
266 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE; 253 childregs->regs[15] = (unsigned long)task_stack_page(p) +
254 THREAD_SIZE;
267 ti->addr_limit = KERNEL_DS; 255 ti->addr_limit = KERNEL_DS;
268 } 256 }
269 if (clone_flags & CLONE_SETTLS) { 257
258 if (clone_flags & CLONE_SETTLS)
270 childregs->gbr = childregs->regs[0]; 259 childregs->gbr = childregs->regs[0];
271 } 260
272 childregs->regs[0] = 0; /* Set return value for child */ 261 childregs->regs[0] = 0; /* Set return value for child */
273 262
274 p->thread.sp = (unsigned long) childregs; 263 p->thread.sp = (unsigned long) childregs;
@@ -280,8 +269,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
280} 269}
281 270
282/* Tracing by user break controller. */ 271/* Tracing by user break controller. */
283static void 272static void ubc_set_tracing(int asid, unsigned long pc)
284ubc_set_tracing(int asid, unsigned long pc)
285{ 273{
286#if defined(CONFIG_CPU_SH4A) 274#if defined(CONFIG_CPU_SH4A)
287 unsigned long val; 275 unsigned long val;
@@ -297,7 +285,7 @@ ubc_set_tracing(int asid, unsigned long pc)
297 val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE); 285 val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
298 ctrl_outl(val, UBC_CRR0); 286 ctrl_outl(val, UBC_CRR0);
299 287
300 /* Read UBC register that we writed last. For chekking UBC Register changed */ 288 /* Read UBC register that we wrote last, for checking update */
301 val = ctrl_inl(UBC_CRR0); 289 val = ctrl_inl(UBC_CRR0);
302 290
303#else /* CONFIG_CPU_SH4A */ 291#else /* CONFIG_CPU_SH4A */
@@ -305,13 +293,14 @@ ubc_set_tracing(int asid, unsigned long pc)
305 293
306#ifdef CONFIG_MMU 294#ifdef CONFIG_MMU
307 /* We don't have any ASID settings for the SH-2! */ 295 /* We don't have any ASID settings for the SH-2! */
308 if (cpu_data->type != CPU_SH7604) 296 if (current_cpu_data.type != CPU_SH7604)
309 ctrl_outb(asid, UBC_BASRA); 297 ctrl_outb(asid, UBC_BASRA);
310#endif 298#endif
311 299
312 ctrl_outl(0, UBC_BAMRA); 300 ctrl_outl(0, UBC_BAMRA);
313 301
314 if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) { 302 if (current_cpu_data.type == CPU_SH7729 ||
303 current_cpu_data.type == CPU_SH7710) {
315 ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA); 304 ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
316 ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR); 305 ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
317 } else { 306 } else {
@@ -325,7 +314,8 @@ ubc_set_tracing(int asid, unsigned long pc)
325 * switch_to(x,y) should switch tasks from x to y. 314 * switch_to(x,y) should switch tasks from x to y.
326 * 315 *
327 */ 316 */
328struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) 317struct task_struct *__switch_to(struct task_struct *prev,
318 struct task_struct *next)
329{ 319{
330#if defined(CONFIG_SH_FPU) 320#if defined(CONFIG_SH_FPU)
331 unlazy_fpu(prev, task_pt_regs(prev)); 321 unlazy_fpu(prev, task_pt_regs(prev));
@@ -354,7 +344,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
354#ifdef CONFIG_MMU 344#ifdef CONFIG_MMU
355 /* 345 /*
356 * Restore the kernel mode register 346 * Restore the kernel mode register
357 * k7 (r7_bank1) 347 * k7 (r7_bank1)
358 */ 348 */
359 asm volatile("ldc %0, r7_bank" 349 asm volatile("ldc %0, r7_bank"
360 : /* no output */ 350 : /* no output */
@@ -367,7 +357,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
367 else if (next->thread.ubc_pc && next->mm) { 357 else if (next->thread.ubc_pc && next->mm) {
368 int asid = 0; 358 int asid = 0;
369#ifdef CONFIG_MMU 359#ifdef CONFIG_MMU
370 asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK; 360 asid |= cpu_asid(smp_processor_id(), next->mm);
371#endif 361#endif
372 ubc_set_tracing(asid, next->thread.ubc_pc); 362 ubc_set_tracing(asid, next->thread.ubc_pc);
373 } else { 363 } else {
@@ -405,7 +395,8 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
405 if (!newsp) 395 if (!newsp)
406 newsp = regs->regs[15]; 396 newsp = regs->regs[15];
407 return do_fork(clone_flags, newsp, regs, 0, 397 return do_fork(clone_flags, newsp, regs, 0,
408 (int __user *)parent_tidptr, (int __user *)child_tidptr); 398 (int __user *)parent_tidptr,
399 (int __user *)child_tidptr);
409} 400}
410 401
411/* 402/*
@@ -493,9 +484,27 @@ asmlinkage void break_point_trap(void)
493 force_sig(SIGTRAP, current); 484 force_sig(SIGTRAP, current);
494} 485}
495 486
496asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5, 487/*
497 unsigned long r6, unsigned long r7, 488 * Generic trap handler.
498 struct pt_regs __regs) 489 */
490asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
491 unsigned long r6, unsigned long r7,
492 struct pt_regs __regs)
493{
494 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
495
496 /* Rewind */
497 regs->pc -= 2;
498
499 force_sig(SIGTRAP, current);
500}
501
502/*
503 * Special handler for BUG() traps.
504 */
505asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
506 unsigned long r6, unsigned long r7,
507 struct pt_regs __regs)
499{ 508{
500 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 509 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
501 510
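
debug_trap_handler() and bug_trap_handler() take over from break_point_trap_software(). The PC rewind exists because every SH opcode, trapa included, is 16 bits wide and the saved PC points past it; backing up by 2 lets the SIGTRAP consumer see (and potentially re-execute) the trap site. Common body, condensed (the wrapper name is invented here):

    static void sh_trap_common(struct pt_regs *regs)
    {
            regs->pc -= 2;                  /* rewind over the 16-bit trapa */
            force_sig(SIGTRAP, current);    /* report breakpoint to the task */
    }
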
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index d6b817aa568f..98802ab28211 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -1,14 +1,11 @@
1/* 1/*
2 * linux/arch/sh/kernel/setup.c 2 * arch/sh/kernel/setup.c
3 * 3 *
4 * Copyright (C) 1999 Niibe Yutaka
5 * Copyright (C) 2002, 2003 Paul Mundt
6 */
7
8/*
9 * This file handles the architecture-dependent parts of initialization 4 * This file handles the architecture-dependent parts of initialization
5 *
6 * Copyright (C) 1999 Niibe Yutaka
7 * Copyright (C) 2002 - 2006 Paul Mundt
10 */ 8 */
11
12#include <linux/screen_info.h> 9#include <linux/screen_info.h>
13#include <linux/ioport.h> 10#include <linux/ioport.h>
14#include <linux/init.h> 11#include <linux/init.h>
@@ -395,9 +392,9 @@ static const char *cpu_name[] = {
395 [CPU_SH_NONE] = "Unknown" 392 [CPU_SH_NONE] = "Unknown"
396}; 393};
397 394
398const char *get_cpu_subtype(void) 395const char *get_cpu_subtype(struct sh_cpuinfo *c)
399{ 396{
400 return cpu_name[boot_cpu_data.type]; 397 return cpu_name[c->type];
401} 398}
402 399
403#ifdef CONFIG_PROC_FS 400#ifdef CONFIG_PROC_FS
@@ -407,19 +404,19 @@ static const char *cpu_flags[] = {
407 "ptea", "llsc", "l2", NULL 404 "ptea", "llsc", "l2", NULL
408}; 405};
409 406
410static void show_cpuflags(struct seq_file *m) 407static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
411{ 408{
412 unsigned long i; 409 unsigned long i;
413 410
414 seq_printf(m, "cpu flags\t:"); 411 seq_printf(m, "cpu flags\t:");
415 412
416 if (!cpu_data->flags) { 413 if (!c->flags) {
417 seq_printf(m, " %s\n", cpu_flags[0]); 414 seq_printf(m, " %s\n", cpu_flags[0]);
418 return; 415 return;
419 } 416 }
420 417
421 for (i = 0; cpu_flags[i]; i++) 418 for (i = 0; cpu_flags[i]; i++)
422 if ((cpu_data->flags & (1 << i))) 419 if ((c->flags & (1 << i)))
423 seq_printf(m, " %s", cpu_flags[i+1]); 420 seq_printf(m, " %s", cpu_flags[i+1]);
424 421
425 seq_printf(m, "\n"); 422 seq_printf(m, "\n");
@@ -441,16 +438,20 @@ static void show_cacheinfo(struct seq_file *m, const char *type,
441 */ 438 */
442static int show_cpuinfo(struct seq_file *m, void *v) 439static int show_cpuinfo(struct seq_file *m, void *v)
443{ 440{
444 unsigned int cpu = smp_processor_id(); 441 struct sh_cpuinfo *c = v;
442 unsigned int cpu = c - cpu_data;
443
444 if (!cpu_online(cpu))
445 return 0;
445 446
446 if (!cpu && cpu_online(cpu)) 447 if (cpu == 0)
447 seq_printf(m, "machine\t\t: %s\n", get_system_type()); 448 seq_printf(m, "machine\t\t: %s\n", get_system_type());
448 449
449 seq_printf(m, "processor\t: %d\n", cpu); 450 seq_printf(m, "processor\t: %d\n", cpu);
450 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine); 451 seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
451 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype()); 452 seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
452 453
453 show_cpuflags(m); 454 show_cpuflags(m, c);
454 455
455 seq_printf(m, "cache type\t: "); 456 seq_printf(m, "cache type\t: ");
456 457
@@ -459,22 +460,22 @@ static int show_cpuinfo(struct seq_file *m, void *v)
459 * unified cache on the SH-2 and SH-3, as well as the harvard 460 * unified cache on the SH-2 and SH-3, as well as the harvard
460 * style cache on the SH-4. 461 * style cache on the SH-4.
461 */ 462 */
462 if (boot_cpu_data.icache.flags & SH_CACHE_COMBINED) { 463 if (c->icache.flags & SH_CACHE_COMBINED) {
463 seq_printf(m, "unified\n"); 464 seq_printf(m, "unified\n");
464 show_cacheinfo(m, "cache", boot_cpu_data.icache); 465 show_cacheinfo(m, "cache", c->icache);
465 } else { 466 } else {
466 seq_printf(m, "split (harvard)\n"); 467 seq_printf(m, "split (harvard)\n");
467 show_cacheinfo(m, "icache", boot_cpu_data.icache); 468 show_cacheinfo(m, "icache", c->icache);
468 show_cacheinfo(m, "dcache", boot_cpu_data.dcache); 469 show_cacheinfo(m, "dcache", c->dcache);
469 } 470 }
470 471
471 /* Optional secondary cache */ 472 /* Optional secondary cache */
472 if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) 473 if (c->flags & CPU_HAS_L2_CACHE)
473 show_cacheinfo(m, "scache", boot_cpu_data.scache); 474 show_cacheinfo(m, "scache", c->scache);
474 475
475 seq_printf(m, "bogomips\t: %lu.%02lu\n", 476 seq_printf(m, "bogomips\t: %lu.%02lu\n",
476 boot_cpu_data.loops_per_jiffy/(500000/HZ), 477 c->loops_per_jiffy/(500000/HZ),
477 (boot_cpu_data.loops_per_jiffy/(5000/HZ)) % 100); 478 (c->loops_per_jiffy/(5000/HZ)) % 100);
478 479
479 return show_clocks(m); 480 return show_clocks(m);
480} 481}
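
show_cpuinfo() now reads everything through the struct sh_cpuinfo handed in by the seq_file iterator instead of boot_cpu_data, recovers the CPU number by pointer arithmetic against the cpu_data array, and skips offline CPUs up front. Assumed relationship between the two (the array declaration lives elsewhere):

    extern struct sh_cpuinfo cpu_data[];    /* one slot per possible CPU */

    static unsigned int cpuinfo_index(struct sh_cpuinfo *c)
    {
            return c - cpu_data;    /* same arithmetic as show_cpuinfo() */
    }
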
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index e6106239a0fe..fe1b276c97c6 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(__flush_purge_region);
105EXPORT_SYMBOL(clear_user_page); 105EXPORT_SYMBOL(clear_user_page);
106#endif 106#endif
107 107
108EXPORT_SYMBOL(flush_tlb_page);
109EXPORT_SYMBOL(__down_trylock); 108EXPORT_SYMBOL(__down_trylock);
110 109
111#ifdef CONFIG_SMP 110#ifdef CONFIG_SMP
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 379c88bf5d9a..32f10a03fbb5 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -127,7 +127,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
127{ 127{
128 struct task_struct *tsk = current; 128 struct task_struct *tsk = current;
129 129
130 if (!(cpu_data->flags & CPU_HAS_FPU)) 130 if (!(current_cpu_data.flags & CPU_HAS_FPU))
131 return 0; 131 return 0;
132 132
133 set_used_math(); 133 set_used_math();
@@ -140,7 +140,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
140{ 140{
141 struct task_struct *tsk = current; 141 struct task_struct *tsk = current;
142 142
143 if (!(cpu_data->flags & CPU_HAS_FPU)) 143 if (!(current_cpu_data.flags & CPU_HAS_FPU))
144 return 0; 144 return 0;
145 145
146 if (!used_math()) { 146 if (!used_math()) {
@@ -181,7 +181,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
181#undef COPY 181#undef COPY
182 182
183#ifdef CONFIG_SH_FPU 183#ifdef CONFIG_SH_FPU
184 if (cpu_data->flags & CPU_HAS_FPU) { 184 if (current_cpu_data.flags & CPU_HAS_FPU) {
185 int owned_fp; 185 int owned_fp;
186 struct task_struct *tsk = current; 186 struct task_struct *tsk = current;
187 187
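
The cpu_data->flags to current_cpu_data.flags conversion here is the same per-CPU change seen across the series: cpu_data becomes an array, and current_cpu_data presumably indexes the running CPU's slot in the usual style:

    /* assumed definition, following the convention on other ports */
    #define current_cpu_data cpu_data[smp_processor_id()]
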
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
index ca81976e9e34..38fc8cd3ea3a 100644
--- a/arch/sh/kernel/syscalls.S
+++ b/arch/sh/kernel/syscalls.S
@@ -319,15 +319,15 @@ ENTRY(sys_call_table)
319 .long sys_mq_getsetattr 319 .long sys_mq_getsetattr
320 .long sys_kexec_load 320 .long sys_kexec_load
321 .long sys_waitid 321 .long sys_waitid
322 .long sys_ni_syscall /* 285 */ 322 .long sys_add_key /* 285 */
323 .long sys_add_key
324 .long sys_request_key 323 .long sys_request_key
325 .long sys_keyctl 324 .long sys_keyctl
326 .long sys_ioprio_set 325 .long sys_ioprio_set
327 .long sys_ioprio_get /* 290 */ 326 .long sys_ioprio_get
328 .long sys_inotify_init 327 .long sys_inotify_init /* 290 */
329 .long sys_inotify_add_watch 328 .long sys_inotify_add_watch
330 .long sys_inotify_rm_watch 329 .long sys_inotify_rm_watch
330 .long sys_ni_syscall
331 .long sys_migrate_pages 331 .long sys_migrate_pages
332 .long sys_openat /* 295 */ 332 .long sys_openat /* 295 */
333 .long sys_mkdirat 333 .long sys_mkdirat
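
The table edit removes a stray sys_ni_syscall that had shifted everything from add_key onward up by one, and reinstates the intentional hole after inotify_rm_watch, so the slot comments line up with the entries again. Corrected slots, per the annotations in this hunk (strings used so the sketch stands alone):

    static const char *fixed_slots[] = {
            [285] = "sys_add_key",          /* was sys_ni_syscall */
            [290] = "sys_inotify_init",
            [293] = "sys_ni_syscall",       /* hole reinstated here */
            [295] = "sys_openat",           /* unchanged, as the comment says */
    };
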
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index ec110157992d..e9f168f60f95 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -156,13 +156,13 @@ static inline void do_bug_verbose(struct pt_regs *regs)
156{ 156{
157} 157}
158#endif /* CONFIG_DEBUG_BUGVERBOSE */ 158#endif /* CONFIG_DEBUG_BUGVERBOSE */
159#endif /* CONFIG_BUG */
160 159
161void handle_BUG(struct pt_regs *regs) 160void handle_BUG(struct pt_regs *regs)
162{ 161{
163 do_bug_verbose(regs); 162 do_bug_verbose(regs);
164 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); 163 die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
165} 164}
165#endif /* CONFIG_BUG */
166 166
167/* 167/*
168 * handle an instruction that does an unaligned memory access by emulating the 168 * handle an instruction that does an unaligned memory access by emulating the
@@ -641,7 +641,7 @@ int is_dsp_inst(struct pt_regs *regs)
641 * Safe guard if DSP mode is already enabled or we're lacking 641 * Safe guard if DSP mode is already enabled or we're lacking
642 * the DSP altogether. 642 * the DSP altogether.
643 */ 643 */
644 if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) 644 if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
645 return 0; 645 return 0;
646 646
647 get_user(inst, ((unsigned short *) regs->pc)); 647 get_user(inst, ((unsigned short *) regs->pc));
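
Moving the #endif down scopes handle_BUG() under CONFIG_BUG together with its do_bug_verbose() helper; apparently a !CONFIG_BUG build previously still tried to emit the handler while the helper above it was compiled out. Resulting structure, condensed:

    #ifdef CONFIG_BUG
    static void do_bug_verbose(struct pt_regs *regs) { /* decode + print */ }

    void handle_BUG(struct pt_regs *regs)
    {
            do_bug_verbose(regs);
            die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
    }
    #endif /* CONFIG_BUG */
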
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 29f4ee35c6dc..6b0d28ac9241 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
20 bool 20 bool
21 select CPU_HAS_INTEVT 21 select CPU_HAS_INTEVT
22 select CPU_HAS_SR_RB 22 select CPU_HAS_SR_RB
23 select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40 23 select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
24 24
25config CPU_SH4A 25config CPU_SH4A
26 bool 26 bool
@@ -72,6 +72,7 @@ config CPU_SUBTYPE_SH7705
72config CPU_SUBTYPE_SH7706 72config CPU_SUBTYPE_SH7706
73 bool "Support SH7706 processor" 73 bool "Support SH7706 processor"
74 select CPU_SH3 74 select CPU_SH3
75 select CPU_HAS_IPR_IRQ
75 help 76 help
76 Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU. 77 Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
77 78
@@ -92,6 +93,7 @@ config CPU_SUBTYPE_SH7708
92config CPU_SUBTYPE_SH7709 93config CPU_SUBTYPE_SH7709
93 bool "Support SH7709 processor" 94 bool "Support SH7709 processor"
94 select CPU_SH3 95 select CPU_SH3
96 select CPU_HAS_IPR_IRQ
95 select CPU_HAS_PINT_IRQ 97 select CPU_HAS_PINT_IRQ
96 help 98 help
97 Select SH7709 if you have a 80 Mhz SH-3 HD6417709 CPU. 99 Select SH7709 if you have a 80 Mhz SH-3 HD6417709 CPU.
@@ -149,6 +151,7 @@ config CPU_SUBTYPE_SH7760
149 bool "Support SH7760 processor" 151 bool "Support SH7760 processor"
150 select CPU_SH4 152 select CPU_SH4
151 select CPU_HAS_INTC2_IRQ 153 select CPU_HAS_INTC2_IRQ
154 select CPU_HAS_IPR_IRQ
152 155
153config CPU_SUBTYPE_SH4_202 156config CPU_SUBTYPE_SH4_202
154 bool "Support SH4-202 processor" 157 bool "Support SH4-202 processor"
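
The CPU_HAS_PTEA select is tightened from "everything except ST40" to only the parts that really implement the extra PTE attribute register: classic SH-4 minus ST40 and minus plain SH-4A, with SH-X2 cores opting back in. The new Kconfig expression written out as a truth function (illustrative):

    static inline int cpu_has_ptea(int st40, int sh4a, int shx2)
    {
            return (!st40 && !sh4a) || shx2;
    }
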
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 909dcfa8c8c6..de6d2c9aa477 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -46,10 +46,10 @@ static int cache_seq_show(struct seq_file *file, void *iter)
46 46
47 if (cache_type == CACHE_TYPE_DCACHE) { 47 if (cache_type == CACHE_TYPE_DCACHE) {
48 base = CACHE_OC_ADDRESS_ARRAY; 48 base = CACHE_OC_ADDRESS_ARRAY;
49 cache = &cpu_data->dcache; 49 cache = &current_cpu_data.dcache;
50 } else { 50 } else {
51 base = CACHE_IC_ADDRESS_ARRAY; 51 base = CACHE_IC_ADDRESS_ARRAY;
52 cache = &cpu_data->icache; 52 cache = &current_cpu_data.icache;
53 } 53 }
54 54
55 /* 55 /*
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 838731fc608d..6d1dbec08ad4 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -44,11 +44,11 @@ void __flush_wback_region(void *start, int size)
44 44
45 for (v = begin; v < end; v+=L1_CACHE_BYTES) { 45 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
46 unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY; 46 unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
47 for (j = 0; j < cpu_data->dcache.ways; j++) { 47 for (j = 0; j < current_cpu_data.dcache.ways; j++) {
48 unsigned long data, addr, p; 48 unsigned long data, addr, p;
49 49
50 p = __pa(v); 50 p = __pa(v);
51 addr = addrstart | (v & cpu_data->dcache.entry_mask); 51 addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
52 local_irq_save(flags); 52 local_irq_save(flags);
53 data = ctrl_inl(addr); 53 data = ctrl_inl(addr);
54 54
@@ -60,7 +60,7 @@ void __flush_wback_region(void *start, int size)
60 break; 60 break;
61 } 61 }
62 local_irq_restore(flags); 62 local_irq_restore(flags);
63 addrstart += cpu_data->dcache.way_incr; 63 addrstart += current_cpu_data.dcache.way_incr;
64 } 64 }
65 } 65 }
66} 66}
@@ -85,7 +85,7 @@ void __flush_purge_region(void *start, int size)
85 85
86 data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ 86 data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
87 addr = CACHE_OC_ADDRESS_ARRAY | 87 addr = CACHE_OC_ADDRESS_ARRAY |
88 (v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC; 88 (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
89 ctrl_outl(data, addr); 89 ctrl_outl(data, addr);
90 } 90 }
91} 91}
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c989..e0cd4b7f4aeb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -54,21 +54,21 @@ static void __init emit_cache_params(void)
54 ctrl_inl(CCN_CVR), 54 ctrl_inl(CCN_CVR),
55 ctrl_inl(CCN_PRR)); 55 ctrl_inl(CCN_PRR));
56 printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n", 56 printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
57 cpu_data->icache.ways, 57 current_cpu_data.icache.ways,
58 cpu_data->icache.sets, 58 current_cpu_data.icache.sets,
59 cpu_data->icache.way_incr); 59 current_cpu_data.icache.way_incr);
60 printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", 60 printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
61 cpu_data->icache.entry_mask, 61 current_cpu_data.icache.entry_mask,
62 cpu_data->icache.alias_mask, 62 current_cpu_data.icache.alias_mask,
63 cpu_data->icache.n_aliases); 63 current_cpu_data.icache.n_aliases);
64 printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n", 64 printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
65 cpu_data->dcache.ways, 65 current_cpu_data.dcache.ways,
66 cpu_data->dcache.sets, 66 current_cpu_data.dcache.sets,
67 cpu_data->dcache.way_incr); 67 current_cpu_data.dcache.way_incr);
68 printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", 68 printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
69 cpu_data->dcache.entry_mask, 69 current_cpu_data.dcache.entry_mask,
70 cpu_data->dcache.alias_mask, 70 current_cpu_data.dcache.alias_mask,
71 cpu_data->dcache.n_aliases); 71 current_cpu_data.dcache.n_aliases);
72 72
73 if (!__flush_dcache_segment_fn) 73 if (!__flush_dcache_segment_fn)
74 panic("unknown number of cache ways\n"); 74 panic("unknown number of cache ways\n");
@@ -87,10 +87,10 @@ void __init p3_cache_init(void)
87{ 87{
88 int i; 88 int i;
89 89
90 compute_alias(&cpu_data->icache); 90 compute_alias(&current_cpu_data.icache);
91 compute_alias(&cpu_data->dcache); 91 compute_alias(&current_cpu_data.dcache);
92 92
93 switch (cpu_data->dcache.ways) { 93 switch (current_cpu_data.dcache.ways) {
94 case 1: 94 case 1:
95 __flush_dcache_segment_fn = __flush_dcache_segment_1way; 95 __flush_dcache_segment_fn = __flush_dcache_segment_1way;
96 break; 96 break;
@@ -110,7 +110,7 @@ void __init p3_cache_init(void)
110 if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL)) 110 if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
111 panic("%s failed.", __FUNCTION__); 111 panic("%s failed.", __FUNCTION__);
112 112
113 for (i = 0; i < cpu_data->dcache.n_aliases; i++) 113 for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
114 mutex_init(&p3map_mutex[i]); 114 mutex_init(&p3map_mutex[i]);
115} 115}
116 116
@@ -200,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
200 : /* no output */ 200 : /* no output */
201 : "m" (__m(v))); 201 : "m" (__m(v)));
202 202
203 index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask); 203 index = CACHE_IC_ADDRESS_ARRAY |
204 (v & current_cpu_data.icache.entry_mask);
204 205
205 local_irq_save(flags); 206 local_irq_save(flags);
206 jump_to_P2(); 207 jump_to_P2();
207 208
208 for (i = 0; i < cpu_data->icache.ways; 209 for (i = 0; i < current_cpu_data.icache.ways;
209 i++, index += cpu_data->icache.way_incr) 210 i++, index += current_cpu_data.icache.way_incr)
210 ctrl_outl(0, index); /* Clear out Valid-bit */ 211 ctrl_outl(0, index); /* Clear out Valid-bit */
211 212
212 back_to_P1(); 213 back_to_P1();
@@ -223,7 +224,7 @@ static inline void flush_cache_4096(unsigned long start,
223 * All types of SH-4 require PC to be in P2 to operate on the I-cache. 224 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
224 * Some types of SH-4 require PC to be in P2 to operate on the D-cache. 225 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
225 */ 226 */
226 if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || 227 if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
227 (start < CACHE_OC_ADDRESS_ARRAY)) 228 (start < CACHE_OC_ADDRESS_ARRAY))
228 exec_offset = 0x20000000; 229 exec_offset = 0x20000000;
229 230
@@ -236,16 +237,26 @@ static inline void flush_cache_4096(unsigned long start,
236/* 237/*
237 * Write back & invalidate the D-cache of the page. 238 * Write back & invalidate the D-cache of the page.
238 * (To avoid "alias" issues) 239 * (To avoid "alias" issues)
240 *
241 * This uses a lazy write-back on UP, which is explicitly
242 * disabled on SMP.
239 */ 243 */
240void flush_dcache_page(struct page *page) 244void flush_dcache_page(struct page *page)
241{ 245{
242 if (test_bit(PG_mapped, &page->flags)) { 246#ifndef CONFIG_SMP
247 struct address_space *mapping = page_mapping(page);
248
249 if (mapping && !mapping_mapped(mapping))
250 set_bit(PG_dcache_dirty, &page->flags);
251 else
252#endif
253 {
243 unsigned long phys = PHYSADDR(page_address(page)); 254 unsigned long phys = PHYSADDR(page_address(page));
244 unsigned long addr = CACHE_OC_ADDRESS_ARRAY; 255 unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
245 int i, n; 256 int i, n;
246 257
247 /* Loop all the D-cache */ 258 /* Loop all the D-cache */
248 n = cpu_data->dcache.n_aliases; 259 n = current_cpu_data.dcache.n_aliases;
249 for (i = 0; i < n; i++, addr += 4096) 260 for (i = 0; i < n; i++, addr += 4096)
250 flush_cache_4096(addr, phys); 261 flush_cache_4096(addr, phys);
251 } 262 }
@@ -277,7 +288,7 @@ static inline void flush_icache_all(void)
277 288
278void flush_dcache_all(void) 289void flush_dcache_all(void)
279{ 290{
280 (*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size); 291 (*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
281 wmb(); 292 wmb();
282} 293}
283 294
@@ -291,8 +302,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
291 unsigned long end) 302 unsigned long end)
292{ 303{
293 unsigned long d = 0, p = start & PAGE_MASK; 304 unsigned long d = 0, p = start & PAGE_MASK;
294 unsigned long alias_mask = cpu_data->dcache.alias_mask; 305 unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
295 unsigned long n_aliases = cpu_data->dcache.n_aliases; 306 unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
296 unsigned long select_bit; 307 unsigned long select_bit;
297 unsigned long all_aliases_mask; 308 unsigned long all_aliases_mask;
298 unsigned long addr_offset; 309 unsigned long addr_offset;
@@ -379,7 +390,7 @@ void flush_cache_mm(struct mm_struct *mm)
379 * If cache is only 4k-per-way, there are never any 'aliases'. Since 390 * If cache is only 4k-per-way, there are never any 'aliases'. Since
380 * the cache is physically tagged, the data can just be left in there. 391 * the cache is physically tagged, the data can just be left in there.
381 */ 392 */
382 if (cpu_data->dcache.n_aliases == 0) 393 if (current_cpu_data.dcache.n_aliases == 0)
383 return; 394 return;
384 395
385 /* 396 /*
@@ -416,7 +427,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
416 unsigned long phys = pfn << PAGE_SHIFT; 427 unsigned long phys = pfn << PAGE_SHIFT;
417 unsigned int alias_mask; 428 unsigned int alias_mask;
418 429
419 alias_mask = cpu_data->dcache.alias_mask; 430 alias_mask = current_cpu_data.dcache.alias_mask;
420 431
421 /* We only need to flush D-cache when we have alias */ 432 /* We only need to flush D-cache when we have alias */
422 if ((address^phys) & alias_mask) { 433 if ((address^phys) & alias_mask) {
@@ -430,7 +441,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
430 phys); 441 phys);
431 } 442 }
432 443
433 alias_mask = cpu_data->icache.alias_mask; 444 alias_mask = current_cpu_data.icache.alias_mask;
434 if (vma->vm_flags & VM_EXEC) { 445 if (vma->vm_flags & VM_EXEC) {
435 /* 446 /*
436 * Evict entries from the portion of the cache from which code 447 * Evict entries from the portion of the cache from which code
@@ -462,7 +473,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
462 * If cache is only 4k-per-way, there are never any 'aliases'. Since 473 * If cache is only 4k-per-way, there are never any 'aliases'. Since
463 * the cache is physically tagged, the data can just be left in there. 474 * the cache is physically tagged, the data can just be left in there.
464 */ 475 */
465 if (cpu_data->dcache.n_aliases == 0) 476 if (current_cpu_data.dcache.n_aliases == 0)
466 return; 477 return;
467 478
468 /* 479 /*
@@ -523,7 +534,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
523 unsigned long a, ea, p; 534 unsigned long a, ea, p;
524 unsigned long temp_pc; 535 unsigned long temp_pc;
525 536
526 dcache = &cpu_data->dcache; 537 dcache = &current_cpu_data.dcache;
527 /* Write this way for better assembly. */ 538 /* Write this way for better assembly. */
528 way_count = dcache->ways; 539 way_count = dcache->ways;
529 way_incr = dcache->way_incr; 540 way_incr = dcache->way_incr;
@@ -598,7 +609,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
598 base_addr = ((base_addr >> 16) << 16); 609 base_addr = ((base_addr >> 16) << 16);
599 base_addr |= start; 610 base_addr |= start;
600 611
601 dcache = &cpu_data->dcache; 612 dcache = &current_cpu_data.dcache;
602 linesz = dcache->linesz; 613 linesz = dcache->linesz;
603 way_incr = dcache->way_incr; 614 way_incr = dcache->way_incr;
604 way_size = dcache->way_size; 615 way_size = dcache->way_size;
@@ -640,7 +651,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
640 base_addr = ((base_addr >> 16) << 16); 651 base_addr = ((base_addr >> 16) << 16);
641 base_addr |= start; 652 base_addr |= start;
642 653
643 dcache = &cpu_data->dcache; 654 dcache = &current_cpu_data.dcache;
644 linesz = dcache->linesz; 655 linesz = dcache->linesz;
645 way_incr = dcache->way_incr; 656 way_incr = dcache->way_incr;
646 way_size = dcache->way_size; 657 way_size = dcache->way_size;
@@ -699,7 +710,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
699 base_addr = ((base_addr >> 16) << 16); 710 base_addr = ((base_addr >> 16) << 16);
700 base_addr |= start; 711 base_addr |= start;
701 712
702 dcache = &cpu_data->dcache; 713 dcache = &current_cpu_data.dcache;
703 linesz = dcache->linesz; 714 linesz = dcache->linesz;
704 way_incr = dcache->way_incr; 715 way_incr = dcache->way_incr;
705 way_size = dcache->way_size; 716 way_size = dcache->way_size;
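
The flush_dcache_page() rewrite is the heart of the series' lazy D-cache write-back: on UP, a pagecache page with no user mappings is merely tagged PG_dcache_dirty and the alias-walking flush is deferred until update_mmu_cache() (tlb-flush.c, below) actually maps it; on SMP the shortcut is compiled out since another CPU may touch the data immediately. Decision logic, condensed (the flush helper name stands in for the alias loop in the real function):

    void flush_dcache_page_lazy(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);

            if (mapping && !mapping_mapped(mapping))
                    set_bit(PG_dcache_dirty, &page->flags); /* defer */
            else
                    flush_dcache_aliases(page);             /* flush now */
    }
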
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f5..31f8deb7a158 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
3 * 3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka 4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2004 Alex Song 5 * Copyright (C) 2004 Alex Song
6 * Copyright (C) 2006 Paul Mundt
6 * 7 *
7 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 10 * for more details.
10 *
11 */ 11 */
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/mman.h> 13#include <linux/mman.h>
@@ -32,9 +32,9 @@ static inline void cache_wback_all(void)
32{ 32{
33 unsigned long ways, waysize, addrstart; 33 unsigned long ways, waysize, addrstart;
34 34
35 ways = cpu_data->dcache.ways; 35 ways = current_cpu_data.dcache.ways;
36 waysize = cpu_data->dcache.sets; 36 waysize = current_cpu_data.dcache.sets;
37 waysize <<= cpu_data->dcache.entry_shift; 37 waysize <<= current_cpu_data.dcache.entry_shift;
38 38
39 addrstart = CACHE_OC_ADDRESS_ARRAY; 39 addrstart = CACHE_OC_ADDRESS_ARRAY;
40 40
@@ -43,7 +43,7 @@ static inline void cache_wback_all(void)
43 43
44 for (addr = addrstart; 44 for (addr = addrstart;
45 addr < addrstart + waysize; 45 addr < addrstart + waysize;
46 addr += cpu_data->dcache.linesz) { 46 addr += current_cpu_data.dcache.linesz) {
47 unsigned long data; 47 unsigned long data;
48 int v = SH_CACHE_UPDATED | SH_CACHE_VALID; 48 int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
49 49
@@ -51,10 +51,9 @@ static inline void cache_wback_all(void)
51 51
52 if ((data & v) == v) 52 if ((data & v) == v)
53 ctrl_outl(data & ~v, addr); 53 ctrl_outl(data & ~v, addr);
54
55 } 54 }
56 55
57 addrstart += cpu_data->dcache.way_incr; 56 addrstart += current_cpu_data.dcache.way_incr;
58 } while (--ways); 57 } while (--ways);
59} 58}
60 59
@@ -94,9 +93,9 @@ static void __flush_dcache_page(unsigned long phys)
94 local_irq_save(flags); 93 local_irq_save(flags);
95 jump_to_P2(); 94 jump_to_P2();
96 95
97 ways = cpu_data->dcache.ways; 96 ways = current_cpu_data.dcache.ways;
98 waysize = cpu_data->dcache.sets; 97 waysize = current_cpu_data.dcache.sets;
99 waysize <<= cpu_data->dcache.entry_shift; 98 waysize <<= current_cpu_data.dcache.entry_shift;
100 99
101 addrstart = CACHE_OC_ADDRESS_ARRAY; 100 addrstart = CACHE_OC_ADDRESS_ARRAY;
102 101
@@ -105,7 +104,7 @@ static void __flush_dcache_page(unsigned long phys)
105 104
106 for (addr = addrstart; 105 for (addr = addrstart;
107 addr < addrstart + waysize; 106 addr < addrstart + waysize;
108 addr += cpu_data->dcache.linesz) { 107 addr += current_cpu_data.dcache.linesz) {
109 unsigned long data; 108 unsigned long data;
110 109
111 data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID); 110 data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
@@ -115,7 +114,7 @@ static void __flush_dcache_page(unsigned long phys)
115 } 114 }
116 } 115 }
117 116
118 addrstart += cpu_data->dcache.way_incr; 117 addrstart += current_cpu_data.dcache.way_incr;
119 } while (--ways); 118 } while (--ways);
120 119
121 back_to_P1(); 120 back_to_P1();
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
128 */ 127 */
129void flush_dcache_page(struct page *page) 128void flush_dcache_page(struct page *page)
130{ 129{
131 if (test_bit(PG_mapped, &page->flags)) 130 struct address_space *mapping = page_mapping(page);
131
132 if (mapping && !mapping_mapped(mapping))
133 set_bit(PG_dcache_dirty, &page->flags);
134 else
132 __flush_dcache_page(PHYSADDR(page_address(page))); 135 __flush_dcache_page(PHYSADDR(page_address(page)));
133} 136}
134 137
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 716ebf568af2..fa5d7f0b9f18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/mmu_context.h> 19#include <asm/mmu_context.h>
20#include <asm/tlbflush.h>
20#include <asm/kgdb.h> 21#include <asm/kgdb.h>
21 22
22extern void die(const char *,struct pt_regs *,long); 23extern void die(const char *,struct pt_regs *,long);
@@ -224,3 +225,89 @@ do_sigbus:
224 if (!user_mode(regs)) 225 if (!user_mode(regs))
225 goto no_context; 226 goto no_context;
226} 227}
228
229#ifdef CONFIG_SH_STORE_QUEUES
230/*
231 * This is a special case for the SH-4 store queues, as pages for this
232 * space still need to be faulted in before it's possible to flush the
233 * store queue cache for writeout to the remapped region.
234 */
235#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
236#else
237#define P3_ADDR_MAX P4SEG
238#endif
239
240/*
241 * Called with interrupts disabled.
242 */
243asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
244 unsigned long writeaccess,
245 unsigned long address)
246{
247 pgd_t *pgd;
248 pud_t *pud;
249 pmd_t *pmd;
250 pte_t *pte;
251 pte_t entry;
252 struct mm_struct *mm = current->mm;
253 spinlock_t *ptl;
254 int ret = 1;
255
256#ifdef CONFIG_SH_KGDB
257 if (kgdb_nofault && kgdb_bus_err_hook)
258 kgdb_bus_err_hook();
259#endif
260
261 /*
262 * We don't take page faults for P1, P2, and parts of P4, these
263 * are always mapped, whether it be due to legacy behaviour in
264 * 29-bit mode, or due to PMB configuration in 32-bit mode.
265 */
266 if (address >= P3SEG && address < P3_ADDR_MAX) {
267 pgd = pgd_offset_k(address);
268 mm = NULL;
269 } else {
270 if (unlikely(address >= TASK_SIZE || !mm))
271 return 1;
272
273 pgd = pgd_offset(mm, address);
274 }
275
276 pud = pud_offset(pgd, address);
277 if (pud_none_or_clear_bad(pud))
278 return 1;
279 pmd = pmd_offset(pud, address);
280 if (pmd_none_or_clear_bad(pmd))
281 return 1;
282
283 if (mm)
284 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
285 else
286 pte = pte_offset_kernel(pmd, address);
287
288 entry = *pte;
289 if (unlikely(pte_none(entry) || pte_not_present(entry)))
290 goto unlock;
291 if (unlikely(writeaccess && !pte_write(entry)))
292 goto unlock;
293
294 if (writeaccess)
295 entry = pte_mkdirty(entry);
296 entry = pte_mkyoung(entry);
297
298#ifdef CONFIG_CPU_SH4
299 /*
300 * ITLB is not affected by "ldtlb" instruction.
301 * So, we need to flush the entry by ourselves.
302 */
303 local_flush_tlb_one(get_asid(), address & PAGE_MASK);
304#endif
305
306 set_pte(pte, entry);
307 update_mmu_cache(NULL, address, entry);
308 ret = 0;
309unlock:
310 if (mm)
311 pte_unmap_unlock(pte, ptl);
312 return ret;
313}
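
__do_page_fault() is a fast TLB-refill path run with interrupts disabled: it walks pgd/pud/pmd/pte for either the kernel range (P3 plus the store-queue window) or the current mm, returns 1 for anything that needs real fault handling, and otherwise marks the PTE young/dirty, knocks out the stale ITLB entry on SH-4, and reloads the TLB. Assumed call-site contract (the wrapper below is hypothetical):

    asmlinkage void tlb_miss_entry(struct pt_regs *regs,
                                   unsigned long writeaccess,
                                   unsigned long address)
    {
            if (__do_page_fault(regs, writeaccess, address))
                    do_page_fault(regs, writeaccess, address); /* slow path */
    }
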
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf0c263cb6fd..ae957a932375 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40pgd_t swapper_pg_dir[PTRS_PER_PGD]; 40pgd_t swapper_pg_dir[PTRS_PER_PGD];
41 41
42/*
43 * Cache of MMU context last used.
44 */
45unsigned long mmu_context_cache = NO_CONTEXT;
46
47#ifdef CONFIG_MMU 42#ifdef CONFIG_MMU
48/* It'd be good if these lines were in the standard header file. */ 43/* It'd be good if these lines were in the standard header file. */
49#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) 44#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
@@ -111,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
111 106
112 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); 107 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
113 108
114 __flush_tlb_page(get_asid(), addr); 109 flush_tlb_one(get_asid(), addr);
115} 110}
116 111
117/* 112/*
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 90b494a0cf45..be03d74e99cb 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -45,12 +45,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
45 return NULL; 45 return NULL;
46 46
47 /* 47 /*
48 * Don't remap the low PCI/ISA area, it's always mapped..
49 */
50 if (phys_addr >= 0xA0000 && last_addr < 0x100000)
51 return (void __iomem *)phys_to_virt(phys_addr);
52
53 /*
54 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is 48 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
55 * mapped at the end of the address space (typically 0xfd000000) 49 * mapped at the end of the address space (typically 0xfd000000)
56 * in a non-translatable area, so mapping through page tables for 50 * in a non-translatable area, so mapping through page tables for
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f936..969efeceb928 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -13,7 +13,7 @@
13 13
14extern struct mutex p3map_mutex[]; 14extern struct mutex p3map_mutex[];
15 15
16#define CACHE_ALIAS (cpu_data->dcache.alias_mask) 16#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
17 17
18/* 18/*
19 * clear_user_page 19 * clear_user_page
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
23 */ 23 */
24void clear_user_page(void *to, unsigned long address, struct page *page) 24void clear_user_page(void *to, unsigned long address, struct page *page)
25{ 25{
26 __set_bit(PG_mapped, &page->flags);
27 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) 26 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
28 clear_page(to); 27 clear_page(to);
29 else { 28 else {
@@ -40,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
40 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); 39 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
41 set_pte(pte, entry); 40 set_pte(pte, entry);
42 local_irq_save(flags); 41 local_irq_save(flags);
43 __flush_tlb_page(get_asid(), p3_addr); 42 flush_tlb_one(get_asid(), p3_addr);
44 local_irq_restore(flags); 43 local_irq_restore(flags);
45 update_mmu_cache(NULL, p3_addr, entry); 44 update_mmu_cache(NULL, p3_addr, entry);
46 __clear_user_page((void *)p3_addr, to); 45 __clear_user_page((void *)p3_addr, to);
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
59void copy_user_page(void *to, void *from, unsigned long address, 58void copy_user_page(void *to, void *from, unsigned long address,
60 struct page *page) 59 struct page *page)
61{ 60{
62 __set_bit(PG_mapped, &page->flags);
63 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) 61 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
64 copy_page(to, from); 62 copy_page(to, from);
65 else { 63 else {
@@ -76,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
76 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); 74 mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
77 set_pte(pte, entry); 75 set_pte(pte, entry);
78 local_irq_save(flags); 76 local_irq_save(flags);
79 __flush_tlb_page(get_asid(), p3_addr); 77 flush_tlb_one(get_asid(), p3_addr);
80 local_irq_restore(flags); 78 local_irq_restore(flags);
81 update_mmu_cache(NULL, p3_addr, entry); 79 update_mmu_cache(NULL, p3_addr, entry);
82 __copy_user_page((void *)p3_addr, from, to); 80 __copy_user_page((void *)p3_addr, from, to);
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
84 mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); 82 mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
85 } 83 }
86} 84}
87
88/*
89 * For SH-4, we have our own implementation for ptep_get_and_clear
90 */
91inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
92{
93 pte_t pte = *ptep;
94
95 pte_clear(mm, addr, ptep);
96 if (!pte_not_present(pte)) {
97 unsigned long pfn = pte_pfn(pte);
98 if (pfn_valid(pfn)) {
99 struct page *page = pfn_to_page(pfn);
100 struct address_space *mapping = page_mapping(page);
101 if (!mapping || !mapping_writably_mapped(mapping))
102 __clear_bit(PG_mapped, &page->flags);
103 }
104 }
105 return pte;
106}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cbc..887ab9d18ccd 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 *
11 */ 10 */
12
13#include <linux/init.h> 11#include <linux/init.h>
14#include <linux/mman.h> 12#include <linux/mman.h>
15#include <linux/mm.h> 13#include <linux/mm.h>
@@ -45,13 +43,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
45 43
46 p = __pa(p1_begin); 44 p = __pa(p1_begin);
47 45
48 ways = cpu_data->dcache.ways; 46 ways = current_cpu_data.dcache.ways;
49 addr = CACHE_OC_ADDRESS_ARRAY; 47 addr = CACHE_OC_ADDRESS_ARRAY;
50 48
51 do { 49 do {
52 unsigned long data; 50 unsigned long data;
53 51
54 addr |= (v & cpu_data->dcache.entry_mask); 52 addr |= (v & current_cpu_data.dcache.entry_mask);
55 53
56 data = ctrl_inl(addr); 54 data = ctrl_inl(addr);
57 if ((data & CACHE_PHYSADDR_MASK) == 55 if ((data & CACHE_PHYSADDR_MASK) ==
@@ -60,7 +58,7 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
60 ctrl_outl(data, addr); 58 ctrl_outl(data, addr);
61 } 59 }
62 60
63 addr += cpu_data->dcache.way_incr; 61 addr += current_cpu_data.dcache.way_incr;
64 } while (--ways); 62 } while (--ways);
65 63
66 p1_begin += L1_CACHE_BYTES; 64 p1_begin += L1_CACHE_BYTES;
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
76{ 74{
77 struct page *page = virt_to_page(to); 75 struct page *page = virt_to_page(to);
78 76
79 __set_bit(PG_mapped, &page->flags);
80 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { 77 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
81 clear_page(to); 78 clear_page(to);
82 __flush_wback_region(to, PAGE_SIZE); 79 __flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
95 * @from: P1 address 92 * @from: P1 address
96 * @address: U0 address to be mapped 93 * @address: U0 address to be mapped
97 */ 94 */
98void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) 95void copy_user_page(void *to, void *from, unsigned long address,
96 struct page *pg)
99{ 97{
100 struct page *page = virt_to_page(to); 98 struct page *page = virt_to_page(to);
101 99
102
103 __set_bit(PG_mapped, &page->flags);
104 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { 100 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
105 copy_page(to, from); 101 copy_page(to, from);
106 __flush_wback_region(to, PAGE_SIZE); 102 __flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
112 __flush_wback_region(to, PAGE_SIZE); 108 __flush_wback_region(to, PAGE_SIZE);
113 } 109 }
114} 110}
115
116/*
117 * For SH7705, we have our own implementation for ptep_get_and_clear
118 * Copied from pg-sh4.c
119 */
120inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
121{
122 pte_t pte = *ptep;
123
124 pte_clear(mm, addr, ptep);
125 if (!pte_not_present(pte)) {
126 unsigned long pfn = pte_pfn(pte);
127 if (pfn_valid(pfn)) {
128 struct page *page = pfn_to_page(pfn);
129 struct address_space *mapping = page_mapping(page);
130 if (!mapping || !mapping_writably_mapped(mapping))
131 __clear_bit(PG_mapped, &page->flags);
132 }
133 }
134
135 return pte;
136}
137
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..d2f7b4a2eb05 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,24 +2,28 @@
2 * TLB flushing operations for SH with an MMU. 2 * TLB flushing operations for SH with an MMU.
3 * 3 *
4 * Copyright (C) 1999 Niibe Yutaka 4 * Copyright (C) 1999 Niibe Yutaka
5 * Copyright (C) 2003 Paul Mundt 5 * Copyright (C) 2003 - 2006 Paul Mundt
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 */ 10 */
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/io.h>
12#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
13#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
15#include <asm/cacheflush.h>
14 16
15void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 17void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
16{ 18{
17 if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) { 19 unsigned int cpu = smp_processor_id();
20
21 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
18 unsigned long flags; 22 unsigned long flags;
19 unsigned long asid; 23 unsigned long asid;
20 unsigned long saved_asid = MMU_NO_ASID; 24 unsigned long saved_asid = MMU_NO_ASID;
21 25
22 asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK; 26 asid = cpu_asid(cpu, vma->vm_mm);
23 page &= PAGE_MASK; 27 page &= PAGE_MASK;
24 28
25 local_irq_save(flags); 29 local_irq_save(flags);
@@ -27,33 +31,34 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
27 saved_asid = get_asid(); 31 saved_asid = get_asid();
28 set_asid(asid); 32 set_asid(asid);
29 } 33 }
30 __flush_tlb_page(asid, page); 34 local_flush_tlb_one(asid, page);
31 if (saved_asid != MMU_NO_ASID) 35 if (saved_asid != MMU_NO_ASID)
32 set_asid(saved_asid); 36 set_asid(saved_asid);
33 local_irq_restore(flags); 37 local_irq_restore(flags);
34 } 38 }
35} 39}
36 40
37void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 41void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
38 unsigned long end) 42 unsigned long end)
39{ 43{
40 struct mm_struct *mm = vma->vm_mm; 44 struct mm_struct *mm = vma->vm_mm;
45 unsigned int cpu = smp_processor_id();
41 46
42 if (mm->context.id != NO_CONTEXT) { 47 if (cpu_context(cpu, mm) != NO_CONTEXT) {
43 unsigned long flags; 48 unsigned long flags;
44 int size; 49 int size;
45 50
46 local_irq_save(flags); 51 local_irq_save(flags);
47 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 52 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
48 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ 53 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
49 mm->context.id = NO_CONTEXT; 54 cpu_context(cpu, mm) = NO_CONTEXT;
50 if (mm == current->mm) 55 if (mm == current->mm)
51 activate_context(mm); 56 activate_context(mm, cpu);
52 } else { 57 } else {
53 unsigned long asid; 58 unsigned long asid;
54 unsigned long saved_asid = MMU_NO_ASID; 59 unsigned long saved_asid = MMU_NO_ASID;
55 60
56 asid = mm->context.id & MMU_CONTEXT_ASID_MASK; 61 asid = cpu_asid(cpu, mm);
57 start &= PAGE_MASK; 62 start &= PAGE_MASK;
58 end += (PAGE_SIZE - 1); 63 end += (PAGE_SIZE - 1);
59 end &= PAGE_MASK; 64 end &= PAGE_MASK;
@@ -62,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
62 set_asid(asid); 67 set_asid(asid);
63 } 68 }
64 while (start < end) { 69 while (start < end) {
65 __flush_tlb_page(asid, start); 70 local_flush_tlb_one(asid, start);
66 start += PAGE_SIZE; 71 start += PAGE_SIZE;
67 } 72 }
68 if (saved_asid != MMU_NO_ASID) 73 if (saved_asid != MMU_NO_ASID)
@@ -72,26 +77,27 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
72 } 77 }
73} 78}
74 79
75void flush_tlb_kernel_range(unsigned long start, unsigned long end) 80void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
76{ 81{
82 unsigned int cpu = smp_processor_id();
77 unsigned long flags; 83 unsigned long flags;
78 int size; 84 int size;
79 85
80 local_irq_save(flags); 86 local_irq_save(flags);
81 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 87 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
82 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ 88 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
83 flush_tlb_all(); 89 local_flush_tlb_all();
84 } else { 90 } else {
85 unsigned long asid; 91 unsigned long asid;
86 unsigned long saved_asid = get_asid(); 92 unsigned long saved_asid = get_asid();
87 93
88 asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK; 94 asid = cpu_asid(cpu, &init_mm);
89 start &= PAGE_MASK; 95 start &= PAGE_MASK;
90 end += (PAGE_SIZE - 1); 96 end += (PAGE_SIZE - 1);
91 end &= PAGE_MASK; 97 end &= PAGE_MASK;
92 set_asid(asid); 98 set_asid(asid);
93 while (start < end) { 99 while (start < end) {
94 __flush_tlb_page(asid, start); 100 local_flush_tlb_one(asid, start);
95 start += PAGE_SIZE; 101 start += PAGE_SIZE;
96 } 102 }
97 set_asid(saved_asid); 103 set_asid(saved_asid);
@@ -99,22 +105,24 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
99 local_irq_restore(flags); 105 local_irq_restore(flags);
100} 106}
101 107
102void flush_tlb_mm(struct mm_struct *mm) 108void local_flush_tlb_mm(struct mm_struct *mm)
103{ 109{
110 unsigned int cpu = smp_processor_id();
111
104 /* Invalidate all TLB of this process. */ 112 /* Invalidate all TLB of this process. */
105 /* Instead of invalidating each TLB, we get new MMU context. */ 113 /* Instead of invalidating each TLB, we get new MMU context. */
106 if (mm->context.id != NO_CONTEXT) { 114 if (cpu_context(cpu, mm) != NO_CONTEXT) {
107 unsigned long flags; 115 unsigned long flags;
108 116
109 local_irq_save(flags); 117 local_irq_save(flags);
110 mm->context.id = NO_CONTEXT; 118 cpu_context(cpu, mm) = NO_CONTEXT;
111 if (mm == current->mm) 119 if (mm == current->mm)
112 activate_context(mm); 120 activate_context(mm, cpu);
113 local_irq_restore(flags); 121 local_irq_restore(flags);
114 } 122 }
115} 123}
116 124
117void flush_tlb_all(void) 125void local_flush_tlb_all(void)
118{ 126{
119 unsigned long flags, status; 127 unsigned long flags, status;
120 128
@@ -132,3 +140,54 @@ void flush_tlb_all(void)
132 ctrl_barrier(); 140 ctrl_barrier();
133 local_irq_restore(flags); 141 local_irq_restore(flags);
134} 142}
143
144void update_mmu_cache(struct vm_area_struct *vma,
145 unsigned long address, pte_t pte)
146{
147 unsigned long flags;
148 unsigned long pteval;
149 unsigned long vpn;
150 struct page *page;
151 unsigned long pfn = pte_pfn(pte);
152 struct address_space *mapping;
153
154 if (!pfn_valid(pfn))
155 return;
156
157 page = pfn_to_page(pfn);
158 mapping = page_mapping(page);
159 if (mapping) {
160 unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
161 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
162
163 if (dirty)
164 __flush_wback_region((void *)P1SEGADDR(phys),
165 PAGE_SIZE);
166 }
167
168 local_irq_save(flags);
169
170 /* Set PTEH register */
171 vpn = (address & MMU_VPN_MASK) | get_asid();
172 ctrl_outl(vpn, MMU_PTEH);
173
174 pteval = pte_val(pte);
175
176#ifdef CONFIG_CPU_HAS_PTEA
177 /* Set PTEA register */
178 /* TODO: make this look less hacky */
179 ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
180#endif
181
182 /* Set PTEL register */
183 pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
184#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
185 pteval |= _PAGE_WT;
186#endif
187 /* conveniently, we want all the software flags to be 0 anyway */
188 ctrl_outl(pteval, MMU_PTEL);
189
190 /* Load the TLB */
191 asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
192 local_irq_restore(flags);
193}
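
All the flush primitives gain local_ prefixes and consult cpu_context()/cpu_asid() for the running CPU, groundwork for SMP where cross-CPU shootdown wrappers will own the old names; update_mmu_cache() also lands here, and is where a PG_dcache_dirty page finally gets written back before its TLB entry is loaded. On UP the old names can simply alias the new ones (assumed glue, in the usual tlbflush.h style):

    /* assumed UP mapping; SMP would route these through IPIs instead */
    #define flush_tlb_page(vma, page)  local_flush_tlb_page(vma, page)
    #define flush_tlb_mm(mm)           local_flush_tlb_mm(mm)
    #define flush_tlb_all()            local_flush_tlb_all()
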
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index e55cfea01092..1ccca7c0532e 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -13,39 +13,33 @@
13/* 13/*
14 * Nothing too terribly exciting here .. 14 * Nothing too terribly exciting here ..
15 */ 15 */
16 16void local_flush_tlb_all(void)
17void flush_tlb(void)
18{
19 BUG();
20}
21
22void flush_tlb_all(void)
23{ 17{
24 BUG(); 18 BUG();
25} 19}
26 20
27void flush_tlb_mm(struct mm_struct *mm) 21void local_flush_tlb_mm(struct mm_struct *mm)
28{ 22{
29 BUG(); 23 BUG();
30} 24}
31 25
32void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 26void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
33 unsigned long end) 27 unsigned long end)
34{ 28{
35 BUG(); 29 BUG();
36} 30}
37 31
38void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 32void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
39{ 33{
40 BUG(); 34 BUG();
41} 35}
42 36
43void __flush_tlb_page(unsigned long asid, unsigned long page) 37void local_flush_tlb_one(unsigned long asid, unsigned long page)
44{ 38{
45 BUG(); 39 BUG();
46} 40}
47 41
48void flush_tlb_kernel_range(unsigned long start, unsigned long end) 42void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
49{ 43{
50 BUG(); 44 BUG();
51} 45}
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
55{ 49{
56 BUG(); 50 BUG();
57} 51}
58
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e082..e5e76eb7ee09 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,71 +8,11 @@
8 * 8 *
9 * Released under the terms of the GNU GPL v2.0. 9 * Released under the terms of the GNU GPL v2.0.
10 */ 10 */
11#include <linux/signal.h> 11#include <linux/io.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/ptrace.h>
18#include <linux/mman.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/interrupt.h>
23
24#include <asm/system.h> 12#include <asm/system.h>
25#include <asm/io.h>
26#include <asm/uaccess.h>
27#include <asm/pgalloc.h>
28#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
29#include <asm/cacheflush.h>
30 14
31void update_mmu_cache(struct vm_area_struct * vma, 15void local_flush_tlb_one(unsigned long asid, unsigned long page)
32 unsigned long address, pte_t pte)
33{
34 unsigned long flags;
35 unsigned long pteval;
36 unsigned long vpn;
37
38 /* Ptrace may call this routine. */
39 if (vma && current->active_mm != vma->vm_mm)
40 return;
41
42#if defined(CONFIG_SH7705_CACHE_32KB)
43 {
44 struct page *page = pte_page(pte);
45 unsigned long pfn = pte_pfn(pte);
46
47 if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
48 unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
49
50 __flush_wback_region((void *)P1SEGADDR(phys),
51 PAGE_SIZE);
52 __set_bit(PG_mapped, &page->flags);
53 }
54 }
55#endif
56
57 local_irq_save(flags);
58
59 /* Set PTEH register */
60 vpn = (address & MMU_VPN_MASK) | get_asid();
61 ctrl_outl(vpn, MMU_PTEH);
62
63 pteval = pte_val(pte);
64
65 /* Set PTEL register */
66 pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
67 /* conveniently, we want all the software flags to be 0 anyway */
68 ctrl_outl(pteval, MMU_PTEL);
69
70 /* Load the TLB */
71 asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
72 local_irq_restore(flags);
73}
74
75void __flush_tlb_page(unsigned long asid, unsigned long page)
76{ 16{
77 unsigned long addr, data; 17 unsigned long addr, data;
78 int i, ways = MMU_NTLB_WAYS; 18 int i, ways = MMU_NTLB_WAYS;
@@ -86,7 +26,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
86 addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000); 26 addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
87 data = (page & 0xfffe0000) | asid; /* VALID bit is off */ 27 data = (page & 0xfffe0000) | asid; /* VALID bit is off */
88 28
89 if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) { 29 if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
90 addr |= MMU_PAGE_ASSOC_BIT; 30 addr |= MMU_PAGE_ASSOC_BIT;
91 ways = 1; /* we already know the way .. */ 31 ways = 1; /* we already know the way .. */
92 } 32 }
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
94 for (i = 0; i < ways; i++) 34 for (i = 0; i < ways; i++)
95 ctrl_outl(data, addr + (i << 8)); 35 ctrl_outl(data, addr + (i << 8));
96} 36}
97
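
The body kept for local_flush_tlb_one() shows the actual SH-3 invalidation mechanism: write the VPN with the VALID bit cleared into the TLB address array, once per way unless the CPU supports associative page lookup. A worked sketch with concrete values (the 0xf2000000 array base is the usual SH-3 mapping and is an assumption here, not something this hunk states):

    /* Sketch: drop the TLB entry for page 0x7fff2000, ASID 0x12. */
    unsigned long page = 0x7fff2000, asid = 0x12;
    unsigned long addr, data;

    addr = 0xf2000000 | (page & 0x1f000);	/* entry selector      */
    data = (page & 0xfffe0000) | asid;	/* VALID bit stays off */
    /* then: ctrl_outl(data, addr + (way << 8)) for each way      */
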
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de2..221e7095473d 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,76 +8,11 @@
8 * 8 *
9 * Released under the terms of the GNU GPL v2.0. 9 * Released under the terms of the GNU GPL v2.0.
10 */ 10 */
11#include <linux/signal.h> 11#include <linux/io.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/ptrace.h>
18#include <linux/mman.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/interrupt.h>
23
24#include <asm/system.h> 12#include <asm/system.h>
25#include <asm/io.h>
26#include <asm/uaccess.h>
27#include <asm/pgalloc.h>
28#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
29#include <asm/cacheflush.h>
30 14
31void update_mmu_cache(struct vm_area_struct * vma, 15void local_flush_tlb_one(unsigned long asid, unsigned long page)
32 unsigned long address, pte_t pte)
33{
34 unsigned long flags;
35 unsigned long pteval;
36 unsigned long vpn;
37 struct page *page;
38 unsigned long pfn;
39
40 /* Ptrace may call this routine. */
41 if (vma && current->active_mm != vma->vm_mm)
42 return;
43
44 pfn = pte_pfn(pte);
45 if (pfn_valid(pfn)) {
46 page = pfn_to_page(pfn);
47 if (!test_bit(PG_mapped, &page->flags)) {
48 unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
49 __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
50 __set_bit(PG_mapped, &page->flags);
51 }
52 }
53
54 local_irq_save(flags);
55
56 /* Set PTEH register */
57 vpn = (address & MMU_VPN_MASK) | get_asid();
58 ctrl_outl(vpn, MMU_PTEH);
59
60 pteval = pte_val(pte);
61
62 /* Set PTEA register */
63 if (cpu_data->flags & CPU_HAS_PTEA)
64 /* TODO: make this look less hacky */
65 ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
66
67 /* Set PTEL register */
68 pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
69#ifdef CONFIG_SH_WRITETHROUGH
70 pteval |= _PAGE_WT;
71#endif
72 /* conveniently, we want all the software flags to be 0 anyway */
73 ctrl_outl(pteval, MMU_PTEL);
74
75 /* Load the TLB */
76 asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
77 local_irq_restore(flags);
78}
79
80void __flush_tlb_page(unsigned long asid, unsigned long page)
81{ 16{
82 unsigned long addr, data; 17 unsigned long addr, data;
83 18
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
93 ctrl_outl(data, addr); 28 ctrl_outl(data, addr);
94 back_to_P1(); 29 back_to_P1();
95} 30}
96
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c
index 0104e44bc76a..ebee7e24ede9 100644
--- a/arch/sh/oprofile/op_model_sh7750.c
+++ b/arch/sh/oprofile/op_model_sh7750.c
@@ -259,7 +259,7 @@ static struct oprofile_operations sh7750_perf_counter_ops = {
259 259
260int __init oprofile_arch_init(struct oprofile_operations **ops) 260int __init oprofile_arch_init(struct oprofile_operations **ops)
261{ 261{
262 if (!(cpu_data->flags & CPU_HAS_PERF_COUNTER)) 262 if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
263 return -ENODEV; 263 return -ENODEV;
264 264
265 sh7750_perf_counter_ops.cpu_type = (char *)get_cpu_subtype(); 265 sh7750_perf_counter_ops.cpu_type = (char *)get_cpu_subtype();
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 0571755e9a84..4fe0f94cbf42 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -16,7 +16,6 @@ HD64461 HD64461
16HD64465 HD64465 16HD64465 HD64465
17SATURN SH_SATURN 17SATURN SH_SATURN
18DREAMCAST SH_DREAMCAST 18DREAMCAST SH_DREAMCAST
19BIGSUR SH_BIGSUR
20MPC1211 SH_MPC1211 19MPC1211 SH_MPC1211
21SNAPGEAR SH_SECUREEDGE5410 20SNAPGEAR SH_SECUREEDGE5410
22HS7751RVOIP SH_HS7751RVOIP 21HS7751RVOIP SH_HS7751RVOIP
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 925a65240cfe..b2e1fd8e3571 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -97,20 +97,22 @@ static int write_sigio_thread(void *unused)
97 97
98static int need_poll(struct pollfds *polls, int n) 98static int need_poll(struct pollfds *polls, int n)
99{ 99{
100 if(n <= polls->size){ 100 struct pollfd *new;
101 polls->used = n; 101
102 if(n <= polls->size)
102 return 0; 103 return 0;
103 } 104
104 kfree(polls->poll); 105 new = um_kmalloc_atomic(n * sizeof(struct pollfd));
105 polls->poll = um_kmalloc_atomic(n * sizeof(struct pollfd)); 106 if(new == NULL){
106 if(polls->poll == NULL){
107 printk("need_poll : failed to allocate new pollfds\n"); 107 printk("need_poll : failed to allocate new pollfds\n");
108 polls->size = 0;
109 polls->used = 0;
110 return -ENOMEM; 108 return -ENOMEM;
111 } 109 }
110
111 memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
112 kfree(polls->poll);
113
114 polls->poll = new;
112 polls->size = n; 115 polls->size = n;
113 polls->used = n;
114 return 0; 116 return 0;
115} 117}
116 118
@@ -171,15 +173,15 @@ int add_sigio_fd(int fd)
171 goto out; 173 goto out;
172 } 174 }
173 175
174 n = current_poll.used + 1; 176 n = current_poll.used;
175 err = need_poll(&next_poll, n); 177 err = need_poll(&next_poll, n + 1);
176 if(err) 178 if(err)
177 goto out; 179 goto out;
178 180
179 for(i = 0; i < current_poll.used; i++) 181 memcpy(next_poll.poll, current_poll.poll,
180 next_poll.poll[i] = current_poll.poll[i]; 182 current_poll.used * sizeof(struct pollfd));
181 183 next_poll.poll[n] = *p;
182 next_poll.poll[n - 1] = *p; 184 next_poll.used = n + 1;
183 update_thread(); 185 update_thread();
184 out: 186 out:
185 sigio_unlock(); 187 sigio_unlock();
@@ -214,6 +216,7 @@ int ignore_sigio_fd(int fd)
214 if(p->fd != fd) 216 if(p->fd != fd)
215 next_poll.poll[n++] = *p; 217 next_poll.poll[n++] = *p;
216 } 218 }
219 next_poll.used = current_poll.used - 1;
217 220
218 update_thread(); 221 update_thread();
219 out: 222 out:
@@ -331,10 +334,9 @@ void maybe_sigio_broken(int fd, int read)
331 334
332 sigio_lock(); 335 sigio_lock();
333 err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1); 336 err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
334 if(err){ 337 if(err)
335 printk("maybe_sigio_broken - failed to add pollfd\n");
336 goto out; 338 goto out;
337 } 339
338 all_sigio_fds.poll[all_sigio_fds.used++] = 340 all_sigio_fds.poll[all_sigio_fds.used++] =
339 ((struct pollfd) { .fd = fd, 341 ((struct pollfd) { .fd = fd,
340 .events = read ? POLLIN : POLLOUT, 342 .events = read ? POLLIN : POLLOUT,
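
The need_poll() rewrite above fixes an ordering bug: the old code freed the live pollfd array before the replacement existed, so a failed allocation also destroyed the current state (and ->used had already been clobbered). The new code allocates first, copies, and only then frees. A stand-alone sketch of that allocate-copy-free pattern (plain libc here, where the UML side uses um_kmalloc_atomic):

    #include <poll.h>
    #include <stdlib.h>
    #include <string.h>

    /* Grow *buf from 'used' to 'n' entries. On failure the old
     * buffer is left untouched and still owned by the caller. */
    static int grow_pollfds(struct pollfd **buf, int used, int n)
    {
    	struct pollfd *new = malloc(n * sizeof(*new));

    	if (new == NULL)
    		return -1;	/* *buf is still valid */
    	memcpy(new, *buf, used * sizeof(*new));
    	free(*buf);
    	*buf = new;
    	return 0;
    }

The add_sigio_fd() and ignore_sigio_fd() changes then keep ->used consistent with what was actually copied, instead of setting it optimistically before the copy happened.
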
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 7982cbc3bc94..56eb14c98475 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -24,6 +24,14 @@ config X86
24 bool 24 bool
25 default y 25 default y
26 26
27config GENERIC_TIME
28 bool
29 default y
30
31config GENERIC_TIME_VSYSCALL
32 bool
33 default y
34
27config ZONE_DMA32 35config ZONE_DMA32
28 bool 36 bool
29 default y 37 default y
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index ae399458024b..bb47e86f3d02 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
9 x8664_ksyms.o i387.o syscall.o vsyscall.o \ 9 x8664_ksyms.o i387.o syscall.o vsyscall.o \
10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ 10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
11 pci-dma.o pci-nommu.o alternative.o 11 pci-dma.o pci-nommu.o alternative.o hpet.o tsc.o
12 12
13obj-$(CONFIG_STACKTRACE) += stacktrace.o 13obj-$(CONFIG_STACKTRACE) += stacktrace.o
14obj-$(CONFIG_X86_MCE) += mce.o therm_throt.o 14obj-$(CONFIG_X86_MCE) += mce.o therm_throt.o
@@ -19,7 +19,7 @@ obj-$(CONFIG_ACPI) += acpi/
19obj-$(CONFIG_X86_MSR) += msr.o 19obj-$(CONFIG_X86_MSR) += msr.o
20obj-$(CONFIG_MICROCODE) += microcode.o 20obj-$(CONFIG_MICROCODE) += microcode.o
21obj-$(CONFIG_X86_CPUID) += cpuid.o 21obj-$(CONFIG_X86_CPUID) += cpuid.o
22obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o 22obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o tsc_sync.o
23obj-y += apic.o nmi.o 23obj-y += apic.o nmi.o
24obj-y += io_apic.o mpparse.o \ 24obj-y += io_apic.o mpparse.o \
25 genapic.o genapic_cluster.o genapic_flat.o 25 genapic.o genapic_cluster.o genapic_flat.o
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 124b2d27b4ac..723417d924c0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -37,6 +37,7 @@
37#include <asm/idle.h> 37#include <asm/idle.h>
38#include <asm/proto.h> 38#include <asm/proto.h>
39#include <asm/timex.h> 39#include <asm/timex.h>
40#include <asm/hpet.h>
40#include <asm/apic.h> 41#include <asm/apic.h>
41 42
42int apic_mapped; 43int apic_mapped;
@@ -763,7 +764,7 @@ static void setup_APIC_timer(unsigned int clocks)
763 local_irq_save(flags); 764 local_irq_save(flags);
764 765
765 /* wait for irq slice */ 766 /* wait for irq slice */
766 if (vxtime.hpet_address && hpet_use_timer) { 767 if (hpet_address && hpet_use_timer) {
767 int trigger = hpet_readl(HPET_T0_CMP); 768 int trigger = hpet_readl(HPET_T0_CMP);
768 while (hpet_readl(HPET_COUNTER) >= trigger) 769 while (hpet_readl(HPET_COUNTER) >= trigger)
769 /* do nothing */ ; 770 /* do nothing */ ;
@@ -785,7 +786,7 @@ static void setup_APIC_timer(unsigned int clocks)
785 /* Turn off PIT interrupt if we use APIC timer as main timer. 786 /* Turn off PIT interrupt if we use APIC timer as main timer.
786 Only works with the PM timer right now 787 Only works with the PM timer right now
787 TBD fix it for HPET too. */ 788 TBD fix it for HPET too. */
788 if (vxtime.mode == VXTIME_PMTMR && 789 if ((pmtmr_ioport != 0) &&
789 smp_processor_id() == boot_cpu_id && 790 smp_processor_id() == boot_cpu_id &&
790 apic_runs_main_timer == 1 && 791 apic_runs_main_timer == 1 &&
791 !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) { 792 !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index bd30d138113f..8047ea8c2ab2 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -53,7 +53,9 @@ static void nvidia_bugs(void)
53 return; 53 return;
54 54
55 nvidia_hpet_detected = 0; 55 nvidia_hpet_detected = 0;
56 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check); 56 if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check))
57 return;
58
57 if (nvidia_hpet_detected == 0) { 59 if (nvidia_hpet_detected == 0) {
58 acpi_skip_timer_override = 1; 60 acpi_skip_timer_override = 1;
59 printk(KERN_INFO "Nvidia board " 61 printk(KERN_INFO "Nvidia board "
diff --git a/arch/i386/kernel/time_hpet.c b/arch/x86_64/kernel/hpet.c
index 1e4702dfcd01..65a0edd71a17 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -1,224 +1,138 @@
1/*
2 * linux/arch/i386/kernel/time_hpet.c
3 * This code largely copied from arch/x86_64/kernel/time.c
4 * See that file for credits.
5 *
6 * 2003-06-30 Venkatesh Pallipadi - Additional changes for HPET support
7 */
8
9#include <linux/errno.h>
10#include <linux/kernel.h> 1#include <linux/kernel.h>
11#include <linux/param.h> 2#include <linux/sched.h>
12#include <linux/string.h>
13#include <linux/init.h> 3#include <linux/init.h>
14#include <linux/smp.h> 4#include <linux/mc146818rtc.h>
5#include <linux/time.h>
6#include <linux/clocksource.h>
7#include <linux/ioport.h>
8#include <linux/acpi.h>
9#include <linux/hpet.h>
10#include <asm/pgtable.h>
11#include <asm/vsyscall.h>
12#include <asm/timex.h>
13#include <asm/hpet.h>
15 14
16#include <asm/timer.h> 15int nohpet __initdata;
17#include <asm/fixmap.h>
18#include <asm/apic.h>
19 16
20#include <linux/timex.h> 17unsigned long hpet_address;
18unsigned long hpet_period; /* fsecs / HPET clock */
19unsigned long hpet_tick; /* HPET clocks / interrupt */
21 20
22#include <asm/hpet.h> 21int hpet_use_timer; /* Use counter of hpet for time keeping,
23#include <linux/hpet.h> 22 * otherwise PIT
23 */
24 24
25static unsigned long hpet_period; /* fsecs / HPET clock */ 25#ifdef CONFIG_HPET
26unsigned long hpet_tick; /* hpet clks count per tick */ 26static __init int late_hpet_init(void)
27unsigned long hpet_address; /* hpet memory map physical address */ 27{
28int hpet_use_timer; 28 struct hpet_data hd;
29 unsigned int ntimer;
29 30
30static int use_hpet; /* can be used for runtime check of hpet */ 31 if (!hpet_address)
31static int boot_hpet_disable; /* boottime override for HPET timer */ 32 return 0;
32static void __iomem * hpet_virt_address; /* hpet kernel virtual address */
33 33
34#define FSEC_TO_USEC (1000000000UL) 34 memset(&hd, 0, sizeof(hd));
35 35
36int hpet_readl(unsigned long a) 36 ntimer = hpet_readl(HPET_ID);
37{ 37 ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
38 return readl(hpet_virt_address + a); 38 ntimer++;
39}
40 39
41static void hpet_writel(unsigned long d, unsigned long a) 40 /*
42{ 41 * Register with driver.
 43	writel(d, hpet_virt_address + a);				  42	 * Timer0 and Timer1 are used by the platform.
44} 43 */
44 hd.hd_phys_address = hpet_address;
45 hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
46 hd.hd_nirqs = ntimer;
47 hd.hd_flags = HPET_DATA_PLATFORM;
48 hpet_reserve_timer(&hd, 0);
49#ifdef CONFIG_HPET_EMULATE_RTC
50 hpet_reserve_timer(&hd, 1);
51#endif
52 hd.hd_irq[0] = HPET_LEGACY_8254;
53 hd.hd_irq[1] = HPET_LEGACY_RTC;
54 if (ntimer > 2) {
55 struct hpet *hpet;
56 struct hpet_timer *timer;
57 int i;
58
59 hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
60 timer = &hpet->hpet_timers[2];
61 for (i = 2; i < ntimer; timer++, i++)
62 hd.hd_irq[i] = (timer->hpet_config &
63 Tn_INT_ROUTE_CNF_MASK) >>
64 Tn_INT_ROUTE_CNF_SHIFT;
45 65
46#ifdef CONFIG_X86_LOCAL_APIC 66 }
47/*
 48 * HPET counters don't wrap around on every tick. They just change the
49 * comparator value and continue. Next tick can be caught by checking
50 * for a change in the comparator value. Used in apic.c.
51 */
52static void __devinit wait_hpet_tick(void)
53{
54 unsigned int start_cmp_val, end_cmp_val;
55 67
56 start_cmp_val = hpet_readl(HPET_T0_CMP); 68 hpet_alloc(&hd);
57 do { 69 return 0;
58 end_cmp_val = hpet_readl(HPET_T0_CMP);
59 } while (start_cmp_val == end_cmp_val);
60} 70}
71fs_initcall(late_hpet_init);
61#endif 72#endif
62 73
63static int hpet_timer_stop_set_go(unsigned long tick) 74int hpet_timer_stop_set_go(unsigned long tick)
64{ 75{
65 unsigned int cfg; 76 unsigned int cfg;
66 77
67 /* 78/*
68 * Stop the timers and reset the main counter. 79 * Stop the timers and reset the main counter.
69 */ 80 */
81
70 cfg = hpet_readl(HPET_CFG); 82 cfg = hpet_readl(HPET_CFG);
71 cfg &= ~HPET_CFG_ENABLE; 83 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
72 hpet_writel(cfg, HPET_CFG); 84 hpet_writel(cfg, HPET_CFG);
73 hpet_writel(0, HPET_COUNTER); 85 hpet_writel(0, HPET_COUNTER);
74 hpet_writel(0, HPET_COUNTER + 4); 86 hpet_writel(0, HPET_COUNTER + 4);
75 87
88/*
89 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
90 * and period also hpet_tick.
91 */
76 if (hpet_use_timer) { 92 if (hpet_use_timer) {
77 /* 93 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
78 * Set up timer 0, as periodic with first interrupt to happen at 94 HPET_TN_32BIT, HPET_T0_CFG);
79 * hpet_tick, and period also hpet_tick. 95 hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
80 */ 96 hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
81 cfg = hpet_readl(HPET_T0_CFG);
82 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
83 HPET_TN_SETVAL | HPET_TN_32BIT;
84 hpet_writel(cfg, HPET_T0_CFG);
85
86 /*
87 * The first write after writing TN_SETVAL to the config register sets
88 * the counter value, the second write sets the threshold.
89 */
90 hpet_writel(tick, HPET_T0_CMP);
91 hpet_writel(tick, HPET_T0_CMP);
92 }
93 /*
94 * Go!
95 */
96 cfg = hpet_readl(HPET_CFG);
97 if (hpet_use_timer)
98 cfg |= HPET_CFG_LEGACY; 97 cfg |= HPET_CFG_LEGACY;
98 }
99/*
100 * Go!
101 */
102
99 cfg |= HPET_CFG_ENABLE; 103 cfg |= HPET_CFG_ENABLE;
100 hpet_writel(cfg, HPET_CFG); 104 hpet_writel(cfg, HPET_CFG);
101 105
102 return 0; 106 return 0;
103} 107}
104 108
105/* 109int hpet_arch_init(void)
106 * Check whether HPET was found by ACPI boot parse. If yes setup HPET
107 * counter 0 for kernel base timer.
108 */
109int __init hpet_enable(void)
110{ 110{
111 unsigned int id; 111 unsigned int id;
112 unsigned long tick_fsec_low, tick_fsec_high; /* tick in femto sec */
113 unsigned long hpet_tick_rem;
114 112
115 if (boot_hpet_disable) 113 if (!hpet_address)
116 return -1; 114 return -1;
115 set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
116 __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
117
118/*
119 * Read the period, compute tick and quotient.
120 */
117 121
118 if (!hpet_address) {
119 return -1;
120 }
121 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
122 /*
123 * Read the period, compute tick and quotient.
124 */
125 id = hpet_readl(HPET_ID); 122 id = hpet_readl(HPET_ID);
126 123
127 /* 124 if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
128 * We are checking for value '1' or more in number field if
129 * CONFIG_HPET_EMULATE_RTC is set because we will need an
130 * additional timer for RTC emulation.
 131	 * However, we can otherwise make do with one timer, using
 132	 * the single HPET timer for system time.
133 */
134#ifdef CONFIG_HPET_EMULATE_RTC
135 if (!(id & HPET_ID_NUMBER)) {
136 iounmap(hpet_virt_address);
137 hpet_virt_address = NULL;
138 return -1; 125 return -1;
139 }
140#endif
141
142 126
143 hpet_period = hpet_readl(HPET_PERIOD); 127 hpet_period = hpet_readl(HPET_PERIOD);
144 if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) { 128 if (hpet_period < 100000 || hpet_period > 100000000)
145 iounmap(hpet_virt_address);
146 hpet_virt_address = NULL;
147 return -1; 129 return -1;
148 }
149 130
150 /* 131 hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
151 * 64 bit math
152 * First changing tick into fsec
153 * Then 64 bit div to find number of hpet clk per tick
154 */
155 ASM_MUL64_REG(tick_fsec_low, tick_fsec_high,
156 KERNEL_TICK_USEC, FSEC_TO_USEC);
157 ASM_DIV64_REG(hpet_tick, hpet_tick_rem,
158 hpet_period, tick_fsec_low, tick_fsec_high);
159
160 if (hpet_tick_rem > (hpet_period >> 1))
161 hpet_tick++; /* rounding the result */
162
163 hpet_use_timer = id & HPET_ID_LEGSUP;
164
165 if (hpet_timer_stop_set_go(hpet_tick)) {
166 iounmap(hpet_virt_address);
167 hpet_virt_address = NULL;
168 return -1;
169 }
170 132
171 use_hpet = 1; 133 hpet_use_timer = (id & HPET_ID_LEGSUP);
172 134
173#ifdef CONFIG_HPET 135 return hpet_timer_stop_set_go(hpet_tick);
174 {
175 struct hpet_data hd;
176 unsigned int ntimer;
177
178 memset(&hd, 0, sizeof (hd));
179
180 ntimer = hpet_readl(HPET_ID);
181 ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
182 ntimer++;
183
184 /*
185 * Register with driver.
186 * Timer0 and Timer1 is used by platform.
187 */
188 hd.hd_phys_address = hpet_address;
189 hd.hd_address = hpet_virt_address;
190 hd.hd_nirqs = ntimer;
191 hd.hd_flags = HPET_DATA_PLATFORM;
192 hpet_reserve_timer(&hd, 0);
193#ifdef CONFIG_HPET_EMULATE_RTC
194 hpet_reserve_timer(&hd, 1);
195#endif
196 hd.hd_irq[0] = HPET_LEGACY_8254;
197 hd.hd_irq[1] = HPET_LEGACY_RTC;
198 if (ntimer > 2) {
199 struct hpet __iomem *hpet;
200 struct hpet_timer __iomem *timer;
201 int i;
202
203 hpet = hpet_virt_address;
204
205 for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
206 timer++, i++)
207 hd.hd_irq[i] = (timer->hpet_config &
208 Tn_INT_ROUTE_CNF_MASK) >>
209 Tn_INT_ROUTE_CNF_SHIFT;
210
211 }
212
213 hpet_alloc(&hd);
214 }
215#endif
216
217#ifdef CONFIG_X86_LOCAL_APIC
218 if (hpet_use_timer)
219 wait_timer_tick = wait_hpet_tick;
220#endif
221 return 0;
222} 136}
223 137
224int hpet_reenable(void) 138int hpet_reenable(void)
@@ -226,28 +140,51 @@ int hpet_reenable(void)
226 return hpet_timer_stop_set_go(hpet_tick); 140 return hpet_timer_stop_set_go(hpet_tick);
227} 141}
228 142
229int is_hpet_enabled(void) 143/*
230{ 144 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
231 return use_hpet; 145 * it to the HPET timer of known frequency.
232} 146 */
233 147
234int is_hpet_capable(void) 148#define TICK_COUNT 100000000
149#define TICK_MIN 5000
150
151/*
152 * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
153 * occurs between the reads of the hpet & TSC.
154 */
155static void __init read_hpet_tsc(int *hpet, int *tsc)
235{ 156{
236 if (!boot_hpet_disable && hpet_address) 157 int tsc1, tsc2, hpet1;
237 return 1; 158
238 return 0; 159 do {
160 tsc1 = get_cycles_sync();
161 hpet1 = hpet_readl(HPET_COUNTER);
162 tsc2 = get_cycles_sync();
163 } while (tsc2 - tsc1 > TICK_MIN);
164 *hpet = hpet1;
165 *tsc = tsc2;
239} 166}
240 167
241static int __init hpet_setup(char* str) 168unsigned int __init hpet_calibrate_tsc(void)
242{ 169{
243 if (str) { 170 int tsc_start, hpet_start;
244 if (!strncmp("disable", str, 7)) 171 int tsc_now, hpet_now;
245 boot_hpet_disable = 1; 172 unsigned long flags;
246 } 173
247 return 1; 174 local_irq_save(flags);
248} 175
176 read_hpet_tsc(&hpet_start, &tsc_start);
249 177
250__setup("hpet=", hpet_setup); 178 do {
179 local_irq_disable();
180 read_hpet_tsc(&hpet_now, &tsc_now);
181 local_irq_restore(flags);
182 } while ((tsc_now - tsc_start) < TICK_COUNT &&
183 (hpet_now - hpet_start) < TICK_COUNT);
184
185 return (tsc_now - tsc_start) * 1000000000L
186 / ((hpet_now - hpet_start) * hpet_period / 1000);
187}
251 188
252#ifdef CONFIG_HPET_EMULATE_RTC 189#ifdef CONFIG_HPET_EMULATE_RTC
253/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET	 190/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET
@@ -264,7 +201,6 @@ __setup("hpet=", hpet_setup);
264 * For (3), we use interrupts at 64Hz or user specified periodic 201 * For (3), we use interrupts at 64Hz or user specified periodic
265 * frequency, whichever is higher. 202 * frequency, whichever is higher.
266 */ 203 */
267#include <linux/mc146818rtc.h>
268#include <linux/rtc.h> 204#include <linux/rtc.h>
269 205
270#define DEFAULT_RTC_INT_FREQ 64 206#define DEFAULT_RTC_INT_FREQ 64
@@ -283,6 +219,11 @@ static unsigned long PIE_count;
283static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 219static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
284static unsigned int hpet_t1_cmp; /* cached comparator register */ 220static unsigned int hpet_t1_cmp; /* cached comparator register */
285 221
222int is_hpet_enabled(void)
223{
224 return hpet_address != 0;
225}
226
286/* 227/*
287 * Timer 1 for RTC, we do not use periodic interrupt feature, 228 * Timer 1 for RTC, we do not use periodic interrupt feature,
288 * even if HPET supports periodic interrupts on Timer 1. 229 * even if HPET supports periodic interrupts on Timer 1.
@@ -367,8 +308,9 @@ static void hpet_rtc_timer_reinit(void)
367 if (PIE_on) 308 if (PIE_on)
368 PIE_count += lost_ints; 309 PIE_count += lost_ints;
369 310
370 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", 311 if (printk_ratelimit())
371 hpet_rtc_int_freq); 312 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
313 hpet_rtc_int_freq);
372 } 314 }
373} 315}
374 316
@@ -450,7 +392,7 @@ int hpet_rtc_dropped_irq(void)
450 return 1; 392 return 1;
451} 393}
452 394
453irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) 395irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
454{ 396{
455 struct rtc_time curr_time; 397 struct rtc_time curr_time;
456 unsigned long rtc_int_flag = 0; 398 unsigned long rtc_int_flag = 0;
@@ -495,3 +437,75 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
495} 437}
496#endif 438#endif
497 439
440static int __init nohpet_setup(char *s)
441{
442 nohpet = 1;
443 return 1;
444}
445
446__setup("nohpet", nohpet_setup);
447
448#define HPET_MASK 0xFFFFFFFF
449#define HPET_SHIFT 22
450
451/* FSEC = 10^-15 NSEC = 10^-9 */
452#define FSEC_PER_NSEC 1000000
453
454static void *hpet_ptr;
455
456static cycle_t read_hpet(void)
457{
458 return (cycle_t)readl(hpet_ptr);
459}
460
461static cycle_t __vsyscall_fn vread_hpet(void)
462{
463 return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
464}
465
466struct clocksource clocksource_hpet = {
467 .name = "hpet",
468 .rating = 250,
469 .read = read_hpet,
470 .mask = (cycle_t)HPET_MASK,
471 .mult = 0, /* set below */
472 .shift = HPET_SHIFT,
473 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
474 .vread = vread_hpet,
475};
476
477static int __init init_hpet_clocksource(void)
478{
479 unsigned long hpet_period;
480 void __iomem *hpet_base;
481 u64 tmp;
482
483 if (!hpet_address)
484 return -ENODEV;
485
486 /* calculate the hpet address: */
487 hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
488 hpet_ptr = hpet_base + HPET_COUNTER;
489
490 /* calculate the frequency: */
491 hpet_period = readl(hpet_base + HPET_PERIOD);
492
493 /*
494 * hpet period is in femto seconds per cycle
495 * so we need to convert this to ns/cyc units
 496	 * approximated by mult/2^shift
497 *
498 * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
499 * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
500 * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
501 * (fsec/cyc << shift)/1000000 = mult
502 * (hpet_period << shift)/FSEC_PER_NSEC = mult
503 */
504 tmp = (u64)hpet_period << HPET_SHIFT;
505 do_div(tmp, FSEC_PER_NSEC);
506 clocksource_hpet.mult = (u32)tmp;
507
508 return clocksource_register(&clocksource_hpet);
509}
510
511module_init(init_hpet_clocksource);
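
The mult/shift derivation in init_hpet_clocksource() is easy to sanity-check with numbers plugged in. For the common 14.31818 MHz HPET the period register reads about 69841279 fs, so the resulting mult should encode roughly 69.84 ns per counter tick. A small user-space check (the 69841279 reading is typical hardware behaviour, not something this patch guarantees):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t hpet_period = 69841279;	/* fs per cycle */
    	/* (hpet_period << HPET_SHIFT) / FSEC_PER_NSEC */
    	uint64_t mult = (hpet_period << 22) / 1000000;

    	/* clocksource math: ns = (cycles * mult) >> 22 */
    	printf("mult = %llu, ns/cycle = %.2f\n",
    	       (unsigned long long)mult,
    	       (double)mult / (1 << 22));
    	return 0;
    }

This prints mult = 292935555 and about 69.84 ns/cycle, matching 10^9 / 14318180.
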
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index d73c79e821f1..01e2cf0bdeb1 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -103,6 +103,7 @@ static void mask_and_ack_8259A(unsigned int);
103static struct irq_chip i8259A_chip = { 103static struct irq_chip i8259A_chip = {
104 .name = "XT-PIC", 104 .name = "XT-PIC",
105 .mask = disable_8259A_irq, 105 .mask = disable_8259A_irq,
106 .disable = disable_8259A_irq,
106 .unmask = enable_8259A_irq, 107 .unmask = enable_8259A_irq,
107 .mask_ack = mask_and_ack_8259A, 108 .mask_ack = mask_and_ack_8259A,
108}; 109};
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 566e64d966c4..950682f35766 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -810,11 +810,9 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
810 trigger == IOAPIC_LEVEL) 810 trigger == IOAPIC_LEVEL)
811 set_irq_chip_and_handler_name(irq, &ioapic_chip, 811 set_irq_chip_and_handler_name(irq, &ioapic_chip,
812 handle_fasteoi_irq, "fasteoi"); 812 handle_fasteoi_irq, "fasteoi");
813 else { 813 else
814 irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
815 set_irq_chip_and_handler_name(irq, &ioapic_chip, 814 set_irq_chip_and_handler_name(irq, &ioapic_chip,
816 handle_edge_irq, "edge"); 815 handle_edge_irq, "edge");
817 }
818} 816}
819static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq) 817static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
820{ 818{
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index 7554458dc9cb..ae8f91214f15 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -24,15 +24,6 @@
24#include <asm/msr.h> 24#include <asm/msr.h>
25#include <asm/vsyscall.h> 25#include <asm/vsyscall.h>
26 26
27/* The I/O port the PMTMR resides at.
28 * The location is detected during setup_arch(),
29 * in arch/i386/kernel/acpi/boot.c */
30u32 pmtmr_ioport __read_mostly;
31
32/* value of the Power timer at last timer interrupt */
33static u32 offset_delay;
34static u32 last_pmtmr_tick;
35
36#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */ 27#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
37 28
38static inline u32 cyc2us(u32 cycles) 29static inline u32 cyc2us(u32 cycles)
@@ -48,38 +39,6 @@ static inline u32 cyc2us(u32 cycles)
48 return (cycles >> 10); 39 return (cycles >> 10);
49} 40}
50 41
51int pmtimer_mark_offset(void)
52{
53 static int first_run = 1;
54 unsigned long tsc;
55 u32 lost;
56
57 u32 tick = inl(pmtmr_ioport);
58 u32 delta;
59
60 delta = cyc2us((tick - last_pmtmr_tick) & ACPI_PM_MASK);
61
62 last_pmtmr_tick = tick;
63 monotonic_base += delta * NSEC_PER_USEC;
64
65 delta += offset_delay;
66
67 lost = delta / (USEC_PER_SEC / HZ);
68 offset_delay = delta % (USEC_PER_SEC / HZ);
69
70 rdtscll(tsc);
71 vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000;
72
73 /* don't calculate delay for first run,
 74	 or if we've got less than a tick */
75 if (first_run || (lost < 1)) {
76 first_run = 0;
77 offset_delay = 0;
78 }
79
80 return lost - 1;
81}
82
83static unsigned pmtimer_wait_tick(void) 42static unsigned pmtimer_wait_tick(void)
84{ 43{
85 u32 a, b; 44 u32 a, b;
@@ -101,23 +60,6 @@ void pmtimer_wait(unsigned us)
101 } while (cyc2us(b - a) < us); 60 } while (cyc2us(b - a) < us);
102} 61}
103 62
104void pmtimer_resume(void)
105{
106 last_pmtmr_tick = inl(pmtmr_ioport);
107}
108
109unsigned int do_gettimeoffset_pm(void)
110{
111 u32 now, offset, delta = 0;
112
113 offset = last_pmtmr_tick;
114 now = inl(pmtmr_ioport);
115 delta = (now - offset) & ACPI_PM_MASK;
116
117 return offset_delay + cyc2us(delta);
118}
119
120
121static int __init nopmtimer_setup(char *s) 63static int __init nopmtimer_setup(char *s)
122{ 64{
123 pmtmr_ioport = 0; 65 pmtmr_ioport = 0;
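
What survives in pmtimer.c still leans on the idiom the deleted helpers used: deltas between 24-bit PM timer reads are taken modulo ACPI_PM_MASK, and cyc2us() (whose tail is visible above) approximates division by the 3.579545 MHz timer rate as a multiply by 286 and a shift by 10 (286/1024 = 0.2793 vs. 1/3.579545 = 0.27937, about 0.02% off). A compact restatement of the pattern:

    /* Wrap-safe microsecond delta between two PM timer reads.
     * The counter is 24 bits wide, so mask after subtracting.
     * Assumes the delta is at most a few ticks, as in the
     * original, so the 32-bit multiply cannot overflow. */
    #define ACPI_PM_MASK 0xFFFFFF

    static unsigned int pm_delta_us(unsigned int then, unsigned int now)
    {
    	unsigned int cycles = (now - then) & ACPI_PM_MASK;

    	return (cycles * 286) >> 10;	/* ~= cycles / 3.579545 */
    }
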
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index daf19332f0dd..35443729aad8 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -148,217 +148,6 @@ static void __cpuinit smp_store_cpu_info(int id)
148 print_cpu_info(c); 148 print_cpu_info(c);
149} 149}
150 150
151/*
152 * New Funky TSC sync algorithm borrowed from IA64.
 153 * Its main advantage is that it doesn't fully reset the TSCs and
 154 * in general looks more robust; it works better than my earlier
155 * attempts. I believe it was written by David Mosberger. Some minor
156 * adjustments for x86-64 by me -AK
157 *
158 * Original comment reproduced below.
159 *
160 * Synchronize TSC of the current (slave) CPU with the TSC of the
161 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
162 * eliminate the possibility of unaccounted-for errors (such as
163 * getting a machine check in the middle of a calibration step). The
164 * basic idea is for the slave to ask the master what itc value it has
165 * and to read its own itc before and after the master responds. Each
166 * iteration gives us three timestamps:
167 *
168 * slave master
169 *
170 * t0 ---\
171 * ---\
172 * --->
173 * tm
174 * /---
175 * /---
176 * t1 <---
177 *
178 *
179 * The goal is to adjust the slave's TSC such that tm falls exactly
180 * half-way between t0 and t1. If we achieve this, the clocks are
181 * synchronized provided the interconnect between the slave and the
182 * master is symmetric. Even if the interconnect were asymmetric, we
183 * would still know that the synchronization error is smaller than the
184 * roundtrip latency (t0 - t1).
185 *
186 * When the interconnect is quiet and symmetric, this lets us
187 * synchronize the TSC to within one or two cycles. However, we can
188 * only *guarantee* that the synchronization is accurate to within a
189 * round-trip time, which is typically in the range of several hundred
190 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
191 * are usually almost perfectly synchronized, but we shouldn't assume
192 * that the accuracy is much better than half a micro second or so.
193 *
194 * [there are other errors like the latency of RDTSC and of the
195 * WRMSR. These can also account to hundreds of cycles. So it's
196 * probably worse. It claims 153 cycles error on a dual Opteron,
197 * but I suspect the numbers are actually somewhat worse -AK]
198 */
199
200#define MASTER 0
201#define SLAVE (SMP_CACHE_BYTES/8)
202
203/* Intentionally don't use cpu_relax() while TSC synchronization
204 because we don't want to go into funky power save modi or cause
205 hypervisors to schedule us away. Going to sleep would likely affect
206 latency and low latency is the primary objective here. -AK */
207#define no_cpu_relax() barrier()
208
209static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
210static volatile __cpuinitdata unsigned long go[SLAVE + 1];
211static int notscsync __cpuinitdata;
212
213#undef DEBUG_TSC_SYNC
214
215#define NUM_ROUNDS 64 /* magic value */
216#define NUM_ITERS 5 /* likewise */
217
218/* Callback on boot CPU */
219static __cpuinit void sync_master(void *arg)
220{
221 unsigned long flags, i;
222
223 go[MASTER] = 0;
224
225 local_irq_save(flags);
226 {
227 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
228 while (!go[MASTER])
229 no_cpu_relax();
230 go[MASTER] = 0;
231 rdtscll(go[SLAVE]);
232 }
233 }
234 local_irq_restore(flags);
235}
236
237/*
238 * Return the number of cycles by which our tsc differs from the tsc
239 * on the master (time-keeper) CPU. A positive number indicates our
240 * tsc is ahead of the master, negative that it is behind.
241 */
242static inline long
243get_delta(long *rt, long *master)
244{
245 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
246 unsigned long tcenter, t0, t1, tm;
247 int i;
248
249 for (i = 0; i < NUM_ITERS; ++i) {
250 rdtscll(t0);
251 go[MASTER] = 1;
252 while (!(tm = go[SLAVE]))
253 no_cpu_relax();
254 go[SLAVE] = 0;
255 rdtscll(t1);
256
257 if (t1 - t0 < best_t1 - best_t0)
258 best_t0 = t0, best_t1 = t1, best_tm = tm;
259 }
260
261 *rt = best_t1 - best_t0;
262 *master = best_tm - best_t0;
263
264 /* average best_t0 and best_t1 without overflow: */
265 tcenter = (best_t0/2 + best_t1/2);
266 if (best_t0 % 2 + best_t1 % 2 == 2)
267 ++tcenter;
268 return tcenter - best_tm;
269}
270
271static __cpuinit void sync_tsc(unsigned int master)
272{
273 int i, done = 0;
274 long delta, adj, adjust_latency = 0;
275 unsigned long flags, rt, master_time_stamp, bound;
276#ifdef DEBUG_TSC_SYNC
277 static struct syncdebug {
278 long rt; /* roundtrip time */
279 long master; /* master's timestamp */
280 long diff; /* difference between midpoint and master's timestamp */
281 long lat; /* estimate of tsc adjustment latency */
282 } t[NUM_ROUNDS] __cpuinitdata;
283#endif
284
285 printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
286 smp_processor_id(), master);
287
288 go[MASTER] = 1;
289
 290	/* It is dangerous to broadcast IPIs while cpus are coming up,
 291	 * as they may not be ready to accept them. Since we only
 292	 * need to reach the boot cpu, direct the message to it
 293	 * and avoid the race.
294 */
295 smp_call_function_single(master, sync_master, NULL, 1, 0);
296
297 while (go[MASTER]) /* wait for master to be ready */
298 no_cpu_relax();
299
300 spin_lock_irqsave(&tsc_sync_lock, flags);
301 {
302 for (i = 0; i < NUM_ROUNDS; ++i) {
303 delta = get_delta(&rt, &master_time_stamp);
304 if (delta == 0) {
305 done = 1; /* let's lock on to this... */
306 bound = rt;
307 }
308
309 if (!done) {
310 unsigned long t;
311 if (i > 0) {
312 adjust_latency += -delta;
313 adj = -delta + adjust_latency/4;
314 } else
315 adj = -delta;
316
317 rdtscll(t);
318 wrmsrl(MSR_IA32_TSC, t + adj);
319 }
320#ifdef DEBUG_TSC_SYNC
321 t[i].rt = rt;
322 t[i].master = master_time_stamp;
323 t[i].diff = delta;
324 t[i].lat = adjust_latency/4;
325#endif
326 }
327 }
328 spin_unlock_irqrestore(&tsc_sync_lock, flags);
329
330#ifdef DEBUG_TSC_SYNC
331 for (i = 0; i < NUM_ROUNDS; ++i)
332 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
333 t[i].rt, t[i].master, t[i].diff, t[i].lat);
334#endif
335
336 printk(KERN_INFO
337 "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
338 "maxerr %lu cycles)\n",
339 smp_processor_id(), master, delta, rt);
340}
341
342static void __cpuinit tsc_sync_wait(void)
343{
344 /*
345 * When the CPU has synchronized TSCs assume the BIOS
346 * or the hardware already synced. Otherwise we could
347 * mess up a possible perfect synchronization with a
348 * not-quite-perfect algorithm.
349 */
350 if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
351 return;
352 sync_tsc(0);
353}
354
355static __init int notscsync_setup(char *s)
356{
357 notscsync = 1;
358 return 1;
359}
360__setup("notscsync", notscsync_setup);
361
362static atomic_t init_deasserted __cpuinitdata; 151static atomic_t init_deasserted __cpuinitdata;
363 152
364/* 153/*
@@ -546,6 +335,11 @@ void __cpuinit start_secondary(void)
546 /* otherwise gcc will move up the smp_processor_id before the cpu_init */ 335 /* otherwise gcc will move up the smp_processor_id before the cpu_init */
547 barrier(); 336 barrier();
548 337
338 /*
339 * Check TSC sync first:
340 */
341 check_tsc_sync_target();
342
549 Dprintk("cpu %d: setting up apic clock\n", smp_processor_id()); 343 Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
550 setup_secondary_APIC_clock(); 344 setup_secondary_APIC_clock();
551 345
@@ -565,14 +359,6 @@ void __cpuinit start_secondary(void)
565 */ 359 */
566 set_cpu_sibling_map(smp_processor_id()); 360 set_cpu_sibling_map(smp_processor_id());
567 361
568 /*
569 * Wait for TSC sync to not schedule things before.
570 * We still process interrupts, which could see an inconsistent
571 * time in that window unfortunately.
572 * Do this here because TSC sync has global unprotected state.
573 */
574 tsc_sync_wait();
575
576 /* 362 /*
577 * We need to hold call_lock, so there is no inconsistency 363 * We need to hold call_lock, so there is no inconsistency
578 * between the time smp_call_function() determines number of 364 * between the time smp_call_function() determines number of
@@ -592,6 +378,7 @@ void __cpuinit start_secondary(void)
592 cpu_set(smp_processor_id(), cpu_online_map); 378 cpu_set(smp_processor_id(), cpu_online_map);
593 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 379 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
594 spin_unlock(&vector_lock); 380 spin_unlock(&vector_lock);
381
595 unlock_ipi_call_lock(); 382 unlock_ipi_call_lock();
596 383
597 cpu_idle(); 384 cpu_idle();
@@ -1168,6 +955,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
1168 /* Unleash the CPU! */ 955 /* Unleash the CPU! */
1169 Dprintk("waiting for cpu %d\n", cpu); 956 Dprintk("waiting for cpu %d\n", cpu);
1170 957
958 /*
959 * Make sure and check TSC sync:
960 */
961 check_tsc_sync_source(cpu);
962
1171 while (!cpu_isset(cpu, cpu_online_map)) 963 while (!cpu_isset(cpu, cpu_online_map))
1172 cpu_relax(); 964 cpu_relax();
1173 965
@@ -1190,7 +982,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
1190 smp_cleanup_boot(); 982 smp_cleanup_boot();
1191 setup_ioapic_dest(); 983 setup_ioapic_dest();
1192 check_nmi_watchdog(); 984 check_nmi_watchdog();
1193 time_init_gtod();
1194} 985}
1195 986
1196#ifdef CONFIG_HOTPLUG_CPU 987#ifdef CONFIG_HOTPLUG_CPU
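
The deleted loop is replaced by the shared check_tsc_sync_source()/check_tsc_sync_target() pair built from tsc_sync.o above, but the arithmetic at the heart of the old code is worth keeping in mind when reading the replacement: with slave timestamps t0 and t1 bracketing the master's reply tm, the slave's offset is the distance from tm to the midpoint of [t0, t1], trustworthy only to within the roundtrip t1 - t0. Distilled from the removed get_delta():

    /* Midpoint estimate: > 0 means the slave TSC is ahead of the
     * master, accurate to within the roundtrip time t1 - t0. */
    static long tsc_offset(unsigned long t0, unsigned long tm,
    		       unsigned long t1)
    {
    	unsigned long tcenter = t0 / 2 + t1 / 2;	/* no overflow */

    	if ((t0 % 2) + (t1 % 2) == 2)
    		tcenter++;		/* recover the lost halves */
    	return (long)(tcenter - tm);
    }
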
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 3cc6886f1fb7..a87c51705620 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -42,10 +42,8 @@
42#include <linux/cpufreq.h> 42#include <linux/cpufreq.h>
43#include <linux/hpet.h> 43#include <linux/hpet.h>
44#include <asm/apic.h> 44#include <asm/apic.h>
45#include <asm/hpet.h>
45 46
46#ifdef CONFIG_CPU_FREQ
47static void cpufreq_delayed_get(void);
48#endif
49extern void i8254_timer_resume(void); 47extern void i8254_timer_resume(void);
50extern int using_apic_timer; 48extern int using_apic_timer;
51 49
@@ -55,128 +53,7 @@ DEFINE_SPINLOCK(rtc_lock);
55EXPORT_SYMBOL(rtc_lock); 53EXPORT_SYMBOL(rtc_lock);
56DEFINE_SPINLOCK(i8253_lock); 54DEFINE_SPINLOCK(i8253_lock);
57 55
58int nohpet __initdata = 0;
59static int notsc __initdata = 0;
60
61#define USEC_PER_TICK (USEC_PER_SEC / HZ)
62#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
63#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
64
65#define NS_SCALE 10 /* 2^10, carefully chosen */
 66#define US_SCALE	32 /* 2^32, arbitrarily chosen */
67
68unsigned int cpu_khz; /* TSC clocks / usec, not used here */
69EXPORT_SYMBOL(cpu_khz);
70static unsigned long hpet_period; /* fsecs / HPET clock */
71unsigned long hpet_tick; /* HPET clocks / interrupt */
72int hpet_use_timer; /* Use counter of hpet for time keeping, otherwise PIT */
73unsigned long vxtime_hz = PIT_TICK_RATE;
74int report_lost_ticks; /* command line option */
75unsigned long long monotonic_base;
76
77struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
78
79volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; 56volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
80struct timespec __xtime __section_xtime;
81struct timezone __sys_tz __section_sys_tz;
82
83/*
84 * do_gettimeoffset() returns microseconds since last timer interrupt was
85 * triggered by hardware. A memory read of HPET is slower than a register read
86 * of TSC, but much more reliable. It's also synchronized to the timer
87 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
88 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
89 * This is not a problem, because jiffies hasn't updated either. They are bound
90 * together by xtime_lock.
91 */
92
93static inline unsigned int do_gettimeoffset_tsc(void)
94{
95 unsigned long t;
96 unsigned long x;
97 t = get_cycles_sync();
98 if (t < vxtime.last_tsc)
99 t = vxtime.last_tsc; /* hack */
100 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
101 return x;
102}
103
104static inline unsigned int do_gettimeoffset_hpet(void)
105{
106 /* cap counter read to one tick to avoid inconsistencies */
107 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
108 return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
109}
110
111unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
112
113/*
114 * This version of gettimeofday() has microsecond resolution and better than
115 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
116 * MHz) HPET timer.
117 */
118
119void do_gettimeofday(struct timeval *tv)
120{
121 unsigned long seq;
122 unsigned int sec, usec;
123
124 do {
125 seq = read_seqbegin(&xtime_lock);
126
127 sec = xtime.tv_sec;
128 usec = xtime.tv_nsec / NSEC_PER_USEC;
129
130 /* i386 does some correction here to keep the clock
 131	   monotonic even when ntpd is fixing drift.
 132	   But that didn't work for me; the clock is non-monotonic
 133	   with ntp anyway.
134 I dropped all corrections now until a real solution can
135 be found. Note when you fix it here you need to do the same
136 in arch/x86_64/kernel/vsyscall.c and export all needed
137 variables in vmlinux.lds. -AK */
138 usec += do_gettimeoffset();
139
140 } while (read_seqretry(&xtime_lock, seq));
141
142 tv->tv_sec = sec + usec / USEC_PER_SEC;
143 tv->tv_usec = usec % USEC_PER_SEC;
144}
145
146EXPORT_SYMBOL(do_gettimeofday);
147
148/*
149 * settimeofday() first undoes the correction that gettimeofday would do
150 * on the time, and then saves it. This is ugly, but has been like this for
151 * ages already.
152 */
153
154int do_settimeofday(struct timespec *tv)
155{
156 time_t wtm_sec, sec = tv->tv_sec;
157 long wtm_nsec, nsec = tv->tv_nsec;
158
159 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
160 return -EINVAL;
161
162 write_seqlock_irq(&xtime_lock);
163
164 nsec -= do_gettimeoffset() * NSEC_PER_USEC;
165
166 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
167 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
168
169 set_normalized_timespec(&xtime, sec, nsec);
170 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
171
172 ntp_clear();
173
174 write_sequnlock_irq(&xtime_lock);
175 clock_was_set();
176 return 0;
177}
178
179EXPORT_SYMBOL(do_settimeofday);
180 57
181unsigned long profile_pc(struct pt_regs *regs) 58unsigned long profile_pc(struct pt_regs *regs)
182{ 59{
@@ -267,84 +144,9 @@ static void set_rtc_mmss(unsigned long nowtime)
267} 144}
268 145
269 146
270/* monotonic_clock(): returns # of nanoseconds passed since time_init()
271 * Note: This function is required to return accurate
272 * time even in the absence of multiple timer ticks.
273 */
274static inline unsigned long long cycles_2_ns(unsigned long long cyc);
275unsigned long long monotonic_clock(void)
276{
277 unsigned long seq;
278 u32 last_offset, this_offset, offset;
279 unsigned long long base;
280
281 if (vxtime.mode == VXTIME_HPET) {
282 do {
283 seq = read_seqbegin(&xtime_lock);
284
285 last_offset = vxtime.last;
286 base = monotonic_base;
287 this_offset = hpet_readl(HPET_COUNTER);
288 } while (read_seqretry(&xtime_lock, seq));
289 offset = (this_offset - last_offset);
290 offset *= NSEC_PER_TICK / hpet_tick;
291 } else {
292 do {
293 seq = read_seqbegin(&xtime_lock);
294
295 last_offset = vxtime.last_tsc;
296 base = monotonic_base;
297 } while (read_seqretry(&xtime_lock, seq));
298 this_offset = get_cycles_sync();
299 offset = cycles_2_ns(this_offset - last_offset);
300 }
301 return base + offset;
302}
303EXPORT_SYMBOL(monotonic_clock);
304
305static noinline void handle_lost_ticks(int lost)
306{
307 static long lost_count;
308 static int warned;
309 if (report_lost_ticks) {
310 printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
311 print_symbol("rip %s)\n", get_irq_regs()->rip);
312 }
313
314 if (lost_count == 1000 && !warned) {
315 printk(KERN_WARNING "warning: many lost ticks.\n"
 316		       KERN_WARNING "Your time source seems to be unstable or "
 317				"some driver is hogging interrupts\n");
318 print_symbol("rip %s\n", get_irq_regs()->rip);
319 if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
320 printk(KERN_WARNING "Falling back to HPET\n");
321 if (hpet_use_timer)
322 vxtime.last = hpet_readl(HPET_T0_CMP) -
323 hpet_tick;
324 else
325 vxtime.last = hpet_readl(HPET_COUNTER);
326 vxtime.mode = VXTIME_HPET;
327 do_gettimeoffset = do_gettimeoffset_hpet;
328 }
329 /* else should fall back to PIT, but code missing. */
330 warned = 1;
331 } else
332 lost_count++;
333
334#ifdef CONFIG_CPU_FREQ
 335	/* In some cases the CPU can change frequency without us noticing.
 336	   Give cpufreq a chance to catch up. */
337 if ((lost_count+1) % 25 == 0)
338 cpufreq_delayed_get();
339#endif
340}
341
342void main_timer_handler(void) 147void main_timer_handler(void)
343{ 148{
344 static unsigned long rtc_update = 0; 149 static unsigned long rtc_update = 0;
345 unsigned long tsc;
346 int delay = 0, offset = 0, lost = 0;
347
348/* 150/*
349 * Here we are in the timer irq handler. We have irqs locally disabled (so we 151 * Here we are in the timer irq handler. We have irqs locally disabled (so we
350 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running 152 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
@@ -354,72 +156,11 @@ void main_timer_handler(void)
354 156
355 write_seqlock(&xtime_lock); 157 write_seqlock(&xtime_lock);
356 158
357 if (vxtime.hpet_address)
358 offset = hpet_readl(HPET_COUNTER);
359
360 if (hpet_use_timer) {
361 /* if we're using the hpet timer functionality,
362 * we can more accurately know the counter value
 363		 * when the timer interrupt occurred.
364 */
365 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
366 delay = hpet_readl(HPET_COUNTER) - offset;
367 } else if (!pmtmr_ioport) {
368 spin_lock(&i8253_lock);
369 outb_p(0x00, 0x43);
370 delay = inb_p(0x40);
371 delay |= inb(0x40) << 8;
372 spin_unlock(&i8253_lock);
373 delay = LATCH - 1 - delay;
374 }
375
376 tsc = get_cycles_sync();
377
378 if (vxtime.mode == VXTIME_HPET) {
379 if (offset - vxtime.last > hpet_tick) {
380 lost = (offset - vxtime.last) / hpet_tick - 1;
381 }
382
383 monotonic_base +=
384 (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
385
386 vxtime.last = offset;
387#ifdef CONFIG_X86_PM_TIMER
388 } else if (vxtime.mode == VXTIME_PMTMR) {
389 lost = pmtimer_mark_offset();
390#endif
391 } else {
392 offset = (((tsc - vxtime.last_tsc) *
393 vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
394
395 if (offset < 0)
396 offset = 0;
397
398 if (offset > USEC_PER_TICK) {
399 lost = offset / USEC_PER_TICK;
400 offset %= USEC_PER_TICK;
401 }
402
403 monotonic_base += cycles_2_ns(tsc - vxtime.last_tsc);
404
405 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
406
407 if ((((tsc - vxtime.last_tsc) *
408 vxtime.tsc_quot) >> US_SCALE) < offset)
409 vxtime.last_tsc = tsc -
410 (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
411 }
412
413 if (lost > 0)
414 handle_lost_ticks(lost);
415 else
416 lost = 0;
417
418/* 159/*
419 * Do the timer stuff. 160 * Do the timer stuff.
420 */ 161 */
421 162
422 do_timer(lost + 1); 163 do_timer(1);
423#ifndef CONFIG_SMP 164#ifndef CONFIG_SMP
424 update_process_times(user_mode(get_irq_regs())); 165 update_process_times(user_mode(get_irq_regs()));
425#endif 166#endif
@@ -460,40 +201,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
460 return IRQ_HANDLED; 201 return IRQ_HANDLED;
461} 202}
462 203
463static unsigned int cyc2ns_scale __read_mostly;
464
465static inline void set_cyc2ns_scale(unsigned long cpu_khz)
466{
467 cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
468}
469
470static inline unsigned long long cycles_2_ns(unsigned long long cyc)
471{
472 return (cyc * cyc2ns_scale) >> NS_SCALE;
473}
474
475unsigned long long sched_clock(void)
476{
477 unsigned long a = 0;
478
479#if 0
480 /* Don't do a HPET read here. Using TSC always is much faster
481 and HPET may not be mapped yet when the scheduler first runs.
482 Disadvantage is a small drift between CPUs in some configurations,
483 but that should be tolerable. */
484 if (__vxtime.mode == VXTIME_HPET)
485 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
486#endif
487
488 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
 489	   which means it is not completely exact and may not be monotonic between
490 CPUs. But the errors should be too small to matter for scheduling
491 purposes. */
492
493 rdtscll(a);
494 return cycles_2_ns(a);
495}
496
497static unsigned long get_cmos_time(void) 204static unsigned long get_cmos_time(void)
498{ 205{
499 unsigned int year, mon, day, hour, min, sec; 206 unsigned int year, mon, day, hour, min, sec;
@@ -545,164 +252,6 @@ static unsigned long get_cmos_time(void)
545 return mktime(year, mon, day, hour, min, sec); 252 return mktime(year, mon, day, hour, min, sec);
546} 253}
547 254
548#ifdef CONFIG_CPU_FREQ
549
550/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
551 changes.
552
553 RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
554 not that important because current Opteron setups do not support
 555   scaling on SMP anyway.
556
557 Should fix up last_tsc too. Currently gettimeofday in the
558 first tick after the change will be slightly wrong. */
559
560#include <linux/workqueue.h>
561
562static unsigned int cpufreq_delayed_issched = 0;
563static unsigned int cpufreq_init = 0;
564static struct work_struct cpufreq_delayed_get_work;
565
566static void handle_cpufreq_delayed_get(struct work_struct *v)
567{
568 unsigned int cpu;
569 for_each_online_cpu(cpu) {
570 cpufreq_get(cpu);
571 }
572 cpufreq_delayed_issched = 0;
573}
574
575/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
576 * to verify the CPU frequency the timing core thinks the CPU is running
577 * at is still correct.
578 */
579static void cpufreq_delayed_get(void)
580{
581 static int warned;
582 if (cpufreq_init && !cpufreq_delayed_issched) {
583 cpufreq_delayed_issched = 1;
584 if (!warned) {
585 warned = 1;
586 printk(KERN_DEBUG
587 "Losing some ticks... checking if CPU frequency changed.\n");
588 }
589 schedule_work(&cpufreq_delayed_get_work);
590 }
591}
592
593static unsigned int ref_freq = 0;
594static unsigned long loops_per_jiffy_ref = 0;
595
596static unsigned long cpu_khz_ref = 0;
597
598static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
599 void *data)
600{
601 struct cpufreq_freqs *freq = data;
602 unsigned long *lpj, dummy;
603
604 if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
605 return 0;
606
607 lpj = &dummy;
608 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
609#ifdef CONFIG_SMP
610 lpj = &cpu_data[freq->cpu].loops_per_jiffy;
611#else
612 lpj = &boot_cpu_data.loops_per_jiffy;
613#endif
614
615 if (!ref_freq) {
616 ref_freq = freq->old;
617 loops_per_jiffy_ref = *lpj;
618 cpu_khz_ref = cpu_khz;
619 }
620 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
621 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
622 (val == CPUFREQ_RESUMECHANGE)) {
623 *lpj =
624 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
625
626 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
627 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
628 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
629 }
630
631 set_cyc2ns_scale(cpu_khz_ref);
632
633 return 0;
634}
635
636static struct notifier_block time_cpufreq_notifier_block = {
637 .notifier_call = time_cpufreq_notifier
638};
639
640static int __init cpufreq_tsc(void)
641{
642 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
643 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
644 CPUFREQ_TRANSITION_NOTIFIER))
645 cpufreq_init = 1;
646 return 0;
647}
648
649core_initcall(cpufreq_tsc);
650
651#endif
652
653/*
654 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
655 * it to the HPET timer of known frequency.
656 */
657
658#define TICK_COUNT 100000000
659#define TICK_MIN 5000
660#define MAX_READ_RETRIES 5
661
662/*
663 * Some platforms take periodic SMI interrupts with a 5 ms duration. Make sure
664 * none occurs between the reads of the HPET and the TSC.
665 */
666static void __init read_hpet_tsc(int *hpet, int *tsc)
667{
668 int tsc1, tsc2, hpet1, retries = 0;
669 static int msg;
670
671 do {
672 tsc1 = get_cycles_sync();
673 hpet1 = hpet_readl(HPET_COUNTER);
674 tsc2 = get_cycles_sync();
675 } while (tsc2 - tsc1 > TICK_MIN && retries++ < MAX_READ_RETRIES);
676 if (retries >= MAX_READ_RETRIES && !msg++)
677 printk(KERN_WARNING
678 "hpet.c: exceeded max retries to read HPET & TSC\n");
679 *hpet = hpet1;
680 *tsc = tsc2;
681}
682
683
684static unsigned int __init hpet_calibrate_tsc(void)
685{
686 int tsc_start, hpet_start;
687 int tsc_now, hpet_now;
688 unsigned long flags;
689
690 local_irq_save(flags);
691 local_irq_disable();
692
693 read_hpet_tsc(&hpet_start, &tsc_start);
694
695 do {
696 local_irq_disable();
697 read_hpet_tsc(&hpet_now, &tsc_now);
698 local_irq_restore(flags);
699 } while ((tsc_now - tsc_start) < TICK_COUNT &&
700 (hpet_now - hpet_start) < TICK_COUNT);
701
702 return (tsc_now - tsc_start) * 1000000000L
703 / ((hpet_now - hpet_start) * hpet_period / 1000);
704}
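The return expression in hpet_calibrate_tsc() packs a units conversion into
one line: hpet_period is in femtoseconds per HPET tick, so hpet_delta *
hpet_period / 1000 is the elapsed time in picoseconds, and tsc_delta * 10^9
divided by that yields TSC ticks per millisecond, i.e. cpu_khz. A standalone
sketch with made-up sample numbers (not from real hardware):

#include <stdio.h>

int main(void)
{
	long long hpet_period = 69841279;	/* fs per tick, ~14.318 MHz (sample) */
	long long hpet_delta = 1431818;		/* ~0.1 s worth of HPET ticks */
	long long tsc_delta = 200000000;	/* TSC ticks in the same window */

	/* same arithmetic as the return statement above */
	long long khz = tsc_delta * 1000000000LL
			/ (hpet_delta * hpet_period / 1000);
	printf("cpu_khz = %lld\n", khz);	/* ~2000000 for a 2 GHz CPU */
	return 0;
}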
705
706 255
707/* 256/*
708 * pit_calibrate_tsc() uses the speaker output (channel 2) of 257 * pit_calibrate_tsc() uses the speaker output (channel 2) of
@@ -733,124 +282,6 @@ static unsigned int __init pit_calibrate_tsc(void)
733 return (end - start) / 50; 282 return (end - start) / 50;
734} 283}
735 284
736#ifdef CONFIG_HPET
737static __init int late_hpet_init(void)
738{
739 struct hpet_data hd;
740 unsigned int ntimer;
741
742 if (!vxtime.hpet_address)
743 return 0;
744
745 memset(&hd, 0, sizeof (hd));
746
747 ntimer = hpet_readl(HPET_ID);
748 ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
749 ntimer++;
750
751 /*
752 * Register with the driver.
753 * Timer0 and Timer1 are used by the platform.
754 */
755 hd.hd_phys_address = vxtime.hpet_address;
756 hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
757 hd.hd_nirqs = ntimer;
758 hd.hd_flags = HPET_DATA_PLATFORM;
759 hpet_reserve_timer(&hd, 0);
760#ifdef CONFIG_HPET_EMULATE_RTC
761 hpet_reserve_timer(&hd, 1);
762#endif
763 hd.hd_irq[0] = HPET_LEGACY_8254;
764 hd.hd_irq[1] = HPET_LEGACY_RTC;
765 if (ntimer > 2) {
766 struct hpet *hpet;
767 struct hpet_timer *timer;
768 int i;
769
770 hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
771 timer = &hpet->hpet_timers[2];
772 for (i = 2; i < ntimer; timer++, i++)
773 hd.hd_irq[i] = (timer->hpet_config &
774 Tn_INT_ROUTE_CNF_MASK) >>
775 Tn_INT_ROUTE_CNF_SHIFT;
776
777 }
778
779 hpet_alloc(&hd);
780 return 0;
781}
782fs_initcall(late_hpet_init);
783#endif
784
785static int hpet_timer_stop_set_go(unsigned long tick)
786{
787 unsigned int cfg;
788
789/*
790 * Stop the timers and reset the main counter.
791 */
792
793 cfg = hpet_readl(HPET_CFG);
794 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
795 hpet_writel(cfg, HPET_CFG);
796 hpet_writel(0, HPET_COUNTER);
797 hpet_writel(0, HPET_COUNTER + 4);
798
799/*
800 * Set up timer 0 as periodic, with the first interrupt at hpet_tick and
801 * a period of hpet_tick as well.
802 */
803 if (hpet_use_timer) {
804 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
805 HPET_TN_32BIT, HPET_T0_CFG);
806 hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
807 hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
808 cfg |= HPET_CFG_LEGACY;
809 }
810/*
811 * Go!
812 */
813
814 cfg |= HPET_CFG_ENABLE;
815 hpet_writel(cfg, HPET_CFG);
816
817 return 0;
818}
819
820static int hpet_init(void)
821{
822 unsigned int id;
823
824 if (!vxtime.hpet_address)
825 return -1;
826 set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
827 __set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
828
829/*
830 * Read the period, compute tick and quotient.
831 */
832
833 id = hpet_readl(HPET_ID);
834
835 if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
836 return -1;
837
838 hpet_period = hpet_readl(HPET_PERIOD);
839 if (hpet_period < 100000 || hpet_period > 100000000)
840 return -1;
841
842 hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
843
844 hpet_use_timer = (id & HPET_ID_LEGSUP);
845
846 return hpet_timer_stop_set_go(hpet_tick);
847}
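The hpet_tick computation in hpet_init() is a rounded integer division:
FSEC_PER_TICK (femtoseconds per jiffy) divided by hpet_period (femtoseconds
per HPET tick), with half the divisor added first so the result rounds to
nearest. Worked standalone, assuming HZ = 1000 and a 14.318 MHz HPET:

#include <stdio.h>

int main(void)
{
	unsigned long long fsec_per_sec = 1000000000000000ULL;
	unsigned long long hz = 1000;			/* assumed HZ */
	unsigned long long hpet_period = 69841279;	/* fs per HPET tick (sample) */
	unsigned long long fsec_per_tick = fsec_per_sec / hz;

	/* rounded division, exactly as hpet_init() computes it */
	unsigned long long hpet_tick =
		(fsec_per_tick + hpet_period / 2) / hpet_period;
	printf("hpet_tick = %llu\n", hpet_tick);	/* -> 14318 ticks per jiffy */
	return 0;
}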
848
849static int hpet_reenable(void)
850{
851 return hpet_timer_stop_set_go(hpet_tick);
852}
853
854#define PIT_MODE 0x43 285#define PIT_MODE 0x43
855#define PIT_CH0 0x40 286#define PIT_CH0 0x40
856 287
@@ -878,7 +309,7 @@ void __init pit_stop_interrupt(void)
878void __init stop_timer_interrupt(void) 309void __init stop_timer_interrupt(void)
879{ 310{
880 char *name; 311 char *name;
881 if (vxtime.hpet_address) { 312 if (hpet_address) {
882 name = "HPET"; 313 name = "HPET";
883 hpet_timer_stop_set_go(0); 314 hpet_timer_stop_set_go(0);
884 } else { 315 } else {
@@ -888,12 +319,6 @@ void __init stop_timer_interrupt(void)
888 printk(KERN_INFO "timer: %s interrupt stopped.\n", name); 319 printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
889} 320}
890 321
891int __init time_setup(char *str)
892{
893 report_lost_ticks = 1;
894 return 1;
895}
896
897static struct irqaction irq0 = { 322static struct irqaction irq0 = {
898 timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL 323 timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
899}; 324};
@@ -901,124 +326,41 @@ static struct irqaction irq0 = {
901void __init time_init(void) 326void __init time_init(void)
902{ 327{
903 if (nohpet) 328 if (nohpet)
904 vxtime.hpet_address = 0; 329 hpet_address = 0;
905
906 xtime.tv_sec = get_cmos_time(); 330 xtime.tv_sec = get_cmos_time();
907 xtime.tv_nsec = 0; 331 xtime.tv_nsec = 0;
908 332
909 set_normalized_timespec(&wall_to_monotonic, 333 set_normalized_timespec(&wall_to_monotonic,
910 -xtime.tv_sec, -xtime.tv_nsec); 334 -xtime.tv_sec, -xtime.tv_nsec);
911 335
912 if (!hpet_init()) 336 if (hpet_arch_init())
913 vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period; 337 hpet_address = 0;
914 else
915 vxtime.hpet_address = 0;
916 338
917 if (hpet_use_timer) { 339 if (hpet_use_timer) {
918 /* set tick_nsec to use the proper rate for HPET */ 340 /* set tick_nsec to use the proper rate for HPET */
919 tick_nsec = TICK_NSEC_HPET; 341 tick_nsec = TICK_NSEC_HPET;
920 cpu_khz = hpet_calibrate_tsc(); 342 cpu_khz = hpet_calibrate_tsc();
921 timename = "HPET"; 343 timename = "HPET";
922#ifdef CONFIG_X86_PM_TIMER
923 } else if (pmtmr_ioport && !vxtime.hpet_address) {
924 vxtime_hz = PM_TIMER_FREQUENCY;
925 timename = "PM";
926 pit_init();
927 cpu_khz = pit_calibrate_tsc();
928#endif
929 } else { 344 } else {
930 pit_init(); 345 pit_init();
931 cpu_khz = pit_calibrate_tsc(); 346 cpu_khz = pit_calibrate_tsc();
932 timename = "PIT"; 347 timename = "PIT";
933 } 348 }
934 349
935 vxtime.mode = VXTIME_TSC;
936 vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
937 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
938 vxtime.last_tsc = get_cycles_sync();
939 set_cyc2ns_scale(cpu_khz);
940 setup_irq(0, &irq0);
941
942#ifndef CONFIG_SMP
943 time_init_gtod();
944#endif
945}
946
947/*
948 * Make an educated guess if the TSC is trustworthy and synchronized
949 * over all CPUs.
950 */
951__cpuinit int unsynchronized_tsc(void)
952{
953#ifdef CONFIG_SMP
954 if (apic_is_clustered_box())
955 return 1;
956#endif
957 /* Most Intel systems have synchronized TSCs, except for
958 multi-node systems */
959 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
960#ifdef CONFIG_ACPI
961 /* But TSC doesn't tick in C3 so don't use it there */
962 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
963 return 1;
964#endif
965 return 0;
966 }
967
968 /* Assume multi socket systems are not synchronized */
969 return num_present_cpus() > 1;
970}
971
972/*
973 * Decide what mode gettimeofday should use.
974 */
975void time_init_gtod(void)
976{
977 char *timetype;
978
979 if (unsynchronized_tsc()) 350 if (unsynchronized_tsc())
980 notsc = 1; 351 mark_tsc_unstable();
981 352
982 if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) 353 if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
983 vgetcpu_mode = VGETCPU_RDTSCP; 354 vgetcpu_mode = VGETCPU_RDTSCP;
984 else 355 else
985 vgetcpu_mode = VGETCPU_LSL; 356 vgetcpu_mode = VGETCPU_LSL;
986 357
987 if (vxtime.hpet_address && notsc) { 358 set_cyc2ns_scale(cpu_khz);
988 timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
989 if (hpet_use_timer)
990 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
991 else
992 vxtime.last = hpet_readl(HPET_COUNTER);
993 vxtime.mode = VXTIME_HPET;
994 do_gettimeoffset = do_gettimeoffset_hpet;
995#ifdef CONFIG_X86_PM_TIMER
996 /* Using PM for gettimeofday is quite slow, but we have no other
997 choice because the TSC is too unreliable on some systems. */
998 } else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
999 timetype = "PM";
1000 do_gettimeoffset = do_gettimeoffset_pm;
1001 vxtime.mode = VXTIME_PMTMR;
1002 sysctl_vsyscall = 0;
1003 printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
1004#endif
1005 } else {
1006 timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
1007 vxtime.mode = VXTIME_TSC;
1008 }
1009
1010 printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n",
1011 vxtime_hz / 1000000, vxtime_hz % 1000000, timename, timetype);
1012 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", 359 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
1013 cpu_khz / 1000, cpu_khz % 1000); 360 cpu_khz / 1000, cpu_khz % 1000);
1014 vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz; 361 setup_irq(0, &irq0);
1015 vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
1016 vxtime.last_tsc = get_cycles_sync();
1017
1018 set_cyc2ns_scale(cpu_khz);
1019} 362}
1020 363
1021__setup("report_lost_ticks", time_setup);
1022 364
1023static long clock_cmos_diff; 365static long clock_cmos_diff;
1024static unsigned long sleep_start; 366static unsigned long sleep_start;
@@ -1055,7 +397,7 @@ static int timer_resume(struct sys_device *dev)
1055 sleep_length = 0; 397 sleep_length = 0;
1056 ctime = sleep_start; 398 ctime = sleep_start;
1057 } 399 }
1058 if (vxtime.hpet_address) 400 if (hpet_address)
1059 hpet_reenable(); 401 hpet_reenable();
1060 else 402 else
1061 i8254_timer_resume(); 403 i8254_timer_resume();
@@ -1064,20 +406,8 @@ static int timer_resume(struct sys_device *dev)
1064 write_seqlock_irqsave(&xtime_lock,flags); 406 write_seqlock_irqsave(&xtime_lock,flags);
1065 xtime.tv_sec = sec; 407 xtime.tv_sec = sec;
1066 xtime.tv_nsec = 0; 408 xtime.tv_nsec = 0;
1067 if (vxtime.mode == VXTIME_HPET) {
1068 if (hpet_use_timer)
1069 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
1070 else
1071 vxtime.last = hpet_readl(HPET_COUNTER);
1072#ifdef CONFIG_X86_PM_TIMER
1073 } else if (vxtime.mode == VXTIME_PMTMR) {
1074 pmtimer_resume();
1075#endif
1076 } else
1077 vxtime.last_tsc = get_cycles_sync();
1078 write_sequnlock_irqrestore(&xtime_lock,flags);
1079 jiffies += sleep_length; 409 jiffies += sleep_length;
1080 monotonic_base += sleep_length * (NSEC_PER_SEC/HZ); 410 write_sequnlock_irqrestore(&xtime_lock,flags);
1081 touch_softlockup_watchdog(); 411 touch_softlockup_watchdog();
1082 return 0; 412 return 0;
1083} 413}
@@ -1103,270 +433,3 @@ static int time_init_device(void)
1103} 433}
1104 434
1105device_initcall(time_init_device); 435device_initcall(time_init_device);
1106
1107#ifdef CONFIG_HPET_EMULATE_RTC
1108/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
1109 * is enabled, we support RTC interrupt functionality in software.
1110 * The RTC has 3 kinds of interrupts:
1111 * 1) Update Interrupt - generate an interrupt every second, when the RTC
1112 * clock is updated
1113 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
1114 * 3) Periodic Interrupt - generate a periodic interrupt at frequencies of
1115 * 2Hz-8192Hz (2Hz-64Hz for a non-root user), all in powers of 2
1116 * (1) and (2) above are implemented using polling at a frequency of
1117 * 64 Hz (DEFAULT_RTC_INT_FREQ); the exact frequency is a tradeoff between
1118 * accuracy and interrupt overhead.
1119 * For (3), we use interrupts at 64 Hz or the user-specified periodic
1120 * frequency, whichever is higher.
1121 */
1122#include <linux/rtc.h>
1123
1124#define DEFAULT_RTC_INT_FREQ 64
1125#define RTC_NUM_INTS 1
1126
1127static unsigned long UIE_on;
1128static unsigned long prev_update_sec;
1129
1130static unsigned long AIE_on;
1131static struct rtc_time alarm_time;
1132
1133static unsigned long PIE_on;
1134static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
1135static unsigned long PIE_count;
1136
1137static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
1138static unsigned int hpet_t1_cmp; /* cached comparator register */
1139
1140int is_hpet_enabled(void)
1141{
1142 return vxtime.hpet_address != 0;
1143}
1144
1145/*
1146 * Timer 1 is used for the RTC. We do not use the periodic interrupt
1147 * feature, even if HPET supports periodic interrupts on Timer 1.
1148 * The reason is that to set up a periodic interrupt in HPET, we need to
1149 * stop the main counter, and doing that every time someone disables/enables
1150 * the RTC would adversely affect the main kernel timer running on Timer 0.
1151 * So, for the time being, simulate the periodic interrupt in software.
1152 *
1153 * hpet_rtc_timer_init() is called for the first interrupt; on subsequent
1154 * interrupts, reinitialization happens through hpet_rtc_timer_reinit().
1155 */
1156int hpet_rtc_timer_init(void)
1157{
1158 unsigned int cfg, cnt;
1159 unsigned long flags;
1160
1161 if (!is_hpet_enabled())
1162 return 0;
1163 /*
1164 * Set up timer 1 and enable its interrupt.
1165 */
1166 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1167 hpet_rtc_int_freq = PIE_freq;
1168 else
1169 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1170
1171 local_irq_save(flags);
1172
1173 cnt = hpet_readl(HPET_COUNTER);
1174 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
1175 hpet_writel(cnt, HPET_T1_CMP);
1176 hpet_t1_cmp = cnt;
1177
1178 cfg = hpet_readl(HPET_T1_CFG);
1179 cfg &= ~HPET_TN_PERIODIC;
1180 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1181 hpet_writel(cfg, HPET_T1_CFG);
1182
1183 local_irq_restore(flags);
1184
1185 return 1;
1186}
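The comparator step in hpet_rtc_timer_init() above, cnt +=
(hpet_tick*HZ)/hpet_rtc_int_freq, converts the interrupt frequency into HPET
ticks: hpet_tick*HZ is HPET ticks per second, and dividing by the RTC
interrupt frequency gives the ticks between software-RTC interrupts. A tiny
check with the assumed values hpet_tick = 14318 and HZ = 1000:

#include <stdio.h>

int main(void)
{
	unsigned long hpet_tick = 14318;	/* assumed: ticks per jiffy, HZ=1000 */
	unsigned long hz = 1000, freq = 64;	/* DEFAULT_RTC_INT_FREQ */

	/* the "cnt += (hpet_tick*HZ)/hpet_rtc_int_freq" step above */
	printf("ticks per RTC interrupt = %lu\n", hpet_tick * hz / freq);
	return 0;				/* -> 223718 */
}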
1187
1188static void hpet_rtc_timer_reinit(void)
1189{
1190 unsigned int cfg, cnt, ticks_per_int, lost_ints;
1191
1192 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
1193 cfg = hpet_readl(HPET_T1_CFG);
1194 cfg &= ~HPET_TN_ENABLE;
1195 hpet_writel(cfg, HPET_T1_CFG);
1196 return;
1197 }
1198
1199 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1200 hpet_rtc_int_freq = PIE_freq;
1201 else
1202 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1203
1204 /* It is more accurate to use the comparator value than the current count. */
1205 ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
1206 hpet_t1_cmp += ticks_per_int;
1207 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
1208
1209 /*
1210 * If the interrupt handler was delayed too long, the write above tries
1211 * to schedule the next interrupt in the past and the hardware would
1212 * not interrupt until the counter had wrapped around.
1213 * So we have to check that the comparator wasn't set to a past time.
1214 */
1215 cnt = hpet_readl(HPET_COUNTER);
1216 if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
1217 lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
1218 /* Make sure that, even with the time needed to execute
1219 * this code, the next scheduled interrupt has been moved
1220 * back to the future: */
1221 lost_ints++;
1222
1223 hpet_t1_cmp += lost_ints * ticks_per_int;
1224 hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
1225
1226 if (PIE_on)
1227 PIE_count += lost_ints;
1228
1229 if (printk_ratelimit())
1230 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
1231 hpet_rtc_int_freq);
1232 }
1233}
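The (int)(cnt - hpet_t1_cmp) > 0 test above is the standard wraparound-safe
way to ask whether a free-running 32-bit counter has passed the comparator:
the unsigned subtraction wraps modulo 2^32, and the signed cast makes "just
behind" negative and "just ahead" positive. A self-contained illustration:

#include <stdio.h>

/* Wraparound-safe "has counter a passed b?" on a 32-bit counter,
 * the same trick hpet_rtc_timer_reinit() uses above. */
static int counter_after(unsigned int a, unsigned int b)
{
	return (int)(a - b) > 0;
}

int main(void)
{
	/* works across the 2^32 wrap: 5 is "after" 0xfffffff0 */
	printf("%d\n", counter_after(5u, 0xfffffff0u));	/* 1 */
	printf("%d\n", counter_after(0xfffffff0u, 5u));	/* 0 */
	return 0;
}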
1234
1235/*
1236 * The functions below are called from the rtc driver.
1237 * Return 0 if HPET is not being used.
1238 * Otherwise do the necessary changes and return 1.
1239 */
1240int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1241{
1242 if (!is_hpet_enabled())
1243 return 0;
1244
1245 if (bit_mask & RTC_UIE)
1246 UIE_on = 0;
1247 if (bit_mask & RTC_PIE)
1248 PIE_on = 0;
1249 if (bit_mask & RTC_AIE)
1250 AIE_on = 0;
1251
1252 return 1;
1253}
1254
1255int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1256{
1257 int timer_init_reqd = 0;
1258
1259 if (!is_hpet_enabled())
1260 return 0;
1261
1262 if (!(PIE_on | AIE_on | UIE_on))
1263 timer_init_reqd = 1;
1264
1265 if (bit_mask & RTC_UIE) {
1266 UIE_on = 1;
1267 }
1268 if (bit_mask & RTC_PIE) {
1269 PIE_on = 1;
1270 PIE_count = 0;
1271 }
1272 if (bit_mask & RTC_AIE) {
1273 AIE_on = 1;
1274 }
1275
1276 if (timer_init_reqd)
1277 hpet_rtc_timer_init();
1278
1279 return 1;
1280}
1281
1282int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
1283{
1284 if (!is_hpet_enabled())
1285 return 0;
1286
1287 alarm_time.tm_hour = hrs;
1288 alarm_time.tm_min = min;
1289 alarm_time.tm_sec = sec;
1290
1291 return 1;
1292}
1293
1294int hpet_set_periodic_freq(unsigned long freq)
1295{
1296 if (!is_hpet_enabled())
1297 return 0;
1298
1299 PIE_freq = freq;
1300 PIE_count = 0;
1301
1302 return 1;
1303}
1304
1305int hpet_rtc_dropped_irq(void)
1306{
1307 if (!is_hpet_enabled())
1308 return 0;
1309
1310 return 1;
1311}
1312
1313irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1314{
1315 struct rtc_time curr_time;
1316 unsigned long rtc_int_flag = 0;
1317 int call_rtc_interrupt = 0;
1318
1319 hpet_rtc_timer_reinit();
1320
1321 if (UIE_on | AIE_on) {
1322 rtc_get_rtc_time(&curr_time);
1323 }
1324 if (UIE_on) {
1325 if (curr_time.tm_sec != prev_update_sec) {
1326 /* Set update int info, call real rtc int routine */
1327 call_rtc_interrupt = 1;
1328 rtc_int_flag = RTC_UF;
1329 prev_update_sec = curr_time.tm_sec;
1330 }
1331 }
1332 if (PIE_on) {
1333 PIE_count++;
1334 if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
1335 /* Set periodic int info, call real rtc int routine */
1336 call_rtc_interrupt = 1;
1337 rtc_int_flag |= RTC_PF;
1338 PIE_count = 0;
1339 }
1340 }
1341 if (AIE_on) {
1342 if ((curr_time.tm_sec == alarm_time.tm_sec) &&
1343 (curr_time.tm_min == alarm_time.tm_min) &&
1344 (curr_time.tm_hour == alarm_time.tm_hour)) {
1345 /* Set alarm int info, call real rtc int routine */
1346 call_rtc_interrupt = 1;
1347 rtc_int_flag |= RTC_AF;
1348 }
1349 }
1350 if (call_rtc_interrupt) {
1351 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
1352 rtc_interrupt(rtc_int_flag, dev_id);
1353 }
1354 return IRQ_HANDLED;
1355}
1356#endif
1357
1358static int __init nohpet_setup(char *s)
1359{
1360 nohpet = 1;
1361 return 1;
1362}
1363
1364__setup("nohpet", nohpet_setup);
1365
1366int __init notsc_setup(char *s)
1367{
1368 notsc = 1;
1369 return 1;
1370}
1371
1372__setup("notsc", notsc_setup);
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
new file mode 100644
index 000000000000..895831865019
--- /dev/null
+++ b/arch/x86_64/kernel/tsc.c
@@ -0,0 +1,226 @@
1#include <linux/kernel.h>
2#include <linux/sched.h>
3#include <linux/interrupt.h>
4#include <linux/init.h>
5#include <linux/clocksource.h>
6#include <linux/time.h>
7#include <linux/acpi.h>
8#include <linux/cpufreq.h>
9
10#include <asm/timex.h>
11
12static int notsc __initdata = 0;
13
14unsigned int cpu_khz; /* TSC clocks / usec, not used here */
15EXPORT_SYMBOL(cpu_khz);
16
17static unsigned int cyc2ns_scale __read_mostly;
18
19void set_cyc2ns_scale(unsigned long khz)
20{
21 cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
22}
23
24static unsigned long long cycles_2_ns(unsigned long long cyc)
25{
26 return (cyc * cyc2ns_scale) >> NS_SCALE;
27}
28
29unsigned long long sched_clock(void)
30{
31 unsigned long a = 0;
32
33 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
34 * which means it is not completely exact and may not be monotonic
35 * between CPUs. But the errors should be too small to matter for
36 * scheduling purposes.
37 */
38
39 rdtscll(a);
40 return cycles_2_ns(a);
41}
42
43static int tsc_unstable;
44
45static inline int check_tsc_unstable(void)
46{
47 return tsc_unstable;
48}
49#ifdef CONFIG_CPU_FREQ
50
51/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
52 * changes.
53 *
54 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
55 * not that important because current Opteron setups do not support
56 * scaling on SMP anyway.
57 *
58 * Should fix up last_tsc too. Currently gettimeofday in the
59 * first tick after the change will be slightly wrong.
60 */
61
62#include <linux/workqueue.h>
63
64static unsigned int cpufreq_delayed_issched = 0;
65static unsigned int cpufreq_init = 0;
66static struct work_struct cpufreq_delayed_get_work;
67
68static void handle_cpufreq_delayed_get(struct work_struct *v)
69{
70 unsigned int cpu;
71 for_each_online_cpu(cpu) {
72 cpufreq_get(cpu);
73 }
74 cpufreq_delayed_issched = 0;
75}
76
77static unsigned int ref_freq = 0;
78static unsigned long loops_per_jiffy_ref = 0;
79
80static unsigned long cpu_khz_ref = 0;
81
82static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
83 void *data)
84{
85 struct cpufreq_freqs *freq = data;
86 unsigned long *lpj, dummy;
87
88 if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
89 return 0;
90
91 lpj = &dummy;
92 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
93#ifdef CONFIG_SMP
94 lpj = &cpu_data[freq->cpu].loops_per_jiffy;
95#else
96 lpj = &boot_cpu_data.loops_per_jiffy;
97#endif
98
99 if (!ref_freq) {
100 ref_freq = freq->old;
101 loops_per_jiffy_ref = *lpj;
102 cpu_khz_ref = cpu_khz;
103 }
104 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
105 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
106 (val == CPUFREQ_RESUMECHANGE)) {
107 *lpj =
108 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
109
110 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
111 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
112 mark_tsc_unstable();
113 }
114
115 set_cyc2ns_scale(cpu_khz_ref);
116
117 return 0;
118}
119
120static struct notifier_block time_cpufreq_notifier_block = {
121 .notifier_call = time_cpufreq_notifier
122};
123
124static int __init cpufreq_tsc(void)
125{
126 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
127 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
128 CPUFREQ_TRANSITION_NOTIFIER))
129 cpufreq_init = 1;
130 return 0;
131}
132
133core_initcall(cpufreq_tsc);
134
135#endif
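The notifier above rescales loops_per_jiffy and cpu_khz with cpufreq_scale(),
which is plain proportionality: a value measured at ref_freq is scaled by
new_freq/ref_freq. A simplified sketch (the real helper also takes care of
32-bit overflow):

#include <stdio.h>

/* Simplified stand-in for cpufreq_scale(): scale a reference value
 * by new_freq/ref_freq. */
static unsigned long scale(unsigned long ref_val,
			   unsigned int ref_freq, unsigned int new_freq)
{
	return (unsigned long)((unsigned long long)ref_val * new_freq / ref_freq);
}

int main(void)
{
	/* assumed: cpu_khz_ref measured at 2000 MHz, CPU now clocked at 1000 MHz */
	printf("cpu_khz = %lu\n", scale(2000000, 2000000, 1000000));	/* 1000000 */
	return 0;
}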
136
137
138
139/*
140 * Make an educated guess if the TSC is trustworthy and synchronized
141 * over all CPUs.
142 */
143__cpuinit int unsynchronized_tsc(void)
144{
145 if (tsc_unstable)
146 return 1;
147
148#ifdef CONFIG_SMP
149 if (apic_is_clustered_box())
150 return 1;
151#endif
152 /* Most Intel systems have synchronized TSCs, except for
153 multi-node systems */
154 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
155#ifdef CONFIG_ACPI
156 /* But TSC doesn't tick in C3 so don't use it there */
157 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
158 return 1;
159#endif
160 return 0;
161 }
162
163 /* Assume multi socket systems are not synchronized */
164 return num_present_cpus() > 1;
165}
166
167int __init notsc_setup(char *s)
168{
169 notsc = 1;
170 return 1;
171}
172
173__setup("notsc", notsc_setup);
174
175
176/* clock source code: */
177static cycle_t read_tsc(void)
178{
179 cycle_t ret = (cycle_t)get_cycles_sync();
180 return ret;
181}
182
183static cycle_t __vsyscall_fn vread_tsc(void)
184{
185 cycle_t ret = (cycle_t)get_cycles_sync();
186 return ret;
187}
188
189static struct clocksource clocksource_tsc = {
190 .name = "tsc",
191 .rating = 300,
192 .read = read_tsc,
193 .mask = CLOCKSOURCE_MASK(64),
194 .shift = 22,
195 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
196 CLOCK_SOURCE_MUST_VERIFY,
197 .vread = vread_tsc,
198};
199
200void mark_tsc_unstable(void)
201{
202 if (!tsc_unstable) {
203 tsc_unstable = 1;
204 /* Change only the rating, when not registered */
205 if (clocksource_tsc.mult)
206 clocksource_change_rating(&clocksource_tsc, 0);
207 else
208 clocksource_tsc.rating = 0;
209 }
210}
211EXPORT_SYMBOL_GPL(mark_tsc_unstable);
212
213static int __init init_tsc_clocksource(void)
214{
215 if (!notsc) {
216 clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
217 clocksource_tsc.shift);
218 if (check_tsc_unstable())
219 clocksource_tsc.rating = 0;
220
221 return clocksource_register(&clocksource_tsc);
222 }
223 return 0;
224}
225
226module_init(init_tsc_clocksource);
diff --git a/arch/x86_64/kernel/tsc_sync.c b/arch/x86_64/kernel/tsc_sync.c
new file mode 100644
index 000000000000..014f0db45dfa
--- /dev/null
+++ b/arch/x86_64/kernel/tsc_sync.c
@@ -0,0 +1,187 @@
1/*
2 * arch/x86_64/kernel/tsc_sync.c: check TSC synchronization.
3 *
4 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
5 *
6 * We check whether all boot CPUs have their TSCs synchronized,
7 * print a warning if not and turn off the TSC clock-source.
8 *
9 * The warp-check is point-to-point between two CPUs, the CPU
10 * initiating the bootup is the 'source CPU', the freshly booting
11 * CPU is the 'target CPU'.
12 *
13 * Only two CPUs may participate - they can enter in any order.
14 * ( The serial nature of the boot logic and the CPU hotplug lock
15 * protects against more than 2 CPUs entering this code. )
16 */
17#include <linux/spinlock.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/smp.h>
21#include <linux/nmi.h>
22#include <asm/tsc.h>
23
24/*
25 * Entry/exit counters that make sure that both CPUs
26 * run the measurement code at once:
27 */
28static __cpuinitdata atomic_t start_count;
29static __cpuinitdata atomic_t stop_count;
30
31/*
32 * We use a raw spinlock in this exceptional case, because
33 * we want to have the fastest, inlined, non-debug version
34 * of a critical section, to be able to prove TSC time-warps:
35 */
36static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
37static __cpuinitdata cycles_t last_tsc;
38static __cpuinitdata cycles_t max_warp;
39static __cpuinitdata int nr_warps;
40
41/*
42 * TSC-warp measurement loop running on both CPUs:
43 */
44static __cpuinit void check_tsc_warp(void)
45{
46 cycles_t start, now, prev, end;
47 int i;
48
49 start = get_cycles_sync();
50 /*
51 * The measurement runs for 20 msecs:
52 */
53 end = start + cpu_khz * 20ULL;
54 now = start;
55
56 for (i = 0; ; i++) {
57 /*
58 * We take the global lock, measure TSC, save the
59 * previous TSC that was measured (possibly on
60 * another CPU) and update the previous TSC timestamp.
61 */
62 __raw_spin_lock(&sync_lock);
63 prev = last_tsc;
64 now = get_cycles_sync();
65 last_tsc = now;
66 __raw_spin_unlock(&sync_lock);
67
68 /*
69 * Be nice every now and then (and also check whether
70 * measurement is done [we also insert a 100 million
71 * loops safety exit, so we don't lock up in case the
72 * TSC readout is totally broken]):
73 */
74 if (unlikely(!(i & 7))) {
75 if (now > end || i > 100000000)
76 break;
77 cpu_relax();
78 touch_nmi_watchdog();
79 }
80 /*
81 * Outside the critical section we can now see whether
82 * we saw a time-warp of the TSC going backwards:
83 */
84 if (unlikely(prev > now)) {
85 __raw_spin_lock(&sync_lock);
86 max_warp = max(max_warp, prev - now);
87 nr_warps++;
88 __raw_spin_unlock(&sync_lock);
89 }
90
91 }
92}
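The loop above funnels all TSC reads through one lock so that consecutive
reads, possibly on different CPUs, are totally ordered; if a read is ever
smaller than the one recorded before it, the TSCs have warped. A userspace
illustration of the same idea with two threads (assumes x86 and a
GCC-compatible compiler, built with -pthread; a sketch of the concept, not
the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_tsc;
static unsigned long warps;

static void *warp_check(void *arg)
{
	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&lock);
		uint64_t prev = last_tsc;	/* possibly from the other thread */
		uint64_t now = __rdtsc();
		last_tsc = now;
		if (prev > now)			/* time went backwards: a warp */
			warps++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, warp_check, NULL);
	warp_check(NULL);
	pthread_join(t, NULL);
	printf("warps: %lu\n", warps);
	return 0;
}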
93
94/*
95 * Source CPU calls into this - it waits for the freshly booted
96 * target CPU to arrive and then starts the measurement:
97 */
98void __cpuinit check_tsc_sync_source(int cpu)
99{
100 int cpus = 2;
101
102 /*
103 * No need to check if we already know that the TSC is not
104 * synchronized:
105 */
106 if (unsynchronized_tsc())
107 return;
108
109 printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
110 smp_processor_id(), cpu);
111
112 /*
113 * Reset it - in case this is a second bootup:
114 */
115 atomic_set(&stop_count, 0);
116
117 /*
118 * Wait for the target to arrive:
119 */
120 while (atomic_read(&start_count) != cpus-1)
121 cpu_relax();
122 /*
123 * Trigger the target to continue into the measurement too:
124 */
125 atomic_inc(&start_count);
126
127 check_tsc_warp();
128
129 while (atomic_read(&stop_count) != cpus-1)
130 cpu_relax();
131
132 /*
133 * Reset it - just in case we boot another CPU later:
134 */
135 atomic_set(&start_count, 0);
136
137 if (nr_warps) {
138 printk("\n");
139 printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
140 " turning off TSC clock.\n", max_warp);
141 mark_tsc_unstable();
142 nr_warps = 0;
143 max_warp = 0;
144 last_tsc = 0;
145 } else {
146 printk(" passed.\n");
147 }
148
149 /*
150 * Let the target continue with the bootup:
151 */
152 atomic_inc(&stop_count);
153}
154
155/*
156 * Freshly booted CPUs call into this:
157 */
158void __cpuinit check_tsc_sync_target(void)
159{
160 int cpus = 2;
161
162 if (unsynchronized_tsc())
163 return;
164
165 /*
166 * Register this CPU's participation and wait for the
167 * source CPU to start the measurement:
168 */
169 atomic_inc(&start_count);
170 while (atomic_read(&start_count) != cpus)
171 cpu_relax();
172
173 check_tsc_warp();
174
175 /*
176 * Ok, we are done:
177 */
178 atomic_inc(&stop_count);
179
180 /*
181 * Wait for the source CPU to print stuff:
182 */
183 while (atomic_read(&stop_count) != cpus)
184 cpu_relax();
185}
186#undef NR_LOOPS
187
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index c360c4225244..b73212c0a550 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -88,31 +88,25 @@ SECTIONS
88 __vsyscall_0 = VSYSCALL_VIRT_ADDR; 88 __vsyscall_0 = VSYSCALL_VIRT_ADDR;
89 89
90 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); 90 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
91 .xtime_lock : AT(VLOAD(.xtime_lock)) { *(.xtime_lock) } 91 .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
92 xtime_lock = VVIRT(.xtime_lock); 92 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
93 93 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
94 .vxtime : AT(VLOAD(.vxtime)) { *(.vxtime) } 94 { *(.vsyscall_gtod_data) }
95 vxtime = VVIRT(.vxtime); 95 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
96 96
97 .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) } 97 .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
98 vgetcpu_mode = VVIRT(.vgetcpu_mode); 98 vgetcpu_mode = VVIRT(.vgetcpu_mode);
99 99
100 .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) }
101 sys_tz = VVIRT(.sys_tz);
102
103 .sysctl_vsyscall : AT(VLOAD(.sysctl_vsyscall)) { *(.sysctl_vsyscall) }
104 sysctl_vsyscall = VVIRT(.sysctl_vsyscall);
105
106 .xtime : AT(VLOAD(.xtime)) { *(.xtime) }
107 xtime = VVIRT(.xtime);
108
109 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); 100 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
110 .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) } 101 .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
111 jiffies = VVIRT(.jiffies); 102 jiffies = VVIRT(.jiffies);
112 103
113 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { *(.vsyscall_1) } 104 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
114 .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { *(.vsyscall_2) } 105 { *(.vsyscall_1) }
115 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } 106 .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
107 { *(.vsyscall_2) }
108 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
109 { *(.vsyscall_3) }
116 110
117 . = VSYSCALL_VIRT_ADDR + 4096; 111 . = VSYSCALL_VIRT_ADDR + 4096;
118 112
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 313dc6ad780b..180ff919eaf9 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -26,6 +26,7 @@
26#include <linux/seqlock.h> 26#include <linux/seqlock.h>
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/sysctl.h> 28#include <linux/sysctl.h>
29#include <linux/clocksource.h>
29#include <linux/getcpu.h> 30#include <linux/getcpu.h>
30#include <linux/cpu.h> 31#include <linux/cpu.h>
31#include <linux/smp.h> 32#include <linux/smp.h>
@@ -34,6 +35,7 @@
34#include <asm/vsyscall.h> 35#include <asm/vsyscall.h>
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include <asm/page.h> 37#include <asm/page.h>
38#include <asm/unistd.h>
37#include <asm/fixmap.h> 39#include <asm/fixmap.h>
38#include <asm/errno.h> 40#include <asm/errno.h>
39#include <asm/io.h> 41#include <asm/io.h>
@@ -44,56 +46,41 @@
44#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) 46#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
45#define __syscall_clobber "r11","rcx","memory" 47#define __syscall_clobber "r11","rcx","memory"
46 48
47int __sysctl_vsyscall __section_sysctl_vsyscall = 1; 49struct vsyscall_gtod_data_t {
48seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED; 50 seqlock_t lock;
51 int sysctl_enabled;
52 struct timeval wall_time_tv;
53 struct timezone sys_tz;
54 cycle_t offset_base;
55 struct clocksource clock;
56};
49int __vgetcpu_mode __section_vgetcpu_mode; 57int __vgetcpu_mode __section_vgetcpu_mode;
50 58
51#include <asm/unistd.h> 59struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data =
52
53static __always_inline void timeval_normalize(struct timeval * tv)
54{ 60{
55 time_t __sec; 61 .lock = SEQLOCK_UNLOCKED,
56 62 .sysctl_enabled = 1,
57 __sec = tv->tv_usec / 1000000; 63};
58 if (__sec) {
59 tv->tv_usec %= 1000000;
60 tv->tv_sec += __sec;
61 }
62}
63 64
64static __always_inline void do_vgettimeofday(struct timeval * tv) 65void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
65{ 66{
66 long sequence, t; 67 unsigned long flags;
67 unsigned long sec, usec; 68
68 69 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
69 do { 70 /* copy vsyscall data */
70 sequence = read_seqbegin(&__xtime_lock); 71 vsyscall_gtod_data.clock = *clock;
71 72 vsyscall_gtod_data.wall_time_tv.tv_sec = wall_time->tv_sec;
72 sec = __xtime.tv_sec; 73 vsyscall_gtod_data.wall_time_tv.tv_usec = wall_time->tv_nsec/1000;
73 usec = __xtime.tv_nsec / 1000; 74 vsyscall_gtod_data.sys_tz = sys_tz;
74 75 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
75 if (__vxtime.mode != VXTIME_HPET) {
76 t = get_cycles_sync();
77 if (t < __vxtime.last_tsc)
78 t = __vxtime.last_tsc;
79 usec += ((t - __vxtime.last_tsc) *
80 __vxtime.tsc_quot) >> 32;
81 /* See comment in x86_64 do_gettimeofday. */
82 } else {
83 usec += ((readl((void __iomem *)
84 fix_to_virt(VSYSCALL_HPET) + 0xf0) -
85 __vxtime.last) * __vxtime.quot) >> 32;
86 }
87 } while (read_seqretry(&__xtime_lock, sequence));
88
89 tv->tv_sec = sec + usec / 1000000;
90 tv->tv_usec = usec % 1000000;
91} 76}
92 77
93/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */ 78/* RED-PEN may want to readd seq locking, but then the variable should be
79 * write-once.
80 */
94static __always_inline void do_get_tz(struct timezone * tz) 81static __always_inline void do_get_tz(struct timezone * tz)
95{ 82{
96 *tz = __sys_tz; 83 *tz = __vsyscall_gtod_data.sys_tz;
97} 84}
98 85
99static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) 86static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
@@ -101,7 +88,8 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
101 int ret; 88 int ret;
102 asm volatile("vsysc2: syscall" 89 asm volatile("vsysc2: syscall"
103 : "=a" (ret) 90 : "=a" (ret)
104 : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber ); 91 : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
92 : __syscall_clobber );
105 return ret; 93 return ret;
106} 94}
107 95
@@ -114,10 +102,44 @@ static __always_inline long time_syscall(long *t)
114 return secs; 102 return secs;
115} 103}
116 104
105static __always_inline void do_vgettimeofday(struct timeval * tv)
106{
107 cycle_t now, base, mask, cycle_delta;
108 unsigned long seq, mult, shift, nsec_delta;
109 cycle_t (*vread)(void);
110 do {
111 seq = read_seqbegin(&__vsyscall_gtod_data.lock);
112
113 vread = __vsyscall_gtod_data.clock.vread;
114 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
115 gettimeofday(tv,0);
116 return;
117 }
118 now = vread();
119 base = __vsyscall_gtod_data.clock.cycle_last;
120 mask = __vsyscall_gtod_data.clock.mask;
121 mult = __vsyscall_gtod_data.clock.mult;
122 shift = __vsyscall_gtod_data.clock.shift;
123
124 *tv = __vsyscall_gtod_data.wall_time_tv;
125
126 } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
127
128 /* calculate interval: */
129 cycle_delta = (now - base) & mask;
130 /* convert to nsecs: */
131 nsec_delta = (cycle_delta * mult) >> shift;
132
133 /* convert to usecs and add to timespec: */
134 tv->tv_usec += nsec_delta / NSEC_PER_USEC;
135 while (tv->tv_usec > USEC_PER_SEC) {
136 tv->tv_sec += 1;
137 tv->tv_usec -= USEC_PER_SEC;
138 }
139}
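The cycle-to-nanosecond step in do_vgettimeofday() is the generic clocksource
formula: nsec = ((now - cycle_last) & mask) * mult >> shift, where for a
khz-rated source mult is (10^6 << shift)/khz, essentially what
clocksource_khz2mult() computes for clocksource_tsc above (up to rounding).
Worked standalone with the shift of 22 from clocksource_tsc and an assumed
2 GHz TSC:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t shift = 22;			/* clocksource_tsc.shift above */
	uint64_t khz = 2000000;			/* assumed 2 GHz TSC */
	uint64_t mult = (1000000ULL << shift) / khz;	/* scaled ns per cycle */

	uint64_t base = 1000, now = 5000;	/* sample cycle counts */
	uint64_t mask = ~0ULL;			/* CLOCKSOURCE_MASK(64) */
	uint64_t cycle_delta = (now - base) & mask;
	uint64_t nsec = (cycle_delta * mult) >> shift;

	printf("%llu cycles -> %llu ns\n",	/* 4000 cycles -> 2000 ns */
	       (unsigned long long)cycle_delta, (unsigned long long)nsec);
	return 0;
}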
140
117int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) 141int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
118{ 142{
119 if (!__sysctl_vsyscall)
120 return gettimeofday(tv,tz);
121 if (tv) 143 if (tv)
122 do_vgettimeofday(tv); 144 do_vgettimeofday(tv);
123 if (tz) 145 if (tz)
@@ -129,11 +151,11 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
129 * unlikely */ 151 * unlikely */
130time_t __vsyscall(1) vtime(time_t *t) 152time_t __vsyscall(1) vtime(time_t *t)
131{ 153{
132 if (!__sysctl_vsyscall) 154 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
133 return time_syscall(t); 155 return time_syscall(t);
134 else if (t) 156 else if (t)
135 *t = __xtime.tv_sec; 157 *t = __vsyscall_gtod_data.wall_time_tv.tv_sec;
136 return __xtime.tv_sec; 158 return __vsyscall_gtod_data.wall_time_tv.tv_sec;
137} 159}
138 160
139/* Fast way to get current CPU and node. 161/* Fast way to get current CPU and node.
@@ -210,7 +232,7 @@ static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
210 ret = -ENOMEM; 232 ret = -ENOMEM;
211 goto out; 233 goto out;
212 } 234 }
213 if (!sysctl_vsyscall) { 235 if (!vsyscall_gtod_data.sysctl_enabled) {
214 writew(SYSCALL, map1); 236 writew(SYSCALL, map1);
215 writew(SYSCALL, map2); 237 writew(SYSCALL, map2);
216 } else { 238 } else {
@@ -232,7 +254,8 @@ static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
232 254
233static ctl_table kernel_table2[] = { 255static ctl_table kernel_table2[] = {
234 { .ctl_name = 99, .procname = "vsyscall64", 256 { .ctl_name = 99, .procname = "vsyscall64",
235 .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644, 257 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
258 .mode = 0644,
236 .strategy = vsyscall_sysctl_nostrat, 259 .strategy = vsyscall_sysctl_nostrat,
237 .proc_handler = vsyscall_sysctl_change }, 260 .proc_handler = vsyscall_sysctl_change },
238 {} 261 {}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 20eacc2c9e0e..e942ffe8b57e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -13,6 +13,7 @@ config ACPI
13 depends on IA64 || X86 13 depends on IA64 || X86
14 depends on PCI 14 depends on PCI
15 depends on PM 15 depends on PM
16 select PNP
16 default y 17 default y
17 ---help--- 18 ---help---
18 Advanced Configuration and Power Interface (ACPI) support for 19 Advanced Configuration and Power Interface (ACPI) support for
@@ -132,15 +133,6 @@ config ACPI_VIDEO
132 Note that this is an ref. implementation only. It may or may not work 133 Note that this is an ref. implementation only. It may or may not work
133 for your integrated video device. 134 for your integrated video device.
134 135
135config ACPI_HOTKEY
136 tristate "Generic Hotkey (EXPERIMENTAL)"
137 depends on EXPERIMENTAL
138 depends on X86
139 default n
140 help
141 Experimental consolidated hotkey driver.
142 If you are unsure, say N.
143
144config ACPI_FAN 136config ACPI_FAN
145 tristate "Fan" 137 tristate "Fan"
146 default y 138 default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 856c32bccacb..5956e9f64a8b 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_ACPI_FAN) += fan.o
46obj-$(CONFIG_ACPI_DOCK) += dock.o 46obj-$(CONFIG_ACPI_DOCK) += dock.o
47obj-$(CONFIG_ACPI_BAY) += bay.o 47obj-$(CONFIG_ACPI_BAY) += bay.o
48obj-$(CONFIG_ACPI_VIDEO) += video.o 48obj-$(CONFIG_ACPI_VIDEO) += video.o
49obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 49obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
51obj-$(CONFIG_ACPI_POWER) += power.o 50obj-$(CONFIG_ACPI_POWER) += power.o
52obj-$(CONFIG_ACPI_PROCESSOR) += processor.o 51obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6daeace796a8..37c7dc4f9fe5 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -35,7 +35,6 @@
35#define ACPI_AC_COMPONENT 0x00020000 35#define ACPI_AC_COMPONENT 0x00020000
36#define ACPI_AC_CLASS "ac_adapter" 36#define ACPI_AC_CLASS "ac_adapter"
37#define ACPI_AC_HID "ACPI0003" 37#define ACPI_AC_HID "ACPI0003"
38#define ACPI_AC_DRIVER_NAME "ACPI AC Adapter Driver"
39#define ACPI_AC_DEVICE_NAME "AC Adapter" 38#define ACPI_AC_DEVICE_NAME "AC Adapter"
40#define ACPI_AC_FILE_STATE "state" 39#define ACPI_AC_FILE_STATE "state"
41#define ACPI_AC_NOTIFY_STATUS 0x80 40#define ACPI_AC_NOTIFY_STATUS 0x80
@@ -44,10 +43,10 @@
44#define ACPI_AC_STATUS_UNKNOWN 0xFF 43#define ACPI_AC_STATUS_UNKNOWN 0xFF
45 44
46#define _COMPONENT ACPI_AC_COMPONENT 45#define _COMPONENT ACPI_AC_COMPONENT
47ACPI_MODULE_NAME("acpi_ac") 46ACPI_MODULE_NAME("ac");
48 47
49 MODULE_AUTHOR("Paul Diefenbaugh"); 48MODULE_AUTHOR("Paul Diefenbaugh");
50MODULE_DESCRIPTION(ACPI_AC_DRIVER_NAME); 49MODULE_DESCRIPTION("ACPI AC Adapter Driver");
51MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
52 51
53extern struct proc_dir_entry *acpi_lock_ac_dir(void); 52extern struct proc_dir_entry *acpi_lock_ac_dir(void);
@@ -58,7 +57,7 @@ static int acpi_ac_remove(struct acpi_device *device, int type);
58static int acpi_ac_open_fs(struct inode *inode, struct file *file); 57static int acpi_ac_open_fs(struct inode *inode, struct file *file);
59 58
60static struct acpi_driver acpi_ac_driver = { 59static struct acpi_driver acpi_ac_driver = {
61 .name = ACPI_AC_DRIVER_NAME, 60 .name = "ac",
62 .class = ACPI_AC_CLASS, 61 .class = ACPI_AC_CLASS,
63 .ids = ACPI_AC_HID, 62 .ids = ACPI_AC_HID,
64 .ops = { 63 .ops = {
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index cd946ed192d3..c26172671fd8 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -35,14 +35,13 @@
35#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000UL 35#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000UL
36#define ACPI_MEMORY_DEVICE_CLASS "memory" 36#define ACPI_MEMORY_DEVICE_CLASS "memory"
37#define ACPI_MEMORY_DEVICE_HID "PNP0C80" 37#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
38#define ACPI_MEMORY_DEVICE_DRIVER_NAME "Hotplug Mem Driver"
39#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" 38#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
40 39
41#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT 40#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT
42 41
43ACPI_MODULE_NAME("acpi_memory") 42ACPI_MODULE_NAME("acpi_memhotplug");
44 MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); 43MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
45MODULE_DESCRIPTION(ACPI_MEMORY_DEVICE_DRIVER_NAME); 44MODULE_DESCRIPTION("Hotplug Mem Driver");
46MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
47 46
48/* ACPI _STA method values */ 47/* ACPI _STA method values */
@@ -60,7 +59,7 @@ static int acpi_memory_device_remove(struct acpi_device *device, int type);
60static int acpi_memory_device_start(struct acpi_device *device); 59static int acpi_memory_device_start(struct acpi_device *device);
61 60
62static struct acpi_driver acpi_memory_device_driver = { 61static struct acpi_driver acpi_memory_device_driver = {
63 .name = ACPI_MEMORY_DEVICE_DRIVER_NAME, 62 .name = "acpi_memhotplug",
64 .class = ACPI_MEMORY_DEVICE_CLASS, 63 .class = ACPI_MEMORY_DEVICE_CLASS,
65 .ids = ACPI_MEMORY_DEVICE_HID, 64 .ids = ACPI_MEMORY_DEVICE_HID,
66 .ops = { 65 .ops = {
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 31ad70a6e22e..772299fb5f9d 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -141,6 +141,7 @@ struct asus_hotk {
141 W5A, //W5A 141 W5A, //W5A
142 W3V, //W3030V 142 W3V, //W3030V
143 xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N 143 xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
144 A4S, //Z81sp
144 //(Centrino) 145 //(Centrino)
145 END_MODEL 146 END_MODEL
146 } model; //Models currently supported 147 } model; //Models currently supported
@@ -397,7 +398,16 @@ static struct model_data model_conf[END_MODEL] = {
397 .brightness_set = "SPLV", 398 .brightness_set = "SPLV",
398 .brightness_get = "GPLV", 399 .brightness_get = "GPLV",
399 .display_set = "SDSP", 400 .display_set = "SDSP",
400 .display_get = "\\ADVG"} 401 .display_get = "\\ADVG"},
402
403 {
404 .name = "A4S",
405 .brightness_set = "SPLV",
406 .brightness_get = "GPLV",
407 .mt_bt_switch = "BLED",
408 .mt_wled = "WLED"
409 }
410
401}; 411};
402 412
403/* procdir we use */ 413/* procdir we use */
@@ -421,7 +431,7 @@ static struct asus_hotk *hotk;
421static int asus_hotk_add(struct acpi_device *device); 431static int asus_hotk_add(struct acpi_device *device);
422static int asus_hotk_remove(struct acpi_device *device, int type); 432static int asus_hotk_remove(struct acpi_device *device, int type);
423static struct acpi_driver asus_hotk_driver = { 433static struct acpi_driver asus_hotk_driver = {
424 .name = ACPI_HOTK_NAME, 434 .name = "asus_acpi",
425 .class = ACPI_HOTK_CLASS, 435 .class = ACPI_HOTK_CLASS,
426 .ids = ACPI_HOTK_HID, 436 .ids = ACPI_HOTK_HID,
427 .ops = { 437 .ops = {
@@ -1117,6 +1127,8 @@ static int asus_model_match(char *model)
1117 return W3V; 1127 return W3V;
1118 else if (strncmp(model, "W5A", 3) == 0) 1128 else if (strncmp(model, "W5A", 3) == 0)
1119 return W5A; 1129 return W5A;
1130 else if (strncmp(model, "A4S", 3) == 0)
1131 return A4S;
1120 else 1132 else
1121 return END_MODEL; 1133 return END_MODEL;
1122} 1134}
@@ -1365,10 +1377,6 @@ static int __init asus_acpi_init(void)
1365 if (acpi_disabled) 1377 if (acpi_disabled)
1366 return -ENODEV; 1378 return -ENODEV;
1367 1379
1368 if (!acpi_specific_hotkey_enabled) {
1369 printk(KERN_ERR "Using generic hotkey driver\n");
1370 return -ENODEV;
1371 }
1372 asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir); 1380 asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
1373 if (!asus_proc_dir) { 1381 if (!asus_proc_dir) {
1374 printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n"); 1382 printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2f4521a48fe7..02de49ef1bce 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -42,7 +42,6 @@
42#define ACPI_BATTERY_COMPONENT 0x00040000 42#define ACPI_BATTERY_COMPONENT 0x00040000
43#define ACPI_BATTERY_CLASS "battery" 43#define ACPI_BATTERY_CLASS "battery"
44#define ACPI_BATTERY_HID "PNP0C0A" 44#define ACPI_BATTERY_HID "PNP0C0A"
45#define ACPI_BATTERY_DRIVER_NAME "ACPI Battery Driver"
46#define ACPI_BATTERY_DEVICE_NAME "Battery" 45#define ACPI_BATTERY_DEVICE_NAME "Battery"
47#define ACPI_BATTERY_FILE_INFO "info" 46#define ACPI_BATTERY_FILE_INFO "info"
48#define ACPI_BATTERY_FILE_STATUS "state" 47#define ACPI_BATTERY_FILE_STATUS "state"
@@ -53,10 +52,10 @@
53#define ACPI_BATTERY_UNITS_AMPS "mA" 52#define ACPI_BATTERY_UNITS_AMPS "mA"
54 53
55#define _COMPONENT ACPI_BATTERY_COMPONENT 54#define _COMPONENT ACPI_BATTERY_COMPONENT
56ACPI_MODULE_NAME("acpi_battery") 55ACPI_MODULE_NAME("battery");
57 56
58 MODULE_AUTHOR("Paul Diefenbaugh"); 57MODULE_AUTHOR("Paul Diefenbaugh");
59MODULE_DESCRIPTION(ACPI_BATTERY_DRIVER_NAME); 58MODULE_DESCRIPTION("ACPI Battery Driver");
60MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
61 60
62extern struct proc_dir_entry *acpi_lock_battery_dir(void); 61extern struct proc_dir_entry *acpi_lock_battery_dir(void);
@@ -67,7 +66,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type);
67static int acpi_battery_resume(struct acpi_device *device); 66static int acpi_battery_resume(struct acpi_device *device);
68 67
69static struct acpi_driver acpi_battery_driver = { 68static struct acpi_driver acpi_battery_driver = {
70 .name = ACPI_BATTERY_DRIVER_NAME, 69 .name = "battery",
71 .class = ACPI_BATTERY_CLASS, 70 .class = ACPI_BATTERY_CLASS,
72 .ids = ACPI_BATTERY_HID, 71 .ids = ACPI_BATTERY_HID,
73 .ops = { 72 .ops = {
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 91082ce6f5d1..fb3f31b5e69f 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -32,11 +32,9 @@
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34 34
35#define ACPI_BAY_DRIVER_NAME "ACPI Removable Drive Bay Driver" 35ACPI_MODULE_NAME("bay");
36
37ACPI_MODULE_NAME("bay")
38MODULE_AUTHOR("Kristen Carlson Accardi"); 36MODULE_AUTHOR("Kristen Carlson Accardi");
39MODULE_DESCRIPTION(ACPI_BAY_DRIVER_NAME); 37MODULE_DESCRIPTION("ACPI Removable Drive Bay Driver");
40MODULE_LICENSE("GPL"); 38MODULE_LICENSE("GPL");
41#define ACPI_BAY_CLASS "bay" 39#define ACPI_BAY_CLASS "bay"
42#define ACPI_BAY_COMPONENT 0x10000000 40#define ACPI_BAY_COMPONENT 0x10000000
@@ -47,18 +45,6 @@ MODULE_LICENSE("GPL");
47 acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\ 45 acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
48 printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); } 46 printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
49static void bay_notify(acpi_handle handle, u32 event, void *data); 47static void bay_notify(acpi_handle handle, u32 event, void *data);
50static int acpi_bay_add(struct acpi_device *device);
51static int acpi_bay_remove(struct acpi_device *device, int type);
52
53static struct acpi_driver acpi_bay_driver = {
54 .name = ACPI_BAY_DRIVER_NAME,
55 .class = ACPI_BAY_CLASS,
56 .ids = ACPI_BAY_HID,
57 .ops = {
58 .add = acpi_bay_add,
59 .remove = acpi_bay_remove,
60 },
61};
62 48
63struct bay { 49struct bay {
64 acpi_handle handle; 50 acpi_handle handle;
@@ -234,14 +220,6 @@ int eject_removable_drive(struct device *dev)
234} 220}
235EXPORT_SYMBOL_GPL(eject_removable_drive); 221EXPORT_SYMBOL_GPL(eject_removable_drive);
236 222
237static int acpi_bay_add(struct acpi_device *device)
238{
239 bay_dprintk(device->handle, "adding bay device");
240 strcpy(acpi_device_name(device), "Dockable Bay");
241 strcpy(acpi_device_class(device), "bay");
242 return 0;
243}
244
245static int acpi_bay_add_fs(struct bay *bay) 223static int acpi_bay_add_fs(struct bay *bay)
246{ 224{
247 int ret; 225 int ret;
@@ -303,7 +281,7 @@ static int bay_add(acpi_handle handle, int id)
303 281
304 /* initialize platform device stuff */ 282 /* initialize platform device stuff */
305 pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0); 283 pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
306 if (pdev == NULL) { 284 if (IS_ERR(pdev)) {
307 printk(KERN_ERR PREFIX "Error registering bay device\n"); 285 printk(KERN_ERR PREFIX "Error registering bay device\n");
308 goto bay_add_err; 286 goto bay_add_err;
309 } 287 }
@@ -339,52 +317,6 @@ bay_add_err:
339 return -ENODEV; 317 return -ENODEV;
340} 318}
341 319
342static int acpi_bay_remove(struct acpi_device *device, int type)
343{
344 /*** FIXME: do something here */
345 return 0;
346}
347
348/**
349 * bay_create_acpi_device - add new devices to acpi
350 * @handle - handle of the device to add
351 *
352 * This function will create a new acpi_device for the given
353 * handle if one does not exist already. This should cause
354 * acpi to scan for drivers for the given devices, and call
355 * matching driver's add routine.
356 *
357 * Returns a pointer to the acpi_device corresponding to the handle.
358 */
359static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
360{
361 struct acpi_device *device = NULL;
362 struct acpi_device *parent_device;
363 acpi_handle parent;
364 int ret;
365
366 bay_dprintk(handle, "Trying to get device");
367 if (acpi_bus_get_device(handle, &device)) {
368 /*
369 * no device created for this object,
370 * so we should create one.
371 */
372 bay_dprintk(handle, "No device for handle");
373 acpi_get_parent(handle, &parent);
374 if (acpi_bus_get_device(parent, &parent_device))
375 parent_device = NULL;
376
377 ret = acpi_bus_add(&device, parent_device, handle,
378 ACPI_BUS_TYPE_DEVICE);
379 if (ret) {
380 pr_debug("error adding bus, %x\n",
381 -ret);
382 return NULL;
383 }
384 }
385 return device;
386}
387
388/** 320/**
389 * bay_notify - act upon an acpi bay notification 321 * bay_notify - act upon an acpi bay notification
390 * @handle: the bay handle 322 * @handle: the bay handle
@@ -394,38 +326,19 @@ static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
394 */ 326 */
395static void bay_notify(acpi_handle handle, u32 event, void *data) 327static void bay_notify(acpi_handle handle, u32 event, void *data)
396{ 328{
397 struct acpi_device *dev; 329 struct bay *bay_dev = (struct bay *)data;
330 struct device *dev = &bay_dev->pdev->dev;
398 331
399 bay_dprintk(handle, "Bay event"); 332 bay_dprintk(handle, "Bay event");
400 333
401 switch(event) { 334 switch(event) {
402 case ACPI_NOTIFY_BUS_CHECK: 335 case ACPI_NOTIFY_BUS_CHECK:
403 printk("Bus Check\n");
404 case ACPI_NOTIFY_DEVICE_CHECK: 336 case ACPI_NOTIFY_DEVICE_CHECK:
405 printk("Device Check\n");
406 dev = bay_create_acpi_device(handle);
407 if (dev)
408 acpi_bus_generate_event(dev, event, 0);
409 else
410 printk("No device for generating event\n");
411 /* wouldn't it be a good idea to just rescan SATA
412 * right here?
413 */
414 break;
415 case ACPI_NOTIFY_EJECT_REQUEST: 337 case ACPI_NOTIFY_EJECT_REQUEST:
416 printk("Eject request\n"); 338 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
417 dev = bay_create_acpi_device(handle);
418 if (dev)
419 acpi_bus_generate_event(dev, event, 0);
420 else
421 printk("No device for generating event\n");
422
423 /* wouldn't it be a good idea to just call the
424 * eject_device here if we were a SATA device?
425 */
426 break; 339 break;
427 default: 340 default:
428 printk("unknown event %d\n", event); 341 printk(KERN_ERR PREFIX "Bay: unknown event %d\n", event);
429 } 342 }
430} 343}
431 344
@@ -457,10 +370,6 @@ static int __init bay_init(void)
457 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 370 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
458 ACPI_UINT32_MAX, find_bay, &bays, NULL); 371 ACPI_UINT32_MAX, find_bay, &bays, NULL);
459 372
460 if (bays)
461 if ((acpi_bus_register_driver(&acpi_bay_driver) < 0))
462 printk(KERN_ERR "Unable to register bay driver\n");
463
464 if (!bays) 373 if (!bays)
465 return -ENODEV; 374 return -ENODEV;
466 375
@@ -481,8 +390,6 @@ static void __exit bay_exit(void)
481 kfree(bay->name); 390 kfree(bay->name);
482 kfree(bay); 391 kfree(bay);
483 } 392 }
484
485 acpi_bus_unregister_driver(&acpi_bay_driver);
486} 393}
487 394
488postcore_initcall(bay_init); 395postcore_initcall(bay_init);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c26468da4295..fd37e19360d0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -39,7 +39,7 @@
39#include <acpi/acpi_drivers.h> 39#include <acpi/acpi_drivers.h>
40 40
41#define _COMPONENT ACPI_BUS_COMPONENT 41#define _COMPONENT ACPI_BUS_COMPONENT
42ACPI_MODULE_NAME("acpi_bus") 42ACPI_MODULE_NAME("bus");
43#ifdef CONFIG_X86 43#ifdef CONFIG_X86
44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
45#endif 45#endif
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index c726612fafb6..cb4110b50cd0 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -34,7 +34,6 @@
34#include <acpi/acpi_drivers.h> 34#include <acpi/acpi_drivers.h>
35 35
36#define ACPI_BUTTON_COMPONENT 0x00080000 36#define ACPI_BUTTON_COMPONENT 0x00080000
37#define ACPI_BUTTON_DRIVER_NAME "ACPI Button Driver"
38#define ACPI_BUTTON_CLASS "button" 37#define ACPI_BUTTON_CLASS "button"
39#define ACPI_BUTTON_FILE_INFO "info" 38#define ACPI_BUTTON_FILE_INFO "info"
40#define ACPI_BUTTON_FILE_STATE "state" 39#define ACPI_BUTTON_FILE_STATE "state"
@@ -61,10 +60,10 @@
61#define ACPI_BUTTON_TYPE_LID 0x05 60#define ACPI_BUTTON_TYPE_LID 0x05
62 61
63#define _COMPONENT ACPI_BUTTON_COMPONENT 62#define _COMPONENT ACPI_BUTTON_COMPONENT
64ACPI_MODULE_NAME("acpi_button") 63ACPI_MODULE_NAME("button");
65 64
66MODULE_AUTHOR("Paul Diefenbaugh"); 65MODULE_AUTHOR("Paul Diefenbaugh");
67MODULE_DESCRIPTION(ACPI_BUTTON_DRIVER_NAME); 66MODULE_DESCRIPTION("ACPI Button Driver");
68MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
69 68
70static int acpi_button_add(struct acpi_device *device); 69static int acpi_button_add(struct acpi_device *device);
@@ -73,7 +72,7 @@ static int acpi_button_info_open_fs(struct inode *inode, struct file *file);
73static int acpi_button_state_open_fs(struct inode *inode, struct file *file); 72static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
74 73
75static struct acpi_driver acpi_button_driver = { 74static struct acpi_driver acpi_button_driver = {
76 .name = ACPI_BUTTON_DRIVER_NAME, 75 .name = "button",
77 .class = ACPI_BUTTON_CLASS, 76 .class = ACPI_BUTTON_CLASS,
78 .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E", 77 .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
79 .ops = { 78 .ops = {
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 4a9b7bf6f44e..f9db4f444bd0 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -31,7 +31,7 @@
31#include <acpi/actypes.h> 31#include <acpi/actypes.h>
32#include <acpi/acutils.h> 32#include <acpi/acutils.h>
33 33
34ACPI_MODULE_NAME("cm_sbs") 34ACPI_MODULE_NAME("cm_sbs");
35#define ACPI_AC_CLASS "ac_adapter" 35#define ACPI_AC_CLASS "ac_adapter"
36#define ACPI_BATTERY_CLASS "battery" 36#define ACPI_BATTERY_CLASS "battery"
37#define ACPI_SBS_COMPONENT 0x00080000 37#define ACPI_SBS_COMPONENT 0x00080000
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 69a68fd394cf..0930d9413dfa 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -35,7 +35,6 @@
35#include <acpi/acpi_drivers.h> 35#include <acpi/acpi_drivers.h>
36#include <acpi/container.h> 36#include <acpi/container.h>
37 37
38#define ACPI_CONTAINER_DRIVER_NAME "ACPI container driver"
39#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device" 38#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
40#define ACPI_CONTAINER_CLASS "container" 39#define ACPI_CONTAINER_CLASS "container"
41 40
@@ -44,10 +43,10 @@
44 43
45#define ACPI_CONTAINER_COMPONENT 0x01000000 44#define ACPI_CONTAINER_COMPONENT 0x01000000
46#define _COMPONENT ACPI_CONTAINER_COMPONENT 45#define _COMPONENT ACPI_CONTAINER_COMPONENT
47ACPI_MODULE_NAME("acpi_container") 46ACPI_MODULE_NAME("container");
48 47
49 MODULE_AUTHOR("Anil S Keshavamurthy"); 48MODULE_AUTHOR("Anil S Keshavamurthy");
50MODULE_DESCRIPTION(ACPI_CONTAINER_DRIVER_NAME); 49MODULE_DESCRIPTION("ACPI container driver");
51MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
52 51
53#define ACPI_STA_PRESENT (0x00000001) 52#define ACPI_STA_PRESENT (0x00000001)
@@ -56,7 +55,7 @@ static int acpi_container_add(struct acpi_device *device);
56static int acpi_container_remove(struct acpi_device *device, int type); 55static int acpi_container_remove(struct acpi_device *device, int type);
57 56
58static struct acpi_driver acpi_container_driver = { 57static struct acpi_driver acpi_container_driver = {
59 .name = ACPI_CONTAINER_DRIVER_NAME, 58 .name = "container",
60 .class = ACPI_CONTAINER_CLASS, 59 .class = ACPI_CONTAINER_CLASS,
61 .ids = "ACPI0004,PNP0A05,PNP0A06", 60 .ids = "ACPI0004,PNP0A05,PNP0A06",
62 .ops = { 61 .ops = {
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index d48f65a8f658..bf513e07b773 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -12,7 +12,7 @@
12#include <acpi/acglobal.h> 12#include <acpi/acglobal.h>
13 13
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 14#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("debug") 15ACPI_MODULE_NAME("debug");
16 16
17#ifdef MODULE_PARAM_PREFIX 17#ifdef MODULE_PARAM_PREFIX
18#undef MODULE_PARAM_PREFIX 18#undef MODULE_PARAM_PREFIX
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 1cbe61905824..1683e5c5b94c 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -231,10 +231,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
231 * Obtain the method mutex if necessary. Do not acquire mutex for a 231 * Obtain the method mutex if necessary. Do not acquire mutex for a
232 * recursive call. 232 * recursive call.
233 */ 233 */
234 if (!walk_state || 234 if (acpi_os_get_thread_id() !=
235 !obj_desc->method.mutex->mutex.owner_thread || 235 obj_desc->method.mutex->mutex.owner_thread_id) {
236 (walk_state->thread !=
237 obj_desc->method.mutex->mutex.owner_thread)) {
238 /* 236 /*
239 * Acquire the method mutex. This releases the interpreter if we 237 * Acquire the method mutex. This releases the interpreter if we
240 * block (and reacquires it before it returns) 238 * block (and reacquires it before it returns)
@@ -248,14 +246,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
248 } 246 }
249 247
250 /* Update the mutex and walk info and save the original sync_level */ 248 /* Update the mutex and walk info and save the original sync_level */
249 obj_desc->method.mutex->mutex.owner_thread_id =
250 acpi_os_get_thread_id();
251 251
252 if (walk_state) { 252 if (walk_state) {
253 obj_desc->method.mutex->mutex. 253 obj_desc->method.mutex->mutex.
254 original_sync_level = 254 original_sync_level =
255 walk_state->thread->current_sync_level; 255 walk_state->thread->current_sync_level;
256 256
257 obj_desc->method.mutex->mutex.owner_thread =
258 walk_state->thread;
259 walk_state->thread->current_sync_level = 257 walk_state->thread->current_sync_level =
260 obj_desc->method.sync_level; 258 obj_desc->method.sync_level;
261 } else { 259 } else {
@@ -569,7 +567,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
569 567
570 acpi_os_release_mutex(method_desc->method.mutex->mutex. 568 acpi_os_release_mutex(method_desc->method.mutex->mutex.
571 os_mutex); 569 os_mutex);
572 method_desc->method.mutex->mutex.owner_thread = NULL; 570 method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
573 } 571 }
574 } 572 }
575 573
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 688e83a16906..54a697f9aa18 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -32,11 +32,11 @@
32#include <acpi/acpi_bus.h> 32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h> 33#include <acpi/acpi_drivers.h>
34 34
35#define ACPI_DOCK_DRIVER_NAME "ACPI Dock Station Driver" 35#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
36 36
37ACPI_MODULE_NAME("dock") 37ACPI_MODULE_NAME("dock");
38MODULE_AUTHOR("Kristen Carlson Accardi"); 38MODULE_AUTHOR("Kristen Carlson Accardi");
39MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_NAME); 39MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
40MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
41 41
42static struct atomic_notifier_head dock_notifier_list; 42static struct atomic_notifier_head dock_notifier_list;
@@ -741,7 +741,7 @@ static int dock_add(acpi_handle handle)
741 goto dock_add_err; 741 goto dock_add_err;
742 } 742 }
743 743
744 printk(KERN_INFO PREFIX "%s \n", ACPI_DOCK_DRIVER_NAME); 744 printk(KERN_INFO PREFIX "%s \n", ACPI_DOCK_DRIVER_DESCRIPTION);
745 745
746 return 0; 746 return 0;
747 747
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 743ce27fa0bb..ab6888373795 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -38,11 +38,10 @@
38#include <acpi/actypes.h> 38#include <acpi/actypes.h>
39 39
40#define _COMPONENT ACPI_EC_COMPONENT 40#define _COMPONENT ACPI_EC_COMPONENT
41ACPI_MODULE_NAME("acpi_ec") 41ACPI_MODULE_NAME("ec");
42#define ACPI_EC_COMPONENT 0x00100000 42#define ACPI_EC_COMPONENT 0x00100000
43#define ACPI_EC_CLASS "embedded_controller" 43#define ACPI_EC_CLASS "embedded_controller"
44#define ACPI_EC_HID "PNP0C09" 44#define ACPI_EC_HID "PNP0C09"
45#define ACPI_EC_DRIVER_NAME "ACPI Embedded Controller Driver"
46#define ACPI_EC_DEVICE_NAME "Embedded Controller" 45#define ACPI_EC_DEVICE_NAME "Embedded Controller"
47#define ACPI_EC_FILE_INFO "info" 46#define ACPI_EC_FILE_INFO "info"
48#undef PREFIX 47#undef PREFIX
@@ -80,7 +79,7 @@ static int acpi_ec_stop(struct acpi_device *device, int type);
80static int acpi_ec_add(struct acpi_device *device); 79static int acpi_ec_add(struct acpi_device *device);
81 80
82static struct acpi_driver acpi_ec_driver = { 81static struct acpi_driver acpi_ec_driver = {
83 .name = ACPI_EC_DRIVER_NAME, 82 .name = "ec",
84 .class = ACPI_EC_CLASS, 83 .class = ACPI_EC_CLASS,
85 .ids = ACPI_EC_HID, 84 .ids = ACPI_EC_HID,
86 .ops = { 85 .ops = {
@@ -280,8 +279,10 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
280 mutex_lock(&ec->lock); 279 mutex_lock(&ec->lock);
281 if (ec->global_lock) { 280 if (ec->global_lock) {
282 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 281 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
283 if (ACPI_FAILURE(status)) 282 if (ACPI_FAILURE(status)) {
283 mutex_unlock(&ec->lock);
284 return -ENODEV; 284 return -ENODEV;
285 }
285 } 286 }
286 287
287 /* Make sure GPE is enabled before doing transaction */ 288 /* Make sure GPE is enabled before doing transaction */
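
[editor's note] Note the locking fix folded into this hunk: previously a failed acpi_acquire_global_lock() returned -ENODEV with ec->lock still held, deadlocking the next transaction. A minimal userspace analogue of the corrected error path, a sketch with pthread stand-ins for the driver's mutex and global-lock calls:

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t ec_lock = PTHREAD_MUTEX_INITIALIZER;

	/* stand-in for acpi_acquire_global_lock(); assume it can fail */
	static bool acquire_global_lock(void) { return false; }

	static int ec_transaction_sketch(void)
	{
		pthread_mutex_lock(&ec_lock);
		if (!acquire_global_lock()) {
			/* the fix: drop ec_lock before bailing out */
			pthread_mutex_unlock(&ec_lock);
			return -ENODEV;
		}
		/* ... perform the EC transaction, release the global lock ... */
		pthread_mutex_unlock(&ec_lock);
		return 0;
	}
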
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 959a893c8d1f..3b23562e6f92 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -13,7 +13,7 @@
13#include <acpi/acpi_drivers.h> 13#include <acpi/acpi_drivers.h>
14 14
15#define _COMPONENT ACPI_SYSTEM_COMPONENT 15#define _COMPONENT ACPI_SYSTEM_COMPONENT
16ACPI_MODULE_NAME("event") 16ACPI_MODULE_NAME("event");
17 17
18/* Global vars for handling event proc entry */ 18/* Global vars for handling event proc entry */
19static DEFINE_SPINLOCK(acpi_system_event_lock); 19static DEFINE_SPINLOCK(acpi_system_event_lock);
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index dfac3ecc596e..635ba449ebc2 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -636,17 +636,6 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
636 } 636 }
637 } 637 }
638 638
639 if (!acpi_gbl_system_awake_and_running) {
640 /*
641 * We just woke up because of a wake GPE. Disable any further GPEs
642 * until we are fully up and running (Only wake GPEs should be enabled
643 * at this time, but we just brute-force disable them all.)
644 * 1) We must disable this particular wake GPE so it won't fire again
645 * 2) We want to disable all wake GPEs, since we are now awake
646 */
647 (void)acpi_hw_disable_all_gpes();
648 }
649
650 /* 639 /*
651 * Dispatch the GPE to either an installed handler, or the control method 640 * Dispatch the GPE to either an installed handler, or the control method
652 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke 641 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index 1b784ffe54c3..d572700197f3 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -196,12 +196,11 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
196 notify_info->notify.value = (u16) notify_value; 196 notify_info->notify.value = (u16) notify_value;
197 notify_info->notify.handler_obj = handler_obj; 197 notify_info->notify.handler_obj = handler_obj;
198 198
199 status = 199 acpi_ex_relinquish_interpreter();
200 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, 200
201 notify_info); 201 acpi_ev_notify_dispatch(notify_info);
202 if (ACPI_FAILURE(status)) { 202
203 acpi_ut_delete_generic_state(notify_info); 203 acpi_ex_reacquire_interpreter();
204 }
205 } 204 }
206 205
207 if (!handler_obj) { 206 if (!handler_obj) {
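
[editor's note] The notify path above changes from deferred execution via acpi_os_execute() to a direct, synchronous call: the interpreter is relinquished around the dispatch so the handler may re-enter it. A compact sketch of that unlock-call-relock pattern, with a pthread mutex standing in for the interpreter lock and illustrative names:

	#include <pthread.h>

	static pthread_mutex_t interp_lock = PTHREAD_MUTEX_INITIALIZER;

	static void notify_dispatch(void *info)
	{
		(void)info;	/* run the installed notify handler */
	}

	static void queue_notify_sketch(void *info)
	{
		/* caller holds interp_lock, as the AML interpreter does */
		pthread_mutex_unlock(&interp_lock);	/* relinquish_interpreter */
		notify_dispatch(info);			/* was deferred, now direct */
		pthread_mutex_lock(&interp_lock);	/* reacquire_interpreter */
	}
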
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 68d283fd60e7..1a73c14df2c5 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
134static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { 134static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
135 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, 135 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
136 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, 136 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
137 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, 137 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"},
138 {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), 138 {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
139 "Acquire Depth"}, 139 "Acquire Depth"},
140 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} 140 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 5101bad5baf8..4eb883bda6ae 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -66,10 +66,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
66 * 66 *
67 ******************************************************************************/ 67 ******************************************************************************/
68 68
69void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc) 69void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
70 struct acpi_thread_state *thread)
70{ 71{
71 struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
72
73 if (!thread) { 72 if (!thread) {
74 return; 73 return;
75 } 74 }
@@ -174,16 +173,13 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
174 173
175 /* Support for multiple acquires by the owning thread */ 174 /* Support for multiple acquires by the owning thread */
176 175
177 if (obj_desc->mutex.owner_thread) { 176 if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
178 if (obj_desc->mutex.owner_thread->thread_id == 177 /*
179 walk_state->thread->thread_id) { 178 * The mutex is already owned by this thread, just increment the
180 /* 179 * acquisition depth
181 * The mutex is already owned by this thread, just increment the 180 */
182 * acquisition depth 181 obj_desc->mutex.acquisition_depth++;
183 */ 182 return_ACPI_STATUS(AE_OK);
184 obj_desc->mutex.acquisition_depth++;
185 return_ACPI_STATUS(AE_OK);
186 }
187 } 183 }
188 184
189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */ 185 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
@@ -206,7 +202,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
206 202
207 /* Have the mutex: update mutex and walk info and save the sync_level */ 203 /* Have the mutex: update mutex and walk info and save the sync_level */
208 204
209 obj_desc->mutex.owner_thread = walk_state->thread; 205 obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
210 obj_desc->mutex.acquisition_depth = 1; 206 obj_desc->mutex.acquisition_depth = 1;
211 obj_desc->mutex.original_sync_level = 207 obj_desc->mutex.original_sync_level =
212 walk_state->thread->current_sync_level; 208 walk_state->thread->current_sync_level;
@@ -246,7 +242,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
246 242
247 /* The mutex must have been previously acquired in order to release it */ 243 /* The mutex must have been previously acquired in order to release it */
248 244
249 if (!obj_desc->mutex.owner_thread) { 245 if (!obj_desc->mutex.owner_thread_id) {
250 ACPI_ERROR((AE_INFO, 246 ACPI_ERROR((AE_INFO,
251 "Cannot release Mutex [%4.4s], not acquired", 247 "Cannot release Mutex [%4.4s], not acquired",
252 acpi_ut_get_node_name(obj_desc->mutex.node))); 248 acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -266,14 +262,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
266 * The Mutex is owned, but this thread must be the owner. 262 * The Mutex is owned, but this thread must be the owner.
267 * Special case for Global Lock, any thread can release 263 * Special case for Global Lock, any thread can release
268 */ 264 */
269 if ((obj_desc->mutex.owner_thread->thread_id != 265 if ((obj_desc->mutex.owner_thread_id !=
270 walk_state->thread->thread_id) 266 walk_state->thread->thread_id)
271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) { 267 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
272 ACPI_ERROR((AE_INFO, 268 ACPI_ERROR((AE_INFO,
273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 269 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
274 (unsigned long)walk_state->thread->thread_id, 270 (unsigned long)walk_state->thread->thread_id,
275 acpi_ut_get_node_name(obj_desc->mutex.node), 271 acpi_ut_get_node_name(obj_desc->mutex.node),
276 (unsigned long)obj_desc->mutex.owner_thread->thread_id)); 272 (unsigned long)obj_desc->mutex.owner_thread_id));
277 return_ACPI_STATUS(AE_AML_NOT_OWNER); 273 return_ACPI_STATUS(AE_AML_NOT_OWNER);
278 } 274 }
279 275
@@ -300,7 +296,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
300 296
301 /* Unlink the mutex from the owner's list */ 297 /* Unlink the mutex from the owner's list */
302 298
303 acpi_ex_unlink_mutex(obj_desc); 299 acpi_ex_unlink_mutex(obj_desc, walk_state->thread);
304 300
305 /* Release the mutex, special case for Global Lock */ 301 /* Release the mutex, special case for Global Lock */
306 302
@@ -312,7 +308,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
312 308
313 /* Update the mutex and restore sync_level */ 309 /* Update the mutex and restore sync_level */
314 310
315 obj_desc->mutex.owner_thread = NULL; 311 obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
316 walk_state->thread->current_sync_level = 312 walk_state->thread->current_sync_level =
317 obj_desc->mutex.original_sync_level; 313 obj_desc->mutex.original_sync_level;
318 314
@@ -367,7 +363,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
367 363
368 /* Mark mutex unowned */ 364 /* Mark mutex unowned */
369 365
370 obj_desc->mutex.owner_thread = NULL; 366 obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
371 367
372 /* Update Thread sync_level (Last mutex is the important one) */ 368 /* Update Thread sync_level (Last mutex is the important one) */
373 369
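
[editor's note] These hunks (and the matching dsmethod.c change earlier in the patch) switch AML mutex ownership from a pointer to per-thread walk state to a plain thread id, which also works when no walk state exists. A self-contained analogue of the resulting acquire/release logic; portable C with pthreads, where the value 0 for "not acquired" and the integral pthread_t cast are assumptions of this sketch:

	#include <pthread.h>

	#define MUTEX_NOT_ACQUIRED 0UL	/* mirrors ACPI_MUTEX_NOT_ACQUIRED; value assumed */

	struct aml_mutex {
		pthread_mutex_t os_mutex;
		unsigned long owner_tid;	/* MUTEX_NOT_ACQUIRED when free */
		unsigned int depth;
	};

	/* pthread_t is integral on Linux; the cast is an assumption of this sketch */
	static unsigned long self_id(void)
	{
		return (unsigned long)pthread_self();
	}

	int aml_mutex_acquire(struct aml_mutex *m)
	{
		if (m->owner_tid == self_id()) {
			m->depth++;		/* recursive acquire: bump depth only */
			return 0;
		}
		pthread_mutex_lock(&m->os_mutex);
		m->owner_tid = self_id();
		m->depth = 1;
		return 0;
	}

	int aml_mutex_release(struct aml_mutex *m)
	{
		if (m->owner_tid != self_id())
			return -1;		/* AE_AML_NOT_OWNER in the driver */
		if (--m->depth)
			return 0;		/* still held by outer acquires */
		m->owner_tid = MUTEX_NOT_ACQUIRED;
		pthread_mutex_unlock(&m->os_mutex);
		return 0;
	}
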
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index af22fdf73413..ec655c539492 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -36,14 +36,13 @@
36 36
37#define ACPI_FAN_COMPONENT 0x00200000 37#define ACPI_FAN_COMPONENT 0x00200000
38#define ACPI_FAN_CLASS "fan" 38#define ACPI_FAN_CLASS "fan"
39#define ACPI_FAN_DRIVER_NAME "ACPI Fan Driver"
40#define ACPI_FAN_FILE_STATE "state" 39#define ACPI_FAN_FILE_STATE "state"
41 40
42#define _COMPONENT ACPI_FAN_COMPONENT 41#define _COMPONENT ACPI_FAN_COMPONENT
43ACPI_MODULE_NAME("acpi_fan") 42ACPI_MODULE_NAME("fan");
44 43
45 MODULE_AUTHOR("Paul Diefenbaugh"); 44MODULE_AUTHOR("Paul Diefenbaugh");
46MODULE_DESCRIPTION(ACPI_FAN_DRIVER_NAME); 45MODULE_DESCRIPTION("ACPI Fan Driver");
47MODULE_LICENSE("GPL"); 46MODULE_LICENSE("GPL");
48 47
49static int acpi_fan_add(struct acpi_device *device); 48static int acpi_fan_add(struct acpi_device *device);
@@ -52,7 +51,7 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
52static int acpi_fan_resume(struct acpi_device *device); 51static int acpi_fan_resume(struct acpi_device *device);
53 52
54static struct acpi_driver acpi_fan_driver = { 53static struct acpi_driver acpi_fan_driver = {
55 .name = ACPI_FAN_DRIVER_NAME, 54 .name = "fan",
56 .class = ACPI_FAN_CLASS, 55 .class = ACPI_FAN_CLASS,
57 .ids = "PNP0C0B", 56 .ids = "PNP0C0B",
58 .ops = { 57 .ops = {
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7b6c9ff9bebe..4334c208841a 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -241,3 +241,65 @@ static int __init init_acpi_device_notify(void)
241} 241}
242 242
243arch_initcall(init_acpi_device_notify); 243arch_initcall(init_acpi_device_notify);
244
245
246#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
247
248/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find
249 * its device node and pass extra config data. This helps its driver use
250 * capabilities that the now-obsolete mc146818 didn't have, and informs it
251 * that this board's RTC is wakeup-capable (per ACPI spec).
252 */
253#include <linux/mc146818rtc.h>
254
255static struct cmos_rtc_board_info rtc_info;
256
257
258/* PNP devices are registered in a subsys_initcall();
259 * ACPI specifies the PNP IDs to use.
260 */
261#include <linux/pnp.h>
262
263static int __init pnp_match(struct device *dev, void *data)
264{
265 static const char *ids[] = { "PNP0b00", "PNP0b01", "PNP0b02", };
266 struct pnp_dev *pnp = to_pnp_dev(dev);
267 int i;
268
269 for (i = 0; i < ARRAY_SIZE(ids); i++) {
270 if (compare_pnp_id(pnp->id, ids[i]) != 0)
271 return 1;
272 }
273 return 0;
274}
275
276static struct device *__init get_rtc_dev(void)
277{
278 return bus_find_device(&pnp_bus_type, NULL, NULL, pnp_match);
279}
280
281static int __init acpi_rtc_init(void)
282{
283 struct device *dev = get_rtc_dev();
284
285 if (dev) {
286 rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm;
287 rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm;
288 rtc_info.rtc_century = acpi_gbl_FADT.century;
289
290 /* NOTE: acpi_gbl_FADT->rtcs4 is NOT currently useful */
291
292 dev->platform_data = &rtc_info;
293
294 /* RTC always wakes from S1/S2/S3, and often S4/STD */
295 device_init_wakeup(dev, 1);
296
297 put_device(dev);
298 } else
299 pr_debug("ACPI: RTC unavailable?\n");
300 return 0;
301}
302/* do this between RTC subsys_initcall() and rtc_cmos driver_initcall() */
303fs_initcall(acpi_rtc_init);
304
305#endif
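
[editor's note] For the consumer side of this glue: a hedged, kernel-style sketch (not standalone code) of how an RTC driver such as rtc-cmos could pick up the cmos_rtc_board_info attached above. The probe signature is illustrative; the field names come from the code in this hunk:

	#include <linux/device.h>
	#include <linux/mc146818rtc.h>

	static int cmos_probe_sketch(struct device *dev)
	{
		struct cmos_rtc_board_info *info = dev->platform_data;

		if (info) {
			/* rtc_day_alarm / rtc_mon_alarm / rtc_century name the
			 * extended alarm registers the bare mc146818 lacked */
		}
		return 0;
	}
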
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
deleted file mode 100644
index 8edfb92f7ede..000000000000
--- a/drivers/acpi/hotkey.c
+++ /dev/null
@@ -1,1042 +0,0 @@
1/*
2 * hotkey.c - ACPI Hotkey Driver ($Revision: 0.2 $)
3 *
4 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/proc_fs.h>
29#include <linux/sched.h>
30#include <linux/kmod.h>
31#include <linux/seq_file.h>
32#include <acpi/acpi_drivers.h>
33#include <acpi/acpi_bus.h>
34#include <asm/uaccess.h>
35
36#define HOTKEY_ACPI_VERSION "0.1"
37
38#define HOTKEY_PROC "hotkey"
39#define HOTKEY_EV_CONFIG "event_config"
40#define HOTKEY_PL_CONFIG "poll_config"
41#define HOTKEY_ACTION "action"
42#define HOTKEY_INFO "info"
43
44#define ACPI_HOTK_NAME "Generic Hotkey Driver"
45#define ACPI_HOTK_CLASS "Hotkey"
46#define ACPI_HOTK_DEVICE_NAME "Hotkey"
47#define ACPI_HOTK_HID "Unknown?"
48#define ACPI_HOTKEY_COMPONENT 0x20000000
49
50#define ACPI_HOTKEY_EVENT 0x1
51#define ACPI_HOTKEY_POLLING 0x2
52#define ACPI_UNDEFINED_EVENT 0xf
53
54#define RESULT_STR_LEN 80
55
56#define ACTION_METHOD 0
57#define POLL_METHOD 1
58
59#define IS_EVENT(e) ((e) <= 10000 && (e) >0)
60#define IS_POLL(e) ((e) > 10000)
61#define IS_OTHERS(e) ((e)<=0 || (e)>=20000)
62#define _COMPONENT ACPI_HOTKEY_COMPONENT
63ACPI_MODULE_NAME("acpi_hotkey")
64
65 MODULE_AUTHOR("luming.yu@intel.com");
66MODULE_DESCRIPTION(ACPI_HOTK_NAME);
67MODULE_LICENSE("GPL");
68
69/* standardized internal hotkey number/event */
70enum {
71 /* Video Extension event */
72 HK_EVENT_CYCLE_OUTPUT_DEVICE = 0x80,
73 HK_EVENT_OUTPUT_DEVICE_STATUS_CHANGE,
74 HK_EVENT_CYCLE_DISPLAY_OUTPUT,
75 HK_EVENT_NEXT_DISPLAY_OUTPUT,
76 HK_EVENT_PREVIOUS_DISPLAY_OUTPUT,
77 HK_EVENT_CYCLE_BRIGHTNESS,
78 HK_EVENT_INCREASE_BRIGHTNESS,
79 HK_EVENT_DECREASE_BRIGHTNESS,
80 HK_EVENT_ZERO_BRIGHTNESS,
81 HK_EVENT_DISPLAY_DEVICE_OFF,
82
83 /* Snd Card event */
84 HK_EVENT_VOLUME_MUTE,
85 HK_EVENT_VOLUME_INCLREASE,
86 HK_EVENT_VOLUME_DECREASE,
87
88 /* running state control */
89 HK_EVENT_ENTERRING_S3,
90 HK_EVENT_ENTERRING_S4,
91 HK_EVENT_ENTERRING_S5,
92};
93
94enum conf_entry_enum {
95 bus_handle = 0,
96 bus_method = 1,
97 action_handle = 2,
98 method = 3,
99 LAST_CONF_ENTRY
100};
101
102/* procdir we use */
103static struct proc_dir_entry *hotkey_proc_dir;
104static struct proc_dir_entry *hotkey_config;
105static struct proc_dir_entry *hotkey_poll_config;
106static struct proc_dir_entry *hotkey_action;
107static struct proc_dir_entry *hotkey_info;
108
109/* linkage for all types of hotkey */
110struct acpi_hotkey_link {
111 struct list_head entries;
112 int hotkey_type; /* event or polling based hotkey */
113 int hotkey_standard_num; /* standardized hotkey(event) number */
114};
115
116/* event based hotkey */
117struct acpi_event_hotkey {
118 struct acpi_hotkey_link hotkey_link;
119 int flag;
120 acpi_handle bus_handle; /* bus to install notify handler */
121 int external_hotkey_num; /* external hotkey/event number */
122 acpi_handle action_handle; /* acpi handle attached aml action method */
123 char *action_method; /* action method */
124};
125
126/*
127 * There are two ways to poll status:
128 * 1. directly call the read_xxx method, with no arguments passed in
129 * 2. call the write_xxx method with arguments passed in, then issue
130 * another read command through the polling interface;
131 * the result is saved in acpi_polling_hotkey.poll_result.
132 *
133 */
134
135/* polling based hotkey */
136struct acpi_polling_hotkey {
137 struct acpi_hotkey_link hotkey_link;
138 int flag;
139 acpi_handle poll_handle; /* acpi handle attached polling method */
140 char *poll_method; /* poll method */
141 acpi_handle action_handle; /* acpi handle attached action method */
142 char *action_method; /* action method */
143 union acpi_object *poll_result; /* polling_result */
144 struct proc_dir_entry *proc;
145};
146
147/* hotkey object union */
148union acpi_hotkey {
149 struct list_head entries;
150 struct acpi_hotkey_link link;
151 struct acpi_event_hotkey event_hotkey;
152 struct acpi_polling_hotkey poll_hotkey;
153};
154
155/* hotkey object list */
156struct acpi_hotkey_list {
157 struct list_head *entries;
158 int count;
159};
160
161static int auto_hotkey_add(struct acpi_device *device);
162static int auto_hotkey_remove(struct acpi_device *device, int type);
163
164static struct acpi_driver hotkey_driver = {
165 .name = ACPI_HOTK_NAME,
166 .class = ACPI_HOTK_CLASS,
167 .ids = ACPI_HOTK_HID,
168 .ops = {
169 .add = auto_hotkey_add,
170 .remove = auto_hotkey_remove,
171 },
172};
173
174static void free_hotkey_device(union acpi_hotkey *key);
175static void free_hotkey_buffer(union acpi_hotkey *key);
176static void free_poll_hotkey_buffer(union acpi_hotkey *key);
177static int hotkey_open_config(struct inode *inode, struct file *file);
178static int hotkey_poll_open_config(struct inode *inode, struct file *file);
179static ssize_t hotkey_write_config(struct file *file,
180 const char __user * buffer,
181 size_t count, loff_t * data);
182static int hotkey_info_open_fs(struct inode *inode, struct file *file);
183static int hotkey_action_open_fs(struct inode *inode, struct file *file);
184static ssize_t hotkey_execute_aml_method(struct file *file,
185 const char __user * buffer,
186 size_t count, loff_t * data);
187static int hotkey_config_seq_show(struct seq_file *seq, void *offset);
188static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset);
189static int hotkey_polling_open_fs(struct inode *inode, struct file *file);
190static union acpi_hotkey *get_hotkey_by_event(struct
191 acpi_hotkey_list
192 *hotkey_list, int event);
193
194/* event based config */
195static const struct file_operations hotkey_config_fops = {
196 .open = hotkey_open_config,
197 .read = seq_read,
198 .write = hotkey_write_config,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
203/* polling based config */
204static const struct file_operations hotkey_poll_config_fops = {
205 .open = hotkey_poll_open_config,
206 .read = seq_read,
207 .write = hotkey_write_config,
208 .llseek = seq_lseek,
209 .release = single_release,
210};
211
212/* hotkey driver info */
213static const struct file_operations hotkey_info_fops = {
214 .open = hotkey_info_open_fs,
215 .read = seq_read,
216 .llseek = seq_lseek,
217 .release = single_release,
218};
219
220/* action */
221static const struct file_operations hotkey_action_fops = {
222 .open = hotkey_action_open_fs,
223 .read = seq_read,
224 .write = hotkey_execute_aml_method,
225 .llseek = seq_lseek,
226 .release = single_release,
227};
228
229/* polling results */
230static const struct file_operations hotkey_polling_fops = {
231 .open = hotkey_polling_open_fs,
232 .read = seq_read,
233 .llseek = seq_lseek,
234 .release = single_release,
235};
236
237struct acpi_hotkey_list global_hotkey_list; /* links all event/polling hotkeys */
238struct list_head hotkey_entries; /* head of the list of hotkey_list */
239
240static int hotkey_info_seq_show(struct seq_file *seq, void *offset)
241{
242
243 seq_printf(seq, "Hotkey generic driver ver: %s\n", HOTKEY_ACPI_VERSION);
244
245 return 0;
246}
247
248static int hotkey_info_open_fs(struct inode *inode, struct file *file)
249{
250 return single_open(file, hotkey_info_seq_show, PDE(inode)->data);
251}
252
253static char *format_result(union acpi_object *object)
254{
255 char *buf;
256
257 buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
258 if (!buf)
259 return NULL;
260 /* Now, just support integer type */
261 if (object->type == ACPI_TYPE_INTEGER)
262 sprintf(buf, "%d\n", (u32) object->integer.value);
263 return buf;
264}
265
266static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
267{
268 struct acpi_polling_hotkey *poll_hotkey = seq->private;
269 char *buf;
270
271
272 if (poll_hotkey->poll_result) {
273 buf = format_result(poll_hotkey->poll_result);
274 if (buf)
275 seq_printf(seq, "%s", buf);
276 kfree(buf);
277 }
278 return 0;
279}
280
281static int hotkey_polling_open_fs(struct inode *inode, struct file *file)
282{
283 return single_open(file, hotkey_polling_seq_show, PDE(inode)->data);
284}
285
286static int hotkey_action_open_fs(struct inode *inode, struct file *file)
287{
288 return single_open(file, hotkey_info_seq_show, PDE(inode)->data);
289}
290
291/* Mapping external hotkey number to standardized hotkey event num */
292static int hotkey_get_internal_event(int event, struct acpi_hotkey_list *list)
293{
294 struct list_head *entries;
295 int val = -1;
296
297
298 list_for_each(entries, list->entries) {
299 union acpi_hotkey *key =
300 container_of(entries, union acpi_hotkey, entries);
301 if (key->link.hotkey_type == ACPI_HOTKEY_EVENT
302 && key->event_hotkey.external_hotkey_num == event) {
303 val = key->link.hotkey_standard_num;
304 break;
305 }
306 }
307
308 return val;
309}
310
311static void
312acpi_hotkey_notify_handler(acpi_handle handle, u32 event, void *data)
313{
314 struct acpi_device *device = NULL;
315 u32 internal_event;
316
317
318 if (acpi_bus_get_device(handle, &device))
319 return;
320
321 internal_event = hotkey_get_internal_event(event, &global_hotkey_list);
322 acpi_bus_generate_event(device, internal_event, 0);
323
324 return;
325}
326
327/* Need to invent an automatic hotkey add method */
328static int auto_hotkey_add(struct acpi_device *device)
329{
330 /* Implement me */
331 return 0;
332}
333
334/* Need to invent an automatic hotkey remove method */
335static int auto_hotkey_remove(struct acpi_device *device, int type)
336{
337 /* Implement me */
338 return 0;
339}
340
341/* Create a proc file for each polling method */
342static int create_polling_proc(union acpi_hotkey *device)
343{
344 struct proc_dir_entry *proc;
345 char proc_name[80];
346 mode_t mode;
347
348 mode = S_IFREG | S_IRUGO | S_IWUGO;
349
350 sprintf(proc_name, "%d", device->link.hotkey_standard_num);
351 /*
352 strcat(proc_name, device->poll_hotkey.poll_method);
353 */
354 proc = create_proc_entry(proc_name, mode, hotkey_proc_dir);
355
356 if (!proc) {
357 return -ENODEV;
358 } else {
359 proc->proc_fops = &hotkey_polling_fops;
360 proc->owner = THIS_MODULE;
361 proc->data = device;
362 proc->uid = 0;
363 proc->gid = 0;
364 device->poll_hotkey.proc = proc;
365 }
366 return 0;
367}
368
369static int hotkey_add(union acpi_hotkey *device)
370{
371 int status = 0;
372 struct acpi_device *dev = NULL;
373
374
375 if (device->link.hotkey_type == ACPI_HOTKEY_EVENT) {
376 acpi_bus_get_device(device->event_hotkey.bus_handle, &dev);
377 status = acpi_install_notify_handler(dev->handle,
378 ACPI_DEVICE_NOTIFY,
379 acpi_hotkey_notify_handler,
380 dev);
381 } else /* Add polling hotkey */
382 create_polling_proc(device);
383
384 global_hotkey_list.count++;
385
386 list_add_tail(&device->link.entries, global_hotkey_list.entries);
387
388 return status;
389}
390
391static int hotkey_remove(union acpi_hotkey *device)
392{
393 struct list_head *entries, *next;
394
395
396 list_for_each_safe(entries, next, global_hotkey_list.entries) {
397 union acpi_hotkey *key =
398 container_of(entries, union acpi_hotkey, entries);
399 if (key->link.hotkey_standard_num ==
400 device->link.hotkey_standard_num) {
401 list_del(&key->link.entries);
402 free_hotkey_device(key);
403 global_hotkey_list.count--;
404 break;
405 }
406 }
407 kfree(device);
408 return 0;
409}
410
411static int hotkey_update(union acpi_hotkey *key)
412{
413 struct list_head *entries;
414
415
416 list_for_each(entries, global_hotkey_list.entries) {
417 union acpi_hotkey *tmp =
418 container_of(entries, union acpi_hotkey, entries);
419 if (tmp->link.hotkey_standard_num ==
420 key->link.hotkey_standard_num) {
421 if (key->link.hotkey_type == ACPI_HOTKEY_EVENT) {
422 free_hotkey_buffer(tmp);
423 tmp->event_hotkey.bus_handle =
424 key->event_hotkey.bus_handle;
425 tmp->event_hotkey.external_hotkey_num =
426 key->event_hotkey.external_hotkey_num;
427 tmp->event_hotkey.action_handle =
428 key->event_hotkey.action_handle;
429 tmp->event_hotkey.action_method =
430 key->event_hotkey.action_method;
431 kfree(key);
432 } else {
433 /*
434 char proc_name[80];
435
436 sprintf(proc_name, "%d", tmp->link.hotkey_standard_num);
437 strcat(proc_name, tmp->poll_hotkey.poll_method);
438 remove_proc_entry(proc_name,hotkey_proc_dir);
439 */
440 free_poll_hotkey_buffer(tmp);
441 tmp->poll_hotkey.poll_handle =
442 key->poll_hotkey.poll_handle;
443 tmp->poll_hotkey.poll_method =
444 key->poll_hotkey.poll_method;
445 tmp->poll_hotkey.action_handle =
446 key->poll_hotkey.action_handle;
447 tmp->poll_hotkey.action_method =
448 key->poll_hotkey.action_method;
449 tmp->poll_hotkey.poll_result =
450 key->poll_hotkey.poll_result;
451 /*
452 create_polling_proc(tmp);
453 */
454 kfree(key);
455 }
456 return 0;
457 break;
458 }
459 }
460
461 return -ENODEV;
462}
463
464static void free_hotkey_device(union acpi_hotkey *key)
465{
466 struct acpi_device *dev;
467
468
469 if (key->link.hotkey_type == ACPI_HOTKEY_EVENT) {
470 acpi_bus_get_device(key->event_hotkey.bus_handle, &dev);
471 if (dev->handle)
472 acpi_remove_notify_handler(dev->handle,
473 ACPI_DEVICE_NOTIFY,
474 acpi_hotkey_notify_handler);
475 free_hotkey_buffer(key);
476 } else {
477 char proc_name[80];
478
479 sprintf(proc_name, "%d", key->link.hotkey_standard_num);
480 /*
481 strcat(proc_name, key->poll_hotkey.poll_method);
482 */
483 remove_proc_entry(proc_name, hotkey_proc_dir);
484 free_poll_hotkey_buffer(key);
485 }
486 kfree(key);
487 return;
488}
489
490static void free_hotkey_buffer(union acpi_hotkey *key)
491{
492 /* key would never be null, action method could be */
493 kfree(key->event_hotkey.action_method);
494}
495
496static void free_poll_hotkey_buffer(union acpi_hotkey *key)
497{
498	/* key would never be null, others could be */
499 kfree(key->poll_hotkey.action_method);
500 kfree(key->poll_hotkey.poll_method);
501 kfree(key->poll_hotkey.poll_result);
502}
503static int
504init_hotkey_device(union acpi_hotkey *key, char **config_entry,
505 int std_num, int external_num)
506{
507 acpi_handle tmp_handle;
508 acpi_status status = AE_OK;
509
510 if (std_num < 0 || IS_POLL(std_num) || !key)
511 goto do_fail;
512
513 if (!config_entry[bus_handle] || !config_entry[action_handle]
514 || !config_entry[method])
515 goto do_fail;
516
517 key->link.hotkey_type = ACPI_HOTKEY_EVENT;
518 key->link.hotkey_standard_num = std_num;
519 key->event_hotkey.flag = 0;
520 key->event_hotkey.action_method = config_entry[method];
521
522 status = acpi_get_handle(NULL, config_entry[bus_handle],
523 &(key->event_hotkey.bus_handle));
524 if (ACPI_FAILURE(status))
525 goto do_fail_zero;
526 key->event_hotkey.external_hotkey_num = external_num;
527 status = acpi_get_handle(NULL, config_entry[action_handle],
528 &(key->event_hotkey.action_handle));
529 if (ACPI_FAILURE(status))
530 goto do_fail_zero;
531 status = acpi_get_handle(key->event_hotkey.action_handle,
532 config_entry[method], &tmp_handle);
533 if (ACPI_FAILURE(status))
534 goto do_fail_zero;
535 return AE_OK;
536do_fail_zero:
537 key->event_hotkey.action_method = NULL;
538do_fail:
539 return -ENODEV;
540}
541
542static int
543init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
544 int std_num)
545{
546 acpi_status status = AE_OK;
547 acpi_handle tmp_handle;
548
549 if (std_num < 0 || IS_EVENT(std_num) || !key)
550 goto do_fail;
551 if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
552 !config_entry[action_handle] || !config_entry[method])
553 goto do_fail;
554
555 key->link.hotkey_type = ACPI_HOTKEY_POLLING;
556 key->link.hotkey_standard_num = std_num;
557 key->poll_hotkey.flag = 0;
558 key->poll_hotkey.poll_method = config_entry[bus_method];
559 key->poll_hotkey.action_method = config_entry[method];
560
561 status = acpi_get_handle(NULL, config_entry[bus_handle],
562 &(key->poll_hotkey.poll_handle));
563 if (ACPI_FAILURE(status))
564 goto do_fail_zero;
565 status = acpi_get_handle(key->poll_hotkey.poll_handle,
566 config_entry[bus_method], &tmp_handle);
567 if (ACPI_FAILURE(status))
568 goto do_fail_zero;
569 status =
570 acpi_get_handle(NULL, config_entry[action_handle],
571 &(key->poll_hotkey.action_handle));
572 if (ACPI_FAILURE(status))
573 goto do_fail_zero;
574 status = acpi_get_handle(key->poll_hotkey.action_handle,
575 config_entry[method], &tmp_handle);
576 if (ACPI_FAILURE(status))
577 goto do_fail_zero;
578 key->poll_hotkey.poll_result =
579 kmalloc(sizeof(union acpi_object), GFP_KERNEL);
580 if (!key->poll_hotkey.poll_result)
581 goto do_fail_zero;
582 return AE_OK;
583
584do_fail_zero:
585 key->poll_hotkey.poll_method = NULL;
586 key->poll_hotkey.action_method = NULL;
587do_fail:
588 return -ENODEV;
589}
590
591static int hotkey_open_config(struct inode *inode, struct file *file)
592{
593 return (single_open
594 (file, hotkey_config_seq_show, PDE(inode)->data));
595}
596
597static int hotkey_poll_open_config(struct inode *inode, struct file *file)
598{
599 return (single_open
600 (file, hotkey_poll_config_seq_show, PDE(inode)->data));
601}
602
603static int hotkey_config_seq_show(struct seq_file *seq, void *offset)
604{
605 struct acpi_hotkey_list *hotkey_list = &global_hotkey_list;
606 struct list_head *entries;
607 char bus_name[ACPI_PATHNAME_MAX] = { 0 };
608 char action_name[ACPI_PATHNAME_MAX] = { 0 };
609 struct acpi_buffer bus = { ACPI_PATHNAME_MAX, bus_name };
610 struct acpi_buffer act = { ACPI_PATHNAME_MAX, action_name };
611
612
613 list_for_each(entries, hotkey_list->entries) {
614 union acpi_hotkey *key =
615 container_of(entries, union acpi_hotkey, entries);
616 if (key->link.hotkey_type == ACPI_HOTKEY_EVENT) {
617 acpi_get_name(key->event_hotkey.bus_handle,
618 ACPI_NAME_TYPE_MAX, &bus);
619 acpi_get_name(key->event_hotkey.action_handle,
620 ACPI_NAME_TYPE_MAX, &act);
621 seq_printf(seq, "%s:%s:%s:%d:%d\n", bus_name,
622 action_name,
623 key->event_hotkey.action_method,
624 key->link.hotkey_standard_num,
625 key->event_hotkey.external_hotkey_num);
626 }
627 }
628 seq_puts(seq, "\n");
629 return 0;
630}
631
632static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
633{
634 struct acpi_hotkey_list *hotkey_list = &global_hotkey_list;
635 struct list_head *entries;
636 char bus_name[ACPI_PATHNAME_MAX] = { 0 };
637 char action_name[ACPI_PATHNAME_MAX] = { 0 };
638 struct acpi_buffer bus = { ACPI_PATHNAME_MAX, bus_name };
639 struct acpi_buffer act = { ACPI_PATHNAME_MAX, action_name };
640
641
642 list_for_each(entries, hotkey_list->entries) {
643 union acpi_hotkey *key =
644 container_of(entries, union acpi_hotkey, entries);
645 if (key->link.hotkey_type == ACPI_HOTKEY_POLLING) {
646 acpi_get_name(key->poll_hotkey.poll_handle,
647 ACPI_NAME_TYPE_MAX, &bus);
648 acpi_get_name(key->poll_hotkey.action_handle,
649 ACPI_NAME_TYPE_MAX, &act);
650 seq_printf(seq, "%s:%s:%s:%s:%d\n", bus_name,
651 key->poll_hotkey.poll_method,
652 action_name,
653 key->poll_hotkey.action_method,
654 key->link.hotkey_standard_num);
655 }
656 }
657 seq_puts(seq, "\n");
658 return 0;
659}
660
661static int
662get_parms(char *config_record, int *cmd, char **config_entry,
663 int *internal_event_num, int *external_event_num)
664{
665/* the format of *config_record =
666 * "1:\d+:*" : "cmd:internal_event_num"
667 * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
668 * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
669 */
670	char *tmp, *tmp1;
671	int count;
671 int i;
672
673 sscanf(config_record, "%d", cmd);
674 if (*cmd == 1) {
675 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
676 2)
677 goto do_fail;
678 else
679 return (6);
680 }
681 tmp = strchr(config_record, ':');
682 if (!tmp)
683 goto do_fail;
684 tmp++;
685 for (i = 0; i < LAST_CONF_ENTRY; i++) {
686 tmp1 = strchr(tmp, ':');
687 if (!tmp1) {
688 goto do_fail;
689 }
690 count = tmp1 - tmp;
691 config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
692 if (!config_entry[i])
693 goto handle_failure;
694 strncpy(config_entry[i], tmp, count);
695 tmp = tmp1 + 1;
696 }
697 if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
698 goto handle_failure;
699 if (!IS_OTHERS(*internal_event_num)) {
700 return 6;
701 }
702handle_failure:
703 while (i-- > 0)
704 kfree(config_entry[i]);
705do_fail:
706 return -1;
707}
708
709/* count is the length of one input record */
710static ssize_t hotkey_write_config(struct file *file,
711 const char __user * buffer,
712 size_t count, loff_t * data)
713{
714 char *config_record = NULL;
715 char *config_entry[LAST_CONF_ENTRY];
716 int cmd, internal_event_num, external_event_num;
717 int ret = 0;
718 union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
719
720 if (!key)
721 return -ENOMEM;
722
723 config_record = kzalloc(count + 1, GFP_KERNEL);
724 if (!config_record) {
725 kfree(key);
726 return -ENOMEM;
727 }
728
729 if (copy_from_user(config_record, buffer, count)) {
730 kfree(config_record);
731 kfree(key);
732 printk(KERN_ERR PREFIX "Invalid data\n");
733 return -EINVAL;
734 }
735 ret = get_parms(config_record, &cmd, config_entry,
736 &internal_event_num, &external_event_num);
737 kfree(config_record);
738 if (ret != 6) {
739 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
740 return -EINVAL;
741 }
742
743 if (cmd == 1) {
744 union acpi_hotkey *tmp = NULL;
745 tmp = get_hotkey_by_event(&global_hotkey_list,
746 internal_event_num);
747 if (!tmp)
748 printk(KERN_ERR PREFIX "Invalid key\n");
749 else
750 memcpy(key, tmp, sizeof(union acpi_hotkey));
751 goto cont_cmd;
752 }
753 if (IS_EVENT(internal_event_num)) {
754 if (init_hotkey_device(key, config_entry,
755 internal_event_num, external_event_num))
756 goto init_hotkey_fail;
757 } else {
758 if (init_poll_hotkey_device(key, config_entry,
759 internal_event_num))
760 goto init_poll_hotkey_fail;
761 }
762cont_cmd:
763 switch (cmd) {
764 case 0:
765 if (get_hotkey_by_event(&global_hotkey_list,
766 key->link.hotkey_standard_num))
767 goto fail_out;
768 else
769 hotkey_add(key);
770 break;
771 case 1:
772 hotkey_remove(key);
773 break;
774 case 2:
775 /* key is kfree()ed if matched*/
776 if (hotkey_update(key))
777 goto fail_out;
778 break;
779 default:
780 goto fail_out;
781 break;
782 }
783 return count;
784
785init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
786 kfree(config_entry[bus_method]);
787 config_entry[bus_method] = NULL;
788init_hotkey_fail: /* failed init_hotkey_device */
789 kfree(config_entry[method]);
790fail_out:
791 kfree(config_entry[bus_handle]);
792 kfree(config_entry[action_handle]);
793	/* No double free: elements are set to NULL on the error paths */
794 if (IS_EVENT(internal_event_num)) {
795 if (config_entry[bus_method])
796 kfree(config_entry[bus_method]);
797 free_hotkey_buffer(key); /* frees [method] */
798 } else
799 free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
800 kfree(key);
801 printk(KERN_ERR PREFIX "invalid key\n");
802 return -EINVAL;
803}
804
805/*
806 * This function evaluates an ACPI method, given an int as a parameter. The
807 * method is searched within the scope of the handle, which can be NULL. The
808 * output of the method is written to output, which can also be NULL.
809 *
810 * returns 1 if the write is successful, 0 otherwise.
811 */
812static int write_acpi_int(acpi_handle handle, const char *method, int val,
813 struct acpi_buffer *output)
814{
815 struct acpi_object_list params; /* list of input parameters (an int here) */
816 union acpi_object in_obj; /* the only param we use */
817 acpi_status status;
818
819 params.count = 1;
820 params.pointer = &in_obj;
821 in_obj.type = ACPI_TYPE_INTEGER;
822 in_obj.integer.value = val;
823
824 status = acpi_evaluate_object(handle, (char *)method, &params, output);
825
826 return (status == AE_OK);
827}
828
829static int read_acpi_int(acpi_handle handle, const char *method,
830 union acpi_object *val)
831{
832 struct acpi_buffer output;
833 union acpi_object out_obj;
834 acpi_status status;
835
836 output.length = sizeof(out_obj);
837 output.pointer = &out_obj;
838
839 status = acpi_evaluate_object(handle, (char *)method, NULL, &output);
840 if (val) {
841 val->integer.value = out_obj.integer.value;
842 val->type = out_obj.type;
843 } else
844 printk(KERN_ERR PREFIX "null val pointer\n");
845 return ((status == AE_OK)
846 && (out_obj.type == ACPI_TYPE_INTEGER));
847}
848
849static union acpi_hotkey *get_hotkey_by_event(struct
850 acpi_hotkey_list
851 *hotkey_list, int event)
852{
853 struct list_head *entries;
854
855 list_for_each(entries, hotkey_list->entries) {
856 union acpi_hotkey *key =
857 container_of(entries, union acpi_hotkey, entries);
858 if (key->link.hotkey_standard_num == event) {
859 return (key);
860 }
861 }
862 return (NULL);
863}
864
865/*
866 * user-space AML method call interface:
867 * Call convention (four colon-separated integers, matching the
868 * sscanf below): "event_num:method_type:arg_type:value"
869 * example: echo "1:0:1:30" > /proc/acpi/hotkey/action
870 * Only one integer argument can be passed to the AML method.
871 */
872
873static ssize_t hotkey_execute_aml_method(struct file *file,
874 const char __user * buffer,
875 size_t count, loff_t * data)
876{
877 struct acpi_hotkey_list *hotkey_list = &global_hotkey_list;
878 char *arg;
879 int event, method_type, type, value;
880 union acpi_hotkey *key;
881
882
883 arg = kzalloc(count + 1, GFP_KERNEL);
884 if (!arg)
885 return -ENOMEM;
886
887 if (copy_from_user(arg, buffer, count)) {
888 kfree(arg);
889 printk(KERN_ERR PREFIX "Invalid argument 2\n");
890 return -EINVAL;
891 }
892
893 if (sscanf(arg, "%d:%d:%d:%d", &event, &method_type, &type, &value) !=
894 4) {
895 kfree(arg);
896 printk(KERN_ERR PREFIX "Invalid argument 3\n");
897 return -EINVAL;
898 }
899 kfree(arg);
900 if (type == ACPI_TYPE_INTEGER) {
901 key = get_hotkey_by_event(hotkey_list, event);
902 if (!key)
903 goto do_fail;
904 if (IS_EVENT(event))
905 write_acpi_int(key->event_hotkey.action_handle,
906 key->event_hotkey.action_method, value,
907 NULL);
908 else if (IS_POLL(event)) {
909 if (method_type == POLL_METHOD)
910 read_acpi_int(key->poll_hotkey.poll_handle,
911 key->poll_hotkey.poll_method,
912 key->poll_hotkey.poll_result);
913 else if (method_type == ACTION_METHOD)
914 write_acpi_int(key->poll_hotkey.action_handle,
915 key->poll_hotkey.action_method,
916 value, NULL);
917 else
918 goto do_fail;
919
920 }
921 } else {
922 printk(KERN_WARNING "Not supported\n");
923 return -EINVAL;
924 }
925 return count;
926 do_fail:
927 return -EINVAL;
928
929}
930
931static int __init hotkey_init(void)
932{
933 int result;
934 mode_t mode = S_IFREG | S_IRUGO | S_IWUGO;
935
936
937 if (acpi_disabled)
938 return -ENODEV;
939
940 if (acpi_specific_hotkey_enabled) {
941 printk("Using specific hotkey driver\n");
942 return -ENODEV;
943 }
944
945 hotkey_proc_dir = proc_mkdir(HOTKEY_PROC, acpi_root_dir);
946 if (!hotkey_proc_dir) {
947 return (-ENODEV);
948 }
949 hotkey_proc_dir->owner = THIS_MODULE;
950
951 hotkey_config =
952 create_proc_entry(HOTKEY_EV_CONFIG, mode, hotkey_proc_dir);
953 if (!hotkey_config) {
954 goto do_fail1;
955 } else {
956 hotkey_config->proc_fops = &hotkey_config_fops;
957 hotkey_config->data = &global_hotkey_list;
958 hotkey_config->owner = THIS_MODULE;
959 hotkey_config->uid = 0;
960 hotkey_config->gid = 0;
961 }
962
963 hotkey_poll_config =
964 create_proc_entry(HOTKEY_PL_CONFIG, mode, hotkey_proc_dir);
965 if (!hotkey_poll_config) {
966 goto do_fail2;
967 } else {
968 hotkey_poll_config->proc_fops = &hotkey_poll_config_fops;
969 hotkey_poll_config->data = &global_hotkey_list;
970 hotkey_poll_config->owner = THIS_MODULE;
971 hotkey_poll_config->uid = 0;
972 hotkey_poll_config->gid = 0;
973 }
974
975 hotkey_action = create_proc_entry(HOTKEY_ACTION, mode, hotkey_proc_dir);
976 if (!hotkey_action) {
977 goto do_fail3;
978 } else {
979 hotkey_action->proc_fops = &hotkey_action_fops;
980 hotkey_action->owner = THIS_MODULE;
981 hotkey_action->uid = 0;
982 hotkey_action->gid = 0;
983 }
984
985 hotkey_info = create_proc_entry(HOTKEY_INFO, mode, hotkey_proc_dir);
986 if (!hotkey_info) {
987 goto do_fail4;
988 } else {
989 hotkey_info->proc_fops = &hotkey_info_fops;
990 hotkey_info->owner = THIS_MODULE;
991 hotkey_info->uid = 0;
992 hotkey_info->gid = 0;
993 }
994
995 result = acpi_bus_register_driver(&hotkey_driver);
996 if (result < 0)
997 goto do_fail5;
998 global_hotkey_list.count = 0;
999 global_hotkey_list.entries = &hotkey_entries;
1000
1001 INIT_LIST_HEAD(&hotkey_entries);
1002
1003 return (0);
1004
1005 do_fail5:
1006 remove_proc_entry(HOTKEY_INFO, hotkey_proc_dir);
1007 do_fail4:
1008 remove_proc_entry(HOTKEY_ACTION, hotkey_proc_dir);
1009 do_fail3:
1010 remove_proc_entry(HOTKEY_PL_CONFIG, hotkey_proc_dir);
1011 do_fail2:
1012 remove_proc_entry(HOTKEY_EV_CONFIG, hotkey_proc_dir);
1013 do_fail1:
1014 remove_proc_entry(HOTKEY_PROC, acpi_root_dir);
1015 return (-ENODEV);
1016}
1017
1018static void __exit hotkey_exit(void)
1019{
1020 struct list_head *entries, *next;
1021
1022
1023 list_for_each_safe(entries, next, global_hotkey_list.entries) {
1024 union acpi_hotkey *key =
1025 container_of(entries, union acpi_hotkey, entries);
1026
1027 acpi_os_wait_events_complete(NULL);
1028 list_del(&key->link.entries);
1029 global_hotkey_list.count--;
1030 free_hotkey_device(key);
1031 }
1032 acpi_bus_unregister_driver(&hotkey_driver);
1033 remove_proc_entry(HOTKEY_EV_CONFIG, hotkey_proc_dir);
1034 remove_proc_entry(HOTKEY_PL_CONFIG, hotkey_proc_dir);
1035 remove_proc_entry(HOTKEY_ACTION, hotkey_proc_dir);
1036 remove_proc_entry(HOTKEY_INFO, hotkey_proc_dir);
1037 remove_proc_entry(HOTKEY_PROC, acpi_root_dir);
1038 return;
1039}
1040
1041module_init(hotkey_init);
1042module_exit(hotkey_exit);
diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
index 76ec8b63e69f..acab4a481897 100644
--- a/drivers/acpi/i2c_ec.c
+++ b/drivers/acpi/i2c_ec.c
@@ -27,18 +27,17 @@
27#define ACPI_EC_HC_COMPONENT 0x00080000 27#define ACPI_EC_HC_COMPONENT 0x00080000
28#define ACPI_EC_HC_CLASS "ec_hc_smbus" 28#define ACPI_EC_HC_CLASS "ec_hc_smbus"
29#define ACPI_EC_HC_HID "ACPI0001" 29#define ACPI_EC_HC_HID "ACPI0001"
30#define ACPI_EC_HC_DRIVER_NAME "ACPI EC HC smbus driver"
31#define ACPI_EC_HC_DEVICE_NAME "EC HC smbus" 30#define ACPI_EC_HC_DEVICE_NAME "EC HC smbus"
32 31
33#define _COMPONENT ACPI_EC_HC_COMPONENT 32#define _COMPONENT ACPI_EC_HC_COMPONENT
34 33
35ACPI_MODULE_NAME("acpi_smbus") 34ACPI_MODULE_NAME("i2c_ec");
36 35
37static int acpi_ec_hc_add(struct acpi_device *device); 36static int acpi_ec_hc_add(struct acpi_device *device);
38static int acpi_ec_hc_remove(struct acpi_device *device, int type); 37static int acpi_ec_hc_remove(struct acpi_device *device, int type);
39 38
40static struct acpi_driver acpi_ec_hc_driver = { 39static struct acpi_driver acpi_ec_hc_driver = {
41 .name = ACPI_EC_HC_DRIVER_NAME, 40 .name = "i2c_ec",
42 .class = ACPI_EC_HC_CLASS, 41 .class = ACPI_EC_HC_CLASS,
43 .ids = ACPI_EC_HC_HID, 42 .ids = ACPI_EC_HC_HID,
44 .ops = { 43 .ops = {
diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c
index c6144ca66638..1a0ed3dc409c 100644
--- a/drivers/acpi/ibm_acpi.c
+++ b/drivers/acpi/ibm_acpi.c
@@ -496,6 +496,10 @@ static int ibm_acpi_driver_init(void)
 	printk(IBM_INFO "%s v%s\n", IBM_DESC, IBM_VERSION);
 	printk(IBM_INFO "%s\n", IBM_URL);
 
+	if (ibm_thinkpad_ec_found)
+		printk(IBM_INFO "ThinkPad EC firmware %s\n",
+		       ibm_thinkpad_ec_found);
+
 	return 0;
 }
 
@@ -2617,7 +2621,7 @@ static void __init ibm_handle_init(char *name,
 	ibm_handle_init(#object, &object##_handle, *object##_parent, \
 		object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
 
-static int set_ibm_param(const char *val, struct kernel_param *kp)
+static int __init set_ibm_param(const char *val, struct kernel_param *kp)
 {
 	unsigned int i;
 
@@ -2659,7 +2663,8 @@ static void acpi_ibm_exit(void)
 	for (i = ARRAY_SIZE(ibms) - 1; i >= 0; i--)
 		ibm_exit(&ibms[i]);
 
-	remove_proc_entry(IBM_DIR, acpi_root_dir);
+	if (proc_dir)
+		remove_proc_entry(IBM_DIR, acpi_root_dir);
 
 	if (ibm_thinkpad_ec_found)
 		kfree(ibm_thinkpad_ec_found);
@@ -2696,11 +2701,6 @@ static int __init acpi_ibm_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
-	if (!acpi_specific_hotkey_enabled) {
-		printk(IBM_ERR "using generic hotkey driver\n");
-		return -ENODEV;
-	}
-
 	/* ec is required because many other handles are relative to it */
 	IBM_HANDLE_INIT(ec);
 	if (!ec_handle) {
@@ -2710,9 +2710,6 @@
 
 	/* Models with newer firmware report the EC in DMI */
 	ibm_thinkpad_ec_found = check_dmi_for_ec();
-	if (ibm_thinkpad_ec_found)
-		printk(IBM_INFO "ThinkPad EC firmware %s\n",
-		       ibm_thinkpad_ec_found);
 
 	/* these handles are not required */
 	IBM_HANDLE_INIT(vid);
@@ -2742,6 +2739,7 @@ static int __init acpi_ibm_init(void)
 	proc_dir = proc_mkdir(IBM_DIR, acpi_root_dir);
 	if (!proc_dir) {
 		printk(IBM_ERR "unable to create proc dir %s", IBM_DIR);
+		acpi_ibm_exit();
 		return -ENODEV;
 	}
 	proc_dir->owner = THIS_MODULE;
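The ibm_acpi.c hunks above work together: acpi_ibm_exit() is made safe to run against a partially initialized driver (the proc entry is only removed if it was actually created), so the init error path can simply call it instead of open-coding the unwind. A minimal standalone sketch of that idempotent-teardown pattern, with hypothetical resource names standing in for the driver's proc directory and EC string:

#include <stdio.h>
#include <stdlib.h>

static char *proc_dir;	/* stands in for the driver's proc directory */
static char *ec_name;	/* stands in for ibm_thinkpad_ec_found */

static void drv_exit(void)
{
	/* Each step checks that its resource exists, so this is safe
	 * to call no matter how far drv_init() got. */
	if (proc_dir) {
		free(proc_dir);
		proc_dir = NULL;
	}
	if (ec_name) {
		free(ec_name);
		ec_name = NULL;
	}
}

static int drv_init(void)
{
	ec_name = malloc(16);
	if (!ec_name)
		goto fail;

	proc_dir = malloc(16);
	if (!proc_dir)
		goto fail;	/* partial state: only ec_name allocated */

	return 0;
fail:
	drv_exit();		/* one unwind path covers every failure */
	return -1;
}

int main(void)
{
	if (drv_init())
		return 1;
	drv_exit();
	puts("init/exit ok");
	return 0;
}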
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 4a9faff4c01d..8fcd6a15517f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -33,7 +33,7 @@
 
 #define ACPI_NUMA	0x80000000
 #define _COMPONENT	ACPI_NUMA
-ACPI_MODULE_NAME("numa")
+ACPI_MODULE_NAME("numa");
 
 static nodemask_t nodes_found_map = NODE_MASK_NONE;
 #define PXM_INVAL	-1
@@ -45,12 +45,6 @@ int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
 int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
-extern int __init acpi_table_parse_madt_family(char *id,
-					       unsigned long madt_size,
-					       int entry_id,
-					       acpi_madt_entry_handler handler,
-					       unsigned int max_entries);
-
 int __cpuinit pxm_to_node(int pxm)
 {
 	if (pxm < 0)
@@ -208,9 +202,9 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
 
 int __init
 acpi_table_parse_srat(enum acpi_srat_type id,
-		      acpi_madt_entry_handler handler, unsigned int max_entries)
+		      acpi_table_entry_handler handler, unsigned int max_entries)
 {
-	return acpi_table_parse_madt_family(ACPI_SIG_SRAT,
+	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					    sizeof(struct acpi_table_srat), id,
					    handler, max_entries);
 }
@@ -220,9 +214,7 @@ int __init acpi_numa_init(void)
 	int result;
 
 	/* SRAT: Static Resource Affinity Table */
-	result = acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat);
-
-	if (result > 0) {
+	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
					       acpi_parse_processor_affinity,
					       NR_CPUS);
@@ -230,7 +222,7 @@
 	}
 
 	/* SLIT: System Locality Information Table */
-	result = acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
+	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
 
 	acpi_numa_arch_fixup();
 	return 0;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 0f6f3bcbc8eb..971eca4864fa 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -46,7 +46,7 @@
 #include <linux/efi.h>
 
 #define _COMPONENT		ACPI_OS_SERVICES
-ACPI_MODULE_NAME("osl")
+ACPI_MODULE_NAME("osl");
 #define PREFIX		"ACPI: "
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
@@ -68,9 +68,6 @@ EXPORT_SYMBOL(acpi_in_debugger);
 extern char line_buf[80];
 #endif				/*ENABLE_DEBUGGER */
 
-int acpi_specific_hotkey_enabled = TRUE;
-EXPORT_SYMBOL(acpi_specific_hotkey_enabled);
-
 static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
@@ -205,7 +202,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
 	if (phys > ULONG_MAX) {
 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
-		return 0;
+		return NULL;
 	}
 	if (acpi_gbl_permanent_mmap)
 		/*
@@ -890,26 +887,6 @@ u32 acpi_os_get_line(char *buffer)
 }
 #endif				/* ACPI_FUTURE_USAGE */
 
-/* Assumes no unreadable holes inbetween */
-u8 acpi_os_readable(void *ptr, acpi_size len)
-{
-#if defined(__i386__) || defined(__x86_64__)
-	char tmp;
-	return !__get_user(tmp, (char __user *)ptr)
-	    && !__get_user(tmp, (char __user *)ptr + len - 1);
-#endif
-	return 1;
-}
-
-#ifdef ACPI_FUTURE_USAGE
-u8 acpi_os_writable(void *ptr, acpi_size len)
-{
-	/* could do dummy write (racy) or a kernel page table lookup.
-	   The later may be difficult at early boot when kmap doesn't work yet. */
-	return 1;
-}
-#endif
-
 acpi_status acpi_os_signal(u32 function, void *info)
 {
 	switch (function) {
@@ -1012,14 +989,6 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
 
 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
 
-static int __init acpi_hotkey_setup(char *str)
-{
-	acpi_specific_hotkey_enabled = FALSE;
-	return 1;
-}
-
-__setup("acpi_generic_hotkey", acpi_hotkey_setup);
-
 /*
  * max_cstate is defined in the base kernel so modules can
  * change it w/o depending on the state of the processor module.
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 55f57a61c55e..028969370bbf 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -36,7 +36,7 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_bind")
+ACPI_MODULE_NAME("pci_bind");
 
 struct acpi_pci_data {
 	struct acpi_pci_id id;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index fe7d007833ad..dd3186abe07a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -38,7 +38,7 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_irq")
+ACPI_MODULE_NAME("pci_irq");
 
 static struct acpi_prt_list acpi_prt;
 static DEFINE_SPINLOCK(acpi_prt_lock);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 0f683c8c6fbc..acc594771379 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -44,10 +44,9 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT			ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_link")
+ACPI_MODULE_NAME("pci_link");
 #define ACPI_PCI_LINK_CLASS		"pci_irq_routing"
 #define ACPI_PCI_LINK_HID		"PNP0C0F"
-#define ACPI_PCI_LINK_DRIVER_NAME	"ACPI PCI Interrupt Link Driver"
 #define ACPI_PCI_LINK_DEVICE_NAME	"PCI Interrupt Link"
 #define ACPI_PCI_LINK_FILE_INFO		"info"
 #define ACPI_PCI_LINK_FILE_STATUS	"state"
@@ -56,7 +55,7 @@ static int acpi_pci_link_add(struct acpi_device *device);
 static int acpi_pci_link_remove(struct acpi_device *device, int type);
 
 static struct acpi_driver acpi_pci_link_driver = {
-	.name = ACPI_PCI_LINK_DRIVER_NAME,
+	.name = "pci_link",
 	.class = ACPI_PCI_LINK_CLASS,
 	.ids = ACPI_PCI_LINK_HID,
 	.ops = {
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 4ecf701687e8..ad4145a37786 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,17 +36,16 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_root")
+ACPI_MODULE_NAME("pci_root");
 #define ACPI_PCI_ROOT_CLASS		"pci_bridge"
 #define ACPI_PCI_ROOT_HID		"PNP0A03"
-#define ACPI_PCI_ROOT_DRIVER_NAME	"ACPI PCI Root Bridge Driver"
 #define ACPI_PCI_ROOT_DEVICE_NAME	"PCI Root Bridge"
 static int acpi_pci_root_add(struct acpi_device *device);
 static int acpi_pci_root_remove(struct acpi_device *device, int type);
 static int acpi_pci_root_start(struct acpi_device *device);
 
 static struct acpi_driver acpi_pci_root_driver = {
-	.name = ACPI_PCI_ROOT_DRIVER_NAME,
+	.name = "pci_root",
 	.class = ACPI_PCI_ROOT_CLASS,
 	.ids = ACPI_PCI_ROOT_HID,
 	.ops = {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index e768eb362932..00d6118ff1ef 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -45,10 +45,9 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_POWER_COMPONENT
-ACPI_MODULE_NAME("acpi_power")
+ACPI_MODULE_NAME("power");
 #define ACPI_POWER_COMPONENT		0x00800000
 #define ACPI_POWER_CLASS		"power_resource"
-#define ACPI_POWER_DRIVER_NAME		"ACPI Power Resource Driver"
 #define ACPI_POWER_DEVICE_NAME		"Power Resource"
 #define ACPI_POWER_FILE_INFO		"info"
 #define ACPI_POWER_FILE_STATUS		"state"
@@ -61,7 +60,7 @@ static int acpi_power_resume(struct acpi_device *device);
 static int acpi_power_open_fs(struct inode *inode, struct file *file);
 
 static struct acpi_driver acpi_power_driver = {
-	.name = ACPI_POWER_DRIVER_NAME,
+	.name = "power",
 	.class = ACPI_POWER_CLASS,
 	.ids = ACPI_POWER_HID,
 	.ops = {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 0079bc51082c..99d1516d1e70 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -60,7 +60,6 @@
 
 #define ACPI_PROCESSOR_COMPONENT	0x01000000
 #define ACPI_PROCESSOR_CLASS		"processor"
-#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
 #define ACPI_PROCESSOR_FILE_INFO	"info"
 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
@@ -74,10 +73,10 @@
 #define ACPI_STA_PRESENT	0x00000001
 
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_core");
 
- MODULE_AUTHOR("Paul Diefenbaugh");
-MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Processor Driver");
 MODULE_LICENSE("GPL");
 
 static int acpi_processor_add(struct acpi_device *device);
@@ -89,7 +88,7 @@ static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
 
 static struct acpi_driver acpi_processor_driver = {
-	.name = ACPI_PROCESSOR_DRIVER_NAME,
+	.name = "processor",
 	.class = ACPI_PROCESSOR_CLASS,
 	.ids = ACPI_PROCESSOR_HID,
 	.ops = {
@@ -404,7 +403,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
 	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
 		/* First check against id */
 		if (lsapic->processor_id == acpi_id) {
-			*apic_id = lsapic->id;
+			*apic_id = (lsapic->id << 8) | lsapic->eid;
 			return 1;
 		/* Check against optional uid */
 		} else if (entry->length >= 16 &&
@@ -1005,7 +1004,7 @@ static int __init acpi_processor_init(void)
 #ifdef CONFIG_SMP
 	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
 				(struct acpi_table_header **)&madt)))
-		madt = 0;
+		madt = NULL;
 #endif
 
 	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
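The map_lsapic_id() fix above packs both halves of the local SAPIC identifier into the returned value instead of dropping the EID; per the ACPI MADT LSAPIC entry layout, id and eid are one byte each. A standalone sketch of the packing:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the id/eid fields of an MADT LSAPIC entry. */
struct lsapic {
	uint8_t id;	/* local SAPIC id */
	uint8_t eid;	/* local SAPIC extended id */
};

static unsigned int lsapic_to_apic_id(const struct lsapic *ls)
{
	/* Both bytes matter: two CPUs can share an id and differ
	 * only in eid, so (id << 8) | eid keeps the pair unique. */
	return ((unsigned int)ls->id << 8) | ls->eid;
}

int main(void)
{
	struct lsapic a = { .id = 0x12, .eid = 0x34 };

	printf("apic_id = 0x%04x\n", lsapic_to_apic_id(&a));	/* 0x1234 */
	return 0;
}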
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 6c6751b1405b..60773005b8af 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -39,6 +39,25 @@
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
 #include <linux/latency.h>
+#include <linux/clockchips.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -48,9 +67,8 @@
 
 #define ACPI_PROCESSOR_COMPONENT        0x01000000
 #define ACPI_PROCESSOR_CLASS            "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
 #define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
@@ -238,6 +256,81 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 	}
 }
 
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen, but other vendors
+ * are affected too. We pick the most conservative approach: we assume
+ * that the local APIC stops in both C2 and C3.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cx)
+{
+	struct acpi_processor_power *pwr = &pr->power;
+
+	/*
+	 * Check, if one of the previous states already marked the lapic
+	 * unstable
+	 */
+	if (pwr->timer_broadcast_on_state < state)
+		return;
+
+	if (cx->type >= ACPI_STATE_C2)
+		pr->power.timer_broadcast_on_state = state;
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+	unsigned long reason;
+
+	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &pr->id);
+#else
+	cpumask_t mask = cpumask_of_cpu(pr->id);
+
+	if (pr->power.timer_broadcast_on_state < INT_MAX)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+	else
+		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+#endif
+}
+
+/* Power(C) State timer broadcast control */
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+				       struct acpi_processor_cx *cx,
+				       int broadcast)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+	int state = cx - pr->power.states;
+
+	if (state >= pr->power.timer_broadcast_on_state) {
+		unsigned long reason;
+
+		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
+			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
+		clockevents_notify(reason, &pr->id);
+	}
+#endif
+}
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cstate) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+				       struct acpi_processor_cx *cx,
+				       int broadcast)
+{
+}
+
+#endif
+
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -382,6 +475,7 @@ static void acpi_processor_idle(void)
 		/* Get start time (ticks) */
 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 		/* Invoke C2 */
+		acpi_state_timer_broadcast(pr, cx, 1);
 		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -396,6 +490,7 @@
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
+		acpi_state_timer_broadcast(pr, cx, 0);
 		break;
 
 	case ACPI_STATE_C3:
@@ -417,6 +512,7 @@
 		/* Get start time (ticks) */
 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 		/* Invoke C3 */
+		acpi_state_timer_broadcast(pr, cx, 1);
 		acpi_cstate_enter(cx);
 		/* Get end time (ticks) */
 		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -436,6 +532,7 @@
 		/* Compute time (ticks) that we were actually asleep */
 		sleep_ticks =
 		    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
+		acpi_state_timer_broadcast(pr, cx, 0);
 		break;
 
 	default:
@@ -904,11 +1001,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 	unsigned int i;
 	unsigned int working = 0;
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	int timer_broadcast = 0;
-	cpumask_t mask = cpumask_of_cpu(pr->id);
-	on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
+	pr->power.timer_broadcast_on_state = INT_MAX;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 		struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -920,21 +1013,14 @@
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-			/* Some AMD systems fake C3 as C2, but still
-			   have timer troubles */
-			if (cx->valid &&
-			    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-				timer_broadcast++;
-#endif
+			if (cx->valid)
+				acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
 			if (cx->valid)
-				timer_broadcast++;
-#endif
+				acpi_timer_check_state(i, pr, cx);
 			break;
 		}
 
@@ -942,10 +1028,7 @@
 			working++;
 	}
 
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	if (timer_broadcast)
-		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
+	acpi_propagate_timer_broadcast(pr);
 
 	return (working);
 }
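The new helpers above reduce the per-C-state bookkeeping to a single number: timer_broadcast_on_state holds the shallowest (lowest-numbered) state at which the local APIC timer must be assumed stopped, with INT_MAX meaning "never". A standalone sketch of that bookkeeping, using hypothetical state types and omitting the clockevents plumbing:

#include <stdio.h>
#include <limits.h>

enum cstate_type { STATE_C1 = 1, STATE_C2, STATE_C3 };

static int timer_broadcast_on_state = INT_MAX;	/* INT_MAX: lapic always usable */

static void timer_check_state(int state, enum cstate_type type)
{
	/* An earlier (shallower) state already marked the lapic unstable. */
	if (timer_broadcast_on_state < state)
		return;

	/* Conservative: assume the lapic timer stops in C2 as well as C3. */
	if (type >= STATE_C2)
		timer_broadcast_on_state = state;
}

static int need_broadcast(int state)
{
	/* Entering this state or anything deeper needs a broadcast timer. */
	return state >= timer_broadcast_on_state;
}

int main(void)
{
	timer_check_state(1, STATE_C1);
	timer_check_state(2, STATE_C2);
	timer_check_state(3, STATE_C3);

	printf("broadcast from state %d; state 1 -> %d, state 3 -> %d\n",
	       timer_broadcast_on_state, need_broadcast(1), need_broadcast(3));
	return 0;
}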
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 058f13cf3b79..2f2e7964226d 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -44,10 +44,9 @@
 
 #define ACPI_PROCESSOR_COMPONENT	0x01000000
 #define ACPI_PROCESSOR_CLASS		"processor"
-#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
 #define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_perflib");
 
 static DEFINE_MUTEX(performance_mutex);
 
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 40fecd67ad83..06e6f3fb8825 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -41,9 +41,8 @@
 
 #define ACPI_PROCESSOR_COMPONENT	0x01000000
 #define ACPI_PROCESSOR_CLASS		"processor"
-#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_thermal");
 
 /* --------------------------------------------------------------------------
                                  Limit Interface
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 89dff3639abe..b33486009f41 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -41,9 +41,8 @@
 
 #define ACPI_PROCESSOR_COMPONENT	0x01000000
 #define ACPI_PROCESSOR_CLASS		"processor"
-#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_throttling");
 
 /* --------------------------------------------------------------------------
                               Throttling Control
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index f58fc7447ab4..1eab2034c9a5 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -59,7 +59,6 @@ extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 #define ACPI_AC_CLASS			"ac_adapter"
 #define ACPI_BATTERY_CLASS		"battery"
 #define ACPI_SBS_HID			"ACPI0002"
-#define ACPI_SBS_DRIVER_NAME		"ACPI Smart Battery System Driver"
 #define ACPI_SBS_DEVICE_NAME		"Smart Battery System"
 #define ACPI_SBS_FILE_INFO		"info"
 #define ACPI_SBS_FILE_STATE		"state"
@@ -78,7 +77,7 @@ extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 #define MAX_SBS_BAT			4
 #define MAX_SMBUS_ERR			1
 
-ACPI_MODULE_NAME("acpi_sbs");
+ACPI_MODULE_NAME("sbs");
 
 MODULE_AUTHOR("Rich Townsend");
 MODULE_DESCRIPTION("Smart Battery System ACPI interface driver");
@@ -110,7 +109,7 @@ static void acpi_battery_smbus_err_handler(struct acpi_ec_smbus *smbus);
 static void acpi_sbs_update_queue(void *data);
 
 static struct acpi_driver acpi_sbs_driver = {
-	.name = ACPI_SBS_DRIVER_NAME,
+	.name = "sbs",
 	.class = ACPI_SBS_CLASS,
 	.ids = ACPI_SBS_HID,
 	.ops = {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 64f26db10c8e..bb0e0da39fb1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,13 +11,12 @@
 #include <acpi/acinterp.h>	/* for acpi_ex_eisa_id_to_string() */
 
 #define _COMPONENT		ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("scan")
+ACPI_MODULE_NAME("scan");
 #define STRUCT_TO_INT(s)	(*((int*)&s))
 extern struct acpi_device *acpi_root;
 
 #define ACPI_BUS_CLASS		"system_bus"
 #define ACPI_BUS_HID		"ACPI_BUS"
-#define ACPI_BUS_DRIVER_NAME	"ACPI Bus Driver"
 #define ACPI_BUS_DEVICE_NAME	"System Bus"
 
 static LIST_HEAD(acpi_device_list);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 7147b0bdab0a..83a8d3097904 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -31,14 +31,13 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("acpi_system")
+ACPI_MODULE_NAME("system");
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "acpi."
 
 #define ACPI_SYSTEM_CLASS		"system"
-#define ACPI_SYSTEM_DRIVER_NAME		"ACPI System Driver"
 #define ACPI_SYSTEM_DEVICE_NAME		"System"
 #define ACPI_SYSTEM_FILE_INFO		"info"
 #define ACPI_SYSTEM_FILE_EVENT		"event"
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 45bd17313c4a..849e2c361804 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -169,40 +169,40 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header * header)
 
 
 int __init
-acpi_table_parse_madt_family(char *id,
-			     unsigned long madt_size,
+acpi_table_parse_entries(char *id,
+			     unsigned long table_size,
 			     int entry_id,
-			     acpi_madt_entry_handler handler,
+			     acpi_table_entry_handler handler,
 			     unsigned int max_entries)
 {
-	struct acpi_table_header *madt = NULL;
+	struct acpi_table_header *table_header = NULL;
 	struct acpi_subtable_header *entry;
 	unsigned int count = 0;
-	unsigned long madt_end;
+	unsigned long table_end;
 
 	if (!handler)
 		return -EINVAL;
 
-	/* Locate the MADT (if exists). There should only be one. */
-	acpi_get_table(id, 0, &madt);
+	/* Locate the table (if exists). There should only be one. */
+	acpi_get_table(id, 0, &table_header);
 
-	if (!madt) {
+	if (!table_header) {
 		printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
 		return -ENODEV;
 	}
 
-	madt_end = (unsigned long)madt + madt->length;
+	table_end = (unsigned long)table_header + table_header->length;
 
 	/* Parse all entries looking for a match. */
 
 	entry = (struct acpi_subtable_header *)
-	    ((unsigned long)madt + madt_size);
+	    ((unsigned long)table_header + table_size);
 
 	while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
-	       madt_end) {
+	       table_end) {
 		if (entry->type == entry_id
 		    && (!max_entries || count++ < max_entries))
-			if (handler(entry, madt_end))
+			if (handler(entry, table_end))
 				return -EINVAL;
 
 		entry = (struct acpi_subtable_header *)
@@ -218,13 +218,22 @@ acpi_table_parse_madt_family(char *id,
 
 int __init
 acpi_table_parse_madt(enum acpi_madt_type id,
-		      acpi_madt_entry_handler handler, unsigned int max_entries)
+		      acpi_table_entry_handler handler, unsigned int max_entries)
 {
-	return acpi_table_parse_madt_family(ACPI_SIG_MADT,
+	return acpi_table_parse_entries(ACPI_SIG_MADT,
 					    sizeof(struct acpi_table_madt), id,
 					    handler, max_entries);
 }
 
+/**
+ * acpi_table_parse - find table with @id, run @handler on it
+ *
+ * @id: table id to find
+ * @handler: handler to run
+ *
+ * Scan the ACPI System Descriptor Table (SDT) for a table matching @id,
+ * run @handler on it.  Return 0 if the table was found, 1 if not.
+ */
 int __init acpi_table_parse(char *id, acpi_table_handler handler)
 {
 	struct acpi_table_header *table = NULL;
@@ -234,9 +243,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 	acpi_get_table(id, 0, &table);
 	if (table) {
 		handler(table);
-		return 1;
-	} else
 		return 0;
+	} else
+		return 1;
 }
 
 /*
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 807978d5381a..417ef5fa7666 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -338,9 +338,9 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
 	int i;
 	acpi_status status = AE_NOT_EXIST;
 
-	ACPI_FUNCTION_TRACE(acpi_unload_table);
+	ACPI_FUNCTION_TRACE(acpi_unload_table_id);
 
-	/* Find table from the requested type list */
+	/* Find table in the global table list */
 	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
 		if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
 			continue;
@@ -352,8 +352,9 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
 		 * simply a position within the hierarchy
 		 */
 		acpi_tb_delete_namespace_by_owner(i);
-		acpi_tb_release_owner_id(i);
+		status = acpi_tb_release_owner_id(i);
 		acpi_tb_set_table_loaded_flag(i, FALSE);
+		break;
 	}
 	return_ACPI_STATUS(status);
 }
@@ -408,7 +409,7 @@ acpi_get_table(char *signature,
 	}
 
 	if (!acpi_gbl_permanent_mmap) {
-		acpi_gbl_root_table_list.tables[i].pointer = 0;
+		acpi_gbl_root_table_list.tables[i].pointer = NULL;
 	}
 
 	return (status);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 986afd470a14..15022bc86336 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -47,7 +47,6 @@
 
 #define ACPI_THERMAL_COMPONENT		0x04000000
 #define ACPI_THERMAL_CLASS		"thermal_zone"
-#define ACPI_THERMAL_DRIVER_NAME	"ACPI Thermal Zone Driver"
 #define ACPI_THERMAL_DEVICE_NAME	"Thermal Zone"
 #define ACPI_THERMAL_FILE_STATE		"state"
 #define ACPI_THERMAL_FILE_TEMPERATURE	"temperature"
@@ -71,10 +70,10 @@
 #define CELSIUS_TO_KELVIN(t)	((t+273)*10)
 
 #define _COMPONENT		ACPI_THERMAL_COMPONENT
-ACPI_MODULE_NAME("acpi_thermal")
+ACPI_MODULE_NAME("thermal");
 
 MODULE_AUTHOR("Paul Diefenbaugh");
-MODULE_DESCRIPTION(ACPI_THERMAL_DRIVER_NAME);
+MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
 MODULE_LICENSE("GPL");
 
 static int tzp;
@@ -99,7 +98,7 @@ static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
 					  size_t, loff_t *);
 
 static struct acpi_driver acpi_thermal_driver = {
-	.name = ACPI_THERMAL_DRIVER_NAME,
+	.name = "thermal",
 	.class = ACPI_THERMAL_CLASS,
 	.ids = ACPI_THERMAL_HID,
 	.ops = {
@@ -270,7 +269,7 @@ static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "Polling frequency set to %lu seconds\n",
-			  tz->polling_frequency));
+			  tz->polling_frequency/10));
 
 	return 0;
 }
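The one-line thermal.c change is a units fix: tz->polling_frequency appears to be kept in tenths of a second (the convention ACPI uses for _TZP), so printing the raw value overstated the interval tenfold. A trivial illustration:

#include <stdio.h>

int main(void)
{
	unsigned long polling_frequency = 300;	/* tenths of a second, i.e. 30 s */

	/* before the fix this printed "300 seconds"; after, "30 seconds" */
	printf("Polling frequency set to %lu seconds\n", polling_frequency / 10);
	return 0;
}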
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index d9b651ffcdc0..faf8a5232d8e 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -125,7 +125,7 @@ static int write_acpi_int(const char *methodName, int val)
 	union acpi_object in_objs[1];
 	acpi_status status;
 
-	params.count = sizeof(in_objs) / sizeof(in_objs[0]);
+	params.count = ARRAY_SIZE(in_objs);
 	params.pointer = in_objs;
 	in_objs[0].type = ACPI_TYPE_INTEGER;
 	in_objs[0].integer.value = val;
@@ -561,10 +561,6 @@ static int __init toshiba_acpi_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
-	if (!acpi_specific_hotkey_enabled) {
-		printk(MY_INFO "Using generic hotkey driver\n");
-		return -ENODEV;
-	}
 	/* simple device detection: look for HCI method */
 	if (is_valid_acpi_path(METHOD_HCI_1))
 		method_hci = METHOD_HCI_1;
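The write_acpi_int() change above is purely cosmetic: ARRAY_SIZE(x) is the kernel's macro for sizeof(x)/sizeof(x[0]), the element count of a true array computed at compile time. A standalone equivalent:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	int in_objs[3];

	/* Only valid on arrays, not pointers: sizeof a pointer would
	 * yield the pointer size, not the element count. */
	printf("count = %zu\n", ARRAY_SIZE(in_objs));	/* prints 3 */
	return 0;
}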
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index f777cebdc46d..673a0caa4073 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -170,7 +170,6 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 			acpi_os_delete_mutex(object->mutex.os_mutex);
 			acpi_gbl_global_lock_mutex = NULL;
 		} else {
-			acpi_ex_unlink_mutex(object);
 			acpi_os_delete_mutex(object->mutex.os_mutex);
 		}
 		break;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 68a809fa7b19..34f157571080 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -31,7 +31,7 @@
 #include <acpi/acpi_drivers.h>
 
 #define _COMPONENT		ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("acpi_utils")
+ACPI_MODULE_NAME("utils");
 
 /* --------------------------------------------------------------------------
                             Object Evaluation Helpers
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e0b97add8c63..bf525cca3b63 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -40,7 +40,6 @@
 
 #define ACPI_VIDEO_COMPONENT		0x08000000
 #define ACPI_VIDEO_CLASS		"video"
-#define ACPI_VIDEO_DRIVER_NAME		"ACPI Video Driver"
 #define ACPI_VIDEO_BUS_NAME		"Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME		"Video Device"
 #define ACPI_VIDEO_NOTIFY_SWITCH	0x80
@@ -65,17 +64,17 @@
 #define ACPI_VIDEO_DISPLAY_LCD	4
 
 #define _COMPONENT		ACPI_VIDEO_COMPONENT
-ACPI_MODULE_NAME("acpi_video")
+ACPI_MODULE_NAME("video");
 
- MODULE_AUTHOR("Bruno Ducrot");
-MODULE_DESCRIPTION(ACPI_VIDEO_DRIVER_NAME);
+MODULE_AUTHOR("Bruno Ducrot");
+MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
 
 static struct acpi_driver acpi_video_bus = {
-	.name = ACPI_VIDEO_DRIVER_NAME,
+	.name = "video",
 	.class = ACPI_VIDEO_CLASS,
 	.ids = ACPI_VIDEO_HID,
 	.ops = {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 25d8d3f778a1..2cf8251728d2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1410,7 +1410,16 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	}
 
 	tf.protocol = ATA_PROT_PIO;
-	tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
+
+	/* Some devices choke if TF registers contain garbage.  Make
+	 * sure those are properly initialized.
+	 */
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+
+	/* Device presence detection is unreliable on some
+	 * controllers.  Always poll IDENTIFY if available.
+	 */
+	tf.flags |= ATA_TFLAG_POLLING;
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 4223e10de6a0..98c1fee4b305 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -89,9 +89,10 @@ static int probe_all;		/* Set to check all ISA port ranges */
 static int ht6560a;		/* HT 6560A on primary 1, secondary 2, both 3 */
 static int ht6560b;		/* HT 6560A on primary 1, secondary 2, both 3 */
 static int opti82c611a;		/* Opti82c611A on primary 1, secondary 2, both 3 */
 static int opti82c46x;		/* Opti 82c465MV present (pri/sec autodetect) */
 static int autospeed;		/* Chip present which snoops speed changes */
 static int pio_mask = 0x1F;	/* PIO range for autospeed devices */
+static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
 
 /**
  *	legacy_set_mode		-	mode setting
@@ -113,6 +114,7 @@ static int legacy_set_mode(struct ata_port *ap, struct ata_device **unused)
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *dev = &ap->device[i];
 		if (ata_dev_enabled(dev)) {
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
 			dev->pio_mode = XFER_PIO_0;
 			dev->xfer_mode = XFER_PIO_0;
 			dev->xfer_shift = ATA_SHIFT_PIO;
@@ -695,6 +697,7 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
 	void __iomem *io_addr, *ctrl_addr;
 	int pio_modes = pio_mask;
 	u32 mask = (1 << port);
+	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
 	int ret;
 
 	pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
@@ -715,6 +718,7 @@
 	if (ht6560a & mask) {
 		ops = &ht6560a_port_ops;
 		pio_modes = 0x07;
+		iordy = ATA_FLAG_NO_IORDY;
 	}
 	if (ht6560b & mask) {
 		ops = &ht6560b_port_ops;
@@ -750,6 +754,7 @@
 			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
 			pio_modes = 0x07;
 			ops = &pdc20230_port_ops;
+			iordy = ATA_FLAG_NO_IORDY;
 			udelay(100);
 			inb(0x1F5);
 		} else {
@@ -767,6 +772,7 @@
 	/* Chip does mode setting by command snooping */
 	if (ops == &legacy_port_ops && (autospeed & mask))
 		ops = &simple_port_ops;
+
 	memset(&ae, 0, sizeof(struct ata_probe_ent));
 	INIT_LIST_HEAD(&ae.node);
 	ae.dev = &pdev->dev;
@@ -776,7 +782,7 @@
 	ae.pio_mask = pio_modes;
 	ae.irq = irq;
 	ae.irq_flags = 0;
-	ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
+	ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST|iordy;
 	ae.port[0].cmd_addr = io_addr;
 	ae.port[0].altstatus_addr = ctrl_addr;
 	ae.port[0].ctl_addr = ctrl_addr;
@@ -945,6 +951,7 @@ module_param(ht6560b, int, 0);
 module_param(opti82c611a, int, 0);
 module_param(opti82c46x, int, 0);
 module_param(pio_mask, int, 0);
+module_param(iordy_mask, int, 0);
 
 module_init(legacy_init);
 module_exit(legacy_exit);
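iordy_mask follows the same convention as the other pata_legacy parameters: bit N of the mask corresponds to legacy port N, and a cleared bit forces ATA_FLAG_NO_IORDY for that port. A standalone sketch of deriving a per-port flag from such a mask:

#include <stdio.h>
#include <stdint.h>

#define FLAG_NO_IORDY 0x01	/* stand-in for ATA_FLAG_NO_IORDY */

static uint32_t port_flags(uint32_t iordy_mask, int port)
{
	uint32_t mask = 1u << port;

	/* Bit set: IORDY allowed (no extra flag); bit clear: disable it. */
	return (iordy_mask & mask) ? 0 : FLAG_NO_IORDY;
}

int main(void)
{
	uint32_t iordy_mask = 0xFFFFFFFD;	/* e.g. disable IORDY on port 1 */
	int port;

	for (port = 0; port < 4; port++)
		printf("port %d flags: 0x%x\n", port, port_flags(iordy_mask, port));
	return 0;
}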
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 1b3b4ed8eb19..4362141976ad 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -264,16 +264,18 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
 	if (type == 6580) {
 		ae.port_ops = &qdi6580_port_ops;
 		ae.pio_mask = 0x1F;
+		ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
 	} else {
 		ae.port_ops = &qdi6500_port_ops;
 		ae.pio_mask = 0x07;	/* Actually PIO3 !IORDY is possible */
+		ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
+				ATA_FLAG_NO_IORDY;
 	}
 
 	ae.sht = &qdi_sht;
 	ae.n_ports = 1;
 	ae.irq = irq;
 	ae.irq_flags = 0;
-	ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
 	ae.port[0].cmd_addr = io_addr;
 	ae.port[0].altstatus_addr = ctl_addr;
 	ae.port[0].ctl_addr = ctl_addr;
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index f2fa158d07ca..96e890fd645b 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -187,7 +187,9 @@ static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	udelay(100);
 	sl82c105_reset_engine(ap);
+	udelay(100);
 
 	/* Set the clocks for DMA */
 	sl82c105_configure_dmamode(ap, qc->dev);
@@ -216,6 +218,7 @@ static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
 
 	ata_bmdma_stop(qc);
 	sl82c105_reset_engine(ap);
+	udelay(100);
 
 	/* This will redo the initial setup of the DMA device to matching
 	   PIO timings */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 095ef1b2cd0e..ab92f208dae2 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -827,7 +827,8 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 			/* freeze if hotplugged or controller error */
 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
 					       NV_ADMA_STAT_HOTUNPLUG |
-					       NV_ADMA_STAT_TIMEOUT))) {
+					       NV_ADMA_STAT_TIMEOUT |
+					       NV_ADMA_STAT_SERROR))) {
 				struct ata_eh_info *ehi = &ap->eh_info;
 
 				ata_ehi_clear_desc(ehi);
@@ -841,6 +842,9 @@
 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 					ata_ehi_hotplugged(ehi);
 					ata_ehi_push_desc(ehi, ": hot unplug");
+				} else if (status & NV_ADMA_STAT_SERROR) {
+					/* let libata analyze SError and figure out the cause */
+					ata_ehi_push_desc(ehi, ": SError");
 				}
 				ata_port_freeze(ap);
 				continue;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index b2e2e695c92e..cf9ed8c39301 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -119,9 +119,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static irqreturn_t pdc_interrupt (int irq, void *dev_instance);
-static void pdc_eng_timeout(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
-static void pdc_pata_phy_reset(struct ata_port *ap);
 static void pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
@@ -215,12 +213,12 @@ static const struct ata_port_operations pdc_pata_ops = {
 	.dev_select		= ata_std_dev_select,
 	.check_atapi_dma	= pdc_check_atapi_dma,
 
-	.phy_reset		= pdc_pata_phy_reset,
-
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.error_handler		= pdc_error_handler,
 	.data_xfer		= ata_data_xfer,
-	.eng_timeout		= pdc_eng_timeout,
 	.irq_handler		= pdc_interrupt,
 	.irq_clear		= pdc_irq_clear,
 	.irq_on			= ata_irq_on,
@@ -253,7 +251,7 @@ static const struct ata_port_info pdc_port_info[] = {
 	/* board_20619 */
 	{
 		.sht		= &pdc_ata_sht,
-		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -389,14 +387,6 @@ static void pdc_pata_cbl_detect(struct ata_port *ap)
 		ap->cbl = ATA_CBL_PATA80;
 }
 
-static void pdc_pata_phy_reset(struct ata_port *ap)
-{
-	pdc_pata_cbl_detect(ap);
-	pdc_reset_port(ap);
-	ata_port_probe(ap);
-	ata_bus_reset(ap);
-}
-
 static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
 	if (sc_reg > SCR_CONTROL || ap->cbl != ATA_CBL_SATA)
@@ -564,6 +554,13 @@ static void pdc_thaw(struct ata_port *ap)
 	readl(mmio + PDC_CTLSTAT); /* flush */
 }
 
+static int pdc_pre_reset(struct ata_port *ap)
+{
+	if (!sata_scr_valid(ap))
+		pdc_pata_cbl_detect(ap);
+	return ata_std_prereset(ap);
+}
+
 static void pdc_error_handler(struct ata_port *ap)
 {
 	ata_reset_fn_t hardreset;
@@ -576,7 +573,7 @@ static void pdc_error_handler(struct ata_port *ap)
 		hardreset = sata_std_hardreset;
 
 	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
+	ata_do_eh(ap, pdc_pre_reset, ata_std_softreset, hardreset,
 		  ata_std_postreset);
 }
 
@@ -592,43 +589,6 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
 		pdc_reset_port(ap);
 }
 
-static void pdc_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host *host = ap->host;
-	u8 drv_stat;
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
-
-	DPRINTK("ENTER\n");
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-
-	switch (qc->tf.protocol) {
-	case ATA_PROT_DMA:
-	case ATA_PROT_NODATA:
-		ata_port_printk(ap, KERN_ERR, "command timeout\n");
-		drv_stat = ata_wait_idle(ap);
-		qc->err_mask |= __ac_err_mask(drv_stat);
-		break;
-
-	default:
-		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-
-		ata_port_printk(ap, KERN_ERR,
-				"unknown timeout, cmd 0x%x stat 0x%x\n",
-				qc->tf.command, drv_stat);
-
-		qc->err_mask |= ac_err_mask(drv_stat);
-		break;
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-	ata_eh_qc_complete(qc);
-	DPRINTK("EXIT\n");
-}
-
 static inline unsigned int pdc_host_intr( struct ata_port *ap,
 					  struct ata_queued_cmd *qc)
 {
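This sata_promise conversion swaps the legacy probe/timeout hooks (.phy_reset, .eng_timeout) for the newer error-handling trio (.freeze, .thaw, .error_handler); the shape of the change is just repointing an ops table. A minimal standalone sketch of that dispatch style, with hypothetical handlers whose comments describe the usual roles of these hooks:

#include <stdio.h>

struct port;	/* opaque device context */

struct port_ops {
	void (*freeze)(struct port *p);		/* mask port interrupts */
	void (*thaw)(struct port *p);		/* re-enable interrupts */
	void (*error_handler)(struct port *p);	/* reset and recover */
};

static void my_freeze(struct port *p)        { (void)p; puts("freeze"); }
static void my_thaw(struct port *p)          { (void)p; puts("thaw"); }
static void my_error_handler(struct port *p) { (void)p; puts("recover"); }

static const struct port_ops my_ops = {
	.freeze		= my_freeze,
	.thaw		= my_thaw,
	.error_handler	= my_error_handler,
};

int main(void)
{
	/* The core calls through the table; the driver only fills it in. */
	my_ops.freeze(NULL);
	my_ops.error_handler(NULL);
	my_ops.thaw(NULL);
	return 0;
}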
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 3d9daf231115..2fd037bde090 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -346,6 +346,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
 	struct ata_probe_ent *probe_ent;
 	void __iomem *mmio_base;
 	int rc;
+	u8 cls;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -383,9 +384,12 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
 	INIT_LIST_HEAD(&probe_ent->node);
 
 	/*
-	 * Due to a bug in the chip, the default cache line size can't be used
+	 * Due to a bug in the chip, the default cache line size can't be
+	 * used (unless the default is non-zero).
 	 */
-	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
+	if (cls == 0x00)
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
 
 	if (pci_enable_msi(pdev) == 0)
 		pci_intx(pdev, 0);
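The sata_vsc change is a read-before-write fixup: instead of unconditionally stomping PCI_CACHE_LINE_SIZE, the driver now respects whatever firmware programmed and only supplies a value when the register was left at its broken power-on default of zero. A minimal sketch of the same probe-time pattern (pci_read_config_byte/pci_write_config_byte are the standard PCI config-space accessors; the helper name is made up):

static void my_fixup_cache_line_size(struct pci_dev *pdev)
{
	u8 cls;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
	if (cls == 0x00)	/* untouched default: supply a sane value */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
}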
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 3e581603d0a8..a0d04a23dacd 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,6 +1,7 @@
 agpgart-y := backend.o frontend.o generic.o isoch.o
 
 obj-$(CONFIG_AGP)		+= agpgart.o
+obj-$(CONFIG_COMPAT)		+= compat_ioctl.o
 obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
 obj-$(CONFIG_AGP_ATI)		+= ati-agp.o
 obj-$(CONFIG_AGP_AMD)		+= amd-k7-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 1d59e2a5b9aa..9bd68d9f0f59 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -114,6 +114,7 @@ struct agp_bridge_driver {
 	void (*free_by_type)(struct agp_memory *);
 	void *(*agp_alloc_page)(struct agp_bridge_data *);
 	void (*agp_destroy_page)(void *);
+	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 };
 
 struct agp_bridge_data {
@@ -218,6 +219,7 @@ struct agp_bridge_data {
 #define I810_PTE_MAIN_UNCACHED	0x00000000
 #define I810_PTE_LOCAL		0x00000002
 #define I810_PTE_VALID		0x00000001
+#define I830_PTE_SYSTEM_CACHED	0x00000006
 #define I810_SMRAM_MISCC	0x70
 #define I810_GFX_MEM_WIN_SIZE	0x00010000
 #define I810_GFX_MEM_WIN_32M	0x00010000
@@ -270,8 +272,16 @@ void global_cache_flush(void);
 void get_agp_version(struct agp_bridge_data *bridge);
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
 	unsigned long addr, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+	int type);
 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
 
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+void agp_free_page_array(struct agp_memory *mem);
+
+
 /* generic routines for agp>=3 */
 int agp3_generic_fetch_size(void);
 void agp3_generic_tlbflush(struct agp_memory *mem);
@@ -288,6 +298,8 @@ extern struct aper_size_info_16 agp3_generic_sizes[];
 extern int agp_off;
 extern int agp_try_unsupported_boot;
 
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
 /* Chipset independant registers (from AGP Spec) */
 #define AGP_APBASE	0x10
 
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 5a31ec7c62fc..98177a93076f 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -214,6 +214,7 @@ static struct agp_bridge_driver ali_generic_bridge = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= ali_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver ali_m1541_bridge = {
@@ -237,6 +238,7 @@ static struct agp_bridge_driver ali_m1541_bridge = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= m1541_alloc_page,
 	.agp_destroy_page	= m1541_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index b4e00a343da9..b0acf41c0db9 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -91,6 +91,9 @@ static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
 	int num_entries, status;
 	void *temp;
 
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
 	if ((pg_start + mem->page_count) > num_entries)
@@ -142,6 +145,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 struct agp_bridge_data *alpha_bridge;
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index c85c8cadb6df..3d8d448bf394 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -381,6 +381,7 @@ static struct agp_bridge_driver amd_irongate_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 93d2209fee4c..636d984ed4a6 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -62,12 +62,18 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
 	int i, j, num_entries;
 	long long tmp;
+	int mask_type;
+	struct agp_bridge_data *bridge = mem->bridge;
 	u32 pte;
 
 	num_entries = agp_num_entries();
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type)
 		return -EINVAL;
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0)
+		return -EINVAL;
+
 
 	/* Make sure we can fit the range in the gatt table. */
 	/* FIXME: could wrap */
@@ -90,7 +96,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		tmp = agp_bridge->driver->mask_memory(agp_bridge,
-						      mem->memory[i], mem->type);
+						      mem->memory[i], mask_type);
 
 		BUG_ON(tmp & 0xffffff0000000ffcULL);
 		pte = (tmp & 0x000000ff00000000ULL) >> 28;
@@ -247,6 +253,7 @@ static struct agp_bridge_driver amd_8151_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 /* Some basic sanity checks for the aperture. */
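The amd64 hunk is the template for the conversions repeated throughout this series: insert/remove paths now insist that the caller's type matches the allocation's type, then translate the user-visible type into a hardware mask index through the new ->agp_type_to_mask_type hook, rejecting anything the chipset cannot encode. Reduced to a hedged stand-alone sketch (hypothetical helper name; only mask type 0, plain memory, is accepted here, as in the amd64 and generic paths):

static int my_validate_insert(struct agp_bridge_data *bridge,
			      struct agp_memory *mem, int type)
{
	int mask_type;

	if (type != mem->type)		/* caller/allocation disagree */
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0)		/* this driver only knows type 0 */
		return -EINVAL;

	return mask_type;		/* index into the gatt_mask table */
}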
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 9987dc2e0c3f..77c9ad68fba9 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -431,6 +431,7 @@ static struct agp_bridge_driver ati_generic_bridge = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index d59e037ddd12..ebdd6dd66edb 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -43,7 +43,7 @@
  * fix some real stupidity. It's only by chance we can bump
  * past 0.99 at all due to some boolean logic error. */
 #define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 101
+#define AGPGART_VERSION_MINOR 102
 static const struct agp_version agp_current_version =
 {
 	.major = AGPGART_VERSION_MAJOR,
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
new file mode 100644
index 000000000000..fcb4b1bf0d4e
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.c
@@ -0,0 +1,282 @@
1/*
2 * AGPGART driver frontend compatibility ioctls
3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2003 Dave Jones
5 * Copyright (C) 1999 Jeff Hartmann
6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/kernel.h>
30#include <linux/pci.h>
31#include <linux/agpgart.h>
32#include <asm/uaccess.h>
33#include "agp.h"
34#include "compat_ioctl.h"
35
36static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
37{
38 struct agp_info32 userinfo;
39 struct agp_kern_info kerninfo;
40
41 agp_copy_info(agp_bridge, &kerninfo);
42
43 userinfo.version.major = kerninfo.version.major;
44 userinfo.version.minor = kerninfo.version.minor;
45 userinfo.bridge_id = kerninfo.device->vendor |
46 (kerninfo.device->device << 16);
47 userinfo.agp_mode = kerninfo.mode;
48 userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
49 userinfo.aper_size = kerninfo.aper_size;
50 userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
51 userinfo.pg_used = kerninfo.current_memory;
52
53 if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
54 return -EFAULT;
55
56 return 0;
57}
58
59static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
60{
61 struct agp_region32 ureserve;
62 struct agp_region kreserve;
63 struct agp_client *client;
64 struct agp_file_private *client_priv;
65
66 DBG("");
67 if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
68 return -EFAULT;
69
70 if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
71 return -EFAULT;
72
73 kreserve.pid = ureserve.pid;
74 kreserve.seg_count = ureserve.seg_count;
75
76 client = agp_find_client_by_pid(kreserve.pid);
77
78 if (kreserve.seg_count == 0) {
79 /* remove a client */
80 client_priv = agp_find_private(kreserve.pid);
81
82 if (client_priv != NULL) {
83 set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
84 set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
85 }
86 if (client == NULL) {
87 /* client is already removed */
88 return 0;
89 }
90 return agp_remove_client(kreserve.pid);
91 } else {
92 struct agp_segment32 *usegment;
93 struct agp_segment *ksegment;
94 int seg;
95
96 if (ureserve.seg_count >= 16384)
97 return -EINVAL;
98
99 usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL);
100 if (!usegment)
101 return -ENOMEM;
102
103 ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL);
104 if (!ksegment) {
105 kfree(usegment);
106 return -ENOMEM;
107 }
108
109 if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
110 sizeof(*usegment) * ureserve.seg_count)) {
111 kfree(usegment);
112 kfree(ksegment);
113 return -EFAULT;
114 }
115
116 for (seg = 0; seg < ureserve.seg_count; seg++) {
117 ksegment[seg].pg_start = usegment[seg].pg_start;
118 ksegment[seg].pg_count = usegment[seg].pg_count;
119 ksegment[seg].prot = usegment[seg].prot;
120 }
121
122 kfree(usegment);
123 kreserve.seg_list = ksegment;
124
125 if (client == NULL) {
126 /* Create the client and add the segment */
127 client = agp_create_client(kreserve.pid);
128
129 if (client == NULL) {
130 kfree(ksegment);
131 return -ENOMEM;
132 }
133 client_priv = agp_find_private(kreserve.pid);
134
135 if (client_priv != NULL) {
136 set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
137 set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
138 }
139 }
140 return agp_create_segment(client, &kreserve);
141 }
142 /* Will never really happen */
143 return -EINVAL;
144}
145
146static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
147{
148 struct agp_memory *memory;
149 struct agp_allocate32 alloc;
150
151 DBG("");
152 if (copy_from_user(&alloc, arg, sizeof(alloc)))
153 return -EFAULT;
154
155 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
156
157 if (memory == NULL)
158 return -ENOMEM;
159
160 alloc.key = memory->key;
161 alloc.physical = memory->physical;
162
163 if (copy_to_user(arg, &alloc, sizeof(alloc))) {
164 agp_free_memory_wrap(memory);
165 return -EFAULT;
166 }
167 return 0;
168}
169
170static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
171{
172 struct agp_bind32 bind_info;
173 struct agp_memory *memory;
174
175 DBG("");
176 if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
177 return -EFAULT;
178
179 memory = agp_find_mem_by_key(bind_info.key);
180
181 if (memory == NULL)
182 return -EINVAL;
183
184 return agp_bind_memory(memory, bind_info.pg_start);
185}
186
187static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
188{
189 struct agp_memory *memory;
190 struct agp_unbind32 unbind;
191
192 DBG("");
193 if (copy_from_user(&unbind, arg, sizeof(unbind)))
194 return -EFAULT;
195
196 memory = agp_find_mem_by_key(unbind.key);
197
198 if (memory == NULL)
199 return -EINVAL;
200
201 return agp_unbind_memory(memory);
202}
203
204long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
205{
206 struct agp_file_private *curr_priv = file->private_data;
207 int ret_val = -ENOTTY;
208
209 mutex_lock(&(agp_fe.agp_mutex));
210
211 if ((agp_fe.current_controller == NULL) &&
212 (cmd != AGPIOC_ACQUIRE32)) {
213 ret_val = -EINVAL;
214 goto ioctl_out;
215 }
216 if ((agp_fe.backend_acquired != TRUE) &&
217 (cmd != AGPIOC_ACQUIRE32)) {
218 ret_val = -EBUSY;
219 goto ioctl_out;
220 }
221 if (cmd != AGPIOC_ACQUIRE32) {
222 if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
223 ret_val = -EPERM;
224 goto ioctl_out;
225 }
226 /* Use the original pid of the controller,
227 * in case it's threaded */
228
229 if (agp_fe.current_controller->pid != curr_priv->my_pid) {
230 ret_val = -EBUSY;
231 goto ioctl_out;
232 }
233 }
234
235 switch (cmd) {
236 case AGPIOC_INFO32:
237 ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
238 break;
239
240 case AGPIOC_ACQUIRE32:
241 ret_val = agpioc_acquire_wrap(curr_priv);
242 break;
243
244 case AGPIOC_RELEASE32:
245 ret_val = agpioc_release_wrap(curr_priv);
246 break;
247
248 case AGPIOC_SETUP32:
249 ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
250 break;
251
252 case AGPIOC_RESERVE32:
253 ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
254 break;
255
256 case AGPIOC_PROTECT32:
257 ret_val = agpioc_protect_wrap(curr_priv);
258 break;
259
260 case AGPIOC_ALLOCATE32:
261 ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
262 break;
263
264 case AGPIOC_DEALLOCATE32:
265 ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
266 break;
267
268 case AGPIOC_BIND32:
269 ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
270 break;
271
272 case AGPIOC_UNBIND32:
273 ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
274 break;
275 }
276
277ioctl_out:
278 DBG("ioctl returns %d\n", ret_val);
279 mutex_unlock(&(agp_fe.agp_mutex));
280 return ret_val;
281}
282
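Each wrapper above follows the same conversion idiom: copy the packed 32-bit struct in from userspace, widen its fields into the native layout, then reuse the ordinary backend helpers that frontend.c now exports. A hedged sketch of the idiom with hypothetical my_* names (compat_ptr() is the standard way to widen a 32-bit user pointer):

struct my_region32 {
	compat_pid_t  pid;
	compat_size_t seg_count;
	compat_uptr_t seg_list;			/* 32-bit user pointer */
};

struct my_region {
	pid_t pid;
	size_t seg_count;
	void __user *seg_list;
};

static int my_do_reserve(struct my_region *r);	/* hypothetical backend */

static int my_compat_reserve(void __user *arg)
{
	struct my_region32 u32arg;
	struct my_region native;

	if (copy_from_user(&u32arg, arg, sizeof(u32arg)))
		return -EFAULT;

	native.pid = u32arg.pid;
	native.seg_count = u32arg.seg_count;
	native.seg_list = compat_ptr(u32arg.seg_list);	/* widen correctly */

	return my_do_reserve(&native);
}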
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
new file mode 100644
index 000000000000..71939d637236
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (C) 1999 Jeff Hartmann
3 * Copyright (C) 1999 Precision Insight, Inc.
4 * Copyright (C) 1999 Xi Graphics, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
22 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#ifndef _AGP_COMPAT_IOCTL_H
27#define _AGP_COMPAT_IOCTL_H
28
29#include <linux/compat.h>
30#include <linux/agpgart.h>
31
32#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
33#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
34#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
35#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
36#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
37#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
38#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
39#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
40#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
41#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
42
43struct agp_info32 {
44 struct agp_version version; /* version of the driver */
45 u32 bridge_id; /* bridge vendor/device */
46 u32 agp_mode; /* mode info of bridge */
47 compat_long_t aper_base; /* base of aperture */
48 compat_size_t aper_size; /* size of aperture */
49 compat_size_t pg_total; /* max pages (swap + system) */
50 compat_size_t pg_system; /* max pages (system) */
51 compat_size_t pg_used; /* current pages used */
52};
53
54/*
55 * The "prot" down below needs still a "sleep" flag somehow ...
56 */
57struct agp_segment32 {
58 compat_off_t pg_start; /* starting page to populate */
59 compat_size_t pg_count; /* number of pages */
60 compat_int_t prot; /* prot flags for mmap */
61};
62
63struct agp_region32 {
64 compat_pid_t pid; /* pid of process */
65 compat_size_t seg_count; /* number of segments */
66 struct agp_segment32 *seg_list;
67};
68
69struct agp_allocate32 {
70 compat_int_t key; /* tag of allocation */
71 compat_size_t pg_count; /* number of pages */
72 u32 type; /* 0 == normal, other devspec */
73 u32 physical; /* device specific (some devices
74 * need a phys address of the
75 * actual page behind the gatt
76 * table) */
77};
78
79struct agp_bind32 {
80 compat_int_t key; /* tag of allocation */
81 compat_off_t pg_start; /* starting page to populate */
82};
83
84struct agp_unbind32 {
85 compat_int_t key; /* tag of allocation */
86 u32 priority; /* priority for paging out */
87};
88
89extern struct agp_front_data agp_fe;
90
91int agpioc_acquire_wrap(struct agp_file_private *priv);
92int agpioc_release_wrap(struct agp_file_private *priv);
93int agpioc_protect_wrap(struct agp_file_private *priv);
94int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
95int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
96struct agp_file_private *agp_find_private(pid_t pid);
97struct agp_client *agp_create_client(pid_t id);
98int agp_remove_client(pid_t id);
99int agp_create_segment(struct agp_client *client, struct agp_region *region);
100void agp_free_memory_wrap(struct agp_memory *memory);
101struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
102struct agp_memory *agp_find_mem_by_key(int key);
103struct agp_client *agp_find_client_by_pid(pid_t id);
104
105#endif /* _AGP_COMPAT_H */
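Why the mirror structs exist at all: several fields that are long, size_t, off_t or a pointer in the native ABI shrink to 32 bits for a compat caller, so both the struct layout and the _IOW()-derived command numbers (which encode the argument size) differ between a 32-bit process and a 64-bit kernel. An illustrative pair, not taken from the patch:

struct example_native {			/* as a 64-bit kernel sees it */
	unsigned long aper_base;	/* 8 bytes */
	size_t	      aper_size;	/* 8 bytes */
};

struct example_compat {			/* as a 32-bit client built it */
	compat_long_t aper_base;	/* 4 bytes */
	compat_size_t aper_size;	/* 4 bytes */
};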
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 30f730ff81c1..658cb1a72d2c 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -335,6 +335,7 @@ static struct agp_bridge_driver efficeon_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 0f2ed2aa2d81..679d7f972439 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -41,9 +41,9 @@
 #include <asm/pgtable.h>
 #include "agp.h"
 
-static struct agp_front_data agp_fe;
+struct agp_front_data agp_fe;
 
-static struct agp_memory *agp_find_mem_by_key(int key)
+struct agp_memory *agp_find_mem_by_key(int key)
 {
 	struct agp_memory *curr;
 
@@ -159,7 +159,7 @@ static pgprot_t agp_convert_mmap_flags(int prot)
 	return vm_get_page_prot(prot_bits);
 }
 
-static int agp_create_segment(struct agp_client *client, struct agp_region *region)
+int agp_create_segment(struct agp_client *client, struct agp_region *region)
 {
 	struct agp_segment_priv **ret_seg;
 	struct agp_segment_priv *seg;
@@ -211,7 +211,7 @@ static void agp_insert_into_pool(struct agp_memory * temp)
 
 /* File private list routines */
 
-static struct agp_file_private *agp_find_private(pid_t pid)
+struct agp_file_private *agp_find_private(pid_t pid)
 {
 	struct agp_file_private *curr;
 
@@ -266,13 +266,13 @@ static void agp_remove_file_private(struct agp_file_private * priv)
  * Wrappers for agp_free_memory & agp_allocate_memory
  * These make sure that internal lists are kept updated.
  */
-static void agp_free_memory_wrap(struct agp_memory *memory)
+void agp_free_memory_wrap(struct agp_memory *memory)
 {
 	agp_remove_from_pool(memory);
 	agp_free_memory(memory);
 }
 
-static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
 {
 	struct agp_memory *memory;
 
@@ -484,7 +484,7 @@ static struct agp_controller *agp_find_controller_for_client(pid_t id)
 	return NULL;
 }
 
-static struct agp_client *agp_find_client_by_pid(pid_t id)
+struct agp_client *agp_find_client_by_pid(pid_t id)
 {
 	struct agp_client *temp;
 
@@ -509,7 +509,7 @@ static void agp_insert_client(struct agp_client *client)
 	agp_fe.current_controller->num_clients++;
 }
 
-static struct agp_client *agp_create_client(pid_t id)
+struct agp_client *agp_create_client(pid_t id)
 {
 	struct agp_client *new_client;
 
@@ -522,7 +522,7 @@ static struct agp_client *agp_create_client(pid_t id)
 	return new_client;
 }
 
-static int agp_remove_client(pid_t id)
+int agp_remove_client(pid_t id)
 {
 	struct agp_client *client;
 	struct agp_client *prev_client;
@@ -746,7 +746,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 	return 0;
 }
 
-static int agpioc_acquire_wrap(struct agp_file_private *priv)
+int agpioc_acquire_wrap(struct agp_file_private *priv)
 {
 	struct agp_controller *controller;
 
@@ -789,14 +789,14 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv)
 	return 0;
 }
 
-static int agpioc_release_wrap(struct agp_file_private *priv)
+int agpioc_release_wrap(struct agp_file_private *priv)
 {
 	DBG("");
 	agp_controller_release_current(agp_fe.current_controller, priv);
 	return 0;
 }
 
-static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
 {
 	struct agp_setup mode;
 
@@ -876,7 +876,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
 	return -EINVAL;
 }
 
-static int agpioc_protect_wrap(struct agp_file_private *priv)
+int agpioc_protect_wrap(struct agp_file_private *priv)
 {
 	DBG("");
 	/* This function is not currently implemented */
@@ -892,6 +892,9 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
 	if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
 		return -EFAULT;
 
+	if (alloc.type >= AGP_USER_TYPES)
+		return -EINVAL;
+
 	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
 
 	if (memory == NULL)
@@ -907,7 +910,7 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
 	return 0;
 }
 
-static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
 {
 	struct agp_memory *memory;
 
@@ -1043,6 +1046,9 @@ static const struct file_operations agp_fops =
 	.read		= agp_read,
 	.write		= agp_write,
 	.ioctl		= agp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= compat_agp_ioctl,
+#endif
 	.mmap		= agp_mmap,
 	.open		= agp_open,
 	.release	= agp_release,
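The frontend hunks are the glue for the compat layer: the helpers it reuses lose their static qualifier (matching the declarations added to compat_ioctl.h), user-supplied allocation types are bounds-checked against AGP_USER_TYPES, and compat_agp_ioctl is registered in the fops under CONFIG_COMPAT. From userspace the effect is invisible; a 32-bit client keeps issuing the ordinary ioctls and the kernel does the translation. A hedged illustration (AGPIOC_* and the agp_info typedef come from the era's userspace <linux/agpgart.h>; error handling omitted):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/agpgart.h>

int main(void)
{
	agp_info info;
	int fd = open("/dev/agpgart", O_RDWR);

	ioctl(fd, AGPIOC_ACQUIRE);	/* routed via compat_agp_ioctl */
	ioctl(fd, AGPIOC_INFO, &info);	/* 32-bit layout converted */
	ioctl(fd, AGPIOC_RELEASE);
	close(fd);
	return 0;
}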
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 3491d6f84bc6..7923337c3d26 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -101,6 +101,63 @@ static int agp_get_key(void)
 	return -1;
 }
 
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+	mem->memory = NULL;
+	mem->vmalloc_flag = 0;
+
+	if (size <= 2*PAGE_SIZE)
+		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+	if (mem->memory == NULL) {
+		mem->memory = vmalloc(size);
+		mem->vmalloc_flag = 1;
+	}
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+void agp_free_page_array(struct agp_memory *mem)
+{
+	if (mem->vmalloc_flag) {
+		vfree(mem->memory);
+	} else {
+		kfree(mem->memory);
+	}
+}
+EXPORT_SYMBOL(agp_free_page_array);
+
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+	struct agp_memory *new;
+	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+	if (new == NULL)
+		return NULL;
+
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+
+	agp_alloc_page_array(alloc_size, new);
+
+	if (new->memory == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = 0;
+	return new;
+}
 
 struct agp_memory *agp_create_memory(int scratch_pages)
 {
@@ -116,7 +173,8 @@ struct agp_memory *agp_create_memory(int scratch_pages)
 		kfree(new);
 		return NULL;
 	}
-	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 
 	if (new->memory == NULL) {
 		agp_free_key(new->key);
@@ -124,6 +182,7 @@ struct agp_memory *agp_create_memory(int scratch_pages)
 		return NULL;
 	}
 	new->num_scratch_pages = scratch_pages;
+	new->type = AGP_NORMAL_MEMORY;
 	return new;
 }
 EXPORT_SYMBOL(agp_create_memory);
@@ -146,6 +205,11 @@ void agp_free_memory(struct agp_memory *curr)
 	if (curr->is_bound == TRUE)
 		agp_unbind_memory(curr);
 
+	if (curr->type >= AGP_USER_TYPES) {
+		agp_generic_free_by_type(curr);
+		return;
+	}
+
 	if (curr->type != 0) {
 		curr->bridge->driver->free_by_type(curr);
 		return;
@@ -157,7 +221,7 @@ void agp_free_memory(struct agp_memory *curr)
 		flush_agp_mappings();
 	}
 	agp_free_key(curr->key);
-	vfree(curr->memory);
+	agp_free_page_array(curr);
 	kfree(curr);
 }
 EXPORT_SYMBOL(agp_free_memory);
@@ -188,6 +252,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
 		return NULL;
 
+	if (type >= AGP_USER_TYPES) {
+		new = agp_generic_alloc_user(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
 	if (type != 0) {
 		new = bridge->driver->alloc_by_type(page_count, type);
 		if (new)
@@ -960,6 +1031,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	off_t j;
 	void *temp;
 	struct agp_bridge_data *bridge;
+	int mask_type;
 
 	bridge = mem->bridge;
 	if (!bridge)
@@ -995,7 +1067,11 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	num_entries -= agp_memory_reserved/PAGE_SIZE;
 	if (num_entries < 0) num_entries = 0;
 
-	if (type != 0 || mem->type != 0) {
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
 		/* The generic routines know nothing of memory types */
 		return -EINVAL;
 	}
@@ -1018,7 +1094,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
+		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+		       bridge->gatt_table+j);
 	}
 	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
 
@@ -1032,6 +1109,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
 	size_t i;
 	struct agp_bridge_data *bridge;
+	int mask_type;
 
 	bridge = mem->bridge;
 	if (!bridge)
@@ -1040,7 +1118,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	if (mem->page_count == 0)
 		return 0;
 
-	if (type != 0 || mem->type != 0) {
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
 		/* The generic routines know nothing of memory types */
 		return -EINVAL;
 	}
@@ -1056,22 +1138,40 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 }
 EXPORT_SYMBOL(agp_generic_remove_memory);
 
-
 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
 {
 	return NULL;
 }
 EXPORT_SYMBOL(agp_generic_alloc_by_type);
 
-
 void agp_generic_free_by_type(struct agp_memory *curr)
 {
-	vfree(curr->memory);
+	agp_free_page_array(curr);
 	agp_free_key(curr->key);
 	kfree(curr);
 }
 EXPORT_SYMBOL(agp_generic_free_by_type);
 
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+	struct agp_memory *new;
+	int i;
+	int pages;
+
+	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+	new = agp_create_user_memory(page_count);
+	if (new == NULL)
+		return NULL;
+
+	for (i = 0; i < page_count; i++)
+		new->memory[i] = 0;
+	new->page_count = 0;
+	new->type = type;
+	new->num_scratch_pages = pages;
+
+	return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
 
 /*
  * Basic Page Allocation Routines -
@@ -1165,6 +1265,15 @@ unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
 }
 EXPORT_SYMBOL(agp_generic_mask_memory);
 
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+				  int type)
+{
+	if (type >= AGP_USER_TYPES)
+		return 0;
+	return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
 /*
  * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
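agp_alloc_page_array is the small-allocation fast path that several hunks above switch to: page lists up to two pages' worth come from kmalloc (with __GFP_NORETRY so the fallback is taken instead of thrashing reclaim), anything bigger or any kmalloc failure goes to vmalloc, and vmalloc_flag records which allocator must undo it. The same idea later appeared in mainline as kvmalloc/kvfree. Usage sketch (sizes are arbitrary examples):

static void page_array_usage_example(void)
{
	struct agp_memory small_mem, big_mem;

	agp_alloc_page_array(PAGE_SIZE, &small_mem);	/* kmalloc path */
	agp_alloc_page_array(64 * PAGE_SIZE, &big_mem);	/* straight to vmalloc */

	/* ... fill small_mem.memory / big_mem.memory ... */

	agp_free_page_array(&small_mem);	/* undone with kfree() */
	agp_free_page_array(&big_mem);		/* undone with vfree() */
}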
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 907fb66ec4a9..847deabf7f9b 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -438,6 +438,7 @@ struct agp_bridge_driver hp_zx1_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 	.cant_use_aperture	= 1,
 };
 
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 91769443d8fe..3e7618653abd 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -293,6 +293,9 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
 		 mem, pg_start, type, mem->memory[0]);
 
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
 	io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
 
 	temp = agp_bridge->current_size;
@@ -396,6 +399,9 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
 	struct lp_desc *start, *end, *lp;
 	void *temp;
 
+	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+		return -EINVAL;
+
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_8(temp)->num_entries;
 
@@ -572,6 +578,7 @@ struct agp_bridge_driver intel_i460_driver = {
 #endif
 	.alloc_by_type		= agp_generic_alloc_by_type,
 	.free_by_type		= agp_generic_free_by_type,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 	.cant_use_aperture	= 1,
 };
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a3011de51f7c..06b0bb6d982f 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
 #include "agp.h"
@@ -24,6 +25,9 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
 
 
+extern int agp_memory_reserved;
+
+
 /* Intel 815 register */
 #define INTEL_815_APCONT	0x51
 #define INTEL_815_ATTBASE_MASK	~0x1FFFFFFF
@@ -68,12 +72,15 @@ static struct aper_size_info_fixed intel_i810_sizes[] =
 
 #define AGP_DCACHE_MEMORY	1
 #define AGP_PHYS_MEMORY		2
+#define INTEL_AGP_CACHED_MEMORY 3
 
 static struct gatt_mask intel_i810_masks[] =
 {
 	{.mask = I810_PTE_VALID, .type = 0},
 	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-	{.mask = I810_PTE_VALID, .type = 0}
+	{.mask = I810_PTE_VALID, .type = 0},
+	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+	 .type = INTEL_AGP_CACHED_MEMORY}
 };
 
 static struct _intel_i810_private {
@@ -117,13 +124,15 @@ static int intel_i810_configure(void)
 
 	current_size = A_SIZE_FIX(agp_bridge->current_size);
 
-	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
-	temp &= 0xfff80000;
-
-	intel_i810_private.registers = ioremap(temp, 128 * 4096);
 	if (!intel_i810_private.registers) {
-		printk(KERN_ERR PFX "Unable to remap memory.\n");
-		return -ENOMEM;
+		pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
+		temp &= 0xfff80000;
+
+		intel_i810_private.registers = ioremap(temp, 128 * 4096);
+		if (!intel_i810_private.registers) {
+			printk(KERN_ERR PFX "Unable to remap memory.\n");
+			return -ENOMEM;
+		}
 	}
 
 	if ((readl(intel_i810_private.registers+I810_DRAM_CTL)
@@ -201,62 +210,79 @@ static void i8xx_destroy_pages(void *addr)
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+					int type)
+{
+	if (type < AGP_USER_TYPES)
+		return type;
+	else if (type == AGP_USER_CACHED_MEMORY)
+		return INTEL_AGP_CACHED_MEMORY;
+	else
+		return 0;
+}
+
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 				     int type)
 {
 	int i, j, num_entries;
 	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
 
 	if (mem->page_count == 0)
-		return 0;
+		goto out;
 
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
 
 	if ((pg_start + mem->page_count) > num_entries)
-		return -EINVAL;
+		goto out_err;
 
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
-			return -EBUSY;
-	}
 
-	if (type != 0 || mem->type != 0) {
-		if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
-			/* special insert */
-			if (!mem->is_flushed) {
-				global_cache_flush();
-				mem->is_flushed = TRUE;
-			}
-
-			for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-				writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
-			}
-			readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
-
-			agp_bridge->driver->tlb_flush(mem);
-			return 0;
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+			ret = -EBUSY;
+			goto out_err;
 		}
-		if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
-			goto insert;
-		return -EINVAL;
 	}
 
-insert:
-	if (!mem->is_flushed) {
-		global_cache_flush();
-		mem->is_flushed = TRUE;
-	}
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->memory[i], mem->type),
-			intel_i810_private.registers+I810_PTE_BASE+(j*4));
+	switch (mask_type) {
+	case AGP_DCACHE_MEMORY:
+		if (!mem->is_flushed)
+			global_cache_flush();
+		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+			       intel_i810_private.registers+I810_PTE_BASE+(i*4));
+		}
+		readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4));
+		break;
+	case AGP_PHYS_MEMORY:
+	case AGP_NORMAL_MEMORY:
+		if (!mem->is_flushed)
+			global_cache_flush();
+		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+			writel(agp_bridge->driver->mask_memory(agp_bridge,
+							       mem->memory[i],
+							       mask_type),
+			       intel_i810_private.registers+I810_PTE_BASE+(j*4));
+		}
+		readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4));
+		break;
+	default:
+		goto out_err;
 	}
-	readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI Posting. */
 
 	agp_bridge->driver->tlb_flush(mem);
-	return 0;
+out:
+	ret = 0;
+out_err:
+	mem->is_flushed = 1;
+	return ret;
 }
 
 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
@@ -337,12 +363,11 @@ static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
 		new->type = AGP_DCACHE_MEMORY;
 		new->page_count = pg_count;
 		new->num_scratch_pages = 0;
-		vfree(new->memory);
+		agp_free_page_array(new);
 		return new;
 	}
 	if (type == AGP_PHYS_MEMORY)
 		return alloc_agpphysmem_i8xx(pg_count, type);
-
 	return NULL;
 }
 
@@ -357,7 +382,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 				gart_to_virt(curr->memory[0]));
 			global_flush_tlb();
 		}
-		vfree(curr->memory);
+		agp_free_page_array(curr);
 	}
 	kfree(curr);
 }
@@ -619,9 +644,11 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
 {
 	int i,j,num_entries;
 	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
 
 	if (mem->page_count == 0)
-		return 0;
+		goto out;
 
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
@@ -631,34 +658,41 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
 			pg_start,intel_i830_private.gtt_entries);
 
 		printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
-		return -EINVAL;
+		goto out_err;
 	}
 
 	if ((pg_start + mem->page_count) > num_entries)
-		return -EINVAL;
+		goto out_err;
 
 	/* The i830 can't check the GTT for entries since its read only,
 	 * depend on the caller to make the correct offset decisions.
 	 */
 
-	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
-	    (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
-		return -EINVAL;
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-	if (!mem->is_flushed) {
+	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+	    mask_type != INTEL_AGP_CACHED_MEMORY)
+		goto out_err;
+
+	if (!mem->is_flushed)
 		global_cache_flush();
-		mem->is_flushed = TRUE;
-	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->memory[i], mem->type),
+				mem->memory[i], mask_type),
 		       intel_i830_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4));
-
 	agp_bridge->driver->tlb_flush(mem);
-	return 0;
+
+out:
+	ret = 0;
+out_err:
+	mem->is_flushed = 1;
+	return ret;
 }
 
 static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
@@ -687,7 +721,6 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
 {
 	if (type == AGP_PHYS_MEMORY)
 		return alloc_agpphysmem_i8xx(pg_count, type);
-
 	/* always return NULL for other allocation types for now */
 	return NULL;
 }
@@ -734,9 +767,11 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
 {
 	int i,j,num_entries;
 	void *temp;
+	int ret = -EINVAL;
+	int mask_type;
 
 	if (mem->page_count == 0)
-		return 0;
+		goto out;
 
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
@@ -746,33 +781,41 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
 			pg_start,intel_i830_private.gtt_entries);
 
 		printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
-		return -EINVAL;
+		goto out_err;
 	}
 
 	if ((pg_start + mem->page_count) > num_entries)
-		return -EINVAL;
+		goto out_err;
 
-	/* The i830 can't check the GTT for entries since its read only,
+	/* The i915 can't check the GTT for entries since its read only,
 	 * depend on the caller to make the correct offset decisions.
 	 */
 
-	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
-	    (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
-		return -EINVAL;
+	if (type != mem->type)
+		goto out_err;
+
+	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-	if (!mem->is_flushed) {
+	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+	    mask_type != INTEL_AGP_CACHED_MEMORY)
+		goto out_err;
+
+	if (!mem->is_flushed)
 		global_cache_flush();
-		mem->is_flushed = TRUE;
-	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->memory[i], mem->type), intel_i830_private.gtt+j);
+			mem->memory[i], mask_type), intel_i830_private.gtt+j);
 	}
-	readl(intel_i830_private.gtt+j-1);
 
+	readl(intel_i830_private.gtt+j-1);
 	agp_bridge->driver->tlb_flush(mem);
-	return 0;
+
+ out:
+	ret = 0;
+ out_err:
+	mem->is_flushed = 1;
+	return ret;
 }
 
 static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
@@ -803,7 +846,7 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
  */
 static int intel_i9xx_fetch_size(void)
 {
-	int num_sizes = sizeof(intel_i830_sizes) / sizeof(*intel_i830_sizes);
+	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
 	int aper_size;	/* size in megabytes */
 	int i;
 
@@ -1384,6 +1427,7 @@ static struct agp_bridge_driver intel_generic_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_810_driver = {
@@ -1408,6 +1452,7 @@ static struct agp_bridge_driver intel_810_driver = {
 	.free_by_type		= intel_i810_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_815_driver = {
@@ -1431,6 +1476,7 @@ static struct agp_bridge_driver intel_815_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_830_driver = {
@@ -1455,6 +1501,7 @@ static struct agp_bridge_driver intel_830_driver = {
 	.free_by_type		= intel_i810_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_820_driver = {
@@ -1478,6 +1525,7 @@ static struct agp_bridge_driver intel_820_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_830mp_driver = {
@@ -1501,6 +1549,7 @@ static struct agp_bridge_driver intel_830mp_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_840_driver = {
@@ -1524,6 +1573,7 @@ static struct agp_bridge_driver intel_840_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_845_driver = {
@@ -1547,6 +1597,7 @@ static struct agp_bridge_driver intel_845_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_850_driver = {
@@ -1570,6 +1621,7 @@ static struct agp_bridge_driver intel_850_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_860_driver = {
@@ -1593,6 +1645,7 @@ static struct agp_bridge_driver intel_860_driver = {
 	.free_by_type		= agp_generic_free_by_type,
 	.agp_alloc_page		= agp_generic_alloc_page,
 	.agp_destroy_page	= agp_generic_destroy_page,
+	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
 static struct agp_bridge_driver intel_915_driver = {
@@ -1617,6 +1670,7 @@ static struct agp_bridge_driver intel_915_driver = {
1617 .free_by_type = intel_i810_free_by_type, 1670 .free_by_type = intel_i810_free_by_type,
1618 .agp_alloc_page = agp_generic_alloc_page, 1671 .agp_alloc_page = agp_generic_alloc_page,
1619 .agp_destroy_page = agp_generic_destroy_page, 1672 .agp_destroy_page = agp_generic_destroy_page,
1673 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1620}; 1674};
1621 1675
1622static struct agp_bridge_driver intel_i965_driver = { 1676static struct agp_bridge_driver intel_i965_driver = {
@@ -1641,6 +1695,7 @@ static struct agp_bridge_driver intel_i965_driver = {
1641 .free_by_type = intel_i810_free_by_type, 1695 .free_by_type = intel_i810_free_by_type,
1642 .agp_alloc_page = agp_generic_alloc_page, 1696 .agp_alloc_page = agp_generic_alloc_page,
1643 .agp_destroy_page = agp_generic_destroy_page, 1697 .agp_destroy_page = agp_generic_destroy_page,
1698 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1644}; 1699};
1645 1700
1646static struct agp_bridge_driver intel_7505_driver = { 1701static struct agp_bridge_driver intel_7505_driver = {
@@ -1664,6 +1719,7 @@ static struct agp_bridge_driver intel_7505_driver = {
1664 .free_by_type = agp_generic_free_by_type, 1719 .free_by_type = agp_generic_free_by_type,
1665 .agp_alloc_page = agp_generic_alloc_page, 1720 .agp_alloc_page = agp_generic_alloc_page,
1666 .agp_destroy_page = agp_generic_destroy_page, 1721 .agp_destroy_page = agp_generic_destroy_page,
1722 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1667}; 1723};
1668 1724
1669static int find_i810(u16 device) 1725static int find_i810(u16 device)
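Every bridge driver above now fills in the new .agp_type_to_mask_type hook, letting the core translate a user-visible memory type into the mask type handed to mask_memory(). The Intel-specific translator is only referenced in this diff, not shown; as a minimal sketch only, assuming the same INTEL_AGP_CACHED_MEMORY check seen in intel_i830_insert_entries() above (example_i830_type_to_mask_type and the AGP_USER_TYPES bound are illustrative, not quoted from the patch), such a hook could look like:

static int example_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					  int type)
{
	/* plain AGP types pass through untranslated */
	if (type < AGP_USER_TYPES)
		return type;
	/* the one chipset-private type keeps its own GTT mask */
	if (type == INTEL_AGP_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	/* anything else falls back to the default mask */
	return 0;
}

Drivers without private memory types simply point the hook at agp_generic_type_to_mask_type, which is what the bulk of this patch does.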
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index df7f37b2739a..2563286b2fcf 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@ static struct agp_bridge_driver nvidia_driver = {
310 .free_by_type = agp_generic_free_by_type, 310 .free_by_type = agp_generic_free_by_type,
311 .agp_alloc_page = agp_generic_alloc_page, 311 .agp_alloc_page = agp_generic_alloc_page,
312 .agp_destroy_page = agp_generic_destroy_page, 312 .agp_destroy_page = agp_generic_destroy_page,
313 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
313}; 314};
314 315
315static int __devinit agp_nvidia_probe(struct pci_dev *pdev, 316static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 17c50b0f83f0..b7b4590673ae 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -228,6 +228,7 @@ struct agp_bridge_driver parisc_agp_driver = {
228 .free_by_type = agp_generic_free_by_type, 228 .free_by_type = agp_generic_free_by_type,
229 .agp_alloc_page = agp_generic_alloc_page, 229 .agp_alloc_page = agp_generic_alloc_page,
230 .agp_destroy_page = agp_generic_destroy_page, 230 .agp_destroy_page = agp_generic_destroy_page,
231 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
231 .cant_use_aperture = 1, 232 .cant_use_aperture = 1,
232}; 233};
233 234
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 902648db7efa..92d1dc45b9be 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -265,6 +265,7 @@ struct agp_bridge_driver sgi_tioca_driver = {
265 .free_by_type = agp_generic_free_by_type, 265 .free_by_type = agp_generic_free_by_type,
266 .agp_alloc_page = sgi_tioca_alloc_page, 266 .agp_alloc_page = sgi_tioca_alloc_page,
267 .agp_destroy_page = agp_generic_destroy_page, 267 .agp_destroy_page = agp_generic_destroy_page,
268 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
268 .cant_use_aperture = 1, 269 .cant_use_aperture = 1,
269 .needs_scratch_page = 0, 270 .needs_scratch_page = 0,
270 .num_aperture_sizes = 1, 271 .num_aperture_sizes = 1,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index a00fd48a6f05..60342b708152 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -140,6 +140,7 @@ static struct agp_bridge_driver sis_driver = {
140 .free_by_type = agp_generic_free_by_type, 140 .free_by_type = agp_generic_free_by_type,
141 .agp_alloc_page = agp_generic_alloc_page, 141 .agp_alloc_page = agp_generic_alloc_page,
142 .agp_destroy_page = agp_generic_destroy_page, 142 .agp_destroy_page = agp_generic_destroy_page,
143 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
143}; 144};
144 145
145static struct agp_device_ids sis_agp_device_ids[] __devinitdata = 146static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 4f2d7d99902f..9f5ae7714f85 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -444,6 +444,7 @@ static struct agp_bridge_driver sworks_driver = {
444 .free_by_type = agp_generic_free_by_type, 444 .free_by_type = agp_generic_free_by_type,
445 .agp_alloc_page = agp_generic_alloc_page, 445 .agp_alloc_page = agp_generic_alloc_page,
446 .agp_destroy_page = agp_generic_destroy_page, 446 .agp_destroy_page = agp_generic_destroy_page,
447 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
447}; 448};
448 449
449static int __devinit agp_serverworks_probe(struct pci_dev *pdev, 450static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index dffc19382f7e..6c45702e542c 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -510,6 +510,7 @@ struct agp_bridge_driver uninorth_agp_driver = {
510 .free_by_type = agp_generic_free_by_type, 510 .free_by_type = agp_generic_free_by_type,
511 .agp_alloc_page = agp_generic_alloc_page, 511 .agp_alloc_page = agp_generic_alloc_page,
512 .agp_destroy_page = agp_generic_destroy_page, 512 .agp_destroy_page = agp_generic_destroy_page,
513 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
513 .cant_use_aperture = 1, 514 .cant_use_aperture = 1,
514}; 515};
515 516
@@ -534,6 +535,7 @@ struct agp_bridge_driver u3_agp_driver = {
534 .free_by_type = agp_generic_free_by_type, 535 .free_by_type = agp_generic_free_by_type,
535 .agp_alloc_page = agp_generic_alloc_page, 536 .agp_alloc_page = agp_generic_alloc_page,
536 .agp_destroy_page = agp_generic_destroy_page, 537 .agp_destroy_page = agp_generic_destroy_page,
538 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
537 .cant_use_aperture = 1, 539 .cant_use_aperture = 1,
538 .needs_scratch_page = 1, 540 .needs_scratch_page = 1,
539}; 541};
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 2ded7a280d7f..2e7c04370cd9 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -191,6 +191,7 @@ static struct agp_bridge_driver via_agp3_driver = {
191 .free_by_type = agp_generic_free_by_type, 191 .free_by_type = agp_generic_free_by_type,
192 .agp_alloc_page = agp_generic_alloc_page, 192 .agp_alloc_page = agp_generic_alloc_page,
193 .agp_destroy_page = agp_generic_destroy_page, 193 .agp_destroy_page = agp_generic_destroy_page,
194 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
194}; 195};
195 196
196static struct agp_bridge_driver via_driver = { 197static struct agp_bridge_driver via_driver = {
@@ -214,6 +215,7 @@ static struct agp_bridge_driver via_driver = {
214 .free_by_type = agp_generic_free_by_type, 215 .free_by_type = agp_generic_free_by_type,
215 .agp_alloc_page = agp_generic_alloc_page, 216 .agp_alloc_page = agp_generic_alloc_page,
216 .agp_destroy_page = agp_generic_destroy_page, 217 .agp_destroy_page = agp_generic_destroy_page,
218 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
217}; 219};
218 220
219static struct agp_device_ids via_agp_device_ids[] __devinitdata = 221static struct agp_device_ids via_agp_device_ids[] __devinitdata =
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 1aa93a752a9c..ae76a9ffe89f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -117,7 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); 117__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
118#endif /* not MODULE */ 118#endif /* not MODULE */
119 119
120#if defined(CONFIG_X86_64) || defined(CONFIG_S390) 120#if defined(CONFIG_S390)
121# define HAVE_MONOTONIC 121# define HAVE_MONOTONIC
122# define TIMER_FREQ 1000000000ULL 122# define TIMER_FREQ 1000000000ULL
123#elif defined(CONFIG_IA64) 123#elif defined(CONFIG_IA64)
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index be73c80d699d..1d8c4ae61551 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -36,6 +36,7 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/kexec.h> 37#include <linux/kexec.h>
38#include <linux/irq.h> 38#include <linux/irq.h>
39#include <linux/hrtimer.h>
39 40
40#include <asm/ptrace.h> 41#include <asm/ptrace.h>
41#include <asm/irq_regs.h> 42#include <asm/irq_regs.h>
@@ -158,6 +159,17 @@ static struct sysrq_key_op sysrq_sync_op = {
158 .enable_mask = SYSRQ_ENABLE_SYNC, 159 .enable_mask = SYSRQ_ENABLE_SYNC,
159}; 160};
160 161
162static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
163{
164 sysrq_timer_list_show();
165}
166
167static struct sysrq_key_op sysrq_show_timers_op = {
168 .handler = sysrq_handle_show_timers,
169 .help_msg = "show-all-timers(Q)",
170 .action_msg = "Show Pending Timers",
171};
172
161static void sysrq_handle_mountro(int key, struct tty_struct *tty) 173static void sysrq_handle_mountro(int key, struct tty_struct *tty)
162{ 174{
163 emergency_remount(); 175 emergency_remount();
@@ -335,7 +347,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
335 /* o: This will often be registered as 'Off' at init time */ 347 /* o: This will often be registered as 'Off' at init time */
336 NULL, /* o */ 348 NULL, /* o */
337 &sysrq_showregs_op, /* p */ 349 &sysrq_showregs_op, /* p */
338 NULL, /* q */ 350 &sysrq_show_timers_op, /* q */
339 &sysrq_unraw_op, /* r */ 351 &sysrq_unraw_op, /* r */
340 &sysrq_sync_op, /* s */ 352 &sysrq_sync_op, /* s */
341 &sysrq_showstate_op, /* t */ 353 &sysrq_showstate_op, /* t */
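The sysrq hunk binds 'q' to the new timer-list dump by editing the static key table. Code outside this table can claim a spare key at runtime with the same three-field sysrq_key_op shape; a minimal sketch (the handler, the 'v' slot, and the message strings are made up for illustration):

#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/tty.h>

static void sysrq_handle_hello(int key, struct tty_struct *tty)
{
	printk(KERN_INFO "sysrq: hello from handler\n");
}

static struct sysrq_key_op sysrq_hello_op = {
	.handler	= sysrq_handle_hello,
	.help_msg	= "hello(v)",
	.action_msg	= "Say Hello",
};

/* in module init; fails if the key is already claimed */
if (register_sysrq_key('v', &sysrq_hello_op) < 0)
	printk(KERN_WARNING "sysrq: key 'v' already in use\n");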
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index b6bcdbbf57b3..ccaa6a39cb4b 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -16,15 +16,13 @@
16 * This file is licensed under the GPL v2. 16 * This file is licensed under the GPL v2.
17 */ 17 */
18 18
19#include <linux/acpi_pmtmr.h>
19#include <linux/clocksource.h> 20#include <linux/clocksource.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/pci.h> 23#include <linux/pci.h>
23#include <asm/io.h> 24#include <asm/io.h>
24 25
25/* Number of PMTMR ticks expected during calibration run */
26#define PMTMR_TICKS_PER_SEC 3579545
27
28/* 26/*
29 * The I/O port the PMTMR resides at. 27 * The I/O port the PMTMR resides at.
30 * The location is detected during setup_arch(), 28 * The location is detected during setup_arch(),
@@ -32,15 +30,13 @@
32 */ 30 */
33u32 pmtmr_ioport __read_mostly; 31u32 pmtmr_ioport __read_mostly;
34 32
35#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) /* limit it to 24 bits */
36
37static inline u32 read_pmtmr(void) 33static inline u32 read_pmtmr(void)
38{ 34{
39 /* mask the output to 24 bits */ 35 /* mask the output to 24 bits */
40 return inl(pmtmr_ioport) & ACPI_PM_MASK; 36 return inl(pmtmr_ioport) & ACPI_PM_MASK;
41} 37}
42 38
43static cycle_t acpi_pm_read_verified(void) 39u32 acpi_pm_read_verified(void)
44{ 40{
45 u32 v1 = 0, v2 = 0, v3 = 0; 41 u32 v1 = 0, v2 = 0, v3 = 0;
46 42
@@ -57,7 +53,12 @@ static cycle_t acpi_pm_read_verified(void)
57 } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) 53 } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
58 || (v3 > v1 && v3 < v2))); 54 || (v3 > v1 && v3 < v2)));
59 55
60 return (cycle_t)v2; 56 return v2;
57}
58
59static cycle_t acpi_pm_read_slow(void)
60{
61 return (cycle_t)acpi_pm_read_verified();
61} 62}
62 63
63static cycle_t acpi_pm_read(void) 64static cycle_t acpi_pm_read(void)
@@ -72,7 +73,8 @@ static struct clocksource clocksource_acpi_pm = {
72 .mask = (cycle_t)ACPI_PM_MASK, 73 .mask = (cycle_t)ACPI_PM_MASK,
73 .mult = 0, /*to be caluclated*/ 74 .mult = 0, /*to be caluclated*/
74 .shift = 22, 75 .shift = 22,
75 .is_continuous = 1, 76 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
77
76}; 78};
77 79
78 80
@@ -87,7 +89,7 @@ __setup("acpi_pm_good", acpi_pm_good_setup);
87 89
88static inline void acpi_pm_need_workaround(void) 90static inline void acpi_pm_need_workaround(void)
89{ 91{
90 clocksource_acpi_pm.read = acpi_pm_read_verified; 92 clocksource_acpi_pm.read = acpi_pm_read_slow;
91 clocksource_acpi_pm.rating = 110; 93 clocksource_acpi_pm.rating = 110;
92} 94}
93 95
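Converting .is_continuous into a flags word is mechanical, but it changes what every clocksource definition looks like from here on. A minimal registration sketch for this kernel, assuming a hypothetical free-running 32-bit counter behind read_hw_counter():

#include <linux/init.h>
#include <linux/module.h>
#include <linux/clocksource.h>

static cycle_t example_cs_read(void)
{
	return (cycle_t)read_hw_counter();	/* hypothetical hardware read */
}

static struct clocksource cs_example = {
	.name	= "example",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,	/* was .is_continuous = 1 */
};

static int __init cs_example_init(void)
{
	/* mult converts ticks of a 1 MHz counter into shifted nanoseconds */
	cs_example.mult = clocksource_hz2mult(1000000, cs_example.shift);
	return clocksource_register(&cs_example);
}
module_init(cs_example_init);

The acpi_pm change also splits the verified read into a u32 helper plus a cycle_t wrapper, apparently so the PM timer can be read from code outside the clocksource framework, which is why acpi_pm_read_verified() loses its static and the constants move to linux/acpi_pmtmr.h.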
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index bf4d3d50d1c4..4f3925ceb360 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -31,7 +31,7 @@ static struct clocksource clocksource_cyclone = {
31 .mask = CYCLONE_TIMER_MASK, 31 .mask = CYCLONE_TIMER_MASK,
32 .mult = 10, 32 .mult = 10,
33 .shift = 0, 33 .shift = 0,
34 .is_continuous = 1, 34 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
35}; 35};
36 36
37static int __init init_cyclone_clocksource(void) 37static int __init init_cyclone_clocksource(void)
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index 22915cc46ba7..b92da677aa5d 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -57,7 +57,7 @@ static struct clocksource cs_hrt = {
57 .rating = 250, 57 .rating = 250,
58 .read = read_hrt, 58 .read = read_hrt,
59 .mask = CLOCKSOURCE_MASK(32), 59 .mask = CLOCKSOURCE_MASK(32),
60 .is_continuous = 1, 60 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
61 /* mult, shift are set based on mhz27 flag */ 61 /* mult, shift are set based on mhz27 flag */
62}; 62};
63 63
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 491779af8d55..d155e81b5c97 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -16,7 +16,7 @@ config CPU_FREQ
16if CPU_FREQ 16if CPU_FREQ
17 17
18config CPU_FREQ_TABLE 18config CPU_FREQ_TABLE
19 def_tristate m 19 tristate
20 20
21config CPU_FREQ_DEBUG 21config CPU_FREQ_DEBUG
22 bool "Enable CPUfreq debugging" 22 bool "Enable CPUfreq debugging"
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45cc89e387a..f52facc570f5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver;
41static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; 41static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
42static DEFINE_SPINLOCK(cpufreq_driver_lock); 42static DEFINE_SPINLOCK(cpufreq_driver_lock);
43 43
44/*
45 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
46 * all cpufreq/hotplug/workqueue/etc related lock issues.
47 *
48 * The rules for this semaphore:
49 * - Any routine that wants to read from the policy structure will
50 * do a down_read on this semaphore.
51 * - Any routine that will write to the policy structure and/or may take away
52 * the policy altogether (eg. CPU hotplug), will hold this lock in write
53 * mode before doing so.
54 *
55 * Additional rules:
56 * - All holders of the lock should check to make sure that the CPU they
57 * are concerned with are online after they get the lock.
58 * - Governor routines that can be called in cpufreq hotplug path should not
59 * take this sem as top level hotplug notifier handler takes this.
60 */
61static DEFINE_PER_CPU(int, policy_cpu);
62static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
63
64#define lock_policy_rwsem(mode, cpu) \
65int lock_policy_rwsem_##mode \
66(int cpu) \
67{ \
68 int policy_cpu = per_cpu(policy_cpu, cpu); \
69 BUG_ON(policy_cpu == -1); \
70 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
71 if (unlikely(!cpu_online(cpu))) { \
72 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
73 return -1; \
74 } \
75 \
76 return 0; \
77}
78
79lock_policy_rwsem(read, cpu);
80EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
81
82lock_policy_rwsem(write, cpu);
83EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
84
85void unlock_policy_rwsem_read(int cpu)
86{
87 int policy_cpu = per_cpu(policy_cpu, cpu);
88 BUG_ON(policy_cpu == -1);
89 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
90}
91EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
92
93void unlock_policy_rwsem_write(int cpu)
94{
95 int policy_cpu = per_cpu(policy_cpu, cpu);
96 BUG_ON(policy_cpu == -1);
97 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
98}
99EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
100
101
44/* internal prototypes */ 102/* internal prototypes */
45static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 103static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
104static unsigned int __cpufreq_get(unsigned int cpu);
46static void handle_update(struct work_struct *work); 105static void handle_update(struct work_struct *work);
47 106
48/** 107/**
@@ -415,12 +474,8 @@ static ssize_t store_##file_name \
415 if (ret != 1) \ 474 if (ret != 1) \
416 return -EINVAL; \ 475 return -EINVAL; \
417 \ 476 \
418 lock_cpu_hotplug(); \
419 mutex_lock(&policy->lock); \
420 ret = __cpufreq_set_policy(policy, &new_policy); \ 477 ret = __cpufreq_set_policy(policy, &new_policy); \
421 policy->user_policy.object = policy->object; \ 478 policy->user_policy.object = policy->object; \
422 mutex_unlock(&policy->lock); \
423 unlock_cpu_hotplug(); \
424 \ 479 \
425 return ret ? ret : count; \ 480 return ret ? ret : count; \
426} 481}
@@ -434,7 +489,7 @@ store_one(scaling_max_freq,max);
434static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, 489static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
435 char *buf) 490 char *buf)
436{ 491{
437 unsigned int cur_freq = cpufreq_get(policy->cpu); 492 unsigned int cur_freq = __cpufreq_get(policy->cpu);
438 if (!cur_freq) 493 if (!cur_freq)
439 return sprintf(buf, "<unknown>"); 494 return sprintf(buf, "<unknown>");
440 return sprintf(buf, "%u\n", cur_freq); 495 return sprintf(buf, "%u\n", cur_freq);
@@ -479,18 +534,12 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
479 &new_policy.governor)) 534 &new_policy.governor))
480 return -EINVAL; 535 return -EINVAL;
481 536
482 lock_cpu_hotplug();
483
484 /* Do not use cpufreq_set_policy here or the user_policy.max 537 /* Do not use cpufreq_set_policy here or the user_policy.max
485 will be wrongly overridden */ 538 will be wrongly overridden */
486 mutex_lock(&policy->lock);
487 ret = __cpufreq_set_policy(policy, &new_policy); 539 ret = __cpufreq_set_policy(policy, &new_policy);
488 540
489 policy->user_policy.policy = policy->policy; 541 policy->user_policy.policy = policy->policy;
490 policy->user_policy.governor = policy->governor; 542 policy->user_policy.governor = policy->governor;
491 mutex_unlock(&policy->lock);
492
493 unlock_cpu_hotplug();
494 543
495 if (ret) 544 if (ret)
496 return ret; 545 return ret;
@@ -595,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
595 policy = cpufreq_cpu_get(policy->cpu); 644 policy = cpufreq_cpu_get(policy->cpu);
596 if (!policy) 645 if (!policy)
597 return -EINVAL; 646 return -EINVAL;
647
648 if (lock_policy_rwsem_read(policy->cpu) < 0)
649 return -EINVAL;
650
598 if (fattr->show) 651 if (fattr->show)
599 ret = fattr->show(policy, buf); 652 ret = fattr->show(policy, buf);
600 else 653 else
601 ret = -EIO; 654 ret = -EIO;
602 655
656 unlock_policy_rwsem_read(policy->cpu);
657
603 cpufreq_cpu_put(policy); 658 cpufreq_cpu_put(policy);
604 return ret; 659 return ret;
605} 660}
@@ -613,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
613 policy = cpufreq_cpu_get(policy->cpu); 668 policy = cpufreq_cpu_get(policy->cpu);
614 if (!policy) 669 if (!policy)
615 return -EINVAL; 670 return -EINVAL;
671
672 if (lock_policy_rwsem_write(policy->cpu) < 0)
673 return -EINVAL;
674
616 if (fattr->store) 675 if (fattr->store)
617 ret = fattr->store(policy, buf, count); 676 ret = fattr->store(policy, buf, count);
618 else 677 else
619 ret = -EIO; 678 ret = -EIO;
620 679
680 unlock_policy_rwsem_write(policy->cpu);
681
621 cpufreq_cpu_put(policy); 682 cpufreq_cpu_put(policy);
622 return ret; 683 return ret;
623} 684}
@@ -691,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
691 policy->cpu = cpu; 752 policy->cpu = cpu;
692 policy->cpus = cpumask_of_cpu(cpu); 753 policy->cpus = cpumask_of_cpu(cpu);
693 754
694 mutex_init(&policy->lock); 755 /* Initially set CPU itself as the policy_cpu */
695 mutex_lock(&policy->lock); 756 per_cpu(policy_cpu, cpu) = cpu;
757 lock_policy_rwsem_write(cpu);
758
696 init_completion(&policy->kobj_unregister); 759 init_completion(&policy->kobj_unregister);
697 INIT_WORK(&policy->update, handle_update); 760 INIT_WORK(&policy->update, handle_update);
698 761
@@ -702,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
702 ret = cpufreq_driver->init(policy); 765 ret = cpufreq_driver->init(policy);
703 if (ret) { 766 if (ret) {
704 dprintk("initialization failed\n"); 767 dprintk("initialization failed\n");
705 mutex_unlock(&policy->lock); 768 unlock_policy_rwsem_write(cpu);
706 goto err_out; 769 goto err_out;
707 } 770 }
708 771
@@ -716,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
716 */ 779 */
717 managed_policy = cpufreq_cpu_get(j); 780 managed_policy = cpufreq_cpu_get(j);
718 if (unlikely(managed_policy)) { 781 if (unlikely(managed_policy)) {
782
783 /* Set proper policy_cpu */
784 unlock_policy_rwsem_write(cpu);
785 per_cpu(policy_cpu, cpu) = managed_policy->cpu;
786
787 if (lock_policy_rwsem_write(cpu) < 0)
788 goto err_out_driver_exit;
789
719 spin_lock_irqsave(&cpufreq_driver_lock, flags); 790 spin_lock_irqsave(&cpufreq_driver_lock, flags);
720 managed_policy->cpus = policy->cpus; 791 managed_policy->cpus = policy->cpus;
721 cpufreq_cpu_data[cpu] = managed_policy; 792 cpufreq_cpu_data[cpu] = managed_policy;
@@ -726,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
726 &managed_policy->kobj, 797 &managed_policy->kobj,
727 "cpufreq"); 798 "cpufreq");
728 if (ret) { 799 if (ret) {
729 mutex_unlock(&policy->lock); 800 unlock_policy_rwsem_write(cpu);
730 goto err_out_driver_exit; 801 goto err_out_driver_exit;
731 } 802 }
732 803
733 cpufreq_debug_enable_ratelimit(); 804 cpufreq_debug_enable_ratelimit();
734 mutex_unlock(&policy->lock);
735 ret = 0; 805 ret = 0;
806 unlock_policy_rwsem_write(cpu);
736 goto err_out_driver_exit; /* call driver->exit() */ 807 goto err_out_driver_exit; /* call driver->exit() */
737 } 808 }
738 } 809 }
@@ -746,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
746 817
747 ret = kobject_register(&policy->kobj); 818 ret = kobject_register(&policy->kobj);
748 if (ret) { 819 if (ret) {
749 mutex_unlock(&policy->lock); 820 unlock_policy_rwsem_write(cpu);
750 goto err_out_driver_exit; 821 goto err_out_driver_exit;
751 } 822 }
752 /* set up files for this cpu device */ 823 /* set up files for this cpu device */
@@ -761,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
761 sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 832 sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
762 833
763 spin_lock_irqsave(&cpufreq_driver_lock, flags); 834 spin_lock_irqsave(&cpufreq_driver_lock, flags);
764 for_each_cpu_mask(j, policy->cpus) 835 for_each_cpu_mask(j, policy->cpus) {
765 cpufreq_cpu_data[j] = policy; 836 cpufreq_cpu_data[j] = policy;
837 per_cpu(policy_cpu, j) = policy->cpu;
838 }
766 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 839 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
767 840
768 /* symlink affected CPUs */ 841 /* symlink affected CPUs */
@@ -778,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
778 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, 851 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
779 "cpufreq"); 852 "cpufreq");
780 if (ret) { 853 if (ret) {
781 mutex_unlock(&policy->lock); 854 unlock_policy_rwsem_write(cpu);
782 goto err_out_unregister; 855 goto err_out_unregister;
783 } 856 }
784 } 857 }
785 858
786 policy->governor = NULL; /* to assure that the starting sequence is 859 policy->governor = NULL; /* to assure that the starting sequence is
787 * run in cpufreq_set_policy */ 860 * run in cpufreq_set_policy */
788 mutex_unlock(&policy->lock); 861 unlock_policy_rwsem_write(cpu);
789 862
790 /* set default policy */ 863 /* set default policy */
791 ret = cpufreq_set_policy(&new_policy); 864 ret = cpufreq_set_policy(&new_policy);
@@ -826,11 +899,13 @@ module_out:
826 899
827 900
828/** 901/**
829 * cpufreq_remove_dev - remove a CPU device 902 * __cpufreq_remove_dev - remove a CPU device
830 * 903 *
831 * Removes the cpufreq interface for a CPU device. 904 * Removes the cpufreq interface for a CPU device.
905 * Caller should already have policy_rwsem in write mode for this CPU.
906 * This routine frees the rwsem before returning.
832 */ 907 */
833static int cpufreq_remove_dev (struct sys_device * sys_dev) 908static int __cpufreq_remove_dev (struct sys_device * sys_dev)
834{ 909{
835 unsigned int cpu = sys_dev->id; 910 unsigned int cpu = sys_dev->id;
836 unsigned long flags; 911 unsigned long flags;
@@ -849,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
849 if (!data) { 924 if (!data) {
850 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 925 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
851 cpufreq_debug_enable_ratelimit(); 926 cpufreq_debug_enable_ratelimit();
927 unlock_policy_rwsem_write(cpu);
852 return -EINVAL; 928 return -EINVAL;
853 } 929 }
854 cpufreq_cpu_data[cpu] = NULL; 930 cpufreq_cpu_data[cpu] = NULL;
@@ -865,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
865 sysfs_remove_link(&sys_dev->kobj, "cpufreq"); 941 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
866 cpufreq_cpu_put(data); 942 cpufreq_cpu_put(data);
867 cpufreq_debug_enable_ratelimit(); 943 cpufreq_debug_enable_ratelimit();
944 unlock_policy_rwsem_write(cpu);
868 return 0; 945 return 0;
869 } 946 }
870#endif 947#endif
@@ -873,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
873 if (!kobject_get(&data->kobj)) { 950 if (!kobject_get(&data->kobj)) {
874 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 951 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
875 cpufreq_debug_enable_ratelimit(); 952 cpufreq_debug_enable_ratelimit();
953 unlock_policy_rwsem_write(cpu);
876 return -EFAULT; 954 return -EFAULT;
877 } 955 }
878 956
@@ -906,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
906 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 984 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
907#endif 985#endif
908 986
909 mutex_lock(&data->lock);
910 if (cpufreq_driver->target) 987 if (cpufreq_driver->target)
911 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 988 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
912 mutex_unlock(&data->lock); 989
990 unlock_policy_rwsem_write(cpu);
913 991
914 kobject_unregister(&data->kobj); 992 kobject_unregister(&data->kobj);
915 993
@@ -933,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
933} 1011}
934 1012
935 1013
1014static int cpufreq_remove_dev (struct sys_device * sys_dev)
1015{
1016 unsigned int cpu = sys_dev->id;
1017 int retval;
1018 if (unlikely(lock_policy_rwsem_write(cpu)))
1019 BUG();
1020
1021 retval = __cpufreq_remove_dev(sys_dev);
1022 return retval;
1023}
1024
1025
936static void handle_update(struct work_struct *work) 1026static void handle_update(struct work_struct *work)
937{ 1027{
938 struct cpufreq_policy *policy = 1028 struct cpufreq_policy *policy =
@@ -980,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
980 unsigned int ret_freq = 0; 1070 unsigned int ret_freq = 0;
981 1071
982 if (policy) { 1072 if (policy) {
983 mutex_lock(&policy->lock); 1073 if (unlikely(lock_policy_rwsem_read(cpu)))
1074 return ret_freq;
1075
984 ret_freq = policy->cur; 1076 ret_freq = policy->cur;
985 mutex_unlock(&policy->lock); 1077
1078 unlock_policy_rwsem_read(cpu);
986 cpufreq_cpu_put(policy); 1079 cpufreq_cpu_put(policy);
987 } 1080 }
988 1081
@@ -991,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
991EXPORT_SYMBOL(cpufreq_quick_get); 1084EXPORT_SYMBOL(cpufreq_quick_get);
992 1085
993 1086
994/** 1087static unsigned int __cpufreq_get(unsigned int cpu)
995 * cpufreq_get - get the current CPU frequency (in kHz)
996 * @cpu: CPU number
997 *
998 * Get the CPU current (static) CPU frequency
999 */
1000unsigned int cpufreq_get(unsigned int cpu)
1001{ 1088{
1002 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1089 struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
1003 unsigned int ret_freq = 0; 1090 unsigned int ret_freq = 0;
1004 1091
1005 if (!policy)
1006 return 0;
1007
1008 if (!cpufreq_driver->get) 1092 if (!cpufreq_driver->get)
1009 goto out; 1093 return (ret_freq);
1010
1011 mutex_lock(&policy->lock);
1012 1094
1013 ret_freq = cpufreq_driver->get(cpu); 1095 ret_freq = cpufreq_driver->get(cpu);
1014 1096
@@ -1022,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu)
1022 } 1104 }
1023 } 1105 }
1024 1106
1025 mutex_unlock(&policy->lock); 1107 return (ret_freq);
1108}
1026 1109
1027out: 1110/**
1028 cpufreq_cpu_put(policy); 1111 * cpufreq_get - get the current CPU frequency (in kHz)
1112 * @cpu: CPU number
1113 *
1114 * Get the CPU current (static) CPU frequency
1115 */
1116unsigned int cpufreq_get(unsigned int cpu)
1117{
1118 unsigned int ret_freq = 0;
1119 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1120
1121 if (!policy)
1122 goto out;
1123
1124 if (unlikely(lock_policy_rwsem_read(cpu)))
1125 goto out_policy;
1126
1127 ret_freq = __cpufreq_get(cpu);
1029 1128
1129 unlock_policy_rwsem_read(cpu);
1130
1131out_policy:
1132 cpufreq_cpu_put(policy);
1133out:
1030 return (ret_freq); 1134 return (ret_freq);
1031} 1135}
1032EXPORT_SYMBOL(cpufreq_get); 1136EXPORT_SYMBOL(cpufreq_get);
@@ -1278,7 +1382,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
1278 *********************************************************************/ 1382 *********************************************************************/
1279 1383
1280 1384
1281/* Must be called with lock_cpu_hotplug held */
1282int __cpufreq_driver_target(struct cpufreq_policy *policy, 1385int __cpufreq_driver_target(struct cpufreq_policy *policy,
1283 unsigned int target_freq, 1386 unsigned int target_freq,
1284 unsigned int relation) 1387 unsigned int relation)
@@ -1304,20 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1304 if (!policy) 1407 if (!policy)
1305 return -EINVAL; 1408 return -EINVAL;
1306 1409
1307 lock_cpu_hotplug(); 1410 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1308 mutex_lock(&policy->lock); 1411 return -EINVAL;
1309 1412
1310 ret = __cpufreq_driver_target(policy, target_freq, relation); 1413 ret = __cpufreq_driver_target(policy, target_freq, relation);
1311 1414
1312 mutex_unlock(&policy->lock); 1415 unlock_policy_rwsem_write(policy->cpu);
1313 unlock_cpu_hotplug();
1314 1416
1315 cpufreq_cpu_put(policy); 1417 cpufreq_cpu_put(policy);
1316 return ret; 1418 return ret;
1317} 1419}
1318EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1420EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1319 1421
1320int cpufreq_driver_getavg(struct cpufreq_policy *policy) 1422int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
1321{ 1423{
1322 int ret = 0; 1424 int ret = 0;
1323 1425
@@ -1325,20 +1427,15 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy)
1325 if (!policy) 1427 if (!policy)
1326 return -EINVAL; 1428 return -EINVAL;
1327 1429
1328 mutex_lock(&policy->lock);
1329
1330 if (cpu_online(policy->cpu) && cpufreq_driver->getavg) 1430 if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
1331 ret = cpufreq_driver->getavg(policy->cpu); 1431 ret = cpufreq_driver->getavg(policy->cpu);
1332 1432
1333 mutex_unlock(&policy->lock);
1334
1335 cpufreq_cpu_put(policy); 1433 cpufreq_cpu_put(policy);
1336 return ret; 1434 return ret;
1337} 1435}
1338EXPORT_SYMBOL_GPL(cpufreq_driver_getavg); 1436EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1339 1437
1340/* 1438/*
1341 * Locking: Must be called with the lock_cpu_hotplug() lock held
1342 * when "event" is CPUFREQ_GOV_LIMITS 1439 * when "event" is CPUFREQ_GOV_LIMITS
1343 */ 1440 */
1344 1441
@@ -1420,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1420 if (!cpu_policy) 1517 if (!cpu_policy)
1421 return -EINVAL; 1518 return -EINVAL;
1422 1519
1423 mutex_lock(&cpu_policy->lock);
1424 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); 1520 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1425 mutex_unlock(&cpu_policy->lock);
1426 1521
1427 cpufreq_cpu_put(cpu_policy); 1522 cpufreq_cpu_put(cpu_policy);
1428 return 0; 1523 return 0;
@@ -1433,7 +1528,6 @@ EXPORT_SYMBOL(cpufreq_get_policy);
1433/* 1528/*
1434 * data : current policy. 1529 * data : current policy.
1435 * policy : policy to be set. 1530 * policy : policy to be set.
1436 * Locking: Must be called with the lock_cpu_hotplug() lock held
1437 */ 1531 */
1438static int __cpufreq_set_policy(struct cpufreq_policy *data, 1532static int __cpufreq_set_policy(struct cpufreq_policy *data,
1439 struct cpufreq_policy *policy) 1533 struct cpufreq_policy *policy)
@@ -1539,10 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1539 if (!data) 1633 if (!data)
1540 return -EINVAL; 1634 return -EINVAL;
1541 1635
1542 lock_cpu_hotplug(); 1636 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1637 return -EINVAL;
1543 1638
1544 /* lock this CPU */
1545 mutex_lock(&data->lock);
1546 1639
1547 ret = __cpufreq_set_policy(data, policy); 1640 ret = __cpufreq_set_policy(data, policy);
1548 data->user_policy.min = data->min; 1641 data->user_policy.min = data->min;
@@ -1550,9 +1643,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1550 data->user_policy.policy = data->policy; 1643 data->user_policy.policy = data->policy;
1551 data->user_policy.governor = data->governor; 1644 data->user_policy.governor = data->governor;
1552 1645
1553 mutex_unlock(&data->lock); 1646 unlock_policy_rwsem_write(policy->cpu);
1554 1647
1555 unlock_cpu_hotplug();
1556 cpufreq_cpu_put(data); 1648 cpufreq_cpu_put(data);
1557 1649
1558 return ret; 1650 return ret;
@@ -1576,8 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu)
1576 if (!data) 1668 if (!data)
1577 return -ENODEV; 1669 return -ENODEV;
1578 1670
1579 lock_cpu_hotplug(); 1671 if (unlikely(lock_policy_rwsem_write(cpu)))
1580 mutex_lock(&data->lock); 1672 return -EINVAL;
1581 1673
1582 dprintk("updating policy for CPU %u\n", cpu); 1674 dprintk("updating policy for CPU %u\n", cpu);
1583 memcpy(&policy, data, sizeof(struct cpufreq_policy)); 1675 memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1602,8 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu)
1602 1694
1603 ret = __cpufreq_set_policy(data, &policy); 1695 ret = __cpufreq_set_policy(data, &policy);
1604 1696
1605 mutex_unlock(&data->lock); 1697 unlock_policy_rwsem_write(cpu);
1606 unlock_cpu_hotplug(); 1698
1607 cpufreq_cpu_put(data); 1699 cpufreq_cpu_put(data);
1608 return ret; 1700 return ret;
1609} 1701}
@@ -1613,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1613 unsigned long action, void *hcpu) 1705 unsigned long action, void *hcpu)
1614{ 1706{
1615 unsigned int cpu = (unsigned long)hcpu; 1707 unsigned int cpu = (unsigned long)hcpu;
1616 struct cpufreq_policy *policy;
1617 struct sys_device *sys_dev; 1708 struct sys_device *sys_dev;
1709 struct cpufreq_policy *policy;
1618 1710
1619 sys_dev = get_cpu_sysdev(cpu); 1711 sys_dev = get_cpu_sysdev(cpu);
1620
1621 if (sys_dev) { 1712 if (sys_dev) {
1622 switch (action) { 1713 switch (action) {
1623 case CPU_ONLINE: 1714 case CPU_ONLINE:
1624 cpufreq_add_dev(sys_dev); 1715 cpufreq_add_dev(sys_dev);
1625 break; 1716 break;
1626 case CPU_DOWN_PREPARE: 1717 case CPU_DOWN_PREPARE:
1627 /* 1718 if (unlikely(lock_policy_rwsem_write(cpu)))
1628 * We attempt to put this cpu in lowest frequency 1719 BUG();
1629 * possible before going down. This will permit 1720
1630 * hardware-managed P-State to switch other related
1631 * threads to min or higher speeds if possible.
1632 */
1633 policy = cpufreq_cpu_data[cpu]; 1721 policy = cpufreq_cpu_data[cpu];
1634 if (policy) { 1722 if (policy) {
1635 cpufreq_driver_target(policy, policy->min, 1723 __cpufreq_driver_target(policy, policy->min,
1636 CPUFREQ_RELATION_H); 1724 CPUFREQ_RELATION_H);
1637 } 1725 }
1726 __cpufreq_remove_dev(sys_dev);
1638 break; 1727 break;
1639 case CPU_DEAD: 1728 case CPU_DOWN_FAILED:
1640 cpufreq_remove_dev(sys_dev); 1729 cpufreq_add_dev(sys_dev);
1641 break; 1730 break;
1642 } 1731 }
1643 } 1732 }
@@ -1751,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1751 return 0; 1840 return 0;
1752} 1841}
1753EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 1842EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1843
1844static int __init cpufreq_core_init(void)
1845{
1846 int cpu;
1847
1848 for_each_possible_cpu(cpu) {
1849 per_cpu(policy_cpu, cpu) = -1;
1850 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1851 }
1852 return 0;
1853}
1854
1855core_initcall(cpufreq_core_init);
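This conversion replaces the per-policy mutex plus lock_cpu_hotplug() with the per-CPU rwsem defined at the top of the file. Read-side callers follow a take-check-release pattern; a sketch of the expected usage, mirroring cpufreq_quick_get() above (read_cur_freq() itself is illustrative, not part of the patch):

static unsigned int read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	/* returns non-zero if the CPU went offline under us */
	if (lock_policy_rwsem_read(cpu) == 0) {
		freq = policy->cur;
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}

Note the hotplug interaction: CPU_DOWN_PREPARE now takes the write lock and __cpufreq_remove_dev() is documented to release it, so the same semaphore serializes teardown against readers without the old global hotplug lock.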
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 05d6c22ba07c..26f440ccc3fb 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -429,14 +429,12 @@ static void dbs_check_cpu(int cpu)
429static void do_dbs_timer(struct work_struct *work) 429static void do_dbs_timer(struct work_struct *work)
430{ 430{
431 int i; 431 int i;
432 lock_cpu_hotplug();
433 mutex_lock(&dbs_mutex); 432 mutex_lock(&dbs_mutex);
434 for_each_online_cpu(i) 433 for_each_online_cpu(i)
435 dbs_check_cpu(i); 434 dbs_check_cpu(i);
436 schedule_delayed_work(&dbs_work, 435 schedule_delayed_work(&dbs_work,
437 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 436 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
438 mutex_unlock(&dbs_mutex); 437 mutex_unlock(&dbs_mutex);
439 unlock_cpu_hotplug();
440} 438}
441 439
442static inline void dbs_timer_init(void) 440static inline void dbs_timer_init(void)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f697449327c6..d60bcb9d14cc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
52static void do_dbs_timer(struct work_struct *work); 52static void do_dbs_timer(struct work_struct *work);
53 53
54/* Sampling types */ 54/* Sampling types */
55enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; 55enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
56 56
57struct cpu_dbs_info_s { 57struct cpu_dbs_info_s {
58 cputime64_t prev_cpu_idle; 58 cputime64_t prev_cpu_idle;
59 cputime64_t prev_cpu_wall; 59 cputime64_t prev_cpu_wall;
60 struct cpufreq_policy *cur_policy; 60 struct cpufreq_policy *cur_policy;
61 struct delayed_work work; 61 struct delayed_work work;
62 enum dbs_sample sample_type;
63 unsigned int enable;
64 struct cpufreq_frequency_table *freq_table; 62 struct cpufreq_frequency_table *freq_table;
65 unsigned int freq_lo; 63 unsigned int freq_lo;
66 unsigned int freq_lo_jiffies; 64 unsigned int freq_lo_jiffies;
67 unsigned int freq_hi_jiffies; 65 unsigned int freq_hi_jiffies;
66 int cpu;
67 unsigned int enable:1,
68 sample_type:1;
68}; 69};
69static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 70static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
70 71
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
402 if (load < (dbs_tuners_ins.up_threshold - 10)) { 403 if (load < (dbs_tuners_ins.up_threshold - 10)) {
403 unsigned int freq_next, freq_cur; 404 unsigned int freq_next, freq_cur;
404 405
405 freq_cur = cpufreq_driver_getavg(policy); 406 freq_cur = __cpufreq_driver_getavg(policy);
406 if (!freq_cur) 407 if (!freq_cur)
407 freq_cur = policy->cur; 408 freq_cur = policy->cur;
408 409
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
423 424
424static void do_dbs_timer(struct work_struct *work) 425static void do_dbs_timer(struct work_struct *work)
425{ 426{
426 unsigned int cpu = smp_processor_id(); 427 struct cpu_dbs_info_s *dbs_info =
427 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 428 container_of(work, struct cpu_dbs_info_s, work.work);
428 enum dbs_sample sample_type = dbs_info->sample_type; 429 unsigned int cpu = dbs_info->cpu;
430 int sample_type = dbs_info->sample_type;
431
429 /* We want all CPUs to do sampling nearly on same jiffy */ 432 /* We want all CPUs to do sampling nearly on same jiffy */
430 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 433 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
431 434
@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work)
434 437
435 delay -= jiffies % delay; 438 delay -= jiffies % delay;
436 439
437 if (!dbs_info->enable) 440 if (lock_policy_rwsem_write(cpu) < 0)
441 return;
442
443 if (!dbs_info->enable) {
444 unlock_policy_rwsem_write(cpu);
438 return; 445 return;
446 }
447
439 /* Common NORMAL_SAMPLE setup */ 448 /* Common NORMAL_SAMPLE setup */
440 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 449 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
441 if (!dbs_tuners_ins.powersave_bias || 450 if (!dbs_tuners_ins.powersave_bias ||
442 sample_type == DBS_NORMAL_SAMPLE) { 451 sample_type == DBS_NORMAL_SAMPLE) {
443 lock_cpu_hotplug();
444 dbs_check_cpu(dbs_info); 452 dbs_check_cpu(dbs_info);
445 unlock_cpu_hotplug();
446 if (dbs_info->freq_lo) { 453 if (dbs_info->freq_lo) {
447 /* Setup timer for SUB_SAMPLE */ 454 /* Setup timer for SUB_SAMPLE */
448 dbs_info->sample_type = DBS_SUB_SAMPLE; 455 dbs_info->sample_type = DBS_SUB_SAMPLE;
@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work)
454 CPUFREQ_RELATION_H); 461 CPUFREQ_RELATION_H);
455 } 462 }
456 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 463 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
464 unlock_policy_rwsem_write(cpu);
457} 465}
458 466
459static inline void dbs_timer_init(unsigned int cpu) 467static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
460{ 468{
461 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
462 /* We want all CPUs to do sampling nearly on same jiffy */ 469 /* We want all CPUs to do sampling nearly on same jiffy */
463 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 470 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
464 delay -= jiffies % delay; 471 delay -= jiffies % delay;
465 472
473 dbs_info->enable = 1;
466 ondemand_powersave_bias_init(); 474 ondemand_powersave_bias_init();
467 INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
468 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 475 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
469 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 476 INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
477 queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
478 delay);
470} 479}
471 480
472static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 481static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
473{ 482{
474 dbs_info->enable = 0; 483 dbs_info->enable = 0;
475 cancel_delayed_work(&dbs_info->work); 484 cancel_delayed_work(&dbs_info->work);
476 flush_workqueue(kondemand_wq);
477} 485}
478 486
479static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 487static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
502 510
503 mutex_lock(&dbs_mutex); 511 mutex_lock(&dbs_mutex);
504 dbs_enable++; 512 dbs_enable++;
505 if (dbs_enable == 1) {
506 kondemand_wq = create_workqueue("kondemand");
507 if (!kondemand_wq) {
508 printk(KERN_ERR
509 "Creation of kondemand failed\n");
510 dbs_enable--;
511 mutex_unlock(&dbs_mutex);
512 return -ENOSPC;
513 }
514 }
515 513
516 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); 514 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
517 if (rc) { 515 if (rc) {
518 if (dbs_enable == 1)
519 destroy_workqueue(kondemand_wq);
520 dbs_enable--; 516 dbs_enable--;
521 mutex_unlock(&dbs_mutex); 517 mutex_unlock(&dbs_mutex);
522 return rc; 518 return rc;
@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
530 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); 526 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
531 j_dbs_info->prev_cpu_wall = get_jiffies_64(); 527 j_dbs_info->prev_cpu_wall = get_jiffies_64();
532 } 528 }
533 this_dbs_info->enable = 1; 529 this_dbs_info->cpu = cpu;
534 /* 530 /*
535 * Start the timerschedule work, when this governor 531 * Start the timerschedule work, when this governor
536 * is used for first time 532 * is used for first time
@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
550 546
551 dbs_tuners_ins.sampling_rate = def_sampling_rate; 547 dbs_tuners_ins.sampling_rate = def_sampling_rate;
552 } 548 }
553 dbs_timer_init(policy->cpu); 549 dbs_timer_init(this_dbs_info);
554 550
555 mutex_unlock(&dbs_mutex); 551 mutex_unlock(&dbs_mutex);
556 break; 552 break;
@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
560 dbs_timer_exit(this_dbs_info); 556 dbs_timer_exit(this_dbs_info);
561 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 557 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
562 dbs_enable--; 558 dbs_enable--;
563 if (dbs_enable == 0)
564 destroy_workqueue(kondemand_wq);
565
566 mutex_unlock(&dbs_mutex); 559 mutex_unlock(&dbs_mutex);
567 560
568 break; 561 break;
@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = {
591 584
592static int __init cpufreq_gov_dbs_init(void) 585static int __init cpufreq_gov_dbs_init(void)
593{ 586{
587 kondemand_wq = create_workqueue("kondemand");
588 if (!kondemand_wq) {
589 printk(KERN_ERR "Creation of kondemand failed\n");
590 return -EFAULT;
591 }
594 return cpufreq_register_governor(&cpufreq_gov_dbs); 592 return cpufreq_register_governor(&cpufreq_gov_dbs);
595} 593}
596 594
597static void __exit cpufreq_gov_dbs_exit(void) 595static void __exit cpufreq_gov_dbs_exit(void)
598{ 596{
599 cpufreq_unregister_governor(&cpufreq_gov_dbs); 597 cpufreq_unregister_governor(&cpufreq_gov_dbs);
598 destroy_workqueue(kondemand_wq);
600} 599}
601 600
602 601
@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL");
608 607
609module_init(cpufreq_gov_dbs_init); 608module_init(cpufreq_gov_dbs_init);
610module_exit(cpufreq_gov_dbs_exit); 609module_exit(cpufreq_gov_dbs_exit);
610
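Recovering the per-CPU context through container_of() instead of smp_processor_id() is what lets do_dbs_timer() stay correct even if the work item briefly runs on another CPU. A sketch of the pattern in isolation (struct sampler, sampler_wq and the 50 ms period are illustrative):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct sampler {
	struct delayed_work work;
	int cpu;
};

static struct workqueue_struct *sampler_wq;

static void sampler_fn(struct work_struct *work)
{
	/* recover per-CPU state from the embedded work item */
	struct sampler *s = container_of(work, struct sampler, work.work);
	int delay = usecs_to_jiffies(50000);	/* 50 ms period */

	/* align every CPU's next sample onto the same jiffy boundary */
	delay -= jiffies % delay;

	/* ... sample load for s->cpu here ... */

	queue_delayed_work_on(s->cpu, sampler_wq, &s->work, delay);
}

Creating the workqueue once in cpufreq_gov_dbs_init(), as the hunk above does, also removes the create/destroy race that the old dbs_enable reference counting carried.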
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 91ad342a6051..d1c7cac9316c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void)
370 cpufreq_unregister_notifier(&notifier_trans_block, 370 cpufreq_unregister_notifier(&notifier_trans_block,
371 CPUFREQ_TRANSITION_NOTIFIER); 371 CPUFREQ_TRANSITION_NOTIFIER);
372 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 372 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
373 lock_cpu_hotplug();
374 for_each_online_cpu(cpu) { 373 for_each_online_cpu(cpu) {
375 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, 374 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
376 CPU_DEAD, (void *)(long)cpu); 375 CPU_DEAD, (void *)(long)cpu);
377 } 376 }
378 unlock_cpu_hotplug();
379} 377}
380 378
381MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 379MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
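Dropping lock_cpu_hotplug() around the loop leaves the exit path invoking its hotplug callback by hand for each online CPU. The callback shape is the standard hotcpu-notifier one, the same pattern the cpufreq core callback above uses; a minimal sketch (all example_* names are illustrative):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* allocate/attach per-CPU state for 'cpu' */
		break;
	case CPU_DEAD:
		/* free per-CPU state for 'cpu' */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

register_hotcpu_notifier(&example_cpu_notifier) in module init and the matching unregister in exit complete the pattern.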
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2a4eb0bfaf30..860345c7799a 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
71 71
72 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 72 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
73 73
74 lock_cpu_hotplug();
75 mutex_lock(&userspace_mutex); 74 mutex_lock(&userspace_mutex);
76 if (!cpu_is_managed[policy->cpu]) 75 if (!cpu_is_managed[policy->cpu])
77 goto err; 76 goto err;
@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
94 93
95 err: 94 err:
96 mutex_unlock(&userspace_mutex); 95 mutex_unlock(&userspace_mutex);
97 unlock_cpu_hotplug();
98 return ret; 96 return ret;
99} 97}
100 98
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index cd251efda410..0a26e0663542 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -546,7 +546,7 @@ static void ads7846_rx(void *ads)
546 ts->spi->dev.bus_id, ts->tc.ignore, Rt); 546 ts->spi->dev.bus_id, ts->tc.ignore, Rt);
547#endif 547#endif
548 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), 548 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
549 HRTIMER_REL); 549 HRTIMER_MODE_REL);
550 return; 550 return;
551 } 551 }
552 552
@@ -578,7 +578,8 @@ static void ads7846_rx(void *ads)
578#endif 578#endif
579 } 579 }
580 580
581 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), HRTIMER_REL); 581 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD),
582 HRTIMER_MODE_REL);
582} 583}
583 584
584static int ads7846_debounce(void *ads, int data_idx, int *val) 585static int ads7846_debounce(void *ads, int data_idx, int *val)
@@ -667,7 +668,7 @@ static void ads7846_rx_val(void *ads)
667 status); 668 status);
668} 669}
669 670
670static int ads7846_timer(struct hrtimer *handle) 671static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
671{ 672{
672 struct ads7846 *ts = container_of(handle, struct ads7846, timer); 673 struct ads7846 *ts = container_of(handle, struct ads7846, timer);
673 int status = 0; 674 int status = 0;
@@ -724,7 +725,7 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
724 disable_irq(ts->spi->irq); 725 disable_irq(ts->spi->irq);
725 ts->pending = 1; 726 ts->pending = 1;
726 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), 727 hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY),
727 HRTIMER_REL); 728 HRTIMER_MODE_REL);
728 } 729 }
729 } 730 }
730 spin_unlock_irqrestore(&ts->lock, flags); 731 spin_unlock_irqrestore(&ts->lock, flags);
@@ -862,7 +863,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
862 ts->spi = spi; 863 ts->spi = spi;
863 ts->input = input_dev; 864 ts->input = input_dev;
864 865
865 hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_REL); 866 hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
866 ts->timer.function = ads7846_timer; 867 ts->timer.function = ads7846_timer;
867 868
868 spin_lock_init(&ts->lock); 869 spin_lock_init(&ts->lock);
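These ads7846 hunks are pure API catch-up: HRTIMER_REL became HRTIMER_MODE_REL, and callbacks now return enum hrtimer_restart instead of int. A minimal relative one-shot timer under the new names (poll_timer_fn and the 5 ms value are illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer poll_timer;

static enum hrtimer_restart poll_timer_fn(struct hrtimer *t)
{
	/* do the periodic work, then let the timer die; callers re-arm */
	return HRTIMER_NORESTART;
}

/* setup, typically in probe(): */
hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
poll_timer.function = poll_timer_fn;
hrtimer_start(&poll_timer, ktime_set(0, 5 * NSEC_PER_MSEC),
	      HRTIMER_MODE_REL);

Returning HRTIMER_RESTART after calling hrtimer_forward() would instead keep the timer periodic, which is why the return type change is not just cosmetic.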
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index 835b806a9de7..077e297d8c72 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -5,4 +5,4 @@ ser_gigaset-y := ser-gigaset.o asyncdata.o
5 5
6obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o 6obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
7obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o 7obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
8obj-$(CONFIG_GIGASET_M105) += ser_gigaset.o gigaset.o 8obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o gigaset.o
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bedae4ad3f74..80b199fa0aa9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -107,4 +107,19 @@ config MSI_LAPTOP
107 107
108 If you have an MSI S270 laptop, say Y or M here. 108 If you have an MSI S270 laptop, say Y or M here.
109 109
110config SONY_LAPTOP
111 tristate "Sony Laptop Extras"
112 depends on X86 && ACPI
113 select BACKLIGHT_CLASS_DEVICE
114 ---help---
115 This mini-driver drives the SNC device present in the ACPI BIOS of
116 the Sony Vaio laptops.
117
118 It gives access to some extra laptop functionalities. In its current
119 form, this driver let the user set or query the screen brightness
120 through the backlight subsystem and remove/apply power to some
121 devices.
122
123 Read <file:Documentation/sony-laptop.txt> for more information.
124
110endmenu 125endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 35da53c409c0..7793ccd79049 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_LKDTM) += lkdtm.o
11obj-$(CONFIG_TIFM_CORE) += tifm_core.o 11obj-$(CONFIG_TIFM_CORE) += tifm_core.o
12obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 12obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
13obj-$(CONFIG_SGI_IOC4) += ioc4.o 13obj-$(CONFIG_SGI_IOC4) += ioc4.o
14obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 861c39935f99..e4e2b707a353 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -1088,11 +1088,6 @@ static int __init asus_laptop_init(void)
1088 if (acpi_disabled) 1088 if (acpi_disabled)
1089 return -ENODEV; 1089 return -ENODEV;
1090 1090
1091 if (!acpi_specific_hotkey_enabled) {
1092 printk(ASUS_ERR "Using generic hotkey driver\n");
1093 return -ENODEV;
1094 }
1095
1096 result = acpi_bus_register_driver(&asus_hotk_driver); 1091 result = acpi_bus_register_driver(&asus_hotk_driver);
1097 if (result < 0) 1092 if (result < 0)
1098 return result; 1093 return result;
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
new file mode 100644
index 000000000000..cabbed0015e4
--- /dev/null
+++ b/drivers/misc/sony-laptop.c
@@ -0,0 +1,562 @@
1/*
2 * ACPI Sony Notebook Control Driver (SNC)
3 *
4 * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
5 * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
6 *
7 * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
8 * which are copyrighted by their respective authors.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/init.h>
30#include <linux/types.h>
31#include <linux/backlight.h>
32#include <linux/platform_device.h>
33#include <linux/err.h>
34#include <acpi/acpi_drivers.h>
35#include <acpi/acpi_bus.h>
36#include <asm/uaccess.h>
37
38#define ACPI_SNC_CLASS "sony"
39#define ACPI_SNC_HID "SNY5001"
40#define ACPI_SNC_DRIVER_NAME "ACPI Sony Notebook Control Driver v0.4"
41
42/* the device uses 1-based values, while the backlight subsystem uses
43 0-based values */
44#define SONY_MAX_BRIGHTNESS 8
45
46#define LOG_PFX KERN_WARNING "sony-laptop: "
47
48MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
49MODULE_DESCRIPTION(ACPI_SNC_DRIVER_NAME);
50MODULE_LICENSE("GPL");
51
52static int debug;
53module_param(debug, int, 0);
54MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
55 "the development of this driver");
56
57static ssize_t sony_acpi_show(struct device *, struct device_attribute *,
58 char *);
59static ssize_t sony_acpi_store(struct device *, struct device_attribute *,
60 const char *, size_t);
61static int boolean_validate(const int, const int);
62static int brightness_default_validate(const int, const int);
63
64#define SNC_VALIDATE_IN 0
65#define SNC_VALIDATE_OUT 1
66
67struct sony_acpi_value {
68 char *name; /* name of the entry */
69 char **acpiget; /* names of the ACPI get function */
70 char **acpiset; /* names of the ACPI set function */
71 int (*validate)(const int, const int); /* input/output validation */
72 int value; /* current setting */
73 int valid; /* Has ever been set */
 74	int debug;			/* active only in debug mode? */
 75	struct device_attribute devattr;	/* sysfs attribute */
76};
77
78#define HANDLE_NAMES(_name, _values...) \
79 static char *snc_##_name[] = { _values, NULL }
80
81#define SONY_ACPI_VALUE(_name, _getters, _setters, _validate, _debug) \
82 { \
83 .name = __stringify(_name), \
84 .acpiget = _getters, \
85 .acpiset = _setters, \
86 .validate = _validate, \
87 .debug = _debug, \
88 .devattr = __ATTR(_name, 0, sony_acpi_show, sony_acpi_store), \
89 }
90
91#define SONY_ACPI_VALUE_NULL { .name = NULL }
92
93HANDLE_NAMES(fnkey_get, "GHKE");
94
95HANDLE_NAMES(brightness_def_get, "GPBR");
96HANDLE_NAMES(brightness_def_set, "SPBR");
97
98HANDLE_NAMES(cdpower_get, "GCDP");
99HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");
100
101HANDLE_NAMES(audiopower_get, "GAZP");
102HANDLE_NAMES(audiopower_set, "AZPW");
103
104HANDLE_NAMES(lanpower_get, "GLNP");
105HANDLE_NAMES(lanpower_set, "LNPW");
106
107HANDLE_NAMES(PID_get, "GPID");
108
109HANDLE_NAMES(CTR_get, "GCTR");
110HANDLE_NAMES(CTR_set, "SCTR");
111
112HANDLE_NAMES(PCR_get, "GPCR");
113HANDLE_NAMES(PCR_set, "SPCR");
114
115HANDLE_NAMES(CMI_get, "GCMI");
116HANDLE_NAMES(CMI_set, "SCMI");
117
118static struct sony_acpi_value sony_acpi_values[] = {
119 SONY_ACPI_VALUE(brightness_default, snc_brightness_def_get,
120 snc_brightness_def_set, brightness_default_validate, 0),
121 SONY_ACPI_VALUE(fnkey, snc_fnkey_get, NULL, NULL, 0),
122 SONY_ACPI_VALUE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0),
123 SONY_ACPI_VALUE(audiopower, snc_audiopower_get, snc_audiopower_set,
124 boolean_validate, 0),
125 SONY_ACPI_VALUE(lanpower, snc_lanpower_get, snc_lanpower_set,
126 boolean_validate, 1),
127 /* unknown methods */
128 SONY_ACPI_VALUE(PID, snc_PID_get, NULL, NULL, 1),
129 SONY_ACPI_VALUE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
130 SONY_ACPI_VALUE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1),
131 SONY_ACPI_VALUE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1),
132 SONY_ACPI_VALUE_NULL
133};
134
135static acpi_handle sony_acpi_handle;
136static struct acpi_device *sony_acpi_acpi_device = NULL;
137
138/*
139 * acpi_evaluate_object wrappers
140 */
141static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
142{
143 struct acpi_buffer output;
144 union acpi_object out_obj;
145 acpi_status status;
146
147 output.length = sizeof(out_obj);
148 output.pointer = &out_obj;
149
150 status = acpi_evaluate_object(handle, name, NULL, &output);
151 if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
152 *result = out_obj.integer.value;
153 return 0;
154 }
155
 156	printk(LOG_PFX "acpi_callgetfunc failed\n");
157
158 return -1;
159}
160
161static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
162 int *result)
163{
164 struct acpi_object_list params;
165 union acpi_object in_obj;
166 struct acpi_buffer output;
167 union acpi_object out_obj;
168 acpi_status status;
169
170 params.count = 1;
171 params.pointer = &in_obj;
172 in_obj.type = ACPI_TYPE_INTEGER;
173 in_obj.integer.value = value;
174
175 output.length = sizeof(out_obj);
176 output.pointer = &out_obj;
177
178 status = acpi_evaluate_object(handle, name, &params, &output);
179 if (status == AE_OK) {
180 if (result != NULL) {
181 if (out_obj.type != ACPI_TYPE_INTEGER) {
182 printk(LOG_PFX "acpi_evaluate_object bad "
183 "return type\n");
184 return -1;
185 }
186 *result = out_obj.integer.value;
187 }
188 return 0;
189 }
190
191 printk(LOG_PFX "acpi_evaluate_object failed\n");
192
193 return -1;
194}
195
196/*
197 * sony_acpi_values input/output validate functions
198 */
199
200/* brightness_default_validate:
201 *
 202 * manipulate input/output values to keep consistency with the
203 * backlight framework for which brightness values are 0-based.
204 */
205static int brightness_default_validate(const int direction, const int value)
206{
207 switch (direction) {
208 case SNC_VALIDATE_OUT:
209 return value - 1;
210 case SNC_VALIDATE_IN:
211 if (value >= 0 && value < SONY_MAX_BRIGHTNESS)
212 return value + 1;
213 }
214 return -EINVAL;
215}
216
217/* boolean_validate:
218 *
219 * on input validate boolean values 0/1, on output just pass the
220 * received value.
221 */
222static int boolean_validate(const int direction, const int value)
223{
224 if (direction == SNC_VALIDATE_IN) {
225 if (value != 0 && value != 1)
226 return -EINVAL;
227 }
228 return value;
229}
230
231/*
232 * Sysfs show/store common to all sony_acpi_values
233 */
234static ssize_t sony_acpi_show(struct device *dev, struct device_attribute *attr,
235 char *buffer)
236{
237 int value;
238 struct sony_acpi_value *item =
239 container_of(attr, struct sony_acpi_value, devattr);
240
241 if (!*item->acpiget)
242 return -EIO;
243
244 if (acpi_callgetfunc(sony_acpi_handle, *item->acpiget, &value) < 0)
245 return -EIO;
246
247 if (item->validate)
248 value = item->validate(SNC_VALIDATE_OUT, value);
249
250 return snprintf(buffer, PAGE_SIZE, "%d\n", value);
251}
252
253static ssize_t sony_acpi_store(struct device *dev,
254 struct device_attribute *attr,
255 const char *buffer, size_t count)
256{
257 int value;
258 struct sony_acpi_value *item =
259 container_of(attr, struct sony_acpi_value, devattr);
260
261 if (!item->acpiset)
262 return -EIO;
263
264 if (count > 31)
265 return -EINVAL;
266
267 value = simple_strtoul(buffer, NULL, 10);
268
269 if (item->validate)
270 value = item->validate(SNC_VALIDATE_IN, value);
271
272 if (value < 0)
273 return value;
274
275 if (acpi_callsetfunc(sony_acpi_handle, *item->acpiset, value, NULL) < 0)
276 return -EIO;
277 item->value = value;
278 item->valid = 1;
279 return count;
280}
281
282/*
283 * Platform device
284 */
285static struct platform_driver sncpf_driver = {
286 .driver = {
287 .name = "sony-laptop",
288 .owner = THIS_MODULE,
289 }
290};
291static struct platform_device *sncpf_device;
292
293static int sony_snc_pf_add(void)
294{
295 acpi_handle handle;
296 struct sony_acpi_value *item;
297 int ret = 0;
298
299 ret = platform_driver_register(&sncpf_driver);
300 if (ret)
301 goto out;
302
303 sncpf_device = platform_device_alloc("sony-laptop", -1);
304 if (!sncpf_device) {
305 ret = -ENOMEM;
306 goto out_platform_registered;
307 }
308
309 ret = platform_device_add(sncpf_device);
310 if (ret)
311 goto out_platform_alloced;
312
313 for (item = sony_acpi_values; item->name; ++item) {
314
315 if (!debug && item->debug)
316 continue;
317
318 /* find the available acpiget as described in the DSDT */
319 for (; item->acpiget && *item->acpiget; ++item->acpiget) {
320 if (ACPI_SUCCESS(acpi_get_handle(sony_acpi_handle,
321 *item->acpiget,
322 &handle))) {
323 if (debug)
324 printk(LOG_PFX "Found %s getter: %s\n",
325 item->name, *item->acpiget);
326 item->devattr.attr.mode |= S_IRUGO;
327 break;
328 }
329 }
330
331 /* find the available acpiset as described in the DSDT */
332 for (; item->acpiset && *item->acpiset; ++item->acpiset) {
333 if (ACPI_SUCCESS(acpi_get_handle(sony_acpi_handle,
334 *item->acpiset,
335 &handle))) {
336 if (debug)
337 printk(LOG_PFX "Found %s setter: %s\n",
338 item->name, *item->acpiset);
339 item->devattr.attr.mode |= S_IWUSR;
340 break;
341 }
342 }
343
344 if (item->devattr.attr.mode != 0) {
345 ret =
346 device_create_file(&sncpf_device->dev,
347 &item->devattr);
348 if (ret)
349 goto out_sysfs;
350 }
351 }
352
353 return 0;
354
355 out_sysfs:
356 for (item = sony_acpi_values; item->name; ++item) {
357 device_remove_file(&sncpf_device->dev, &item->devattr);
358 }
359 platform_device_del(sncpf_device);
360 out_platform_alloced:
361 platform_device_put(sncpf_device);
362 out_platform_registered:
363 platform_driver_unregister(&sncpf_driver);
364 out:
365 return ret;
366}
367
368static void sony_snc_pf_remove(void)
369{
370 struct sony_acpi_value *item;
371
372 for (item = sony_acpi_values; item->name; ++item) {
373 device_remove_file(&sncpf_device->dev, &item->devattr);
374 }
375
376 platform_device_del(sncpf_device);
377 platform_device_put(sncpf_device);
378 platform_driver_unregister(&sncpf_driver);
379}
380
381/*
382 * Backlight device
383 */
384static int sony_backlight_update_status(struct backlight_device *bd)
385{
386 return acpi_callsetfunc(sony_acpi_handle, "SBRT",
387 bd->props->brightness + 1, NULL);
388}
389
390static int sony_backlight_get_brightness(struct backlight_device *bd)
391{
392 int value;
393
394 if (acpi_callgetfunc(sony_acpi_handle, "GBRT", &value))
395 return 0;
396 /* brightness levels are 1-based, while backlight ones are 0-based */
397 return value - 1;
398}
399
400static struct backlight_device *sony_backlight_device;
401static struct backlight_properties sony_backlight_properties = {
402 .owner = THIS_MODULE,
403 .update_status = sony_backlight_update_status,
404 .get_brightness = sony_backlight_get_brightness,
405 .max_brightness = SONY_MAX_BRIGHTNESS - 1,
406};
407
408/*
409 * ACPI callbacks
410 */
411static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
412{
413 if (debug)
414 printk(LOG_PFX "sony_acpi_notify, event: %d\n", event);
415 acpi_bus_generate_event(sony_acpi_acpi_device, 1, event);
416}
417
418static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
419 void *context, void **return_value)
420{
421 struct acpi_namespace_node *node;
422 union acpi_operand_object *operand;
423
424 node = (struct acpi_namespace_node *)handle;
425 operand = (union acpi_operand_object *)node->object;
426
427 printk(LOG_PFX "method: name: %4.4s, args %X\n", node->name.ascii,
428 (u32) operand->method.param_count);
429
430 return AE_OK;
431}
432
433/*
434 * ACPI device
435 */
436static int sony_acpi_resume(struct acpi_device *device)
437{
438 struct sony_acpi_value *item;
439
440 for (item = sony_acpi_values; item->name; item++) {
441 int ret;
442
443 if (!item->valid)
444 continue;
445 ret = acpi_callsetfunc(sony_acpi_handle, *item->acpiset,
446 item->value, NULL);
447 if (ret < 0) {
448 printk("%s: %d\n", __FUNCTION__, ret);
449 break;
450 }
451 }
452 return 0;
453}
454
455static int sony_acpi_add(struct acpi_device *device)
456{
457 acpi_status status;
458 int result;
459 acpi_handle handle;
460
461 sony_acpi_acpi_device = device;
462
463 sony_acpi_handle = device->handle;
464
465 if (debug) {
466 status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_acpi_handle,
467 1, sony_walk_callback, NULL, NULL);
468 if (ACPI_FAILURE(status)) {
469 printk(LOG_PFX "unable to walk acpi resources\n");
470 result = -ENODEV;
471 goto outwalk;
472 }
473 }
474
475 status = acpi_install_notify_handler(sony_acpi_handle,
476 ACPI_DEVICE_NOTIFY,
477 sony_acpi_notify, NULL);
478 if (ACPI_FAILURE(status)) {
479 printk(LOG_PFX "unable to install notify handler\n");
480 result = -ENODEV;
481 goto outwalk;
482 }
483
484 if (ACPI_SUCCESS(acpi_get_handle(sony_acpi_handle, "GBRT", &handle))) {
485 sony_backlight_device = backlight_device_register("sony", NULL,
486 NULL,
487 &sony_backlight_properties);
488
489 if (IS_ERR(sony_backlight_device)) {
490 printk(LOG_PFX "unable to register backlight device\n");
491 sony_backlight_device = NULL;
492 } else
493 sony_backlight_properties.brightness =
494 sony_backlight_get_brightness
495 (sony_backlight_device);
496 }
497
498 if (sony_snc_pf_add())
499 goto outbacklight;
500
501 printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully installed\n");
502
503 return 0;
504
505 outbacklight:
506 if (sony_backlight_device)
507 backlight_device_unregister(sony_backlight_device);
508
509 status = acpi_remove_notify_handler(sony_acpi_handle,
510 ACPI_DEVICE_NOTIFY,
511 sony_acpi_notify);
512 if (ACPI_FAILURE(status))
513 printk(LOG_PFX "unable to remove notify handler\n");
514 outwalk:
515 return result;
516}
517
518static int sony_acpi_remove(struct acpi_device *device, int type)
519{
520 acpi_status status;
521
522 if (sony_backlight_device)
523 backlight_device_unregister(sony_backlight_device);
524
525 sony_acpi_acpi_device = NULL;
526
527 status = acpi_remove_notify_handler(sony_acpi_handle,
528 ACPI_DEVICE_NOTIFY,
529 sony_acpi_notify);
530 if (ACPI_FAILURE(status))
531 printk(LOG_PFX "unable to remove notify handler\n");
532
533 sony_snc_pf_remove();
534
535 printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully removed\n");
536
537 return 0;
538}
539
540static struct acpi_driver sony_acpi_driver = {
541 .name = ACPI_SNC_DRIVER_NAME,
542 .class = ACPI_SNC_CLASS,
543 .ids = ACPI_SNC_HID,
544 .ops = {
545 .add = sony_acpi_add,
546 .remove = sony_acpi_remove,
547 .resume = sony_acpi_resume,
548 },
549};
550
551static int __init sony_acpi_init(void)
552{
553 return acpi_bus_register_driver(&sony_acpi_driver);
554}
555
556static void __exit sony_acpi_exit(void)
557{
558 acpi_bus_unregister_driver(&sony_acpi_driver);
559}
560
561module_init(sony_acpi_init);
562module_exit(sony_acpi_exit);
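sony_snc_pf_add() above publishes each sony_acpi_value as a sysfs attribute of the platform device, readable when a getter method exists in the DSDT and writable when a setter does. A small userspace sketch of consuming one of those files; the sysfs path is an assumption derived from the platform device name "sony-laptop" and is not confirmed by the diff:

	#include <stdio.h>

	static int read_attr(const char *path)
	{
		FILE *f = fopen(path, "r");
		int v = -1;

		if (f && fscanf(f, "%d", &v) != 1)
			v = -1;
		if (f)
			fclose(f);
		return v;
	}

	int main(void)
	{
		/* Assumed path: attributes hang off the "sony-laptop" platform device. */
		printf("brightness_default = %d\n",
		       read_attr("/sys/devices/platform/sony-laptop/brightness_default"));
		return 0;
	}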
diff --git a/drivers/pnp/pnpacpi/Kconfig b/drivers/pnp/pnpacpi/Kconfig
index ad27e5e0101f..b04767ce273e 100644
--- a/drivers/pnp/pnpacpi/Kconfig
+++ b/drivers/pnp/pnpacpi/Kconfig
@@ -2,17 +2,5 @@
2# Plug and Play ACPI configuration 2# Plug and Play ACPI configuration
3# 3#
4config PNPACPI 4config PNPACPI
5 bool "Plug and Play ACPI support" 5 bool
6 depends on PNP && ACPI 6 default (PNP && ACPI)
7 default y
8 ---help---
9 Linux uses the PNPACPI to autodetect built-in
10 mainboard resources (e.g. parallel port resources).
11
12 Some features (e.g. real hotplug) are not currently
13 implemented.
14
15 If you would like the kernel to detect and allocate resources to
16 your mainboard devices (on some systems they are disabled by the
17 BIOS) say Y here. Also the PNPACPI can help prevent resource
18 conflicts between mainboard devices and other bus devices.
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 32f0e3a5b022..e573c8ba9785 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -281,8 +281,8 @@ static int appledisplay_probe(struct usb_interface *iface,
281 /* Register backlight device */ 281 /* Register backlight device */
282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d", 282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
283 atomic_inc_return(&count_displays) - 1); 283 atomic_inc_return(&count_displays) - 1);
284 pdata->bd = backlight_device_register(bl_name, NULL, 284 pdata->bd = backlight_device_register(bl_name, NULL, pdata,
285 pdata, &appledisplay_bl_data); 285 &appledisplay_bl_data);
286 if (IS_ERR(pdata->bd)) { 286 if (IS_ERR(pdata->bd)) {
287 err("appledisplay: Backlight registration failed"); 287 err("appledisplay: Backlight registration failed");
288 goto error; 288 goto error;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index ccef56d0c157..ed3426062a8b 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -791,6 +791,8 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
791 791
792 info = fbinfo->par; 792 info = fbinfo->par;
793 info->fb = fbinfo; 793 info->fb = fbinfo;
794 info->dev = &pdev->dev;
795
794 platform_set_drvdata(pdev, fbinfo); 796 platform_set_drvdata(pdev, fbinfo);
795 797
796 dprintk("devinit\n"); 798 dprintk("devinit\n");
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index b3609b7cdf11..403e3bad1455 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -467,6 +467,7 @@ extern struct kmem_cache *ecryptfs_header_cache_1;
467extern struct kmem_cache *ecryptfs_header_cache_2; 467extern struct kmem_cache *ecryptfs_header_cache_2;
468extern struct kmem_cache *ecryptfs_xattr_cache; 468extern struct kmem_cache *ecryptfs_xattr_cache;
469extern struct kmem_cache *ecryptfs_lower_page_cache; 469extern struct kmem_cache *ecryptfs_lower_page_cache;
470extern struct kmem_cache *ecryptfs_key_record_cache;
470 471
471int ecryptfs_interpose(struct dentry *hidden_dentry, 472int ecryptfs_interpose(struct dentry *hidden_dentry,
472 struct dentry *this_dentry, struct super_block *sb, 473 struct dentry *this_dentry, struct super_block *sb,
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 81156e95ef8e..b550dea8eee6 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1638,6 +1638,8 @@ out:
1638 return rc; 1638 return rc;
1639} 1639}
1640 1640
1641struct kmem_cache *ecryptfs_key_record_cache;
1642
1641/** 1643/**
1642 * ecryptfs_generate_key_packet_set 1644 * ecryptfs_generate_key_packet_set
1643 * @dest: Virtual address from which to write the key record set 1645 * @dest: Virtual address from which to write the key record set
@@ -1664,50 +1666,55 @@ ecryptfs_generate_key_packet_set(char *dest_base,
1664 &ecryptfs_superblock_to_private( 1666 &ecryptfs_superblock_to_private(
1665 ecryptfs_dentry->d_sb)->mount_crypt_stat; 1667 ecryptfs_dentry->d_sb)->mount_crypt_stat;
1666 size_t written; 1668 size_t written;
1667 struct ecryptfs_key_record key_rec; 1669 struct ecryptfs_key_record *key_rec;
1668 int rc = 0; 1670 int rc = 0;
1669 1671
1670 (*len) = 0; 1672 (*len) = 0;
1673 key_rec = kmem_cache_alloc(ecryptfs_key_record_cache, GFP_KERNEL);
1674 if (!key_rec) {
1675 rc = -ENOMEM;
1676 goto out;
1677 }
1671 if (mount_crypt_stat->global_auth_tok) { 1678 if (mount_crypt_stat->global_auth_tok) {
1672 auth_tok = mount_crypt_stat->global_auth_tok; 1679 auth_tok = mount_crypt_stat->global_auth_tok;
1673 if (auth_tok->token_type == ECRYPTFS_PASSWORD) { 1680 if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
1674 rc = write_tag_3_packet((dest_base + (*len)), 1681 rc = write_tag_3_packet((dest_base + (*len)),
1675 max, auth_tok, 1682 max, auth_tok,
1676 crypt_stat, &key_rec, 1683 crypt_stat, key_rec,
1677 &written); 1684 &written);
1678 if (rc) { 1685 if (rc) {
1679 ecryptfs_printk(KERN_WARNING, "Error " 1686 ecryptfs_printk(KERN_WARNING, "Error "
1680 "writing tag 3 packet\n"); 1687 "writing tag 3 packet\n");
1681 goto out; 1688 goto out_free;
1682 } 1689 }
1683 (*len) += written; 1690 (*len) += written;
1684 /* Write auth tok signature packet */ 1691 /* Write auth tok signature packet */
1685 rc = write_tag_11_packet( 1692 rc = write_tag_11_packet(
1686 (dest_base + (*len)), 1693 (dest_base + (*len)),
1687 (max - (*len)), 1694 (max - (*len)),
1688 key_rec.sig, ECRYPTFS_SIG_SIZE, &written); 1695 key_rec->sig, ECRYPTFS_SIG_SIZE, &written);
1689 if (rc) { 1696 if (rc) {
1690 ecryptfs_printk(KERN_ERR, "Error writing " 1697 ecryptfs_printk(KERN_ERR, "Error writing "
1691 "auth tok signature packet\n"); 1698 "auth tok signature packet\n");
1692 goto out; 1699 goto out_free;
1693 } 1700 }
1694 (*len) += written; 1701 (*len) += written;
1695 } else if (auth_tok->token_type == ECRYPTFS_PRIVATE_KEY) { 1702 } else if (auth_tok->token_type == ECRYPTFS_PRIVATE_KEY) {
1696 rc = write_tag_1_packet(dest_base + (*len), 1703 rc = write_tag_1_packet(dest_base + (*len),
1697 max, auth_tok, 1704 max, auth_tok,
1698 crypt_stat,mount_crypt_stat, 1705 crypt_stat,mount_crypt_stat,
1699 &key_rec, &written); 1706 key_rec, &written);
1700 if (rc) { 1707 if (rc) {
1701 ecryptfs_printk(KERN_WARNING, "Error " 1708 ecryptfs_printk(KERN_WARNING, "Error "
1702 "writing tag 1 packet\n"); 1709 "writing tag 1 packet\n");
1703 goto out; 1710 goto out_free;
1704 } 1711 }
1705 (*len) += written; 1712 (*len) += written;
1706 } else { 1713 } else {
1707 ecryptfs_printk(KERN_WARNING, "Unsupported " 1714 ecryptfs_printk(KERN_WARNING, "Unsupported "
1708 "authentication token type\n"); 1715 "authentication token type\n");
1709 rc = -EINVAL; 1716 rc = -EINVAL;
1710 goto out; 1717 goto out_free;
1711 } 1718 }
1712 } else 1719 } else
1713 BUG(); 1720 BUG();
@@ -1717,6 +1724,9 @@ ecryptfs_generate_key_packet_set(char *dest_base,
1717 ecryptfs_printk(KERN_ERR, "Error writing boundary byte\n"); 1724 ecryptfs_printk(KERN_ERR, "Error writing boundary byte\n");
1718 rc = -EIO; 1725 rc = -EIO;
1719 } 1726 }
1727
1728out_free:
1729 kmem_cache_free(ecryptfs_key_record_cache, key_rec);
1720out: 1730out:
1721 if (rc) 1731 if (rc)
1722 (*len) = 0; 1732 (*len) = 0;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 26fe405a5763..80044d196fe0 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -651,6 +651,11 @@ static struct ecryptfs_cache_info {
651 .name = "ecryptfs_lower_page_cache", 651 .name = "ecryptfs_lower_page_cache",
652 .size = PAGE_CACHE_SIZE, 652 .size = PAGE_CACHE_SIZE,
653 }, 653 },
654 {
655 .cache = &ecryptfs_key_record_cache,
656 .name = "ecryptfs_key_record_cache",
657 .size = sizeof(struct ecryptfs_key_record),
658 },
654}; 659};
655 660
656static void ecryptfs_free_kmem_caches(void) 661static void ecryptfs_free_kmem_caches(void)
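The three ecryptfs hunks above move struct ecryptfs_key_record, too large to sit on the stack in ecryptfs_generate_key_packet_set(), into a dedicated slab cache registered through the ecryptfs_cache_info table. The same pattern reduced to a sketch; the record type is a placeholder, and cache creation is assumed to happen at module init as the table arranges:

	#include <linux/slab.h>

	/* Placeholder for a structure too big for the kernel stack. */
	struct big_record {
		char payload[1024];
	};

	/* Created at module init (compare the ecryptfs_cache_info entry above). */
	static struct kmem_cache *big_record_cache;

	static int use_big_record(void)
	{
		struct big_record *rec;
		int rc = 0;

		rec = kmem_cache_alloc(big_record_cache, GFP_KERNEL);
		if (!rec)
			return -ENOMEM;

		/* ... fill and use rec where a stack variable used to be ... */

		kmem_cache_free(big_record_cache, rec);
		return rc;
	}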
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 47d7e7b611f7..3baf253be95a 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -169,7 +169,8 @@ int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid)
169 if (!new_id) { 169 if (!new_id) {
170 rc = -ENOMEM; 170 rc = -ENOMEM;
171 ecryptfs_printk(KERN_ERR, "Failed to allocate memory; unable " 171 ecryptfs_printk(KERN_ERR, "Failed to allocate memory; unable "
172 "to register daemon [%d] for user\n", pid, uid); 172 "to register daemon [%d] for user [%d]\n",
173 pid, uid);
173 goto unlock; 174 goto unlock;
174 } 175 }
175 if (!ecryptfs_find_daemon_id(uid, &old_id)) { 176 if (!ecryptfs_find_daemon_id(uid, &old_id)) {
diff --git a/fs/namei.c b/fs/namei.c
index 161e2225c757..ee60cc4d3453 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2688,10 +2688,11 @@ int __page_symlink(struct inode *inode, const char *symname, int len,
2688{ 2688{
2689 struct address_space *mapping = inode->i_mapping; 2689 struct address_space *mapping = inode->i_mapping;
2690 struct page *page; 2690 struct page *page;
2691 int err = -ENOMEM; 2691 int err;
2692 char *kaddr; 2692 char *kaddr;
2693 2693
2694retry: 2694retry:
2695 err = -ENOMEM;
2695 page = find_or_create_page(mapping, 0, gfp_mask); 2696 page = find_or_create_page(mapping, 0, gfp_mask);
2696 if (!page) 2697 if (!page)
2697 goto fail; 2698 goto fail;
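The __page_symlink() fix above is easy to miss: err was initialized only once, so after a goto retry it could reach the !page test carrying a stale value. The general shape of the pattern, as a standalone sketch with hypothetical helpers:

	#include <errno.h>

	static int try_allocate(void) { return 1; }	/* hypothetical: 0 on failure */
	static int do_work(void) { return 0; }		/* hypothetical: 0 or -EAGAIN */

	int do_with_retry(void)
	{
		int err;

	retry:
		err = -ENOMEM;		/* reset on every pass, as the fix above does */
		if (!try_allocate())
			goto fail;
		err = do_work();
		if (err == -EAGAIN)
			goto retry;
	fail:
		return err;
	}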
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 5d94555cdc83..832673b14587 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -61,9 +61,11 @@
61 61
62/* flags used to simulate posix default ACLs */ 62/* flags used to simulate posix default ACLs */
63#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \ 63#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
64 | NFS4_ACE_DIRECTORY_INHERIT_ACE | NFS4_ACE_INHERIT_ONLY_ACE) 64 | NFS4_ACE_DIRECTORY_INHERIT_ACE)
65 65
66#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS | NFS4_ACE_IDENTIFIER_GROUP) 66#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \
67 | NFS4_ACE_INHERIT_ONLY_ACE \
68 | NFS4_ACE_IDENTIFIER_GROUP)
67 69
68#define MASK_EQUAL(mask1, mask2) \ 70#define MASK_EQUAL(mask1, mask2) \
69 ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) ) 71 ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) )
@@ -87,12 +89,19 @@ mask_from_posix(unsigned short perm, unsigned int flags)
87} 89}
88 90
89static u32 91static u32
90deny_mask(u32 allow_mask, unsigned int flags) 92deny_mask_from_posix(unsigned short perm, u32 flags)
91{ 93{
92 u32 ret = ~allow_mask & ~NFS4_MASK_UNSUPP; 94 u32 mask = 0;
93 if (!(flags & NFS4_ACL_DIR)) 95
94 ret &= ~NFS4_ACE_DELETE_CHILD; 96 if (perm & ACL_READ)
95 return ret; 97 mask |= NFS4_READ_MODE;
98 if (perm & ACL_WRITE)
99 mask |= NFS4_WRITE_MODE;
100 if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
101 mask |= NFS4_ACE_DELETE_CHILD;
102 if (perm & ACL_EXECUTE)
103 mask |= NFS4_EXECUTE_MODE;
104 return mask;
96} 105}
97 106
98/* XXX: modify functions to return NFS errors; they're only ever 107/* XXX: modify functions to return NFS errors; they're only ever
@@ -126,108 +135,151 @@ struct ace_container {
126}; 135};
127 136
128static short ace2type(struct nfs4_ace *); 137static short ace2type(struct nfs4_ace *);
129static int _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int); 138static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *,
130static struct posix_acl *_nfsv4_to_posix_one(struct nfs4_acl *, unsigned int); 139 unsigned int);
131int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t); 140void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
132static int nfs4_acl_split(struct nfs4_acl *, struct nfs4_acl *);
133 141
134struct nfs4_acl * 142struct nfs4_acl *
135nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl, 143nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl,
136 unsigned int flags) 144 unsigned int flags)
137{ 145{
138 struct nfs4_acl *acl; 146 struct nfs4_acl *acl;
139 int error = -EINVAL; 147 int size = 0;
140 148
141 if ((pacl != NULL && 149 if (pacl) {
142 (posix_acl_valid(pacl) < 0 || pacl->a_count == 0)) || 150 if (posix_acl_valid(pacl) < 0)
143 (dpacl != NULL && 151 return ERR_PTR(-EINVAL);
144 (posix_acl_valid(dpacl) < 0 || dpacl->a_count == 0))) 152 size += 2*pacl->a_count;
145 goto out_err;
146
147 acl = nfs4_acl_new();
148 if (acl == NULL) {
149 error = -ENOMEM;
150 goto out_err;
151 } 153 }
152 154 if (dpacl) {
153 if (pacl != NULL) { 155 if (posix_acl_valid(dpacl) < 0)
154 error = _posix_to_nfsv4_one(pacl, acl, 156 return ERR_PTR(-EINVAL);
155 flags & ~NFS4_ACL_TYPE_DEFAULT); 157 size += 2*dpacl->a_count;
156 if (error < 0)
157 goto out_acl;
158 } 158 }
159 159
160 if (dpacl != NULL) { 160 /* Allocate for worst case: one (deny, allow) pair each: */
161 error = _posix_to_nfsv4_one(dpacl, acl, 161 acl = nfs4_acl_new(size);
162 flags | NFS4_ACL_TYPE_DEFAULT); 162 if (acl == NULL)
163 if (error < 0) 163 return ERR_PTR(-ENOMEM);
164 goto out_acl;
165 }
166 164
167 return acl; 165 if (pacl)
166 _posix_to_nfsv4_one(pacl, acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
168 167
169out_acl: 168 if (dpacl)
170 nfs4_acl_free(acl); 169 _posix_to_nfsv4_one(dpacl, acl, flags | NFS4_ACL_TYPE_DEFAULT);
171out_err:
172 acl = ERR_PTR(error);
173 170
174 return acl; 171 return acl;
175} 172}
176 173
177static int 174struct posix_acl_summary {
178nfs4_acl_add_pair(struct nfs4_acl *acl, int eflag, u32 mask, int whotype, 175 unsigned short owner;
179 uid_t owner, unsigned int flags) 176 unsigned short users;
177 unsigned short group;
178 unsigned short groups;
179 unsigned short other;
180 unsigned short mask;
181};
182
183static void
184summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
180{ 185{
181 int error; 186 struct posix_acl_entry *pa, *pe;
182 187 pas->users = 0;
183 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE, 188 pas->groups = 0;
184 eflag, mask, whotype, owner); 189 pas->mask = 07;
185 if (error < 0) 190
186 return error; 191 pe = acl->a_entries + acl->a_count;
187 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE, 192
188 eflag, deny_mask(mask, flags), whotype, owner); 193 FOREACH_ACL_ENTRY(pa, acl, pe) {
189 return error; 194 switch (pa->e_tag) {
195 case ACL_USER_OBJ:
196 pas->owner = pa->e_perm;
197 break;
198 case ACL_GROUP_OBJ:
199 pas->group = pa->e_perm;
200 break;
201 case ACL_USER:
202 pas->users |= pa->e_perm;
203 break;
204 case ACL_GROUP:
205 pas->groups |= pa->e_perm;
206 break;
207 case ACL_OTHER:
208 pas->other = pa->e_perm;
209 break;
210 case ACL_MASK:
211 pas->mask = pa->e_perm;
212 break;
213 }
214 }
215 /* We'll only care about effective permissions: */
216 pas->users &= pas->mask;
217 pas->group &= pas->mask;
218 pas->groups &= pas->mask;
190} 219}
191 220
192/* We assume the acl has been verified with posix_acl_valid. */ 221/* We assume the acl has been verified with posix_acl_valid. */
193static int 222static void
194_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, 223_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
195 unsigned int flags) 224 unsigned int flags)
196{ 225{
197 struct posix_acl_entry *pa, *pe, *group_owner_entry; 226 struct posix_acl_entry *pa, *group_owner_entry;
198 int error = -EINVAL; 227 struct nfs4_ace *ace;
199 u32 mask, mask_mask; 228 struct posix_acl_summary pas;
229 unsigned short deny;
200 int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ? 230 int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ?
201 NFS4_INHERITANCE_FLAGS : 0); 231 NFS4_INHERITANCE_FLAGS : 0);
202 232
203 BUG_ON(pacl->a_count < 3); 233 BUG_ON(pacl->a_count < 3);
204 pe = pacl->a_entries + pacl->a_count; 234 summarize_posix_acl(pacl, &pas);
205 pa = pe - 2; /* if mask entry exists, it's second from the last. */
206 if (pa->e_tag == ACL_MASK)
207 mask_mask = deny_mask(mask_from_posix(pa->e_perm, flags), flags);
208 else
209 mask_mask = 0;
210 235
211 pa = pacl->a_entries; 236 pa = pacl->a_entries;
212 BUG_ON(pa->e_tag != ACL_USER_OBJ); 237 ace = acl->aces + acl->naces;
213 mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
214 error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_OWNER, 0, flags);
215 if (error < 0)
216 goto out;
217 pa++;
218 238
219 while (pa->e_tag == ACL_USER) { 239 /* We could deny everything not granted by the owner: */
220 mask = mask_from_posix(pa->e_perm, flags); 240 deny = ~pas.owner;
221 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE, 241 /*
222 eflag, mask_mask, NFS4_ACL_WHO_NAMED, pa->e_id); 242 * but it is equivalent (and simpler) to deny only what is not
223 if (error < 0) 243 * granted by later entries:
224 goto out; 244 */
245 deny &= pas.users | pas.group | pas.groups | pas.other;
246 if (deny) {
247 ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
248 ace->flag = eflag;
249 ace->access_mask = deny_mask_from_posix(deny, flags);
250 ace->whotype = NFS4_ACL_WHO_OWNER;
251 ace++;
252 acl->naces++;
253 }
225 254
255 ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
256 ace->flag = eflag;
257 ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
258 ace->whotype = NFS4_ACL_WHO_OWNER;
259 ace++;
260 acl->naces++;
261 pa++;
226 262
227 error = nfs4_acl_add_pair(acl, eflag, mask, 263 while (pa->e_tag == ACL_USER) {
228 NFS4_ACL_WHO_NAMED, pa->e_id, flags); 264 deny = ~(pa->e_perm & pas.mask);
229 if (error < 0) 265 deny &= pas.groups | pas.group | pas.other;
230 goto out; 266 if (deny) {
267 ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
268 ace->flag = eflag;
269 ace->access_mask = deny_mask_from_posix(deny, flags);
270 ace->whotype = NFS4_ACL_WHO_NAMED;
271 ace->who = pa->e_id;
272 ace++;
273 acl->naces++;
274 }
275 ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
276 ace->flag = eflag;
277 ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
278 flags);
279 ace->whotype = NFS4_ACL_WHO_NAMED;
280 ace->who = pa->e_id;
281 ace++;
282 acl->naces++;
231 pa++; 283 pa++;
232 } 284 }
233 285
@@ -236,67 +288,65 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
236 288
237 /* allow ACEs */ 289 /* allow ACEs */
238 290
239 if (pacl->a_count > 3) {
240 BUG_ON(pa->e_tag != ACL_GROUP_OBJ);
241 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
242 NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask,
243 NFS4_ACL_WHO_GROUP, 0);
244 if (error < 0)
245 goto out;
246 }
247 group_owner_entry = pa; 291 group_owner_entry = pa;
248 mask = mask_from_posix(pa->e_perm, flags); 292
249 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE, 293 ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
250 NFS4_ACE_IDENTIFIER_GROUP | eflag, mask, 294 ace->flag = eflag;
251 NFS4_ACL_WHO_GROUP, 0); 295 ace->access_mask = mask_from_posix(pas.group, flags);
252 if (error < 0) 296 ace->whotype = NFS4_ACL_WHO_GROUP;
253 goto out; 297 ace++;
298 acl->naces++;
254 pa++; 299 pa++;
255 300
256 while (pa->e_tag == ACL_GROUP) { 301 while (pa->e_tag == ACL_GROUP) {
257 mask = mask_from_posix(pa->e_perm, flags); 302 ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
258 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE, 303 ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
259 NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask, 304 ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
260 NFS4_ACL_WHO_NAMED, pa->e_id); 305 flags);
261 if (error < 0) 306 ace->whotype = NFS4_ACL_WHO_NAMED;
262 goto out; 307 ace->who = pa->e_id;
263 308 ace++;
264 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE, 309 acl->naces++;
265 NFS4_ACE_IDENTIFIER_GROUP | eflag, mask,
266 NFS4_ACL_WHO_NAMED, pa->e_id);
267 if (error < 0)
268 goto out;
269 pa++; 310 pa++;
270 } 311 }
271 312
272 /* deny ACEs */ 313 /* deny ACEs */
273 314
274 pa = group_owner_entry; 315 pa = group_owner_entry;
275 mask = mask_from_posix(pa->e_perm, flags); 316
276 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE, 317 deny = ~pas.group & pas.other;
277 NFS4_ACE_IDENTIFIER_GROUP | eflag, 318 if (deny) {
278 deny_mask(mask, flags), NFS4_ACL_WHO_GROUP, 0); 319 ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
279 if (error < 0) 320 ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
280 goto out; 321 ace->access_mask = deny_mask_from_posix(deny, flags);
322 ace->whotype = NFS4_ACL_WHO_GROUP;
323 ace++;
324 acl->naces++;
325 }
281 pa++; 326 pa++;
327
282 while (pa->e_tag == ACL_GROUP) { 328 while (pa->e_tag == ACL_GROUP) {
283 mask = mask_from_posix(pa->e_perm, flags); 329 deny = ~(pa->e_perm & pas.mask);
284 error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE, 330 deny &= pas.other;
285 NFS4_ACE_IDENTIFIER_GROUP | eflag, 331 if (deny) {
286 deny_mask(mask, flags), NFS4_ACL_WHO_NAMED, pa->e_id); 332 ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
287 if (error < 0) 333 ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
288 goto out; 334 ace->access_mask = mask_from_posix(deny, flags);
335 ace->whotype = NFS4_ACL_WHO_NAMED;
336 ace->who = pa->e_id;
337 ace++;
338 acl->naces++;
339 }
289 pa++; 340 pa++;
290 } 341 }
291 342
292 if (pa->e_tag == ACL_MASK) 343 if (pa->e_tag == ACL_MASK)
293 pa++; 344 pa++;
294 BUG_ON(pa->e_tag != ACL_OTHER); 345 ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
295 mask = mask_from_posix(pa->e_perm, flags); 346 ace->flag = eflag;
296 error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_EVERYONE, 0, flags); 347 ace->access_mask = mask_from_posix(pa->e_perm, flags);
297 348 ace->whotype = NFS4_ACL_WHO_EVERYONE;
298out: 349 acl->naces++;
299 return error;
300} 350}
301 351
302static void 352static void
@@ -342,46 +392,6 @@ sort_pacl(struct posix_acl *pacl)
342 return; 392 return;
343} 393}
344 394
345int
346nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
347 struct posix_acl **dpacl, unsigned int flags)
348{
349 struct nfs4_acl *dacl;
350 int error = -ENOMEM;
351
352 *pacl = NULL;
353 *dpacl = NULL;
354
355 dacl = nfs4_acl_new();
356 if (dacl == NULL)
357 goto out;
358
359 error = nfs4_acl_split(acl, dacl);
360 if (error)
361 goto out_acl;
362
363 *pacl = _nfsv4_to_posix_one(acl, flags);
364 if (IS_ERR(*pacl)) {
365 error = PTR_ERR(*pacl);
366 *pacl = NULL;
367 goto out_acl;
368 }
369
370 *dpacl = _nfsv4_to_posix_one(dacl, flags);
371 if (IS_ERR(*dpacl)) {
372 error = PTR_ERR(*dpacl);
373 *dpacl = NULL;
374 }
375out_acl:
376 if (error) {
377 posix_acl_release(*pacl);
378 *pacl = NULL;
379 }
380 nfs4_acl_free(dacl);
381out:
382 return error;
383}
384
385/* 395/*
386 * While processing the NFSv4 ACE, this maintains bitmasks representing 396 * While processing the NFSv4 ACE, this maintains bitmasks representing
387 * which permission bits have been allowed and which denied to a given 397 * which permission bits have been allowed and which denied to a given
@@ -406,6 +416,7 @@ struct posix_ace_state_array {
406 * calculated so far: */ 416 * calculated so far: */
407 417
408struct posix_acl_state { 418struct posix_acl_state {
419 int empty;
409 struct posix_ace_state owner; 420 struct posix_ace_state owner;
410 struct posix_ace_state group; 421 struct posix_ace_state group;
411 struct posix_ace_state other; 422 struct posix_ace_state other;
@@ -421,6 +432,7 @@ init_state(struct posix_acl_state *state, int cnt)
421 int alloc; 432 int alloc;
422 433
423 memset(state, 0, sizeof(struct posix_acl_state)); 434 memset(state, 0, sizeof(struct posix_acl_state));
435 state->empty = 1;
424 /* 436 /*
425 * In the worst case, each individual acl could be for a distinct 437 * In the worst case, each individual acl could be for a distinct
 426	 * named user or group, but we don't know which, so we allocate	438
@@ -488,6 +500,20 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
488 int nace; 500 int nace;
489 int i, error = 0; 501 int i, error = 0;
490 502
503 /*
504 * ACLs with no ACEs are treated differently in the inheritable
505 * and effective cases: when there are no inheritable ACEs, we
506 * set a zero-length default posix acl:
507 */
508 if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
509 pacl = posix_acl_alloc(0, GFP_KERNEL);
510 return pacl ? pacl : ERR_PTR(-ENOMEM);
511 }
512 /*
513 * When there are no effective ACEs, the following will end
514 * up setting a 3-element effective posix ACL with all
515 * permissions zero.
516 */
491 nace = 4 + state->users->n + state->groups->n; 517 nace = 4 + state->users->n + state->groups->n;
492 pacl = posix_acl_alloc(nace, GFP_KERNEL); 518 pacl = posix_acl_alloc(nace, GFP_KERNEL);
493 if (!pacl) 519 if (!pacl)
@@ -603,6 +629,8 @@ static void process_one_v4_ace(struct posix_acl_state *state,
603 u32 mask = ace->access_mask; 629 u32 mask = ace->access_mask;
604 int i; 630 int i;
605 631
632 state->empty = 0;
633
606 switch (ace2type(ace)) { 634 switch (ace2type(ace)) {
607 case ACL_USER_OBJ: 635 case ACL_USER_OBJ:
608 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { 636 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
@@ -666,75 +694,62 @@ static void process_one_v4_ace(struct posix_acl_state *state,
666 } 694 }
667} 695}
668 696
669static struct posix_acl * 697int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
670_nfsv4_to_posix_one(struct nfs4_acl *n4acl, unsigned int flags) 698 struct posix_acl **dpacl, unsigned int flags)
671{ 699{
672 struct posix_acl_state state; 700 struct posix_acl_state effective_acl_state, default_acl_state;
673 struct posix_acl *pacl;
674 struct nfs4_ace *ace; 701 struct nfs4_ace *ace;
675 int ret; 702 int ret;
676 703
677 ret = init_state(&state, n4acl->naces); 704 ret = init_state(&effective_acl_state, acl->naces);
678 if (ret) 705 if (ret)
679 return ERR_PTR(ret); 706 return ret;
680 707 ret = init_state(&default_acl_state, acl->naces);
681 list_for_each_entry(ace, &n4acl->ace_head, l_ace) 708 if (ret)
682 process_one_v4_ace(&state, ace); 709 goto out_estate;
683 710 ret = -EINVAL;
684 pacl = posix_state_to_acl(&state, flags); 711 for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
685
686 free_state(&state);
687
688 if (!IS_ERR(pacl))
689 sort_pacl(pacl);
690 return pacl;
691}
692
693static int
694nfs4_acl_split(struct nfs4_acl *acl, struct nfs4_acl *dacl)
695{
696 struct list_head *h, *n;
697 struct nfs4_ace *ace;
698 int error = 0;
699
700 list_for_each_safe(h, n, &acl->ace_head) {
701 ace = list_entry(h, struct nfs4_ace, l_ace);
702
703 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE && 712 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
704 ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) 713 ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
705 return -EINVAL; 714 goto out_dstate;
706
707 if (ace->flag & ~NFS4_SUPPORTED_FLAGS) 715 if (ace->flag & ~NFS4_SUPPORTED_FLAGS)
708 return -EINVAL; 716 goto out_dstate;
709 717 if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) {
710 switch (ace->flag & NFS4_INHERITANCE_FLAGS) { 718 process_one_v4_ace(&effective_acl_state, ace);
711 case 0:
712 /* Leave this ace in the effective acl: */
713 continue; 719 continue;
714 case NFS4_INHERITANCE_FLAGS:
715 /* Add this ace to the default acl and remove it
716 * from the effective acl: */
717 error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
718 ace->access_mask, ace->whotype, ace->who);
719 if (error)
720 return error;
721 list_del(h);
722 kfree(ace);
723 acl->naces--;
724 break;
725 case NFS4_INHERITANCE_FLAGS & ~NFS4_ACE_INHERIT_ONLY_ACE:
726 /* Add this ace to the default, but leave it in
727 * the effective acl as well: */
728 error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
729 ace->access_mask, ace->whotype, ace->who);
730 if (error)
731 return error;
732 break;
733 default:
734 return -EINVAL;
735 } 720 }
721 if (!(flags & NFS4_ACL_DIR))
722 goto out_dstate;
723 /*
724 * Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT
725 * is set, we're effectively turning on the other. That's OK,
726 * according to rfc 3530.
727 */
728 process_one_v4_ace(&default_acl_state, ace);
729
730 if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
731 process_one_v4_ace(&effective_acl_state, ace);
736 } 732 }
737 return 0; 733 *pacl = posix_state_to_acl(&effective_acl_state, flags);
734 if (IS_ERR(*pacl)) {
735 ret = PTR_ERR(*pacl);
736 goto out_dstate;
737 }
738 *dpacl = posix_state_to_acl(&default_acl_state,
739 flags | NFS4_ACL_TYPE_DEFAULT);
740 if (IS_ERR(*dpacl)) {
741 ret = PTR_ERR(*dpacl);
742 posix_acl_release(*pacl);
743 goto out_dstate;
744 }
745 sort_pacl(*pacl);
746 sort_pacl(*dpacl);
747 ret = 0;
748out_dstate:
749 free_state(&default_acl_state);
750out_estate:
751 free_state(&effective_acl_state);
752 return ret;
738} 753}
739 754
740static short 755static short
@@ -759,48 +774,22 @@ EXPORT_SYMBOL(nfs4_acl_posix_to_nfsv4);
759EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix); 774EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix);
760 775
761struct nfs4_acl * 776struct nfs4_acl *
762nfs4_acl_new(void) 777nfs4_acl_new(int n)
763{ 778{
764 struct nfs4_acl *acl; 779 struct nfs4_acl *acl;
765 780
766 if ((acl = kmalloc(sizeof(*acl), GFP_KERNEL)) == NULL) 781 acl = kmalloc(sizeof(*acl) + n*sizeof(struct nfs4_ace), GFP_KERNEL);
782 if (acl == NULL)
767 return NULL; 783 return NULL;
768
769 acl->naces = 0; 784 acl->naces = 0;
770 INIT_LIST_HEAD(&acl->ace_head);
771
772 return acl; 785 return acl;
773} 786}
774 787
775void 788void
776nfs4_acl_free(struct nfs4_acl *acl)
777{
778 struct list_head *h;
779 struct nfs4_ace *ace;
780
781 if (!acl)
782 return;
783
784 while (!list_empty(&acl->ace_head)) {
785 h = acl->ace_head.next;
786 list_del(h);
787 ace = list_entry(h, struct nfs4_ace, l_ace);
788 kfree(ace);
789 }
790
791 kfree(acl);
792
793 return;
794}
795
796int
797nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask, 789nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask,
798 int whotype, uid_t who) 790 int whotype, uid_t who)
799{ 791{
800 struct nfs4_ace *ace; 792 struct nfs4_ace *ace = acl->aces + acl->naces;
801
802 if ((ace = kmalloc(sizeof(*ace), GFP_KERNEL)) == NULL)
803 return -ENOMEM;
804 793
805 ace->type = type; 794 ace->type = type;
806 ace->flag = flag; 795 ace->flag = flag;
@@ -808,10 +797,7 @@ nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask,
808 ace->whotype = whotype; 797 ace->whotype = whotype;
809 ace->who = who; 798 ace->who = who;
810 799
811 list_add_tail(&ace->l_ace, &acl->ace_head);
812 acl->naces++; 800 acl->naces++;
813
814 return 0;
815} 801}
816 802
817static struct { 803static struct {
@@ -865,7 +851,6 @@ nfs4_acl_write_who(int who, char *p)
865} 851}
866 852
867EXPORT_SYMBOL(nfs4_acl_new); 853EXPORT_SYMBOL(nfs4_acl_new);
868EXPORT_SYMBOL(nfs4_acl_free);
869EXPORT_SYMBOL(nfs4_acl_add_ace); 854EXPORT_SYMBOL(nfs4_acl_add_ace);
870EXPORT_SYMBOL(nfs4_acl_get_whotype); 855EXPORT_SYMBOL(nfs4_acl_get_whotype);
871EXPORT_SYMBOL(nfs4_acl_write_who); 856EXPORT_SYMBOL(nfs4_acl_write_who);
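The nfs4acl rewrite above replaces a linked-list ACL (one kmalloc per ACE plus a dedicated nfs4_acl_free()) with a single worst-case array allocation that a plain kfree() releases, as the nfs4xdr.c hunks below show. The allocation and append idioms, with simplified placeholder types standing in for struct nfs4_acl and struct nfs4_ace:

	#include <linux/types.h>
	#include <linux/slab.h>

	struct my_ace {
		u32 type;
		u32 flag;
		u32 access_mask;
	};

	struct my_acl {
		int naces;
		struct my_ace aces[];	/* flexible array replaces the old list_head */
	};

	static struct my_acl *my_acl_new(int n)
	{
		/* One worst-case allocation; the whole ACL is later freed with kfree(). */
		struct my_acl *acl = kmalloc(sizeof(*acl) + n * sizeof(struct my_ace),
					     GFP_KERNEL);
		if (!acl)
			return NULL;
		acl->naces = 0;
		return acl;
	}

	static void my_acl_add(struct my_acl *acl, u32 type, u32 flag, u32 mask)
	{
		struct my_ace *ace = acl->aces + acl->naces;	/* append in place */

		ace->type = type;
		ace->flag = flag;
		ace->access_mask = mask;
		acl->naces++;
	}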
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index f57655a7a2b6..fb14d68eacab 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -387,7 +387,6 @@ nfsd4_probe_callback(struct nfs4_client *clp)
387 .address = (struct sockaddr *)&addr, 387 .address = (struct sockaddr *)&addr,
388 .addrsize = sizeof(addr), 388 .addrsize = sizeof(addr),
389 .timeout = &timeparms, 389 .timeout = &timeparms,
390 .servername = clp->cl_name.data,
391 .program = program, 390 .program = program,
392 .version = nfs_cb_version[1]->number, 391 .version = nfs_cb_version[1]->number,
393 .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */ 392 .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */
@@ -397,6 +396,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
397 .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], 396 .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
398 .rpc_argp = clp, 397 .rpc_argp = clp,
399 }; 398 };
399 char clientname[16];
400 int status; 400 int status;
401 401
402 if (atomic_read(&cb->cb_set)) 402 if (atomic_read(&cb->cb_set))
@@ -419,6 +419,11 @@ nfsd4_probe_callback(struct nfs4_client *clp)
419 memset(program->stats, 0, sizeof(cb->cb_stat)); 419 memset(program->stats, 0, sizeof(cb->cb_stat));
420 program->stats->program = program; 420 program->stats->program = program;
421 421
422 /* Just here to make some printk's more useful: */
423 snprintf(clientname, sizeof(clientname),
424 "%u.%u.%u.%u", NIPQUAD(addr.sin_addr));
425 args.servername = clientname;
426
422 /* Create RPC client */ 427 /* Create RPC client */
423 cb->cb_client = rpc_create(&args); 428 cb->cb_client = rpc_create(&args);
424 if (IS_ERR(cb->cb_client)) { 429 if (IS_ERR(cb->cb_client)) {
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0efba557fb55..5d090f11f2be 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -199,24 +199,22 @@ defer_free(struct nfsd4_compoundargs *argp,
199 199
200static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes) 200static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
201{ 201{
202 void *new = NULL;
203 if (p == argp->tmp) { 202 if (p == argp->tmp) {
204 new = kmalloc(nbytes, GFP_KERNEL); 203 p = kmalloc(nbytes, GFP_KERNEL);
205 if (!new) return NULL; 204 if (!p)
206 p = new; 205 return NULL;
207 memcpy(p, argp->tmp, nbytes); 206 memcpy(p, argp->tmp, nbytes);
208 } else { 207 } else {
209 BUG_ON(p != argp->tmpp); 208 BUG_ON(p != argp->tmpp);
210 argp->tmpp = NULL; 209 argp->tmpp = NULL;
211 } 210 }
212 if (defer_free(argp, kfree, p)) { 211 if (defer_free(argp, kfree, p)) {
213 kfree(new); 212 kfree(p);
214 return NULL; 213 return NULL;
215 } else 214 } else
216 return (char *)p; 215 return (char *)p;
217} 216}
218 217
219
220static __be32 218static __be32
221nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval) 219nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
222{ 220{
@@ -255,7 +253,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *ia
255 return status; 253 return status;
256 254
257 /* 255 /*
258 * According to spec, unsupported attributes return ERR_NOTSUPP; 256 * According to spec, unsupported attributes return ERR_ATTRNOTSUPP;
259 * read-only attributes return ERR_INVAL. 257 * read-only attributes return ERR_INVAL.
260 */ 258 */
261 if ((bmval[0] & ~NFSD_SUPPORTED_ATTRS_WORD0) || (bmval[1] & ~NFSD_SUPPORTED_ATTRS_WORD1)) 259 if ((bmval[0] & ~NFSD_SUPPORTED_ATTRS_WORD0) || (bmval[1] & ~NFSD_SUPPORTED_ATTRS_WORD1))
@@ -273,42 +271,42 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *ia
273 iattr->ia_valid |= ATTR_SIZE; 271 iattr->ia_valid |= ATTR_SIZE;
274 } 272 }
275 if (bmval[0] & FATTR4_WORD0_ACL) { 273 if (bmval[0] & FATTR4_WORD0_ACL) {
276 int nace, i; 274 int nace;
277 struct nfs4_ace ace; 275 struct nfs4_ace *ace;
278 276
279 READ_BUF(4); len += 4; 277 READ_BUF(4); len += 4;
280 READ32(nace); 278 READ32(nace);
281 279
282 *acl = nfs4_acl_new(); 280 if (nace > NFS4_ACL_MAX)
281 return nfserr_resource;
282
283 *acl = nfs4_acl_new(nace);
283 if (*acl == NULL) { 284 if (*acl == NULL) {
284 host_err = -ENOMEM; 285 host_err = -ENOMEM;
285 goto out_nfserr; 286 goto out_nfserr;
286 } 287 }
287 defer_free(argp, (void (*)(const void *))nfs4_acl_free, *acl); 288 defer_free(argp, kfree, *acl);
288 289
289 for (i = 0; i < nace; i++) { 290 (*acl)->naces = nace;
291 for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
290 READ_BUF(16); len += 16; 292 READ_BUF(16); len += 16;
291 READ32(ace.type); 293 READ32(ace->type);
292 READ32(ace.flag); 294 READ32(ace->flag);
293 READ32(ace.access_mask); 295 READ32(ace->access_mask);
294 READ32(dummy32); 296 READ32(dummy32);
295 READ_BUF(dummy32); 297 READ_BUF(dummy32);
296 len += XDR_QUADLEN(dummy32) << 2; 298 len += XDR_QUADLEN(dummy32) << 2;
297 READMEM(buf, dummy32); 299 READMEM(buf, dummy32);
298 ace.whotype = nfs4_acl_get_whotype(buf, dummy32); 300 ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
299 host_err = 0; 301 host_err = 0;
300 if (ace.whotype != NFS4_ACL_WHO_NAMED) 302 if (ace->whotype != NFS4_ACL_WHO_NAMED)
301 ace.who = 0; 303 ace->who = 0;
302 else if (ace.flag & NFS4_ACE_IDENTIFIER_GROUP) 304 else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
303 host_err = nfsd_map_name_to_gid(argp->rqstp, 305 host_err = nfsd_map_name_to_gid(argp->rqstp,
304 buf, dummy32, &ace.who); 306 buf, dummy32, &ace->who);
305 else 307 else
306 host_err = nfsd_map_name_to_uid(argp->rqstp, 308 host_err = nfsd_map_name_to_uid(argp->rqstp,
307 buf, dummy32, &ace.who); 309 buf, dummy32, &ace->who);
308 if (host_err)
309 goto out_nfserr;
310 host_err = nfs4_acl_add_ace(*acl, ace.type, ace.flag,
311 ace.access_mask, ace.whotype, ace.who);
312 if (host_err) 310 if (host_err)
313 goto out_nfserr; 311 goto out_nfserr;
314 } 312 }
@@ -1596,7 +1594,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1596 } 1594 }
1597 if (bmval0 & FATTR4_WORD0_ACL) { 1595 if (bmval0 & FATTR4_WORD0_ACL) {
1598 struct nfs4_ace *ace; 1596 struct nfs4_ace *ace;
1599 struct list_head *h;
1600 1597
1601 if (acl == NULL) { 1598 if (acl == NULL) {
1602 if ((buflen -= 4) < 0) 1599 if ((buflen -= 4) < 0)
@@ -1609,9 +1606,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1609 goto out_resource; 1606 goto out_resource;
1610 WRITE32(acl->naces); 1607 WRITE32(acl->naces);
1611 1608
1612 list_for_each(h, &acl->ace_head) { 1609 for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
1613 ace = list_entry(h, struct nfs4_ace, l_ace);
1614
1615 if ((buflen -= 4*3) < 0) 1610 if ((buflen -= 4*3) < 0)
1616 goto out_resource; 1611 goto out_resource;
1617 WRITE32(ace->type); 1612 WRITE32(ace->type);
@@ -1821,7 +1816,7 @@ out_acl:
1821 status = nfs_ok; 1816 status = nfs_ok;
1822 1817
1823out: 1818out:
1824 nfs4_acl_free(acl); 1819 kfree(acl);
1825 if (fhp == &tempfh) 1820 if (fhp == &tempfh)
1826 fh_put(&tempfh); 1821 fh_put(&tempfh);
1827 return status; 1822 return status;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8283236c6a0f..7e6aa245b5d5 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -466,7 +466,10 @@ out:
466 posix_acl_release(dpacl); 466 posix_acl_release(dpacl);
467 return (error); 467 return (error);
468out_nfserr: 468out_nfserr:
469 error = nfserrno(host_error); 469 if (host_error == -EOPNOTSUPP)
470 error = nfserr_attrnotsupp;
471 else
472 error = nfserrno(host_error);
470 goto out; 473 goto out;
471} 474}
472 475
diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h
index ce7c9d653910..73967c8152d3 100644
--- a/include/acpi/acinterp.h
+++ b/include/acpi/acinterp.h
@@ -253,7 +253,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
253 253
254void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread); 254void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
255 255
256void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc); 256void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
257 struct acpi_thread_state *thread);
257 258
258/* 259/*
259 * exprep - ACPI AML execution - prep utilities 260 * exprep - ACPI AML execution - prep utilities
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h
index 04e9735a6742..5206d61d74a6 100644
--- a/include/acpi/acobject.h
+++ b/include/acpi/acobject.h
@@ -155,7 +155,7 @@ struct acpi_object_event {
155struct acpi_object_mutex { 155struct acpi_object_mutex {
156 ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ 156 ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
157 u16 acquisition_depth; /* Allow multiple Acquires, same thread */ 157 u16 acquisition_depth; /* Allow multiple Acquires, same thread */
158 struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ 158 acpi_thread_id owner_thread_id; /* Current owner of the mutex */
159 acpi_mutex os_mutex; /* Actual OS synchronization object */ 159 acpi_mutex os_mutex; /* Actual OS synchronization object */
160 union acpi_operand_object *prev; /* Link for list of acquired mutexes */ 160 union acpi_operand_object *prev; /* Link for list of acquired mutexes */
161 union acpi_operand_object *next; /* Link for list of acquired mutexes */ 161 union acpi_operand_object *next; /* Link for list of acquired mutexes */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 4dc8a5043ef0..07a5eb036e96 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -105,12 +105,6 @@ int acpi_ec_ecdt_probe(void);
 
 int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
 
-/* --------------------------------------------------------------------------
-                                  Hot Keys
-   -------------------------------------------------------------------------- */
-
-extern int acpi_specific_hotkey_enabled;
-
 /*--------------------------------------------------------------------------
                                   Dock Station
   -------------------------------------------------------------------------- */
@@ -122,10 +116,24 @@ extern int register_hotplug_dock_device(acpi_handle handle,
 					acpi_notify_handler handler, void *context);
 extern void unregister_hotplug_dock_device(acpi_handle handle);
 #else
-#define is_dock_device(h)			(0)
-#define register_dock_notifier(nb)		(-ENODEV)
-#define unregister_dock_notifier(nb)		do { } while(0)
-#define register_hotplug_dock_device(h1, h2, c)	(-ENODEV)
-#define unregister_hotplug_dock_device(h)	do { } while(0)
+static inline int is_dock_device(acpi_handle handle)
+{
+	return 0;
+}
+static inline int register_dock_notifier(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+static inline void unregister_dock_notifier(struct notifier_block *nb)
+{
+}
+static inline int register_hotplug_dock_device(acpi_handle handle,
+				acpi_notify_handler handler, void *context)
+{
+	return -ENODEV;
+}
+static inline void unregister_hotplug_dock_device(acpi_handle handle)
+{
+}
 #endif
 #endif /*__ACPI_DRIVERS_H__*/
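Note the pattern in the #else branch above: when dock support is compiled out, the stubs are now static inline functions rather than object-like macros, so callers keep full prototype checking. A standalone sketch of the difference; CONFIG_ACPI_DOCK and the literal -19 stand in for the kernel's Kconfig symbol and -ENODEV:

#include <stdio.h>

struct notifier_block { int dummy; };

#ifdef CONFIG_ACPI_DOCK
int register_dock_notifier(struct notifier_block *nb);
#else
/* A macro stub such as
 *	#define register_dock_notifier(nb) (-ENODEV)
 * would accept any argument without complaint. The inline keeps the
 * prototype checked even in the compiled-out configuration: */
static inline int register_dock_notifier(struct notifier_block *nb)
{
	(void)nb;
	return -19;	/* -ENODEV */
}
#endif

int main(void)
{
	struct notifier_block nb;

	printf("%d\n", register_dock_notifier(&nb));	/* -19 */
	/* register_dock_notifier("oops") would now draw a warning,
	 * which the old macro version silently swallowed. */
	return 0;
}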
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 781394b9efe0..2785058c82ab 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -240,12 +240,6 @@ acpi_status
 acpi_os_validate_address(u8 space_id,
 			 acpi_physical_address address, acpi_size length);
 
-u8 acpi_os_readable(void *pointer, acpi_size length);
-
-#ifdef ACPI_FUTURE_USAGE
-u8 acpi_os_writable(void *pointer, acpi_size length);
-#endif
-
 u64 acpi_os_get_timer(void);
 
 acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7798d2a9f793..916c0102db5b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -79,6 +79,7 @@ struct acpi_processor_power {
 	u32 bm_activity;
 	int count;
 	struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
+	int timer_broadcast_on_state;
 };
 
 /* Performance Management */
diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h
index b120ee030c86..1a7b07d436ff 100644
--- a/include/asm-avr32/arch-at32ap/board.h
+++ b/include/asm-avr32/arch-at32ap/board.h
@@ -26,7 +26,9 @@ struct eth_platform_data {
 struct platform_device *
 at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
 
-struct platform_device *at32_add_device_spi(unsigned int id);
+struct spi_board_info;
+struct platform_device *
+at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
 
 struct lcdc_platform_data {
 	unsigned long fbmem_start;
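The one-line "struct spi_board_info;" above is a forward declaration: since at32_add_device_spi() only takes a pointer, an incomplete type suffices and board.h avoids pulling in the full SPI header. A compilable sketch of the idiom, reusing the names from the hunk with bodies omitted as in a header:

/* Forward declarations: pointers to incomplete types are fine. */
struct spi_board_info;
struct platform_device;

/* This declaration compiles without either struct's definition. */
struct platform_device *
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);

/* Only a .c file that actually dereferences b needs the full
 * definition (in the kernel, <linux/spi/spi.h>). */
int main(void) { return 0; }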
diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h
index eec47500fa66..c08e81048393 100644
--- a/include/asm-avr32/io.h
+++ b/include/asm-avr32/io.h
@@ -28,13 +28,13 @@ static __inline__ void * phys_to_virt(unsigned long address)
  * Generic IO read/write.  These perform native-endian accesses.  Note
  * that some architectures will want to re-define __raw_{read,write}w.
  */
-extern void __raw_writesb(unsigned int addr, const void *data, int bytelen);
-extern void __raw_writesw(unsigned int addr, const void *data, int wordlen);
-extern void __raw_writesl(unsigned int addr, const void *data, int longlen);
+extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
+extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
+extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
 
-extern void __raw_readsb(unsigned int addr, void *data, int bytelen);
-extern void __raw_readsw(unsigned int addr, void *data, int wordlen);
-extern void __raw_readsl(unsigned int addr, void *data, int longlen);
+extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
+extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
+extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 
 static inline void writeb(unsigned char b, volatile void __iomem *addr)
 {
@@ -252,6 +252,9 @@ extern void __iounmap(void __iomem *addr);
 #define ioremap(offset, size)			\
 	__ioremap((offset), (size), 0)
 
+#define ioremap_nocache(offset, size)		\
+	__ioremap((offset), (size), 0)
+
 #define iounmap(addr)				\
 	__iounmap(addr)
 
@@ -263,6 +266,14 @@ extern void __iounmap(void __iomem *addr);
 #define page_to_bus page_to_phys
 #define bus_to_page phys_to_page
 
+/*
+ * Create a virtual mapping cookie for an IO port range. There exists
+ * no such thing as port-based I/O on AVR32, so a regular ioremap()
+ * should do what we need.
+ */
+#define ioport_map(port, nr)	ioremap(port, nr)
+#define ioport_unmap(port)	iounmap(port)
+
 #define dma_cache_wback_inv(_start, _size)	\
 	flush_dcache_region(_start, _size)
 #define dma_cache_inv(_start, _size)		\
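Switching the string accessors above from "unsigned int addr" to "void __iomem *addr" makes MMIO cookies pointer-typed, which both sparse (via the __iomem annotation) and the plain C type system can check. A user-space sketch, with __iomem defined away as it is outside the kernel:

#include <stdio.h>

#define __iomem	/* sparse address-space annotation; empty here */

static void __raw_writesb(void __iomem *addr, const void *data, int bytelen)
{
	/* Toy body: write each byte to the same "register". */
	volatile unsigned char *reg = addr;
	const unsigned char *p = data;

	while (bytelen--)
		*reg = *p++;
}

int main(void)
{
	static unsigned char fake_reg;
	unsigned char buf[4] = { 1, 2, 3, 4 };

	__raw_writesb(&fake_reg, buf, sizeof buf);
	/* With the old "unsigned int addr" prototype, a bare bus
	 * address like __raw_writesb(0xffe00000, buf, 4) compiled
	 * silently; as a pointer argument it now needs an explicit
	 * cast, and sparse flags the missing __iomem qualifier. */
	printf("%d\n", fake_reg);	/* 4: the last byte written */
	return 0;
}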
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index 56ed1f9d348a..8f5120471819 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -120,7 +120,7 @@
 #define __NR_getitimer		105
 #define __NR_swapoff		106
 #define __NR_sysinfo		107
-#define __NR_ipc		108
+/* 108 was __NR_ipc for a little while */
 #define __NR_sendfile		109
 #define __NR_setdomainname	110
 #define __NR_uname		111
@@ -282,8 +282,21 @@
 #define __NR_vmsplice		264
 #define __NR_epoll_pwait	265
 
+#define __NR_msgget		266
+#define __NR_msgsnd		267
+#define __NR_msgrcv		268
+#define __NR_msgctl		269
+#define __NR_semget		270
+#define __NR_semop		271
+#define __NR_semctl		272
+#define __NR_semtimedop		273
+#define __NR_shmat		274
+#define __NR_shmget		275
+#define __NR_shmdt		276
+#define __NR_shmctl		277
+
 #ifdef __KERNEL__
-#define NR_syscalls		266
+#define NR_syscalls		278
 
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
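With twelve IPC syscalls added above, NR_syscalls moves from 266 to 278, i.e. one past the highest-numbered entry. A tiny compile-time check of that invariant, using the negative-array-size trick (the kernel itself would use BUILD_BUG_ON):

#define __NR_shmctl	277
#define NR_syscalls	278

/* Fails to compile if the table size ever drifts from last + 1. */
typedef char nr_syscalls_ok[(NR_syscalls == __NR_shmctl + 1) ? 1 : -1];

int main(void) { return 0; }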
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 5e657eb8946c..449f3f272e07 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -127,6 +127,7 @@ extern int acpi_irq_balance_set(char *str);
 #define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
 static inline void acpi_disable_pci(void) { }
+static inline void disable_acpi(void) { }
 
 #endif	/* !CONFIG_ACPI */
 
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 3a61206fd108..cc6b1652249a 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -95,9 +95,7 @@ static inline void ack_APIC_irq(void)
 	apic_write_around(APIC_EOI, 0);
 }
 
-extern void (*wait_timer_tick)(void);
-
-extern int get_maxlvt(void);
+extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC (void);
 extern void disconnect_bsp_APIC (int virt_wire_setup);
@@ -113,14 +111,9 @@ extern void smp_local_timer_interrupt (void);
 extern void setup_boot_APIC_clock (void);
 extern void setup_secondary_APIC_clock (void);
 extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
 #define ARCH_APICTIMER_STOPS_ON_C3	1
 
 extern int timer_over_8254;
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index e47be9a56cc2..fc03cf9de5c4 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -90,16 +90,19 @@
 #define HPET_MIN_PERIOD (100000UL)
 #define HPET_TICK_RATE  (HZ * 100000UL)
 
-extern unsigned long hpet_tick;		/* hpet clks count per tick */
 extern unsigned long hpet_address;	/* hpet memory map physical address */
-extern int hpet_use_timer;
+extern int is_hpet_enabled(void);
 
+#ifdef CONFIG_X86_64
+extern unsigned long hpet_tick;		/* hpet clks count per tick */
+extern int hpet_use_timer;
 extern int hpet_rtc_timer_init(void);
 extern int hpet_enable(void);
-extern int hpet_reenable(void);
-extern int is_hpet_enabled(void);
 extern int is_hpet_capable(void);
 extern int hpet_readl(unsigned long a);
+#else
+extern int hpet_enable(void);
+#endif
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
@@ -110,5 +113,10 @@ extern int hpet_rtc_dropped_irq(void);
 extern int hpet_rtc_timer_init(void);
 extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
 #endif /* CONFIG_HPET_EMULATE_RTC */
+
+#else
+
+static inline int hpet_enable(void) { return 0; }
+
 #endif /* CONFIG_HPET_TIMER */
 #endif /* _I386_HPET_H */
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 015d8df07690..6cb0dd4dcdde 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -1,6 +1,21 @@
 #ifndef __ASM_I8253_H__
 #define __ASM_I8253_H__
 
+#include <linux/clockchips.h>
+
 extern spinlock_t i8253_lock;
 
+extern struct clock_event_device *global_clock_event;
+
+/**
+ * pit_interrupt_hook - hook into timer tick
+ * @regs:	standard registers from interrupt
+ *
+ * Call the global clock event handler.
+ **/
+static inline void pit_interrupt_hook(void)
+{
+	global_clock_event->event_handler(global_clock_event);
+}
+
 #endif	/* __ASM_I8253_H__ */
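pit_interrupt_hook() above is the clockevents indirection in miniature: the PIT interrupt no longer calls do_timer() and friends directly, it just invokes whatever handler the framework installed (periodic tick, oneshot, broadcast). A user-space sketch with a pared-down clock_event_device; the real struct carries modes, rating, shift/mult and more:

#include <stdio.h>

struct clock_event_device {
	const char *name;
	void (*event_handler)(struct clock_event_device *);
};

/* Stand-in for the tick handler the framework would install. */
static void tick_handle_periodic(struct clock_event_device *dev)
{
	printf("tick from %s\n", dev->name);
}

static struct clock_event_device pit_clockevent = {
	.name		= "pit",
	.event_handler	= tick_handle_periodic,
};

static struct clock_event_device *global_clock_event = &pit_clockevent;

/* Mirrors the new pit_interrupt_hook(): the IRQ path needs no
 * knowledge of what the tick is currently being used for. */
static void pit_interrupt_hook(void)
{
	global_clock_event->event_handler(global_clock_event);
}

int main(void)
{
	pit_interrupt_hook();	/* prints "tick from pit" */
	return 0;
}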
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 7d606e3364ae..56e5689863ae 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -1,86 +1,16 @@
 /* defines for inline arch setup functions */
+#include <linux/clockchips.h>
 
-#include <asm/apic.h>
 #include <asm/i8259.h>
+#include <asm/i8253.h>
 
 /**
  * do_timer_interrupt_hook - hook into timer tick
- * @regs:	standard registers from interrupt
  *
- * Description:
- *	This hook is called immediately after the timer interrupt is ack'd.
- *	It's primary purpose is to allow architectures that don't possess
- *	individual per CPU clocks (like the CPU APICs supply) to broadcast the
- *	timer interrupt as a means of triggering reschedules etc.
+ * Call the pit clock event handler. see asm/i8253.h
  **/
 
 static inline void do_timer_interrupt_hook(void)
 {
-	do_timer(1);
-#ifndef CONFIG_SMP
-	update_process_times(user_mode_vm(get_irq_regs()));
-#endif
-/*
- * In the SMP case we use the local APIC timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifndef CONFIG_X86_LOCAL_APIC
-	profile_tick(CPU_PROFILING);
-#else
-	if (!using_apic_timer)
-		smp_local_timer_interrupt();
-#endif
-}
-
-
-/* you can safely undefine this if you don't have the Neptune chipset */
-
-#define BUGGY_NEPTUN_TIMER
-
-/**
- * do_timer_overflow - process a detected timer overflow condition
- * @count:	hardware timer interrupt count on overflow
- *
- * Description:
- *	This call is invoked when the jiffies count has not incremented but
- *	the hardware timer interrupt has.  It means that a timer tick interrupt
- *	came along while the previous one was pending, thus a tick was missed
- **/
-static inline int do_timer_overflow(int count)
-{
-	int i;
-
-	spin_lock(&i8259A_lock);
-	/*
-	 * This is tricky when I/O APICs are used;
-	 * see do_timer_interrupt().
-	 */
-	i = inb(0x20);
-	spin_unlock(&i8259A_lock);
-
-	/* assumption about timer being IRQ0 */
-	if (i & 0x01) {
-		/*
-		 * We cannot detect lost timer interrupts ...
-		 * well, that's why we call them lost, don't we? :)
-		 * [hmm, on the Pentium and Alpha we can ... sort of]
-		 */
-		count -= LATCH;
-	} else {
-#ifdef BUGGY_NEPTUN_TIMER
-		/*
-		 * for the Neptun bug we know that the 'latch'
-		 * command doesn't latch the high and low value
-		 * of the counter atomically. Thus we have to
-		 * substract 256 from the counter
-		 * ... funny, isnt it? :)
-		 */
-
-		count -= 256;
-#else
-		printk("do_slow_gettimeoffset(): hardware timer problem?\n");
-#endif
-	}
-	return count;
-}
+	pit_interrupt_hook();
 }
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 04e69c104a74..60f9dcc15d54 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -1,25 +1,18 @@
 /* defines for inline arch setup functions */
+#include <linux/clockchips.h>
+
 #include <asm/voyager.h>
+#include <asm/i8253.h>
 
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ * @regs:	standard registers from interrupt
+ *
+ * Call the pit clock event handler. see asm/i8253.h
+ **/
 static inline void do_timer_interrupt_hook(void)
 {
-	do_timer(1);
-#ifndef CONFIG_SMP
-	update_process_times(user_mode_vm(irq_regs));
-#endif
-
+	pit_interrupt_hook();
 	voyager_timer_interrupt();
 }
 
-static inline int do_timer_overflow(int count)
-{
-	/* can't read the ISR, just assume 1 tick
-	   overflow */
-	if(count > LATCH || count < 0) {
-		printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH);
-		count = LATCH;
-	}
-	count -= LATCH;
-
-	return count;
-}
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 770bf6da8c3d..f21349399d14 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -23,7 +23,6 @@ extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 extern int pic_mode;
-extern int using_apic_timer;
 
 #ifdef CONFIG_ACPI
 extern void mp_register_lapic (u8 id, u8 enabled);
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 609a3899475c..6db40d0583f1 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -307,4 +307,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x38f
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390
 
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0		0x1900
+
 #endif /* __ASM_MSR_H */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index c13933185c1c..e997891cc7cc 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -1,48 +1 @@
-/*
- * linux/include/asm-i386/tsc.h
- *
- * i386 TSC related functions
- */
-#ifndef _ASM_i386_TSC_H
-#define _ASM_i386_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
-	unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
-		return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-	rdtscll(ret);
-#endif
-	return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-
-#endif
+#include <asm-x86_64/tsc.h>
diff --git a/include/asm-ia64/libata-portmap.h b/include/asm-ia64/libata-portmap.h
new file mode 100644
index 000000000000..0e00c9a9f410
--- /dev/null
+++ b/include/asm-ia64/libata-portmap.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_IA64_LIBATA_PORTMAP_H
+#define __ASM_IA64_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_CMD		0x1F0
+#define ATA_PRIMARY_CTL		0x3F6
+#define ATA_PRIMARY_IRQ(dev)	isa_irq_to_vector(14)
+
+#define ATA_SECONDARY_CMD	0x170
+#define ATA_SECONDARY_CTL	0x376
+#define ATA_SECONDARY_IRQ(dev)	isa_irq_to_vector(15)
+
+#endif
diff --git a/include/asm-sh/Kbuild b/include/asm-sh/Kbuild
index c68e1680da01..76a8ccf254a5 100644
--- a/include/asm-sh/Kbuild
+++ b/include/asm-sh/Kbuild
@@ -1 +1,3 @@
 include include/asm-generic/Kbuild.asm
+
+header-y += cpu-features.h
diff --git a/include/asm-sh/bigsur/bigsur.h b/include/asm-sh/bigsur/bigsur.h
deleted file mode 100644
index 427245f93589..000000000000
--- a/include/asm-sh/bigsur/bigsur.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *
- * Hitachi Big Sur Eval Board support
- *
- * Dustin McIntire (dustin@sensoria.com)
- *
- * May be copied or modified under the terms of the GNU General Public
- * License.  See linux/COPYING for more information.
- *
- * Derived from Hitachi SH7751 reference manual
- *
- */
-
-#ifndef _ASM_BIGSUR_H_
-#define _ASM_BIGSUR_H_
-
-#include <asm/irq.h>
-#include <asm/hd64465/hd64465.h>
-
-/* 7751 Internal IRQ's used by external CPLD controller */
-#define BIGSUR_IRQ_LOW	0
-#define BIGSUR_IRQ_NUM	14	/* External CPLD level 1 IRQs */
-#define BIGSUR_IRQ_HIGH	(BIGSUR_IRQ_LOW + BIGSUR_IRQ_NUM)
-#define BIGSUR_2NDLVL_IRQ_LOW	(HD64465_IRQ_BASE+HD64465_IRQ_NUM)
-#define BIGSUR_2NDLVL_IRQ_NUM	32	/* Level 2 IRQs = 4 regs * 8 bits */
-#define BIGSUR_2NDLVL_IRQ_HIGH	(BIGSUR_2NDLVL_IRQ_LOW + \
-				 BIGSUR_2NDLVL_IRQ_NUM)
-
-/* PCI interrupt base number (A_INTA-A_INTD) */
-#define BIGSUR_SH7751_PCI_IRQ_BASE	(BIGSUR_2NDLVL_IRQ_LOW+10)
-
-/* CPLD registers and external chip addresses */
-#define BIGSUR_HD64464_ADDR	0xB2000000
-#define BIGSUR_DGDR	0xB1FFFE00
-#define BIGSUR_BIDR	0xB1FFFD00
-#define BIGSUR_CSLR	0xB1FFFC00
-#define BIGSUR_SW1R	0xB1FFFB00
-#define BIGSUR_DBGR	0xB1FFFA00
-#define BIGSUR_BDTR	0xB1FFF900
-#define BIGSUR_BDRR	0xB1FFF800
-#define BIGSUR_PPR1	0xB1FFF700
-#define BIGSUR_PPR2	0xB1FFF600
-#define BIGSUR_IDE2	0xB1FFF500
-#define BIGSUR_IDE3	0xB1FFF400
-#define BIGSUR_SPCR	0xB1FFF300
-#define BIGSUR_ETHR	0xB1FE0000
-#define BIGSUR_PPDR	0xB1FDFF00
-#define BIGSUR_ICTL	0xB1FDFE00
-#define BIGSUR_ICMD	0xB1FDFD00
-#define BIGSUR_DMA0	0xB1FDFC00
-#define BIGSUR_DMA1	0xB1FDFB00
-#define BIGSUR_IRQ0	0xB1FDFA00
-#define BIGSUR_IRQ1	0xB1FDF900
-#define BIGSUR_IRQ2	0xB1FDF800
-#define BIGSUR_IRQ3	0xB1FDF700
-#define BIGSUR_IMR0	0xB1FDF600
-#define BIGSUR_IMR1	0xB1FDF500
-#define BIGSUR_IMR2	0xB1FDF400
-#define BIGSUR_IMR3	0xB1FDF300
-#define BIGSUR_IRLMR0	0xB1FDF200
-#define BIGSUR_IRLMR1	0xB1FDF100
-#define BIGSUR_V320USC_ADDR	0xB1000000
-#define BIGSUR_HD64465_ADDR	0xB0000000
-#define BIGSUR_INTERNAL_BASE	0xB0000000
-
-/* SMC ethernet card parameters */
-#define BIGSUR_ETHER_IOPORT	0x220
-
-/* IDE register paramters */
-#define BIGSUR_IDECMD_IOPORT	0x1f0
-#define BIGSUR_IDECTL_IOPORT	0x1f8
-
-/* LED bit position in BIGSUR_CSLR */
-#define BIGSUR_LED	(1<<4)
-
-/* PCI: default LOCAL memory window sizes (seen from PCI bus) */
-#define BIGSUR_LSR0_SIZE	(64*(1<<20))	//64MB
-#define BIGSUR_LSR1_SIZE	(64*(1<<20))	//64MB
-
-#endif /* _ASM_BIGSUR_H_ */
diff --git a/include/asm-sh/bigsur/io.h b/include/asm-sh/bigsur/io.h
deleted file mode 100644
index 1470ac8d4a39..000000000000
--- a/include/asm-sh/bigsur/io.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * include/asm-sh/bigsur/io.h
- *
- * By Dustin McIntire (dustin@sensoria.com) (c)2001
- * Derived from io_hd64465.h, which bore the message:
- * By Greg Banks <gbanks@pocketpenguins.com>
- * (c) 2000 PocketPenguins Inc.
- * and from io_hd64461.h, which bore the message:
- * Copyright 2000 Stuart Menefy (stuart.menefy@st.com)
- *
- * May be copied or modified under the terms of the GNU General Public
- * License.  See linux/COPYING for more information.
- *
- * IO functions for a Hitachi Big Sur Evaluation Board.
- */
-
-#ifndef _ASM_SH_IO_BIGSUR_H
-#define _ASM_SH_IO_BIGSUR_H
-
-#include <linux/types.h>
-
-extern unsigned long bigsur_isa_port2addr(unsigned long offset);
-extern int bigsur_irq_demux(int irq);
-/* Provision for generic secondary demux step -- used by PCMCIA code */
-extern void bigsur_register_irq_demux(int irq,
-		int (*demux)(int irq, void *dev), void *dev);
-extern void bigsur_unregister_irq_demux(int irq);
-/* Set this variable to 1 to see port traffic */
-extern int bigsur_io_debug;
-/* Map a range of ports to a range of kernel virtual memory. */
-extern void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift);
-extern void bigsur_port_unmap(u32 baseport, u32 nports);
-
-#endif /* _ASM_SH_IO_BIGSUR_H */
-
diff --git a/include/asm-sh/bigsur/serial.h b/include/asm-sh/bigsur/serial.h
deleted file mode 100644
index a08fa82fe45a..000000000000
--- a/include/asm-sh/bigsur/serial.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * include/asm-sh/bigsur/serial.h
- *
- * Configuration details for Big Sur 16550 based serial ports
- * i.e. HD64465, PCMCIA, etc.
- */
-
-#ifndef _ASM_SERIAL_BIGSUR_H
-#define _ASM_SERIAL_BIGSUR_H
-#include <asm/hd64465.h>
-
-#define BASE_BAUD	(3379200 / 16)
-
-#define STD_COM_FLAGS	(ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-
-
-#define SERIAL_PORT_DFNS \
-	/* UART CLK   PORT IRQ  FLAGS */ \
-	{ 0, BASE_BAUD, 0x3F8, HD64465_IRQ_UART, STD_COM_FLAGS } /* ttyS0 */
-
-/* XXX: This should be moved ino irq.h */
-#define irq_cannonicalize(x) (x)
-
-#endif /* _ASM_SERIAL_BIGSUR_H */
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index a294997a8412..5a117ec43c77 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -19,9 +19,9 @@ static void __init check_bugs(void)
 	extern unsigned long loops_per_jiffy;
 	char *p = &init_utsname()->machine[2]; /* "sh" */
 
-	cpu_data->loops_per_jiffy = loops_per_jiffy;
+	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
 
-	switch (cpu_data->type) {
+	switch (current_cpu_data.type) {
 	case CPU_SH7604 ... CPU_SH7619:
 		*p++ = '2';
 		break;
@@ -54,7 +54,7 @@ static void __init check_bugs(void)
 		break;
 	}
 
-	printk("CPU: %s\n", get_cpu_subtype());
+	printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
 
 #ifndef __LITTLE_ENDIAN__
 	/* 'eb' means 'Endian Big' */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 07f62ec9ff0c..22f12634975b 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -30,5 +30,8 @@ extern void __flush_invalidate_region(void *start, int size);
 
 #define HAVE_ARCH_UNMAPPED_AREA
 
+/* Page flag for lazy dcache write-back for the aliasing UP caches */
+#define PG_dcache_dirty	PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f70d8ef76a15..6fabbba228de 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -36,8 +36,6 @@
  /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-#define PG_mapped	PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index b01a10f31225..b3746a936a09 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -17,6 +17,7 @@
  * so we need them.
  */
 void flush_cache_all(void);
+void flush_dcache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -38,16 +39,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped	PG_arch_1
-
-#ifdef CONFIG_MMU
-extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags);
-#else /* CONFIG_MMU */
-static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
-				   unsigned long size, unsigned long flags)
-{
-	return 0;
-}
-#endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh4/dma.h b/include/asm-sh/cpu-sh4/dma.h
index 3e4b3e6d80c0..c135e9cebd9c 100644
--- a/include/asm-sh/cpu-sh4/dma.h
+++ b/include/asm-sh/cpu-sh4/dma.h
@@ -3,6 +3,17 @@
 
 #define DMAOR_INIT	( 0x8000 | DMAOR_DME )
 
+/* SH7751/7760/7780 DMA IRQ sources */
+#define DMTE0_IRQ	34
+#define DMTE1_IRQ	35
+#define DMTE2_IRQ	36
+#define DMTE3_IRQ	37
+#define DMTE4_IRQ	44
+#define DMTE5_IRQ	45
+#define DMTE6_IRQ	46
+#define DMTE7_IRQ	47
+#define DMAE_IRQ	38
+
 #ifdef CONFIG_CPU_SH4A
 #define SH_DMAC_BASE	0xfc808020
 
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 8d0867b98e05..d3bc7818bbbe 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,6 +53,10 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(vaddr, size);
 }
 
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
diff --git a/include/asm-sh/ec3104/ec3104.h b/include/asm-sh/ec3104/ec3104.h
deleted file mode 100644
index 639cfa489c8f..000000000000
--- a/include/asm-sh/ec3104/ec3104.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __ASM_EC3104_H
-#define __ASM_EC3104_H
-
-
-/*
- * Most of the register set is at 0xb0ec0000 - 0xb0ecffff.
- *
- * as far as I've figured it out the register map is:
- * 0xb0ec0000 - id string
- * 0xb0ec0XXX - power management
- * 0xb0ec1XXX - interrupt control
- * 0xb0ec3XXX - ps2 port (touch pad on aero 8000)
- * 0xb0ec6XXX - i2c
- * 0xb0ec7000 - first serial port (proprietary connector on aero 8000)
- * 0xb0ec8000 - second serial port
- * 0xb0ec9000 - third serial port
- * 0xb0eca000 - fourth serial port (keyboard controller on aero 8000)
- * 0xb0eccXXX - GPIO
- * 0xb0ecdXXX - GPIO
- */
-
-#define EC3104_BASE	0xb0ec0000
-
-#define EC3104_SER4_DATA	(EC3104_BASE+0xa000)
-#define EC3104_SER4_IIR		(EC3104_BASE+0xa008)
-#define EC3104_SER4_MCR		(EC3104_BASE+0xa010)
-#define EC3104_SER4_LSR		(EC3104_BASE+0xa014)
-#define EC3104_SER4_MSR		(EC3104_BASE+0xa018)
-
-/*
- * our ISA bus.  this seems to be real ISA.
- */
-#define EC3104_ISA_BASE	0xa5000000
-
-#define EC3104_IRQ	11
-#define EC3104_IRQBASE	64
-
-#define EC3104_IRQ_SER1	EC3104_IRQBASE + 7
-#define EC3104_IRQ_SER2	EC3104_IRQBASE + 8
-#define EC3104_IRQ_SER3	EC3104_IRQBASE + 9
-#define EC3104_IRQ_SER4	EC3104_IRQBASE + 10
-
-#endif /* __ASM_EC3104_H */
diff --git a/include/asm-sh/ec3104/io.h b/include/asm-sh/ec3104/io.h
deleted file mode 100644
index ea5c8e65ac11..000000000000
--- a/include/asm-sh/ec3104/io.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_SH_IO_EC3104_H
-#define _ASM_SH_IO_EC3104_H
-
-#include <linux/types.h>
-
-extern unsigned char ec3104_inb(unsigned long port);
-extern unsigned short ec3104_inw(unsigned long port);
-extern unsigned long ec3104_inl(unsigned long port);
-
-extern void ec3104_outb(unsigned char value, unsigned long port);
-extern void ec3104_outw(unsigned short value, unsigned long port);
-extern void ec3104_outl(unsigned long value, unsigned long port);
-
-extern int ec3104_irq_demux(int irq);
-
-#endif /* _ASM_SH_IO_EC3104_H */
diff --git a/include/asm-sh/ec3104/keyboard.h b/include/asm-sh/ec3104/keyboard.h
deleted file mode 100644
index c1253a683197..000000000000
--- a/include/asm-sh/ec3104/keyboard.h
+++ /dev/null
@@ -1,15 +0,0 @@
-extern unsigned char ec3104_kbd_sysrq_xlate[];
-extern int ec3104_kbd_setkeycode(unsigned int scancode, unsigned int keycode);
-extern int ec3104_kbd_getkeycode(unsigned int scancode);
-extern int ec3104_kbd_translate(unsigned char, unsigned char *, char);
-extern char ec3104_kbd_unexpected_up(unsigned char);
-extern void ec3104_kbd_leds(unsigned char);
-extern void ec3104_kbd_init_hw(void);
-
-#define kbd_sysrq_xlate ec3104_kbd_sysrq_xlate
-#define kbd_setkeycode ec3104_kbd_setkeycode
-#define kbd_getkeycode ec3104_kbd_getkeycode
-#define kbd_translate ec3104_kbd_translate
-#define kbd_unexpected_up ec3104_kbd_unexpected_up
-#define kbd_leds ec3104_kbd_leds
-#define kbd_init_hw ec3104_kbd_init_hw
diff --git a/include/asm-sh/ec3104/serial.h b/include/asm-sh/ec3104/serial.h
deleted file mode 100644
index cfe4d78ec1ee..000000000000
--- a/include/asm-sh/ec3104/serial.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#include <asm/ec3104.h>
-/* Naturally we don't know the exact value but 115200 baud has a divisor
- * of 9 and 19200 baud has a divisor of 52, so this seems like a good
- * guess. */
-#define BASE_BAUD (16800000 / 16)
-
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-
-/* there is a fourth serial port with the expected values as well, but
- * it's got the keyboard controller behind it so we can't really use it
- * (without moving the keyboard driver to userspace, which doesn't sound
- * like a very good idea) */
-#define SERIAL_PORT_DFNS \
-	/* UART CLK   PORT IRQ  FLAGS */ \
-	{ 0, BASE_BAUD, 0x11C00, EC3104_IRQBASE+7, STD_COM_FLAGS }, /* ttyS0 */ \
-	{ 0, BASE_BAUD, 0x12000, EC3104_IRQBASE+8, STD_COM_FLAGS }, /* ttyS1 */ \
-	{ 0, BASE_BAUD, 0x12400, EC3104_IRQBASE+9, STD_COM_FLAGS }, /* ttyS2 */
-
-/* XXX: This should be moved ino irq.h */
-#define irq_cannonicalize(x) (x)
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index bff965ef4b95..8ccf7ae593ef 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -66,12 +66,8 @@
 /* 3. OFFCHIP_NR_IRQS */
 #if defined(CONFIG_HD64461)
 # define OFFCHIP_NR_IRQS 18
-#elif defined (CONFIG_SH_BIGSUR) /* must be before CONFIG_HD64465 */
-# define OFFCHIP_NR_IRQS 48
 #elif defined(CONFIG_HD64465)
 # define OFFCHIP_NR_IRQS 16
-#elif defined (CONFIG_SH_EC3104)
-# define OFFCHIP_NR_IRQS 16
 #elif defined (CONFIG_SH_DREAMCAST)
 # define OFFCHIP_NR_IRQS 96
 #elif defined (CONFIG_SH_TITAN)
diff --git a/include/asm-sh/kgdb.h b/include/asm-sh/kgdb.h
index 7b26f53fe343..0095c665d272 100644
--- a/include/asm-sh/kgdb.h
+++ b/include/asm-sh/kgdb.h
@@ -85,10 +85,10 @@ extern int setjmp(jmp_buf __jmpb);
 #define KGDB_PRINTK(...) printk("KGDB: " __VA_ARGS__)
 
 /* Forced breakpoint */
-#define BREAKPOINT() do { \
-	if (kgdb_enabled) { \
-		asm volatile("trapa #0xff"); \
-	} \
-} while (0)
+#define BREAKPOINT()					\
+do {							\
+	if (kgdb_enabled)				\
+		__asm__ __volatile__("trapa #0x3c");	\
+} while (0)
 
 /* KGDB should be able to flush all kernel text space */
diff --git a/include/asm-sh/mmu.h b/include/asm-sh/mmu.h
index cf47df79bb94..eb0358c097d0 100644
--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h
@@ -1,25 +1,19 @@
 #ifndef __MMU_H
 #define __MMU_H
 
-#if !defined(CONFIG_MMU)
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_id_t[NR_CPUS];
 
 typedef struct {
+#ifdef CONFIG_MMU
+	mm_context_id_t id;
+	void *vdso;
+#else
 	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
+#endif
 } mm_context_t;
 
-#else
-
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_id_t;
-
-typedef struct {
-	mm_context_id_t id;
-	void *vdso;
-} mm_context_t;
-
-#endif /* CONFIG_MMU */
-
 /*
  * Privileged Space Mapping Buffer (PMB) definitions
  */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 46f04e23bd45..342024425b7d 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
@@ -19,11 +19,6 @@
  *    (b) ASID (Address Space IDentifier)
  */
 
-/*
- * Cache of MMU context last used.
- */
-extern unsigned long mmu_context_cache;
-
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100
@@ -32,6 +27,11 @@ extern unsigned long mmu_context_cache;
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100
 
+#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
+				 MMU_CONTEXT_ASID_MASK)
+#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+
 /*
  * Virtual Page Number mask
  */
@@ -41,18 +41,17 @@ extern unsigned long mmu_context_cache;
 /*
  * Get MMU context if needed.
  */
-static inline void get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	unsigned long mc = mmu_context_cache;
+	unsigned long asid = asid_cache(cpu);
 
 	/* Check if we have old version of context. */
-	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;
 
 	/* It's old, we need to get new context with new version. */
-	mc = ++mmu_context_cache;
-	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
 		/*
 		 * We exhaust ASID of this version.
 		 * Flush all TLB and start new cycle.
@@ -63,10 +62,11 @@ static inline void get_mmu_context(struct mm_struct *mm)
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
 		 */
-		if (!mc)
-			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+		if (!asid)
+			asid = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context.id = mc;
+
+	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
 /*
@@ -74,9 +74,13 @@ static inline void get_mmu_context(struct mm_struct *mm)
  * instance.
  */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	mm->context.id = NO_CONTEXT;
+	int i;
+
+	for (i = 0; i < num_online_cpus(); i++)
+		cpu_context(i, mm) = NO_CONTEXT;
+
 	return 0;
 }
 
@@ -117,10 +121,10 @@ static inline unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 {
-	get_mmu_context(mm);
-	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
+	get_mmu_context(mm, cpu);
+	set_asid(cpu_asid(cpu, mm));
 }
 
 /* MMU_TTB is used for optimizing the fault handling. */
@@ -138,10 +142,15 @@ static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	unsigned int cpu = smp_processor_id();
+
 	if (likely(prev != next)) {
+		cpu_set(cpu, next->cpu_vm_mask);
 		set_TTB(next->pgd);
-		activate_context(next);
-	}
+		activate_context(next, cpu);
+	} else
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+			activate_context(next, cpu);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -159,7 +168,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define destroy_context(mm)		do { } while (0)
 #define set_asid(asid)			do { } while (0)
 #define get_asid()			(0)
-#define activate_context(mm)		do { } while (0)
+#define activate_context(mm,cpu)	do { } while (0)
 #define switch_mm(prev,next,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)		do { } while (0)
 #define activate_mm(prev,next)		do { } while (0)
@@ -174,14 +183,16 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  */
 static inline void enable_mmu(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Enable MMU */
 	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
 	ctrl_barrier();
 
-	if (mmu_context_cache == NO_CONTEXT)
-		mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
+	if (asid_cache(cpu) == NO_CONTEXT)
+		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
 
-	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
+	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
 }
 
 static inline void disable_mmu(void)
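The mmu_context.h changes above make the classic versioned-ASID scheme per-CPU: the low 8 bits are the ASID handed to hardware, the upper 24 bits are a generation counter, and a context is stale whenever its generation differs from that CPU's asid_cache. A single-CPU user-space sketch of get_mmu_context(); the constants mirror the header, and flush_tlb_all() is a print stub:

#include <stdio.h>

#define MMU_CONTEXT_ASID_MASK		0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
#define NO_CONTEXT			0UL

static unsigned long asid_cache = MMU_CONTEXT_FIRST_VERSION;

static void flush_tlb_all(void) { puts("TLB flushed: ASIDs exhausted"); }

static void get_mmu_context(unsigned long *ctx)
{
	unsigned long asid = asid_cache;

	/* Same generation: the context is still valid. */
	if (((*ctx ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
		return;

	/* Allocate the next ASID; on 8-bit wrap, flush the TLB and
	 * move to the next generation. */
	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
		flush_tlb_all();
		if (!asid)	/* skip version 0 == NO_CONTEXT */
			asid = MMU_CONTEXT_FIRST_VERSION;
	}
	*ctx = asid_cache = asid;
}

int main(void)
{
	unsigned long mm_ctx;
	int i;

	for (i = 0; i < 300; i++) {	/* 300 allocations force one wrap */
		mm_ctx = NO_CONTEXT;
		get_mmu_context(&mm_ctx);
	}
	printf("ctx=%#lx asid=%lu\n", mm_ctx, mm_ctx & MMU_CONTEXT_ASID_MASK);
	return 0;
}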
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 380fd62dd05a..ac4b4677f28c 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -13,6 +13,8 @@
    [ P4 control   ]		0xE0000000
  */
 
+#ifdef __KERNEL__
+
 /* PAGE_SHIFT determines the page size */
 #if defined(CONFIG_PAGE_SIZE_4KB)
 # define PAGE_SHIFT	12
@@ -51,7 +53,6 @@
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
 #endif
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 extern void (*clear_page)(void *to);
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 036ca2843866..9214c015fe14 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -43,17 +43,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 /* PGD bits */
 #define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
 #define PGDIR_BITS	(32 - PGDIR_SHIFT)
-#define PGDIR_SIZE	(1 << PGDIR_SHIFT)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 /* Entries per level */
-#define PTRS_PER_PTE	(PAGE_SIZE / 4)
+#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))
 #define PTRS_PER_PGD	(PAGE_SIZE / 4)
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 
-#define PTE_PHYS_MASK	0x1ffff000
+#define PTE_PHYS_MASK	(0x20000000 - PAGE_SIZE)
 
 /*
  * First 1MB map is used by fixed purpose.
@@ -583,11 +583,6 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index e29f2abb92de..3e46a7afe764 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -27,8 +27,6 @@
 #define CCN_CVR		0xff000040
 #define CCN_PRR		0xff000044
 
-const char *get_cpu_subtype(void);
-
 /*
  * CPU type and hardware bug flags. Kept separately for each CPU.
  *
@@ -66,6 +64,7 @@ enum cpu_type {
 struct sh_cpuinfo {
 	unsigned int type;
 	unsigned long loops_per_jiffy;
+	unsigned long asid_cache;
 
 	struct cache_info icache;	/* Primary I-cache */
 	struct cache_info dcache;	/* Primary D-cache */
@@ -288,5 +287,8 @@ extern int vsyscall_init(void);
 #define vsyscall_init() do { } while (0)
 #endif
 
+/* arch/sh/kernel/setup.c */
+const char *get_cpu_subtype(struct sh_cpuinfo *c);
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_H */
diff --git a/include/asm-sh/rts7751r2d.h b/include/asm-sh/rts7751r2d.h
index 796b8fcb81a8..10565ac7966a 100644
--- a/include/asm-sh/rts7751r2d.h
+++ b/include/asm-sh/rts7751r2d.h
@@ -68,6 +68,10 @@
 #define IRQ_PCISLOT2	10	/* PCI Slot #2 IRQ */
 #define IRQ_EXTENTION	11	/* EXTn IRQ */
 
+/* arch/sh/boards/renesas/rts7751r2d/irq.c */
+void init_rts7751r2d_IRQ(void);
+int rts7751r2d_irq_demux(int);
+
 #define __IO_PREFIX rts7751r2d
 #include <asm/io_generic.h>
 
diff --git a/include/asm-sh/serial.h b/include/asm-sh/serial.h
index 8734590d27e8..21f6d330f189 100644
--- a/include/asm-sh/serial.h
+++ b/include/asm-sh/serial.h
@@ -9,11 +9,6 @@
 
 #include <linux/kernel.h>
 
-#ifdef CONFIG_SH_EC3104
-#include <asm/serial-ec3104.h>
-#elif defined (CONFIG_SH_BIGSUR)
-#include <asm/serial-bigsur.h>
-#else
 /*
  * This assumes you have a 1.8432 MHz clock for your UART.
  *
@@ -34,12 +29,8 @@
 
 #else
 
-#define SERIAL_PORT_DFNS \
-	/* UART CLK   PORT IRQ  FLAGS */ \
-	{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
-	{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }  /* ttyS1 */
+#define SERIAL_PORT_DFNS
 
 #endif
 
-#endif
 #endif /* _ASM_SERIAL_H */
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 879f741105db..279e70a77c75 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -32,12 +32,20 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE		0x10000000
 
-#ifdef CONFIG_4KSTACKS
-#define THREAD_SIZE		(PAGE_SIZE)
+#if defined(CONFIG_4KSTACKS)
+#define THREAD_SIZE_ORDER	(0)
+#elif defined(CONFIG_PAGE_SIZE_4KB)
+#define THREAD_SIZE_ORDER	(1)
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+#define THREAD_SIZE_ORDER	(1)
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define THREAD_SIZE_ORDER	(0)
 #else
-#define THREAD_SIZE		(PAGE_SIZE * 2)
+#error "Unknown thread size"
 #endif
-#define STACK_WARN		(THREAD_SIZE / 8)
+
+#define THREAD_SIZE	(PAGE_SIZE << THREAD_SIZE_ORDER)
+#define STACK_WARN	(THREAD_SIZE >> 3)
 
 /*
  * macros/functions for gaining access to the thread information structure
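Expressing the stack size as a page order, as above, guarantees THREAD_SIZE is a whole number of pages for every page-size configuration. A quick check of the (page size, order) table from the hunk:

#include <stdio.h>

static unsigned long thread_size(unsigned long page_size, int order)
{
	/* THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER */
	return page_size << order;
}

int main(void)
{
	printf("%lu\n", thread_size(4096, 0));	/* 4K stacks   ->  4KB */
	printf("%lu\n", thread_size(4096, 1));	/* 4KB pages   ->  8KB */
	printf("%lu\n", thread_size(8192, 1));	/* 8KB pages   -> 16KB */
	printf("%lu\n", thread_size(65536, 0));	/* 64KB pages  -> 64KB */
	return 0;
}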
diff --git a/include/asm-sh/tlbflush.h b/include/asm-sh/tlbflush.h
index 28c073b0fbab..455fb8da441e 100644
--- a/include/asm-sh/tlbflush.h
+++ b/include/asm-sh/tlbflush.h
@@ -4,7 +4,6 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -12,20 +11,45 @@
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+				 unsigned long page);
+extern void local_flush_tlb_kernel_range(unsigned long start,
+					 unsigned long end);
+extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
+
+#ifdef CONFIG_SMP
 
-extern void flush_tlb(void);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern void __flush_tlb_page(unsigned long asid, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long asid, unsigned long page);
+
+#else
+
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
+#define flush_tlb_one(asid, page)	local_flush_tlb_one(asid, page)
+
+#define flush_tlb_range(vma, start, end)	\
+	local_flush_tlb_range(vma, start, end)
+
+#define flush_tlb_kernel_range(start, end)	\
+	local_flush_tlb_kernel_range(start, end)
+
+#endif /* CONFIG_SMP */
 
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
-{	/* Nothing to do */
+{
+	/* Nothing to do */
 }
-
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif /* __ASM_SH_TLBFLUSH_H */
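The tlbflush.h split above is the usual SMP pattern: local_* primitives touch only the calling CPU; on UP the generic names collapse onto them via macros, while SMP builds supply real functions that also notify the other CPUs. A toy user-space model; tlb_generation[] stands in for per-CPU TLB state, and the cross-CPU loop stands in for what the kernel would do with IPIs:

#include <stdio.h>

#define NR_CPUS 2
static int tlb_generation[NR_CPUS];	/* bump == "flushed" */
static int this_cpu;			/* smp_processor_id() stand-in */

static void local_flush_tlb_all(void)
{
	tlb_generation[this_cpu]++;
}

#ifdef CONFIG_SMP
/* SMP: flush locally, then reach the other CPUs (really via IPI). */
static void flush_tlb_all(void)
{
	int cpu;

	local_flush_tlb_all();
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != this_cpu)
			tlb_generation[cpu]++;
}
#else
/* UP: the generic name is just the local operation. */
#define flush_tlb_all() local_flush_tlb_all()
#endif

int main(void)
{
	flush_tlb_all();
	/* UP build: cpu0=1 cpu1=0; with -DCONFIG_SMP: cpu0=1 cpu1=1 */
	printf("cpu0=%d cpu1=%d\n", tlb_generation[0], tlb_generation[1]);
	return 0;
}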
diff --git a/include/asm-sh/ubc.h b/include/asm-sh/ubc.h
index 694f51f47941..ae9bbdeefbe1 100644
--- a/include/asm-sh/ubc.h
+++ b/include/asm-sh/ubc.h
@@ -17,7 +17,7 @@
 /* User Break Controller */
 #if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
     defined(CONFIG_CPU_SUBTYPE_SH7300)
-#define UBC_TYPE_SH7729	(cpu_data->type == CPU_SH7729)
+#define UBC_TYPE_SH7729	(current_cpu_data.type == CPU_SH7729)
 #else
 #define UBC_TYPE_SH7729	0
 #endif
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index f982073dc6c6..17f527bfd455 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -292,22 +292,22 @@
 #define __NR_mq_getsetattr	(__NR_mq_open+5)
 #define __NR_kexec_load		283
 #define __NR_waitid		284
-/* #define __NR_sys_setaltroot	285 */
-#define __NR_add_key		286
-#define __NR_request_key	287
-#define __NR_keyctl		288
-#define __NR_ioprio_set		289
-#define __NR_ioprio_get		290
-#define __NR_inotify_init	291
-#define __NR_inotify_add_watch	292
-#define __NR_inotify_rm_watch	293
+#define __NR_add_key		285
+#define __NR_request_key	286
+#define __NR_keyctl		287
+#define __NR_ioprio_set		288
+#define __NR_ioprio_get		289
+#define __NR_inotify_init	290
+#define __NR_inotify_add_watch	291
+#define __NR_inotify_rm_watch	292
+/* 293 is unused */
 #define __NR_migrate_pages	294
 #define __NR_openat		295
 #define __NR_mkdirat		296
 #define __NR_mknodat		297
 #define __NR_fchownat		298
 #define __NR_futimesat		299
-#define __NR_newfstatat		300
+#define __NR_fstatat64		300
 #define __NR_unlinkat		301
 #define __NR_renameat		302
 #define __NR_linkat		303
diff --git a/include/asm-sh/voyagergx.h b/include/asm-sh/voyagergx.h
index 99b0807d1c9f..64c936b22715 100644
--- a/include/asm-sh/voyagergx.h
+++ b/include/asm-sh/voyagergx.h
@@ -308,6 +308,9 @@
 #define AC97C_READ		(1 << 19)
 #define AC97C_WD_BIT		(1 << 2)
 #define AC97C_INDEX_MASK	0x7f
-/* -------------------------------------------------------------------- */
+
+/* arch/sh/cchips/voyagergx/consistent.c */
+void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
+int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t);
 
 #endif /* _VOYAGER_GX_REG_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index b39098408b69..59a66f084611 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -56,8 +56,15 @@
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
 extern int apic_is_clustered_box(void);
+extern int hpet_arch_init(void);
+extern int hpet_timer_stop_set_go(unsigned long tick);
+extern int hpet_reenable(void);
+extern unsigned int hpet_calibrate_tsc(void);
 
 extern int hpet_use_timer;
+extern unsigned long hpet_address;
+extern unsigned long hpet_period;
+extern unsigned long hpet_tick;
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index a6d2ff5c69b7..f54f3abf93ce 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -45,11 +45,7 @@ extern u32 pmtmr_ioport;
45#else 45#else
46#define pmtmr_ioport 0 46#define pmtmr_ioport 0
47#endif 47#endif
48extern unsigned long long monotonic_base;
49extern int sysctl_vsyscall;
50extern int nohpet; 48extern int nohpet;
51extern unsigned long vxtime_hz;
52extern void time_init_gtod(void);
53 49
54extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); 50extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
55 51
@@ -91,8 +87,6 @@ extern void check_efer(void);
91 87
92extern int unhandled_signal(struct task_struct *tsk, int sig); 88extern int unhandled_signal(struct task_struct *tsk, int sig);
93 89
94extern int unsynchronized_tsc(void);
95
96extern void select_idle_routine(const struct cpuinfo_x86 *c); 90extern void select_idle_routine(const struct cpuinfo_x86 *c);
97 91
98extern unsigned long table_start, table_end; 92extern unsigned long table_start, table_end;
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index b9e5320b7625..8c6808a3fba4 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -12,38 +12,21 @@
12#include <asm/hpet.h> 12#include <asm/hpet.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/tsc.h>
15#include <linux/compiler.h> 16#include <linux/compiler.h>
16 17
17#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */ 18#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
18 19
19typedef unsigned long long cycles_t;
20
21static inline cycles_t get_cycles (void)
22{
23 unsigned long long ret;
24
25 rdtscll(ret);
26 return ret;
27}
28
29/* Like get_cycles, but make sure the CPU is synchronized. */
30static __always_inline cycles_t get_cycles_sync(void)
31{
32 unsigned long long ret;
33 unsigned eax;
34 /* Don't do an additional sync on CPUs where we know
35 RDTSC is already synchronous. */
36 alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
37 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
38 rdtscll(ret);
39 return ret;
40}
41
42extern unsigned int cpu_khz;
43
44extern int read_current_timer(unsigned long *timer_value); 20extern int read_current_timer(unsigned long *timer_value);
45#define ARCH_HAS_READ_CURRENT_TIMER 1 21#define ARCH_HAS_READ_CURRENT_TIMER 1
46 22
47extern struct vxtime_data vxtime; 23#define USEC_PER_TICK (USEC_PER_SEC / HZ)
24#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
25#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
26
27#define NS_SCALE 10 /* 2^10, carefully chosen */
28#define US_SCALE 32 /* 2^32, arbitrarily chosen */
48 29
30extern void mark_tsc_unstable(void);
31extern void set_cyc2ns_scale(unsigned long khz);
49#endif 32#endif
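
The NS_SCALE shift above pairs with the exported set_cyc2ns_scale(): the idea is to precompute a fixed-point factor once per CPU-frequency change, then turn any TSC delta into nanoseconds with a single multiply and shift. A minimal sketch of that scheme follows; the example_* names and the exact formula are assumptions for illustration, not the patch's implementation:

    static unsigned long example_cyc2ns_scale;

    static void example_set_cyc2ns_scale(unsigned long khz)
    {
            /* ns per cycle = 10^6 / khz, pre-multiplied by 2^NS_SCALE */
            example_cyc2ns_scale = (1000000UL << NS_SCALE) / khz;
    }

    static unsigned long long example_cycles_to_ns(unsigned long long cyc)
    {
            /* one multiply and one shift per conversion */
            return (cyc * example_cyc2ns_scale) >> NS_SCALE;
    }
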
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
new file mode 100644
index 000000000000..9a0a368852c7
--- /dev/null
+++ b/include/asm-x86_64/tsc.h
@@ -0,0 +1,66 @@
1/*
2 * linux/include/asm-x86_64/tsc.h
3 *
4 * x86_64 TSC related functions
5 */
6#ifndef _ASM_x86_64_TSC_H
7#define _ASM_x86_64_TSC_H
8
9#include <asm/processor.h>
10
11/*
12 * Standard way to access the cycle counter.
13 */
14typedef unsigned long long cycles_t;
15
16extern unsigned int cpu_khz;
17extern unsigned int tsc_khz;
18
19static inline cycles_t get_cycles(void)
20{
21 unsigned long long ret = 0;
22
23#ifndef CONFIG_X86_TSC
24 if (!cpu_has_tsc)
25 return 0;
26#endif
27
28#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
29 rdtscll(ret);
30#endif
31 return ret;
32}
33
34/* Like get_cycles, but make sure the CPU is synchronized. */
35static __always_inline cycles_t get_cycles_sync(void)
36{
37 unsigned long long ret;
38#ifdef X86_FEATURE_SYNC_RDTSC
39 unsigned eax;
40
41 /*
42 * Don't do an additional sync on CPUs where we know
43 * RDTSC is already synchronous:
44 */
45 alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
46 "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
47#else
48 sync_core();
49#endif
50 rdtscll(ret);
51
52 return ret;
53}
54
55extern void tsc_init(void);
56extern void mark_tsc_unstable(void);
57extern int unsynchronized_tsc(void);
58
59/*
60 * Boot-time check whether the TSCs are synchronized across
61 * all CPUs/cores:
62 */
63extern void check_tsc_sync_source(int cpu);
64extern void check_tsc_sync_target(void);
65
66#endif
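
A usage sketch for the new header: get_cycles_sync() serializes the pipeline around the TSC reads, so work inside a measured region cannot be reordered past either read. The do_something() region and the cpu_khz-based conversion below are illustrative, not from the patch:

    static unsigned long long example_time_region_ns(void)
    {
            cycles_t t0, t1;

            t0 = get_cycles_sync();         /* serialized TSC read */
            do_something();                 /* hypothetical measured region */
            t1 = get_cycles_sync();

            /* cpu_khz is cycles per millisecond: ns = delta * 10^6 / cpu_khz */
            return ((t1 - t0) * 1000000ULL) / cpu_khz;
    }
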
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 0c7847165eae..82b4afe65c91 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -16,46 +16,27 @@ enum vsyscall_num {
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17#include <linux/seqlock.h> 17#include <linux/seqlock.h>
18 18
19#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
20#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) 19#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
21#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) 20#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
22#define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
23#define __section_sysctl_vsyscall __attribute__ ((unused, __section__ (".sysctl_vsyscall"), aligned(16)))
24#define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
25#define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(16)))
26 21
27#define VXTIME_TSC 1 22/* Definitions for CONFIG_GENERIC_TIME */
28#define VXTIME_HPET 2 23#define __section_vsyscall_gtod_data __attribute__ \
29#define VXTIME_PMTMR 3 24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
30 26
31#define VGETCPU_RDTSCP 1 27#define VGETCPU_RDTSCP 1
32#define VGETCPU_LSL 2 28#define VGETCPU_LSL 2
33 29
34struct vxtime_data {
35 long hpet_address; /* HPET base address */
36 int last;
37 unsigned long last_tsc;
38 long quot;
39 long tsc_quot;
40 int mode;
41};
42
43#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a) 30#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
44#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a) 31#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
45 32
46/* vsyscall space (readonly) */
47extern struct vxtime_data __vxtime;
48extern int __vgetcpu_mode; 33extern int __vgetcpu_mode;
49extern struct timespec __xtime;
50extern volatile unsigned long __jiffies; 34extern volatile unsigned long __jiffies;
51extern struct timezone __sys_tz;
52extern seqlock_t __xtime_lock;
53 35
54/* kernel space (writeable) */ 36/* kernel space (writeable) */
55extern struct vxtime_data vxtime;
56extern int vgetcpu_mode; 37extern int vgetcpu_mode;
57extern struct timezone sys_tz; 38extern struct timezone sys_tz;
58extern int sysctl_vsyscall; 39extern struct vsyscall_gtod_data_t vsyscall_gtod_data;
59 40
60#endif /* __KERNEL__ */ 41#endif /* __KERNEL__ */
61 42
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 815f1fb4ce21..8bcfaa4c66ae 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -75,7 +75,7 @@ enum acpi_address_range_id {
75 75
76typedef int (*acpi_table_handler) (struct acpi_table_header *table); 76typedef int (*acpi_table_handler) (struct acpi_table_header *table);
77 77
78typedef int (*acpi_madt_entry_handler) (struct acpi_subtable_header *header, const unsigned long end); 78typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
79 79
80char * __acpi_map_table (unsigned long phys_addr, unsigned long size); 80char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
81unsigned long acpi_find_rsdp (void); 81unsigned long acpi_find_rsdp (void);
@@ -85,8 +85,10 @@ int acpi_numa_init (void);
85 85
86int acpi_table_init (void); 86int acpi_table_init (void);
87int acpi_table_parse (char *id, acpi_table_handler handler); 87int acpi_table_parse (char *id, acpi_table_handler handler);
88int acpi_table_parse_madt (enum acpi_madt_type id, acpi_madt_entry_handler handler, unsigned int max_entries); 88int __init acpi_table_parse_entries(char *id, unsigned long table_size,
89int acpi_table_parse_srat (enum acpi_srat_type id, acpi_madt_entry_handler handler, unsigned int max_entries); 89 int entry_id, acpi_table_entry_handler handler, unsigned int max_entries);
90int acpi_table_parse_madt (enum acpi_madt_type id, acpi_table_entry_handler handler, unsigned int max_entries);
91int acpi_table_parse_srat (enum acpi_srat_type id, acpi_table_entry_handler handler, unsigned int max_entries);
90int acpi_parse_mcfg (struct acpi_table_header *header); 92int acpi_parse_mcfg (struct acpi_table_header *header);
91void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); 93void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
92void acpi_table_print_srat_entry (struct acpi_subtable_header *srat); 94void acpi_table_print_srat_entry (struct acpi_subtable_header *srat);
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
new file mode 100644
index 000000000000..1d0ef1ae8036
--- /dev/null
+++ b/include/linux/acpi_pmtmr.h
@@ -0,0 +1,38 @@
1#ifndef _ACPI_PMTMR_H_
2#define _ACPI_PMTMR_H_
3
4#include <linux/clocksource.h>
5
6/* Number of PMTMR ticks expected during calibration run */
7#define PMTMR_TICKS_PER_SEC 3579545
8
9/* limit it to 24 bits */
10#define ACPI_PM_MASK CLOCKSOURCE_MASK(24)
11
12/* Overrun value */
13#define ACPI_PM_OVRRUN (1<<24)
14
15#ifdef CONFIG_X86_PM_TIMER
16
17extern u32 acpi_pm_read_verified(void);
18extern u32 pmtmr_ioport;
19
20static inline u32 acpi_pm_read_early(void)
21{
22 if (!pmtmr_ioport)
23 return 0;
24 /* mask the output to 24 bits */
25 return acpi_pm_read_verified() & ACPI_PM_MASK;
26}
27
28#else
29
30static inline u32 acpi_pm_read_early(void)
31{
32 return 0;
33}
34
35#endif
36
37#endif
38
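Because the PM timer is only 24 bits wide, callers mask their subtraction so that a single counter wrap still produces the right delta, and PMTMR_TICKS_PER_SEC converts ticks to wall time. A short sketch using only the macros defined above:

    /* elapsed PM-timer ticks, correct across one 2^24 wrap */
    static u32 example_pmtmr_delta(u32 start, u32 end)
    {
            return (end - start) & ACPI_PM_MASK;
    }

    /* ticks -> microseconds (3579545 ticks per second) */
    static u32 example_pmtmr_ticks_to_us(u32 ticks)
    {
            return (u32)(((u64)ticks * 1000000ULL) / PMTMR_TICKS_PER_SEC);
    }
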
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index a5c8bb5d80ba..abc521cfb084 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -87,10 +87,15 @@ struct agp_memory {
87 u32 physical; 87 u32 physical;
88 u8 is_bound; 88 u8 is_bound;
89 u8 is_flushed; 89 u8 is_flushed;
90 u8 vmalloc_flag;
90}; 91};
91 92
92#define AGP_NORMAL_MEMORY 0 93#define AGP_NORMAL_MEMORY 0
93 94
95#define AGP_USER_TYPES (1 << 16)
96#define AGP_USER_MEMORY (AGP_USER_TYPES)
97#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
98
94extern struct agp_bridge_data *agp_bridge; 99extern struct agp_bridge_data *agp_bridge;
95extern struct list_head agp_bridges; 100extern struct list_head agp_bridges;
96 101
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 18e401ff7eaf..272736e37990 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -352,7 +352,7 @@ static inline int ata_drive_40wire(const u16 *dev_id)
352{ 352{
353 if (ata_id_major_version(dev_id) >= 5 && ata_id_is_sata(dev_id)) 353 if (ata_id_major_version(dev_id) >= 5 && ata_id_is_sata(dev_id))
354 return 0; /* SATA */ 354 return 0; /* SATA */
355 if (dev_id[93] & 0x4000) 355 if ((dev_id[93] & 0xE000) == 0x6000)
356 return 0; /* 80 wire */ 356 return 0; /* 80 wire */
357 return 1; 357 return 1;
358} 358}
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
new file mode 100644
index 000000000000..4ea7e7bcfafe
--- /dev/null
+++ b/include/linux/clockchips.h
@@ -0,0 +1,142 @@
1/* linux/include/linux/clockchips.h
2 *
3 * This file contains the structure definitions for clockchips.
4 *
5 * If you are not a clockchip, or the time of day code, you should
6 * not be including this file!
7 */
8#ifndef _LINUX_CLOCKCHIPS_H
9#define _LINUX_CLOCKCHIPS_H
10
11#ifdef CONFIG_GENERIC_CLOCKEVENTS
12
13#include <linux/clocksource.h>
14#include <linux/cpumask.h>
15#include <linux/ktime.h>
16#include <linux/notifier.h>
17
18struct clock_event_device;
19
20/* Clock event mode commands */
21enum clock_event_mode {
22 CLOCK_EVT_MODE_UNUSED = 0,
23 CLOCK_EVT_MODE_SHUTDOWN,
24 CLOCK_EVT_MODE_PERIODIC,
25 CLOCK_EVT_MODE_ONESHOT,
26};
27
28/* Clock event notification values */
29enum clock_event_notifiers {
30 CLOCK_EVT_NOTIFY_ADD,
31 CLOCK_EVT_NOTIFY_BROADCAST_ON,
32 CLOCK_EVT_NOTIFY_BROADCAST_OFF,
33 CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
34 CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
35 CLOCK_EVT_NOTIFY_SUSPEND,
36 CLOCK_EVT_NOTIFY_RESUME,
37 CLOCK_EVT_NOTIFY_CPU_DEAD,
38};
39
40/*
41 * Clock event features
42 */
43#define CLOCK_EVT_FEAT_PERIODIC 0x000001
44#define CLOCK_EVT_FEAT_ONESHOT 0x000002
45/*
46 * x86(64) specific misfeatures:
47 *
48 * - Clockevent source stops in C3 State and needs broadcast support.
49 * - Local APIC timer is used as a dummy device.
50 */
51#define CLOCK_EVT_FEAT_C3STOP 0x000004
52#define CLOCK_EVT_FEAT_DUMMY 0x000008
53
54/**
55 * struct clock_event_device - clock event device descriptor
56 * @name: ptr to clock event name
57 * @features: clock event device features (see CLOCK_EVT_FEAT_* above)
58 * @max_delta_ns: maximum delta value in ns
59 * @min_delta_ns: minimum delta value in ns
60 * @mult: nanosecond to cycles multiplier
61 * @shift: nanoseconds to cycles divisor (power of two)
62 * @rating: variable to rate clock event devices
63 * @irq: irq number (only for non cpu local devices)
64 * @cpumask: cpumask to indicate for which cpus this device works
65 * @set_next_event: set next event
66 * @set_mode: set mode function
67 * @event_handler: Assigned by the framework to be called by the low
68 * level handler of the event source
69 * @broadcast: function to broadcast events
70 * @list: list head for the management code
71 * @mode: operating mode assigned by the management code
72 * @next_event: local storage for the next event in oneshot mode
73 */
74struct clock_event_device {
75 const char *name;
76 unsigned int features;
77 unsigned long max_delta_ns;
78 unsigned long min_delta_ns;
79 unsigned long mult;
80 int shift;
81 int rating;
82 int irq;
83 cpumask_t cpumask;
84 int (*set_next_event)(unsigned long evt,
85 struct clock_event_device *);
86 void (*set_mode)(enum clock_event_mode mode,
87 struct clock_event_device *);
88 void (*event_handler)(struct clock_event_device *);
89 void (*broadcast)(cpumask_t mask);
90 struct list_head list;
91 enum clock_event_mode mode;
92 ktime_t next_event;
93};
94
95/*
96 * Calculate a multiplication factor for scaled math, which is used to convert
97 * nanosecond-based values to clock ticks:
98 *
99 * clock_ticks = (nanoseconds * factor) >> shift.
100 *
101 * div_sc is the rearranged equation to calculate a factor from a given clock
102 * ticks / nanoseconds ratio:
103 *
104 * factor = (clock_ticks << shift) / nanoseconds
105 */
106static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
107 int shift)
108{
109 uint64_t tmp = ((uint64_t)ticks) << shift;
110
111 do_div(tmp, nsec);
112 return (unsigned long) tmp;
113}
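
Worked numbers for div_sc(), assuming a 1 MHz event source and the common shift of 32:

    unsigned long mult = div_sc(1000000, 1000000000, 32);
    /* mult == (1000000 << 32) / 10^9 == 4294967 */

    /* converting 500,000 ns of latency back into device ticks: */
    unsigned long ticks = (unsigned long)(((u64)500000 * mult) >> 32);
    /* == 499 (~500); the truncated factor costs at most one tick */
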
114
115/* Clock event layer functions */
116extern unsigned long clockevent_delta2ns(unsigned long latch,
117 struct clock_event_device *evt);
118extern void clockevents_register_device(struct clock_event_device *dev);
119
120extern void clockevents_exchange_device(struct clock_event_device *old,
121 struct clock_event_device *new);
122extern
123struct clock_event_device *clockevents_request_device(unsigned int features,
124 cpumask_t cpumask);
125extern void clockevents_release_device(struct clock_event_device *dev);
126extern void clockevents_set_mode(struct clock_event_device *dev,
127 enum clock_event_mode mode);
128extern int clockevents_register_notifier(struct notifier_block *nb);
129extern void clockevents_unregister_notifier(struct notifier_block *nb);
130extern int clockevents_program_event(struct clock_event_device *dev,
131 ktime_t expires, ktime_t now);
132
133extern void clockevents_notify(unsigned long reason, void *arg);
134
135#else
136
137static inline void clockevents_resume_events(void) { }
138#define clockevents_notify(reason, arg) do { } while (0)
139
140#endif
141
142#endif
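
Putting the header together: a minimal driver fills in mult with div_sc(), derives its delta limits with clockevent_delta2ns(), and registers the device. In the sketch below, the 1 MHz rate, the 16-bit counter width, and the empty hardware hooks are assumptions, not part of the patch:

    static void example_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *evt)
    {
            /* start, stop or reprogram the hardware to match 'mode' */
    }

    static int example_set_next_event(unsigned long delta,
                                      struct clock_event_device *evt)
    {
            /* load 'delta' hardware ticks into the down-counter */
            return 0;
    }

    static struct clock_event_device example_clockevent = {
            .name           = "example",
            .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
            .shift          = 32,
            .rating         = 200,
            .set_mode       = example_set_mode,
            .set_next_event = example_set_next_event,
    };

    static void example_register(void)
    {
            example_clockevent.mult = div_sc(1000000, NSEC_PER_SEC, 32);
            example_clockevent.max_delta_ns =
                    clockevent_delta2ns(0xffff, &example_clockevent);
            example_clockevent.min_delta_ns =
                    clockevent_delta2ns(1, &example_clockevent);
            example_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
            clockevents_register_device(&example_clockevent);
    }
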
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 1622d23a8dc3..daa4940cc0f1 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -12,11 +12,13 @@
12#include <linux/timex.h> 12#include <linux/timex.h>
13#include <linux/time.h> 13#include <linux/time.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/timer.h>
15#include <asm/div64.h> 16#include <asm/div64.h>
16#include <asm/io.h> 17#include <asm/io.h>
17 18
18/* clocksource cycle base type */ 19/* clocksource cycle base type */
19typedef u64 cycle_t; 20typedef u64 cycle_t;
21struct clocksource;
20 22
21/** 23/**
22 * struct clocksource - hardware abstraction for a free running counter 24 * struct clocksource - hardware abstraction for a free running counter
@@ -44,8 +46,8 @@ typedef u64 cycle_t;
44 * subtraction of non 64 bit counters 46 * subtraction of non 64 bit counters
45 * @mult: cycle to nanosecond multiplier 47 * @mult: cycle to nanosecond multiplier
46 * @shift: cycle to nanosecond divisor (power of two) 48 * @shift: cycle to nanosecond divisor (power of two)
47 * @update_callback: called when safe to alter clocksource values 49 * @flags: flags describing special properties
48 * @is_continuous: defines if clocksource is free-running. 50 * @vread: vsyscall based read
49 * @cycle_interval: Used internally by timekeeping core, please ignore. 51 * @cycle_interval: Used internally by timekeeping core, please ignore.
50 * @xtime_interval: Used internally by timekeeping core, please ignore. 52 * @xtime_interval: Used internally by timekeeping core, please ignore.
51 */ 53 */
@@ -57,15 +59,30 @@ struct clocksource {
57 cycle_t mask; 59 cycle_t mask;
58 u32 mult; 60 u32 mult;
59 u32 shift; 61 u32 shift;
60 int (*update_callback)(void); 62 unsigned long flags;
61 int is_continuous; 63 cycle_t (*vread)(void);
62 64
63 /* timekeeping specific data, ignore */ 65 /* timekeeping specific data, ignore */
64 cycle_t cycle_last, cycle_interval; 66 cycle_t cycle_last, cycle_interval;
65 u64 xtime_nsec, xtime_interval; 67 u64 xtime_nsec, xtime_interval;
66 s64 error; 68 s64 error;
69
70#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
71 /* Watchdog related data, used by the framework */
72 struct list_head wd_list;
73 cycle_t wd_last;
74#endif
67}; 75};
68 76
77/*
78 * Clock source flag bits:
79 */
80#define CLOCK_SOURCE_IS_CONTINUOUS 0x01
81#define CLOCK_SOURCE_MUST_VERIFY 0x02
82
83#define CLOCK_SOURCE_WATCHDOG 0x10
84#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
85
69/* simplify initialization of mask field */ 86/* simplify initialization of mask field */
70#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1) 87#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
71 88
@@ -178,8 +195,16 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
178 195
179 196
180/* used to install a new clocksource */ 197/* used to install a new clocksource */
181int clocksource_register(struct clocksource*); 198extern int clocksource_register(struct clocksource*);
182void clocksource_reselect(void); 199extern struct clocksource* clocksource_get_next(void);
183struct clocksource* clocksource_get_next(void); 200extern void clocksource_change_rating(struct clocksource *cs, int rating);
201
202#ifdef CONFIG_GENERIC_TIME_VSYSCALL
203extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
204#else
205static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
206{
207}
208#endif
184 209
185#endif /* _LINUX_CLOCKSOURCE_H */ 210#endif /* _LINUX_CLOCKSOURCE_H */
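
With update_callback/is_continuous replaced by the flags word, a minimal driver now declares CLOCK_SOURCE_IS_CONTINUOUS explicitly and registers as before. A sketch; example_read_counter() stands in for a real hardware read, and the clocksource_hz2mult() helper is assumed from the existing header:

    static cycle_t example_read(void)
    {
            return (cycle_t)example_read_counter(); /* hypothetical hw access */
    }

    static struct clocksource example_clocksource = {
            .name   = "example",
            .rating = 200,
            .read   = example_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .shift  = 20,
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init example_clocksource_init(void)
    {
            example_clocksource.mult =
                    clocksource_hz2mult(1000000, example_clocksource.shift);
            return clocksource_register(&example_clocksource);
    }
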
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7f008f6bfdc3..0899e2cdcdd1 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -84,9 +84,6 @@ struct cpufreq_policy {
84 unsigned int policy; /* see above */ 84 unsigned int policy; /* see above */
85 struct cpufreq_governor *governor; /* see below */ 85 struct cpufreq_governor *governor; /* see below */
86 86
87 struct mutex lock; /* CPU ->setpolicy or ->target may
88 only be called once a time */
89
90 struct work_struct update; /* if update_policy() needs to be 87 struct work_struct update; /* if update_policy() needs to be
91 * called, but you're in IRQ context */ 88 * called, but you're in IRQ context */
92 89
@@ -172,11 +169,16 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 169 unsigned int relation);
173 170
174 171
175extern int cpufreq_driver_getavg(struct cpufreq_policy *policy); 172extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
176 173
177int cpufreq_register_governor(struct cpufreq_governor *governor); 174int cpufreq_register_governor(struct cpufreq_governor *governor);
178void cpufreq_unregister_governor(struct cpufreq_governor *governor); 175void cpufreq_unregister_governor(struct cpufreq_governor *governor);
179 176
177int lock_policy_rwsem_read(int cpu);
178int lock_policy_rwsem_write(int cpu);
179void unlock_policy_rwsem_read(int cpu);
180void unlock_policy_rwsem_write(int cpu);
181
180 182
181/********************************************************************* 183/*********************************************************************
182 * CPUFREQ DRIVER INTERFACE * 184 * CPUFREQ DRIVER INTERFACE *
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 612472aaa79c..7803014f3a11 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -106,7 +106,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
106 * always balanced, so the interrupted value of ->hardirq_context 106 * always balanced, so the interrupted value of ->hardirq_context
107 * will always be restored. 107 * will always be restored.
108 */ 108 */
109#define irq_enter() \ 109#define __irq_enter() \
110 do { \ 110 do { \
111 account_system_vtime(current); \ 111 account_system_vtime(current); \
112 add_preempt_count(HARDIRQ_OFFSET); \ 112 add_preempt_count(HARDIRQ_OFFSET); \
@@ -114,6 +114,11 @@ static inline void account_system_vtime(struct task_struct *tsk)
114 } while (0) 114 } while (0)
115 115
116/* 116/*
117 * Enter irq context (on NO_HZ, update jiffies):
118 */
119extern void irq_enter(void);
120
121/*
117 * Exit irq context without processing softirqs: 122 * Exit irq context without processing softirqs:
118 */ 123 */
119#define __irq_exit() \ 124#define __irq_exit() \
@@ -128,7 +133,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
128 */ 133 */
129extern void irq_exit(void); 134extern void irq_exit(void);
130 135
131#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0) 136#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0)
132#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) 137#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
133 138
134#endif /* LINUX_HARDIRQ_H */ 139#endif /* LINUX_HARDIRQ_H */
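
The point of the out-of-line irq_enter() is that architecture interrupt entry paths keep the same bracket as before, while NO_HZ gets a chance to catch up jiffies when the tick was stopped. A sketch of the expected call pattern; the dispatch body is an assumption:

    fastcall unsigned int example_do_IRQ(struct pt_regs *regs)
    {
            irq_enter();    /* may update jiffies if the tick was stopped */
            /* ... look up and run the handler for this vector ... */
            irq_exit();     /* runs pending softirqs on leaving irq context */
            return 1;
    }
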
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fca93025ab51..37f9279192a9 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -21,22 +21,72 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23 23
24struct hrtimer_clock_base;
25struct hrtimer_cpu_base;
26
24/* 27/*
25 * Mode arguments of xxx_hrtimer functions: 28 * Mode arguments of xxx_hrtimer functions:
26 */ 29 */
27enum hrtimer_mode { 30enum hrtimer_mode {
28 HRTIMER_ABS, /* Time value is absolute */ 31 HRTIMER_MODE_ABS, /* Time value is absolute */
29 HRTIMER_REL, /* Time value is relative to now */ 32 HRTIMER_MODE_REL, /* Time value is relative to now */
30}; 33};
31 34
35/*
36 * Return values for the callback function
37 */
32enum hrtimer_restart { 38enum hrtimer_restart {
33 HRTIMER_NORESTART, 39 HRTIMER_NORESTART, /* Timer is not restarted */
34 HRTIMER_RESTART, 40 HRTIMER_RESTART, /* Timer must be restarted */
35}; 41};
36 42
37#define HRTIMER_INACTIVE ((void *)1UL) 43/*
44 * hrtimer callback modes:
45 *
46 * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
47 * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
48 * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
49 * does not restart the timer
50 * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
51 * Special mode for tick emulation
52 */
53enum hrtimer_cb_mode {
54 HRTIMER_CB_SOFTIRQ,
55 HRTIMER_CB_IRQSAFE,
56 HRTIMER_CB_IRQSAFE_NO_RESTART,
57 HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
58};
38 59
39struct hrtimer_base; 60/*
61 * Values to track state of the timer
62 *
63 * Possible states:
64 *
65 * 0x00 inactive
66 * 0x01 enqueued into rbtree
67 * 0x02 callback function running
68 * 0x04 callback pending (high resolution mode)
69 *
70 * Special case:
71 * 0x03 callback function running and enqueued
72 * (was requeued on another CPU)
73 * The "callback function running and enqueued" status is only possible on
74 * SMP. It happens for example when a posix timer expired and the callback
75 * queued a signal. Between dropping the lock which protects the posix timer
76 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
77 * signal and rearm the timer. We have to preserve the callback running state,
78 * as otherwise the timer could be removed before the softirq code finishes the
79 * handling of the timer.
80 *
81 * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
82 * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
83 *
84 * All state transitions are protected by cpu_base->lock.
85 */
86#define HRTIMER_STATE_INACTIVE 0x00
87#define HRTIMER_STATE_ENQUEUED 0x01
88#define HRTIMER_STATE_CALLBACK 0x02
89#define HRTIMER_STATE_PENDING 0x04
40 90
41/** 91/**
42 * struct hrtimer - the basic hrtimer structure 92 * struct hrtimer - the basic hrtimer structure
@@ -46,14 +96,34 @@ struct hrtimer_base;
46 * which the timer is based. 96 * which the timer is based.
47 * @function: timer expiry callback function 97 * @function: timer expiry callback function
48 * @base: pointer to the timer base (per cpu and per clock) 98 * @base: pointer to the timer base (per cpu and per clock)
99 * @state: state information (See bit values above)
100 * @cb_mode: high resolution timer feature to select the callback execution
101 * mode
102 * @cb_entry: list head to enqueue an expired timer into the callback list
103 * @start_site: timer statistics field to store the site where the timer
104 * was started
105 * @start_comm: timer statistics field to store the name of the process which
106 * started the timer
107 * @start_pid: timer statistics field to store the pid of the task which
108 * started the timer
49 * 109 *
50 * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() 110 * The hrtimer structure must be initialized by hrtimer_init()
51 */ 111 */
52struct hrtimer { 112struct hrtimer {
53 struct rb_node node; 113 struct rb_node node;
54 ktime_t expires; 114 ktime_t expires;
55 int (*function)(struct hrtimer *); 115 enum hrtimer_restart (*function)(struct hrtimer *);
56 struct hrtimer_base *base; 116 struct hrtimer_clock_base *base;
117 unsigned long state;
118#ifdef CONFIG_HIGH_RES_TIMERS
119 enum hrtimer_cb_mode cb_mode;
120 struct list_head cb_entry;
121#endif
122#ifdef CONFIG_TIMER_STATS
123 void *start_site;
124 char start_comm[16];
125 int start_pid;
126#endif
57}; 127};
58 128
59/** 129/**
@@ -70,37 +140,114 @@ struct hrtimer_sleeper {
70 140
71/** 141/**
72 * struct hrtimer_base - the timer base for a specific clock 142 * struct hrtimer_base - the timer base for a specific clock
73 * @index: clock type index for per_cpu support when moving a timer 143 * @index: clock type index for per_cpu support when moving a
74 * to a base on another cpu. 144 * timer to a base on another cpu.
75 * @lock: lock protecting the base and associated timers
76 * @active: red black tree root node for the active timers 145 * @active: red black tree root node for the active timers
77 * @first: pointer to the timer node which expires first 146 * @first: pointer to the timer node which expires first
78 * @resolution: the resolution of the clock, in nanoseconds 147 * @resolution: the resolution of the clock, in nanoseconds
79 * @get_time: function to retrieve the current time of the clock 148 * @get_time: function to retrieve the current time of the clock
80 * @get_softirq_time: function to retrieve the current time from the softirq 149 * @get_softirq_time: function to retrieve the current time from the softirq
81 * @curr_timer: the timer which is executing a callback right now
82 * @softirq_time: the time when running the hrtimer queue in the softirq 150 * @softirq_time: the time when running the hrtimer queue in the softirq
83 * @lock_key: the lock_class_key for use with lockdep 151 * @cb_pending: list of timers where the callback is pending
152 * @offset: offset of this clock to the monotonic base
153 * @reprogram: function to reprogram the timer event
84 */ 154 */
85struct hrtimer_base { 155struct hrtimer_clock_base {
156 struct hrtimer_cpu_base *cpu_base;
86 clockid_t index; 157 clockid_t index;
87 spinlock_t lock;
88 struct rb_root active; 158 struct rb_root active;
89 struct rb_node *first; 159 struct rb_node *first;
90 ktime_t resolution; 160 ktime_t resolution;
91 ktime_t (*get_time)(void); 161 ktime_t (*get_time)(void);
92 ktime_t (*get_softirq_time)(void); 162 ktime_t (*get_softirq_time)(void);
93 struct hrtimer *curr_timer;
94 ktime_t softirq_time; 163 ktime_t softirq_time;
95 struct lock_class_key lock_key; 164#ifdef CONFIG_HIGH_RES_TIMERS
165 ktime_t offset;
166 int (*reprogram)(struct hrtimer *t,
167 struct hrtimer_clock_base *b,
168 ktime_t n);
169#endif
170};
171
172#define HRTIMER_MAX_CLOCK_BASES 2
173
174/*
175 * struct hrtimer_cpu_base - the per cpu clock bases
176 * @lock: lock protecting the base and associated clock bases
177 * and timers
178 * @lock_key: the lock_class_key for use with lockdep
179 * @clock_base: array of clock bases for this cpu
180 * @curr_timer: the timer which is executing a callback right now
181 * @expires_next: absolute time of the next event which was scheduled
182 * via clock_set_next_event()
183 * @hres_active: State of high resolution mode
184 * @check_clocks: Indicator; when set, evaluate the time source and clock
185 * event devices to determine whether high resolution
186 * mode can be activated.
187 * @cb_pending: Expired timers are moved from the rbtree to this
188 * list in the timer interrupt. The list is processed
189 * in the softirq.
190 * @nr_events: Total number of timer interrupt events
191 */
192struct hrtimer_cpu_base {
193 spinlock_t lock;
194 struct lock_class_key lock_key;
195 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
196#ifdef CONFIG_HIGH_RES_TIMERS
197 ktime_t expires_next;
198 int hres_active;
199 struct list_head cb_pending;
200 unsigned long nr_events;
201#endif
96}; 202};
97 203
204#ifdef CONFIG_HIGH_RES_TIMERS
205struct clock_event_device;
206
207extern void clock_was_set(void);
208extern void hrtimer_interrupt(struct clock_event_device *dev);
209
210/*
211 * In high resolution mode the time reference must be read accurately
212 */
213static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
214{
215 return timer->base->get_time();
216}
217
218/*
219 * The resolution of the clocks. The resolution value is returned in
220 * the clock_getres() system call to give application programmers an
221 * idea of the (in)accuracy of timers. Timer values are rounded up to
222 * this resolution value.
223 */
224# define KTIME_HIGH_RES (ktime_t) { .tv64 = 1 }
225# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
226
227#else
228
229# define KTIME_MONOTONIC_RES KTIME_LOW_RES
230
98/* 231/*
99 * clock_was_set() is a NOP for non-high-resolution systems. The 232
100 * time-sorted order guarantees that a timer does not expire early and 233 * time-sorted order guarantees that a timer does not expire early and
101 * is expired in the next softirq when the clock was advanced. 234 * is expired in the next softirq when the clock was advanced.
102 */ 235 */
103#define clock_was_set() do { } while (0) 236static inline void clock_was_set(void) { }
237
238/*
239 * In non high resolution mode the time reference is taken from
240 * the base softirq time variable.
241 */
242static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
243{
244 return timer->base->softirq_time;
245}
246
247#endif
248
249extern ktime_t ktime_get(void);
250extern ktime_t ktime_get_real(void);
104 251
105/* Exported timer functions: */ 252/* Exported timer functions: */
106 253
@@ -114,19 +261,33 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
114extern int hrtimer_cancel(struct hrtimer *timer); 261extern int hrtimer_cancel(struct hrtimer *timer);
115extern int hrtimer_try_to_cancel(struct hrtimer *timer); 262extern int hrtimer_try_to_cancel(struct hrtimer *timer);
116 263
117#define hrtimer_restart(timer) hrtimer_start((timer), (timer)->expires, HRTIMER_ABS) 264static inline int hrtimer_restart(struct hrtimer *timer)
265{
266 return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
267}
118 268
119/* Query timers: */ 269/* Query timers: */
120extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); 270extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
121extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); 271extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
122 272
123#ifdef CONFIG_NO_IDLE_HZ
124extern ktime_t hrtimer_get_next_event(void); 273extern ktime_t hrtimer_get_next_event(void);
125#endif
126 274
275/*
276 * A timer is active when it is enqueued into the rbtree or the callback
277 * function is running.
278 */
127static inline int hrtimer_active(const struct hrtimer *timer) 279static inline int hrtimer_active(const struct hrtimer *timer)
128{ 280{
129 return rb_parent(&timer->node) != &timer->node; 281 return timer->state != HRTIMER_STATE_INACTIVE;
282}
283
284/*
285 * Helper function to check whether the timer is on one of the queues
286 */
287static inline int hrtimer_is_queued(struct hrtimer *timer)
288{
289 return timer->state &
290 (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
130} 291}
131 292
132/* Forward a hrtimer so it expires after now: */ 293/* Forward a hrtimer so it expires after now: */
@@ -149,4 +310,53 @@ extern void hrtimer_run_queues(void);
149/* Bootup initialization: */ 310/* Bootup initialization: */
150extern void __init hrtimers_init(void); 311extern void __init hrtimers_init(void);
151 312
313#if BITS_PER_LONG < 64
314extern unsigned long ktime_divns(const ktime_t kt, s64 div);
315#else /* BITS_PER_LONG < 64 */
316# define ktime_divns(kt, div) (unsigned long)((kt).tv64 / (div))
317#endif
318
319/* Show pending timers: */
320extern void sysrq_timer_list_show(void);
321
322/*
323 * Timer-statistics info:
324 */
325#ifdef CONFIG_TIMER_STATS
326
327extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
328 void *timerf, char * comm);
329
330static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
331{
332 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
333 timer->function, timer->start_comm);
334}
335
336extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
337 void *addr);
338
339static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
340{
341 __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
342}
343
344static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
345{
346 timer->start_site = NULL;
347}
348#else
349static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
350{
351}
352
353static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
354{
355}
356
357static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
358{
359}
360#endif
361
152#endif 362#endif
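
A usage sketch of the reworked API: callbacks now return enum hrtimer_restart, and start modes use the renamed HRTIMER_MODE_* constants. The 100 us expiry is an arbitrary example value:

    static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
    {
            /* do the work; HRTIMER_NORESTART leaves the timer inactive */
            return HRTIMER_NORESTART;
    }

    static struct hrtimer example_timer;

    static void example_arm(void)
    {
            hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            example_timer.function = example_timer_fn;
            hrtimer_start(&example_timer, ktime_set(0, 100000),
                          HRTIMER_MODE_REL);
    }
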
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5a8ba0b8ccba..e5ea1411050b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -42,6 +42,8 @@
42 * IRQF_SHARED - allow sharing the irq among several devices 42 * IRQF_SHARED - allow sharing the irq among several devices
43 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur 43 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt 44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
45 * IRQF_PERCPU - Interrupt is per cpu
46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
45 */ 47 */
46#define IRQF_DISABLED 0x00000020 48#define IRQF_DISABLED 0x00000020
47#define IRQF_SAMPLE_RANDOM 0x00000040 49#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -49,6 +51,7 @@
49#define IRQF_PROBE_SHARED 0x00000100 51#define IRQF_PROBE_SHARED 0x00000100
50#define IRQF_TIMER 0x00000200 52#define IRQF_TIMER 0x00000200
51#define IRQF_PERCPU 0x00000400 53#define IRQF_PERCPU 0x00000400
54#define IRQF_NOBALANCING 0x00000800
52 55
53/* 56/*
54 * Migration helpers. Scheduled for removal in 1/2007 57 * Migration helpers. Scheduled for removal in 1/2007
@@ -239,6 +242,9 @@ enum
239 BLOCK_SOFTIRQ, 242 BLOCK_SOFTIRQ,
240 TASKLET_SOFTIRQ, 243 TASKLET_SOFTIRQ,
241 SCHED_SOFTIRQ, 244 SCHED_SOFTIRQ,
245#ifdef CONFIG_HIGH_RES_TIMERS
246 HRTIMER_SOFTIRQ,
247#endif
242}; 248};
243 249
244/* softirq mask and active fields moved to irq_cpustat_t in 250/* softirq mask and active fields moved to irq_cpustat_t in
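
A driver opts out of irq balancing purely through the request flag; the device name and irq number below are placeholders:

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            /* acknowledge the hardware, do minimal work */
            return IRQ_HANDLED;
    }

    static int example_setup_irq(unsigned int irq)
    {
            /* IRQF_NOBALANCING keeps the balancers away from this line */
            return request_irq(irq, example_isr, IRQF_NOBALANCING,
                               "example-dev", NULL);
    }
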
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 5504b671357f..1939d42c21d2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -31,7 +31,7 @@ typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
31/* 31/*
32 * IRQ line status. 32 * IRQ line status.
33 * 33 *
34 * Bits 0-16 are reserved for the IRQF_* bits in linux/interrupt.h 34 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
35 * 35 *
36 * IRQ types 36 * IRQ types
37 */ 37 */
@@ -45,28 +45,30 @@ typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
45#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ 45#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */
46 46
47/* Internal flags */ 47/* Internal flags */
48#define IRQ_INPROGRESS 0x00010000 /* IRQ handler active - do not enter! */ 48#define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */
49#define IRQ_DISABLED 0x00020000 /* IRQ disabled - do not enter! */ 49#define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */
50#define IRQ_PENDING 0x00040000 /* IRQ pending - replay on enable */ 50#define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */
51#define IRQ_REPLAY 0x00080000 /* IRQ has been replayed but not acked yet */ 51#define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */
52#define IRQ_AUTODETECT 0x00100000 /* IRQ is being autodetected */ 52#define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */
53#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */ 53#define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */
54#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */ 54#define IRQ_LEVEL 0x00004000 /* IRQ level triggered */
55#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */ 55#define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */
56#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */ 56#define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */
57#define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */
58#define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */
59#define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */
60#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */
61#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
62#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
63
57#ifdef CONFIG_IRQ_PER_CPU 64#ifdef CONFIG_IRQ_PER_CPU
58# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 65# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
66# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
59#else 67#else
60# define CHECK_IRQ_PER_CPU(var) 0 68# define CHECK_IRQ_PER_CPU(var) 0
69# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
61#endif 70#endif
62 71
63#define IRQ_NOPROBE 0x02000000 /* IRQ is not valid for probing */
64#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */
65#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
66#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
67#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
68#define IRQ_MOVE_PENDING 0x40000000 /* need to re-target IRQ destination */
69
70struct proc_dir_entry; 72struct proc_dir_entry;
71struct msi_desc; 73struct msi_desc;
72 74
@@ -127,6 +129,7 @@ struct irq_chip {
127 * 129 *
128 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 130 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
129 * @chip: low level interrupt hardware access 131 * @chip: low level interrupt hardware access
132 * @msi_desc: MSI descriptor
130 * @handler_data: per-IRQ data for the irq_chip methods 133 * @handler_data: per-IRQ data for the irq_chip methods
131 * @chip_data: platform-specific per-chip private data for the chip 134 * @chip_data: platform-specific per-chip private data for the chip
132 * methods, to allow shared chip implementations 135 * methods, to allow shared chip implementations
@@ -235,11 +238,21 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
235 238
236#endif /* CONFIG_GENERIC_PENDING_IRQ */ 239#endif /* CONFIG_GENERIC_PENDING_IRQ */
237 240
241extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
242extern int irq_can_set_affinity(unsigned int irq);
243
238#else /* CONFIG_SMP */ 244#else /* CONFIG_SMP */
239 245
240#define move_native_irq(x) 246#define move_native_irq(x)
241#define move_masked_irq(x) 247#define move_masked_irq(x)
242 248
249static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
250{
251 return -EINVAL;
252}
253
254static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
255
243#endif /* CONFIG_SMP */ 256#endif /* CONFIG_SMP */
244 257
245#ifdef CONFIG_IRQBALANCE 258#ifdef CONFIG_IRQBALANCE
@@ -261,6 +274,11 @@ static inline int select_smp_affinity(unsigned int irq)
261 274
262extern int no_irq_affinity; 275extern int no_irq_affinity;
263 276
277static inline int irq_balancing_disabled(unsigned int irq)
278{
279 return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
280}
281
264/* Handle irq action chains: */ 282/* Handle irq action chains: */
265extern int handle_IRQ_event(unsigned int irq, struct irqaction *action); 283extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
266 284
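
The new helpers let generic code respect the balancing exclusions before retargeting an interrupt. A short sketch:

    /* pin 'irq' to CPU 0 unless it is per-cpu or explicitly unbalanced */
    static void example_pin_irq(unsigned int irq)
    {
            if (irq_can_set_affinity(irq) && !irq_balancing_disabled(irq))
                    irq_set_affinity(irq, cpumask_of_cpu(0));
    }
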
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 0ec6e28bccd2..c080f61fb024 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -142,13 +142,13 @@ static inline u64 get_jiffies_64(void)
142 * 142 *
143 * And some not so obvious. 143 * And some not so obvious.
144 * 144 *
145 * Note that we don't want to return MAX_LONG, because 145 * Note that we don't want to return LONG_MAX, because
146 * for various timeout reasons we often end up having 146 * for various timeout reasons we often end up having
147 * to wait "jiffies+1" in order to guarantee that we wait 147 * to wait "jiffies+1" in order to guarantee that we wait
148 * at _least_ "jiffies" - so "jiffies+1" had better still 148 * at _least_ "jiffies" - so "jiffies+1" had better still
149 * be positive. 149 * be positive.
150 */ 150 */
151#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1) 151#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
152 152
153/* 153/*
154 * We want to do realistic conversions of time so we need to use the same 154 * We want to do realistic conversions of time so we need to use the same
@@ -259,207 +259,23 @@ static inline u64 get_jiffies_64(void)
259#endif 259#endif
260 260
261/* 261/*
262 * Convert jiffies to milliseconds and back. 262 * Convert various time units to each other:
263 *
264 * Avoid unnecessary multiplications/divisions in the
265 * two most common HZ cases:
266 */
267static inline unsigned int jiffies_to_msecs(const unsigned long j)
268{
269#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
270 return (MSEC_PER_SEC / HZ) * j;
271#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
272 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
273#else
274 return (j * MSEC_PER_SEC) / HZ;
275#endif
276}
277
278static inline unsigned int jiffies_to_usecs(const unsigned long j)
279{
280#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
281 return (USEC_PER_SEC / HZ) * j;
282#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
283 return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
284#else
285 return (j * USEC_PER_SEC) / HZ;
286#endif
287}
288
289static inline unsigned long msecs_to_jiffies(const unsigned int m)
290{
291 if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
292 return MAX_JIFFY_OFFSET;
293#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
294 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
295#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
296 return m * (HZ / MSEC_PER_SEC);
297#else
298 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
299#endif
300}
301
302static inline unsigned long usecs_to_jiffies(const unsigned int u)
303{
304 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
305 return MAX_JIFFY_OFFSET;
306#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
307 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
308#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
309 return u * (HZ / USEC_PER_SEC);
310#else
311 return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
312#endif
313}
314
315/*
316 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
317 * that a remainder subtract here would not do the right thing as the
318 * resolution values don't fall on second boundaries. I.e. the line:
319 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
320 *
321 * Rather, we just shift the bits off the right.
322 *
323 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
324 * value to a scaled second value.
325 */
326static __inline__ unsigned long
327timespec_to_jiffies(const struct timespec *value)
328{
329 unsigned long sec = value->tv_sec;
330 long nsec = value->tv_nsec + TICK_NSEC - 1;
331
332 if (sec >= MAX_SEC_IN_JIFFIES){
333 sec = MAX_SEC_IN_JIFFIES;
334 nsec = 0;
335 }
336 return (((u64)sec * SEC_CONVERSION) +
337 (((u64)nsec * NSEC_CONVERSION) >>
338 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
339
340}
341
342static __inline__ void
343jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
344{
345 /*
346 * Convert jiffies to nanoseconds and separate with
347 * one divide.
348 */
349 u64 nsec = (u64)jiffies * TICK_NSEC;
350 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
351}
352
353/* Same for "timeval"
354 *
355 * Well, almost. The problem here is that the real system resolution is
356 * in nanoseconds and the value being converted is in micro seconds.
357 * Also for some machines (those that use HZ = 1024, in particular),
358 * there is a LARGE error in the tick size in microseconds.
359
360 * The solution we use is to do the rounding AFTER we convert the
361 * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
362 * Instruction wise, this should cost only an additional add with carry
363 * instruction above the way it was done above.
364 */
365static __inline__ unsigned long
366timeval_to_jiffies(const struct timeval *value)
367{
368 unsigned long sec = value->tv_sec;
369 long usec = value->tv_usec;
370
371 if (sec >= MAX_SEC_IN_JIFFIES){
372 sec = MAX_SEC_IN_JIFFIES;
373 usec = 0;
374 }
375 return (((u64)sec * SEC_CONVERSION) +
376 (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
377 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
378}
379
380static __inline__ void
381jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
382{
383 /*
384 * Convert jiffies to nanoseconds and separate with
385 * one divide.
386 */
387 u64 nsec = (u64)jiffies * TICK_NSEC;
388 long tv_usec;
389
390 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
391 tv_usec /= NSEC_PER_USEC;
392 value->tv_usec = tv_usec;
393}
394
395/*
396 * Convert jiffies/jiffies_64 to clock_t and back.
397 */ 263 */
398static inline clock_t jiffies_to_clock_t(long x) 264extern unsigned int jiffies_to_msecs(const unsigned long j);
399{ 265extern unsigned int jiffies_to_usecs(const unsigned long j);
400#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 266extern unsigned long msecs_to_jiffies(const unsigned int m);
401 return x / (HZ / USER_HZ); 267extern unsigned long usecs_to_jiffies(const unsigned int u);
402#else 268extern unsigned long timespec_to_jiffies(const struct timespec *value);
403 u64 tmp = (u64)x * TICK_NSEC; 269extern void jiffies_to_timespec(const unsigned long jiffies,
404 do_div(tmp, (NSEC_PER_SEC / USER_HZ)); 270 struct timespec *value);
405 return (long)tmp; 271extern unsigned long timeval_to_jiffies(const struct timeval *value);
406#endif 272extern void jiffies_to_timeval(const unsigned long jiffies,
407} 273 struct timeval *value);
408 274extern clock_t jiffies_to_clock_t(long x);
409static inline unsigned long clock_t_to_jiffies(unsigned long x) 275extern unsigned long clock_t_to_jiffies(unsigned long x);
410{ 276extern u64 jiffies_64_to_clock_t(u64 x);
411#if (HZ % USER_HZ)==0 277extern u64 nsec_to_clock_t(u64 x);
412 if (x >= ~0UL / (HZ / USER_HZ)) 278
413 return ~0UL; 279#define TIMESTAMP_SIZE 30
414 return x * (HZ / USER_HZ);
415#else
416 u64 jif;
417
418 /* Don't worry about loss of precision here .. */
419 if (x >= ~0UL / HZ * USER_HZ)
420 return ~0UL;
421
422 /* .. but do try to contain it here */
423 jif = x * (u64) HZ;
424 do_div(jif, USER_HZ);
425 return jif;
426#endif
427}
428
429static inline u64 jiffies_64_to_clock_t(u64 x)
430{
431#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
432 do_div(x, HZ / USER_HZ);
433#else
434 /*
435 * There are better ways that don't overflow early,
436 * but even this doesn't overflow in hundreds of years
437 * in 64 bits, so..
438 */
439 x *= TICK_NSEC;
440 do_div(x, (NSEC_PER_SEC / USER_HZ));
441#endif
442 return x;
443}
444
445static inline u64 nsec_to_clock_t(u64 x)
446{
447#if (NSEC_PER_SEC % USER_HZ) == 0
448 do_div(x, (NSEC_PER_SEC / USER_HZ));
449#elif (USER_HZ % 512) == 0
450 x *= USER_HZ/512;
451 do_div(x, (NSEC_PER_SEC / 512));
452#else
453 /*
454 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
455 * overflow after 64.99 years.
456 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
457 */
458 x *= 9;
459 do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
460 / USER_HZ));
461#endif
462 return x;
463}
464 280
465#endif 281#endif
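
Callers are unaffected by the conversion helpers moving out of line; the usual polling-timeout pattern keeps working. A sketch, where example_device_ready() and the 250 ms budget are placeholders:

    static int example_wait_ready(void)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(250);

            while (!example_device_ready()) {       /* hypothetical poll */
                    if (time_after(jiffies, timeout))
                            return -ETIMEDOUT;
                    cpu_relax();
            }
            return 0;
    }
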
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 7444a6326231..c68c7ac6b232 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -261,8 +261,7 @@ static inline s64 ktime_to_ns(const ktime_t kt)
261 * idea of the (in)accuracy of timers. Timer values are rounded up to 261 * idea of the (in)accuracy of timers. Timer values are rounded up to
262 * this resolution value. 262 * this resolution value.
263 */ 263 */
264#define KTIME_REALTIME_RES (ktime_t){ .tv64 = TICK_NSEC } 264#define KTIME_LOW_RES (ktime_t){ .tv64 = TICK_NSEC }
265#define KTIME_MONOTONIC_RES (ktime_t){ .tv64 = TICK_NSEC }
266 265
267/* Get the monotonic time in timespec format: */ 266/* Get the monotonic time in timespec format: */
268extern void ktime_get_ts(struct timespec *ts); 267extern void ktime_get_ts(struct timespec *ts);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 596e0c18887d..b870b20df43c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -172,6 +172,7 @@ enum {
172 ATA_FLAG_DEBUGMSG = (1 << 13), 172 ATA_FLAG_DEBUGMSG = (1 << 13),
173 ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */ 173 ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
174 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ 174 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
175 ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
175 176
176 /* The following flag belongs to ap->pflags but is kept in 177 /* The following flag belongs to ap->pflags but is kept in
177 * ap->flags because it's referenced in many LLDs and will be 178 * ap->flags because it's referenced in many LLDs and will be
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index db05182ca0e8..1be5be88debe 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -105,12 +105,11 @@ struct nfs4_ace {
105 uint32_t access_mask; 105 uint32_t access_mask;
106 int whotype; 106 int whotype;
107 uid_t who; 107 uid_t who;
108 struct list_head l_ace;
109}; 108};
110 109
111struct nfs4_acl { 110struct nfs4_acl {
112 uint32_t naces; 111 uint32_t naces;
113 struct list_head ace_head; 112 struct nfs4_ace aces[0];
114}; 113};
115 114
116typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; 115typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
index 22aff4d01f20..409b6e02f337 100644
--- a/include/linux/nfs4_acl.h
+++ b/include/linux/nfs4_acl.h
@@ -39,9 +39,12 @@
39 39
40#include <linux/posix_acl.h> 40#include <linux/posix_acl.h>
41 41
42struct nfs4_acl *nfs4_acl_new(void); 42/* Maximum ACL we'll accept from client; chosen (somewhat arbitrarily) to
43void nfs4_acl_free(struct nfs4_acl *); 43 * fit in a page: */
44int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t); 44#define NFS4_ACL_MAX 170
45
46struct nfs4_acl *nfs4_acl_new(int);
47void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
45int nfs4_acl_get_whotype(char *, u32); 48int nfs4_acl_get_whotype(char *, u32);
46int nfs4_acl_write_who(int who, char *p); 49int nfs4_acl_write_who(int who, char *p);
47int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group, 50int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
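
The switch from a list head to a flexible array member means one allocation sized up front covers the whole ACL, which is presumably why nfs4_acl_new() gained a count argument and nfs4_acl_add_ace() can no longer fail. A sketch of the implied allocation, illustrative rather than the actual nfsd code:

    static struct nfs4_acl *example_acl_alloc(int n)
    {
            struct nfs4_acl *acl;

            acl = kmalloc(sizeof(*acl) + n * sizeof(struct nfs4_ace),
                          GFP_KERNEL);
            if (acl != NULL)
                    acl->naces = 0; /* aces[] entries are filled in later */
            return acl;
    }
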
diff --git a/include/linux/tick.h b/include/linux/tick.h
new file mode 100644
index 000000000000..9a7252e089b9
--- /dev/null
+++ b/include/linux/tick.h
@@ -0,0 +1,109 @@
1/* linux/include/linux/tick.h
2 *
3 * This file contains the structure definitions for tick related functions
4 *
5 */
6#ifndef _LINUX_TICK_H
7#define _LINUX_TICK_H
8
9#include <linux/clockchips.h>
10
11#ifdef CONFIG_GENERIC_CLOCKEVENTS
12
13enum tick_device_mode {
14 TICKDEV_MODE_PERIODIC,
15 TICKDEV_MODE_ONESHOT,
16};
17
18struct tick_device {
19 struct clock_event_device *evtdev;
20 enum tick_device_mode mode;
21};
22
23enum tick_nohz_mode {
24 NOHZ_MODE_INACTIVE,
25 NOHZ_MODE_LOWRES,
26 NOHZ_MODE_HIGHRES,
27};
28
29/**
30 * struct tick_sched - sched tick emulation and no idle tick control/stats
31 * @sched_timer: hrtimer to schedule the periodic tick in high
32 * resolution mode
33 * @idle_tick: Store the last idle tick expiry time when the tick
34 * timer is modified for idle sleeps. This is necessary
35 * to resume the tick timer operation in the timeline
36 * when the CPU returns from idle
37 * @tick_stopped: Indicator that the idle tick has been stopped
38 * @idle_jiffies: jiffies at the entry to idle for idle time accounting
39 * @idle_calls: Total number of idle calls
40 * @idle_sleeps: Number of idle calls, where the sched tick was stopped
41 * @idle_entrytime: Time when the idle call was entered
42 * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
43 */
44struct tick_sched {
45 struct hrtimer sched_timer;
46 unsigned long check_clocks;
47 enum tick_nohz_mode nohz_mode;
48 ktime_t idle_tick;
49 int tick_stopped;
50 unsigned long idle_jiffies;
51 unsigned long idle_calls;
52 unsigned long idle_sleeps;
53 ktime_t idle_entrytime;
54 ktime_t idle_sleeptime;
55 unsigned long last_jiffies;
56 unsigned long next_jiffies;
57 ktime_t idle_expires;
58};
59
60extern void __init tick_init(void);
61extern int tick_is_oneshot_available(void);
62extern struct tick_device *tick_get_device(int cpu);
63
64# ifdef CONFIG_HIGH_RES_TIMERS
65extern int tick_init_highres(void);
66extern int tick_program_event(ktime_t expires, int force);
67extern void tick_setup_sched_timer(void);
68extern void tick_cancel_sched_timer(int cpu);
69# else
70static inline void tick_cancel_sched_timer(int cpu) { }
71# endif /* HIGHRES */
72
73# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
74extern struct tick_device *tick_get_broadcast_device(void);
75extern cpumask_t *tick_get_broadcast_mask(void);
76
77# ifdef CONFIG_TICK_ONESHOT
78extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
79# endif
80
81# endif /* BROADCAST */
82
83# ifdef CONFIG_TICK_ONESHOT
84extern void tick_clock_notify(void);
85extern int tick_check_oneshot_change(int allow_nohz);
86extern struct tick_sched *tick_get_tick_sched(int cpu);
87# else
88static inline void tick_clock_notify(void) { }
89static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
90# endif
91
92#else /* CONFIG_GENERIC_CLOCKEVENTS */
93static inline void tick_init(void) { }
94static inline void tick_cancel_sched_timer(int cpu) { }
95static inline void tick_clock_notify(void) { }
96static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
97#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
98
99# ifdef CONFIG_NO_HZ
100extern void tick_nohz_stop_sched_tick(void);
101extern void tick_nohz_restart_sched_tick(void);
102extern void tick_nohz_update_jiffies(void);
103# else
104static inline void tick_nohz_stop_sched_tick(void) { }
105static inline void tick_nohz_restart_sched_tick(void) { }
106static inline void tick_nohz_update_jiffies(void) { }
107# endif /* !NO_HZ */
108
109#endif
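For orientation, the NO_HZ hooks at the bottom of this header are meant to bracket the architecture's idle loop: the tick is stopped on the way into idle and restarted (with jiffies brought up to date) on the way out. A hedged sketch of the expected call pattern, not taken from any architecture in this series:

#include <linux/sched.h>
#include <linux/tick.h>

/* Sketch of an arch idle loop using the NO_HZ hooks (illustrative). */
static void example_cpu_idle(void)
{
	for (;;) {
		tick_nohz_stop_sched_tick();	/* stop the periodic tick */
		while (!need_resched())
			cpu_relax();		/* a real arch halts here */
		tick_nohz_restart_sched_tick();	/* tick + jiffies catch up */
		schedule();
	}
}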
diff --git a/include/linux/time.h b/include/linux/time.h
index eceb1a59b078..8ea8dea713c7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -92,6 +92,7 @@ extern struct timespec xtime;
92extern struct timespec wall_to_monotonic; 92extern struct timespec wall_to_monotonic;
93extern seqlock_t xtime_lock __attribute__((weak)); 93extern seqlock_t xtime_lock __attribute__((weak));
94 94
95extern unsigned long read_persistent_clock(void);
95void timekeeping_init(void); 96void timekeeping_init(void);
96 97
97static inline unsigned long get_seconds(void) 98static inline unsigned long get_seconds(void)
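The new declaration suggests a per-architecture hook that timekeeping_init() can use to seed the wall clock from battery-backed hardware. A trivial sketch under the assumption that the platform exposes an RTC read in seconds (get_rtc_seconds() is hypothetical):

/* Hypothetical arch implementation; returns seconds since the epoch. */
unsigned long read_persistent_clock(void)
{
	return get_rtc_seconds();	/* 0 if no persistent clock exists */
}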
diff --git a/include/linux/timer.h b/include/linux/timer.h
index fb5edaaf0ebd..719113b652dd 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -2,6 +2,7 @@
2#define _LINUX_TIMER_H 2#define _LINUX_TIMER_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/ktime.h>
5#include <linux/spinlock.h> 6#include <linux/spinlock.h>
6#include <linux/stddef.h> 7#include <linux/stddef.h>
7 8
@@ -15,6 +16,11 @@ struct timer_list {
15 unsigned long data; 16 unsigned long data;
16 17
17 struct tvec_t_base_s *base; 18 struct tvec_t_base_s *base;
19#ifdef CONFIG_TIMER_STATS
20 void *start_site;
21 char start_comm[16];
22 int start_pid;
23#endif
18}; 24};
19 25
20extern struct tvec_t_base_s boot_tvec_bases; 26extern struct tvec_t_base_s boot_tvec_bases;
@@ -61,7 +67,65 @@ extern int del_timer(struct timer_list * timer);
61extern int __mod_timer(struct timer_list *timer, unsigned long expires); 67extern int __mod_timer(struct timer_list *timer, unsigned long expires);
62extern int mod_timer(struct timer_list *timer, unsigned long expires); 68extern int mod_timer(struct timer_list *timer, unsigned long expires);
63 69
70/*
71 * Returns the time of the next timer-wheel timeout (in absolute jiffies);
72 * locks the timer base:
73 */
64extern unsigned long next_timer_interrupt(void); 74extern unsigned long next_timer_interrupt(void);
75/*
76 * Returns the time of the next timer-wheel timeout (in absolute jiffies);
77 * locks the timer base and compares against the given
78 * jiffies value.
79 */
80extern unsigned long get_next_timer_interrupt(unsigned long now);
81
82/*
83 * Timer-statistics info:
84 */
85#ifdef CONFIG_TIMER_STATS
86
87extern void init_timer_stats(void);
88
89extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
90 void *timerf, char * comm);
91
92static inline void timer_stats_account_timer(struct timer_list *timer)
93{
94 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
95 timer->function, timer->start_comm);
96}
97
98extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
99 void *addr);
100
101static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
102{
103 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
104}
105
106static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
107{
108 timer->start_site = NULL;
109}
110#else
111static inline void init_timer_stats(void)
112{
113}
114
115static inline void timer_stats_account_timer(struct timer_list *timer)
116{
117}
118
119static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
120{
121}
122
123static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
124{
125}
126#endif
127
128extern void delayed_work_timer_fn(unsigned long __data);
65 129
66/** 130/**
67 * add_timer - start a timer 131 * add_timer - start a timer
@@ -96,7 +160,7 @@ static inline void add_timer(struct timer_list *timer)
96extern void init_timers(void); 160extern void init_timers(void);
97extern void run_local_timers(void); 161extern void run_local_timers(void);
98struct hrtimer; 162struct hrtimer;
99extern int it_real_fn(struct hrtimer *); 163extern enum hrtimer_restart it_real_fn(struct hrtimer *);
100 164
101unsigned long __round_jiffies(unsigned long j, int cpu); 165unsigned long __round_jiffies(unsigned long j, int cpu);
102unsigned long __round_jiffies_relative(unsigned long j, int cpu); 166unsigned long __round_jiffies_relative(unsigned long j, int cpu);
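The CONFIG_TIMER_STATS hooks above compile to no-ops unless the feature is enabled; their intended call sites are in the timer core, roughly as sketched below (a simplified view, not code from this patch):

#include <linux/timer.h>

static void example_arm(struct timer_list *t, unsigned long expires)
{
	/* Records caller address, comm and pid in the timer (if enabled) */
	timer_stats_timer_set_start_info(t);
	mod_timer(t, expires);
}

static void example_expire(struct timer_list *t)
{
	/* Accounts the expiry against the recorded start site */
	timer_stats_account_timer(t);
	t->function(t->data);
}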
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9a24e500c311..da929dbbea2a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -286,6 +286,13 @@ static inline void time_interpolator_update(long delta_nsec)
286 286
287#define TICK_LENGTH_SHIFT 32 287#define TICK_LENGTH_SHIFT 32
288 288
289#ifdef CONFIG_NO_HZ
290#define NTP_INTERVAL_FREQ (2)
291#else
292#define NTP_INTERVAL_FREQ (HZ)
293#endif
294#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
295
289/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ 296/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
290extern u64 current_tick_length(void); 297extern u64 current_tick_length(void);
291 298
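The effect of the new constants is easiest to see with numbers: NTP accumulation is decoupled from HZ, so a NO_HZ kernel, which cannot rely on a tick every jiffy, accumulates in fixed 500 ms chunks instead. A worked comparison, assuming HZ=250 on the non-NO_HZ side:

/*
 * CONFIG_NO_HZ:   NTP_INTERVAL_FREQ = 2
 *                 NTP_INTERVAL_LENGTH = 1,000,000,000 / 2   = 500,000,000 ns
 * !CONFIG_NO_HZ:  NTP_INTERVAL_FREQ = HZ (e.g. 250)
 *                 NTP_INTERVAL_LENGTH = 1,000,000,000 / 250 =   4,000,000 ns
 */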
diff --git a/init/main.c b/init/main.c
index 2421e1544127..953500b02ac4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -40,6 +40,7 @@
40#include <linux/cpu.h> 40#include <linux/cpu.h>
41#include <linux/cpuset.h> 41#include <linux/cpuset.h>
42#include <linux/efi.h> 42#include <linux/efi.h>
43#include <linux/tick.h>
43#include <linux/taskstats_kern.h> 44#include <linux/taskstats_kern.h>
44#include <linux/delayacct.h> 45#include <linux/delayacct.h>
45#include <linux/unistd.h> 46#include <linux/unistd.h>
@@ -515,6 +516,7 @@ asmlinkage void __init start_kernel(void)
515 * enable them 516 * enable them
516 */ 517 */
517 lock_kernel(); 518 lock_kernel();
519 tick_init();
518 boot_cpu_init(); 520 boot_cpu_init();
519 page_address_init(); 521 page_address_init();
520 printk(KERN_NOTICE); 522 printk(KERN_NOTICE);
diff --git a/kernel/fork.c b/kernel/fork.c
index 0b6293d94d96..d154cc786489 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -858,7 +858,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
858 init_sigpending(&sig->shared_pending); 858 init_sigpending(&sig->shared_pending);
859 INIT_LIST_HEAD(&sig->posix_timers); 859 INIT_LIST_HEAD(&sig->posix_timers);
860 860
861 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL); 861 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
862 sig->it_real_incr.tv64 = 0; 862 sig->it_real_incr.tv64 = 0;
863 sig->real_timer.function = it_real_fn; 863 sig->real_timer.function = it_real_fn;
864 sig->tsk = tsk; 864 sig->tsk = tsk;
diff --git a/kernel/futex.c b/kernel/futex.c
index 5a737de857d3..e749e7df14b1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1134,7 +1134,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1134 1134
1135 if (sec != MAX_SCHEDULE_TIMEOUT) { 1135 if (sec != MAX_SCHEDULE_TIMEOUT) {
1136 to = &timeout; 1136 to = &timeout;
1137 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS); 1137 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
1138 hrtimer_init_sleeper(to, current); 1138 hrtimer_init_sleeper(to, current);
1139 to->timer.expires = ktime_set(sec, nsec); 1139 to->timer.expires = ktime_set(sec, nsec);
1140 } 1140 }
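The futex hunk above is an instance of the hrtimer_sleeper pattern, now spelled with the renamed HRTIMER_MODE_ABS constant. A condensed, illustrative version of the same pattern (simplified from the futex code, not part of the patch):

#include <linux/hrtimer.h>
#include <linux/sched.h>

/* Sleep until an absolute CLOCK_REALTIME deadline (illustrative). */
static void example_sleep_until(ktime_t deadline)
{
	struct hrtimer_sleeper to;

	hrtimer_init(&to.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	hrtimer_init_sleeper(&to, current);
	to.timer.expires = deadline;

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&to.timer, to.timer.expires, HRTIMER_MODE_ABS);
	if (to.task)		/* NULL once the wakeup already fired */
		schedule();
	hrtimer_cancel(&to.timer);
	__set_current_state(TASK_RUNNING);
}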
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f44e499e8fca..476cb0c0b4a4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * linux/kernel/hrtimer.c 2 * linux/kernel/hrtimer.c
3 * 3 *
4 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> 4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar 5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
6 * 7 *
7 * High-resolution kernel timers 8 * High-resolution kernel timers
8 * 9 *
@@ -31,12 +32,17 @@
31 */ 32 */
32 33
33#include <linux/cpu.h> 34#include <linux/cpu.h>
35#include <linux/irq.h>
34#include <linux/module.h> 36#include <linux/module.h>
35#include <linux/percpu.h> 37#include <linux/percpu.h>
36#include <linux/hrtimer.h> 38#include <linux/hrtimer.h>
37#include <linux/notifier.h> 39#include <linux/notifier.h>
38#include <linux/syscalls.h> 40#include <linux/syscalls.h>
41#include <linux/kallsyms.h>
39#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/tick.h>
44#include <linux/seq_file.h>
45#include <linux/err.h>
40 46
41#include <asm/uaccess.h> 47#include <asm/uaccess.h>
42 48
@@ -45,7 +51,7 @@
45 * 51 *
46 * returns the time in ktime_t format 52 * returns the time in ktime_t format
47 */ 53 */
48static ktime_t ktime_get(void) 54ktime_t ktime_get(void)
49{ 55{
50 struct timespec now; 56 struct timespec now;
51 57
@@ -59,7 +65,7 @@ static ktime_t ktime_get(void)
59 * 65 *
60 * returns the time in ktime_t format 66 * returns the time in ktime_t format
61 */ 67 */
62static ktime_t ktime_get_real(void) 68ktime_t ktime_get_real(void)
63{ 69{
64 struct timespec now; 70 struct timespec now;
65 71
@@ -79,21 +85,22 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
79 * This ensures that we capture erroneous accesses to these clock ids 85 * This ensures that we capture erroneous accesses to these clock ids
80 * rather than moving them into the range of valid clock id's. 86 * rather than moving them into the range of valid clock id's.
81 */ 87 */
82 88DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
83#define MAX_HRTIMER_BASES 2
84
85static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
86{ 89{
90
91 .clock_base =
87 { 92 {
88 .index = CLOCK_REALTIME, 93 {
89 .get_time = &ktime_get_real, 94 .index = CLOCK_REALTIME,
90 .resolution = KTIME_REALTIME_RES, 95 .get_time = &ktime_get_real,
91 }, 96 .resolution = KTIME_LOW_RES,
92 { 97 },
93 .index = CLOCK_MONOTONIC, 98 {
94 .get_time = &ktime_get, 99 .index = CLOCK_MONOTONIC,
95 .resolution = KTIME_MONOTONIC_RES, 100 .get_time = &ktime_get,
96 }, 101 .resolution = KTIME_LOW_RES,
102 },
103 }
97}; 104};
98 105
99/** 106/**
@@ -125,20 +132,35 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
125 * Get the coarse grained time at the softirq based on xtime and 132 * Get the coarse grained time at the softirq based on xtime and
126 * wall_to_monotonic. 133 * wall_to_monotonic.
127 */ 134 */
128static void hrtimer_get_softirq_time(struct hrtimer_base *base) 135static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
129{ 136{
130 ktime_t xtim, tomono; 137 ktime_t xtim, tomono;
138 struct timespec xts;
131 unsigned long seq; 139 unsigned long seq;
132 140
133 do { 141 do {
134 seq = read_seqbegin(&xtime_lock); 142 seq = read_seqbegin(&xtime_lock);
135 xtim = timespec_to_ktime(xtime); 143#ifdef CONFIG_NO_HZ
136 tomono = timespec_to_ktime(wall_to_monotonic); 144 getnstimeofday(&xts);
137 145#else
146 xts = xtime;
147#endif
138 } while (read_seqretry(&xtime_lock, seq)); 148 } while (read_seqretry(&xtime_lock, seq));
139 149
140 base[CLOCK_REALTIME].softirq_time = xtim; 150 xtim = timespec_to_ktime(xts);
141 base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono); 151 tomono = timespec_to_ktime(wall_to_monotonic);
152 base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
153 base->clock_base[CLOCK_MONOTONIC].softirq_time =
154 ktime_add(xtim, tomono);
155}
156
157/*
158 * Helper function to check, whether the timer is running the callback
159 * function
160 */
161static inline int hrtimer_callback_running(struct hrtimer *timer)
162{
163 return timer->state & HRTIMER_STATE_CALLBACK;
142} 164}
143 165
144/* 166/*
@@ -147,8 +169,6 @@ static void hrtimer_get_softirq_time(struct hrtimer_base *base)
147 */ 169 */
148#ifdef CONFIG_SMP 170#ifdef CONFIG_SMP
149 171
150#define set_curr_timer(b, t) do { (b)->curr_timer = (t); } while (0)
151
152/* 172/*
153 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock 173 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
154 * means that all timers which are tied to this base via timer->base are 174 * means that all timers which are tied to this base via timer->base are
@@ -161,19 +181,20 @@ static void hrtimer_get_softirq_time(struct hrtimer_base *base)
161 * possible to set timer->base = NULL and drop the lock: the timer remains 181 * possible to set timer->base = NULL and drop the lock: the timer remains
162 * locked. 182 * locked.
163 */ 183 */
164static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer, 184static
165 unsigned long *flags) 185struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
186 unsigned long *flags)
166{ 187{
167 struct hrtimer_base *base; 188 struct hrtimer_clock_base *base;
168 189
169 for (;;) { 190 for (;;) {
170 base = timer->base; 191 base = timer->base;
171 if (likely(base != NULL)) { 192 if (likely(base != NULL)) {
172 spin_lock_irqsave(&base->lock, *flags); 193 spin_lock_irqsave(&base->cpu_base->lock, *flags);
173 if (likely(base == timer->base)) 194 if (likely(base == timer->base))
174 return base; 195 return base;
175 /* The timer has migrated to another CPU: */ 196 /* The timer has migrated to another CPU: */
176 spin_unlock_irqrestore(&base->lock, *flags); 197 spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
177 } 198 }
178 cpu_relax(); 199 cpu_relax();
179 } 200 }
@@ -182,12 +203,14 @@ static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
182/* 203/*
183 * Switch the timer base to the current CPU when possible. 204 * Switch the timer base to the current CPU when possible.
184 */ 205 */
185static inline struct hrtimer_base * 206static inline struct hrtimer_clock_base *
186switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base) 207switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
187{ 208{
188 struct hrtimer_base *new_base; 209 struct hrtimer_clock_base *new_base;
210 struct hrtimer_cpu_base *new_cpu_base;
189 211
190 new_base = &__get_cpu_var(hrtimer_bases)[base->index]; 212 new_cpu_base = &__get_cpu_var(hrtimer_bases);
213 new_base = &new_cpu_base->clock_base[base->index];
191 214
192 if (base != new_base) { 215 if (base != new_base) {
193 /* 216 /*
@@ -199,13 +222,13 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
199 * completed. There is no conflict as we hold the lock until 222 * completed. There is no conflict as we hold the lock until
200 * the timer is enqueued. 223 * the timer is enqueued.
201 */ 224 */
202 if (unlikely(base->curr_timer == timer)) 225 if (unlikely(hrtimer_callback_running(timer)))
203 return base; 226 return base;
204 227
205 /* See the comment in lock_timer_base() */ 228 /* See the comment in lock_timer_base() */
206 timer->base = NULL; 229 timer->base = NULL;
207 spin_unlock(&base->lock); 230 spin_unlock(&base->cpu_base->lock);
208 spin_lock(&new_base->lock); 231 spin_lock(&new_base->cpu_base->lock);
209 timer->base = new_base; 232 timer->base = new_base;
210 } 233 }
211 return new_base; 234 return new_base;
@@ -213,19 +236,17 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
213 236
214#else /* CONFIG_SMP */ 237#else /* CONFIG_SMP */
215 238
216#define set_curr_timer(b, t) do { } while (0) 239static inline struct hrtimer_clock_base *
217
218static inline struct hrtimer_base *
219lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) 240lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
220{ 241{
221 struct hrtimer_base *base = timer->base; 242 struct hrtimer_clock_base *base = timer->base;
222 243
223 spin_lock_irqsave(&base->lock, *flags); 244 spin_lock_irqsave(&base->cpu_base->lock, *flags);
224 245
225 return base; 246 return base;
226} 247}
227 248
228#define switch_hrtimer_base(t, b) (b) 249# define switch_hrtimer_base(t, b) (b)
229 250
230#endif /* !CONFIG_SMP */ 251#endif /* !CONFIG_SMP */
231 252
@@ -256,15 +277,12 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
256 277
257 return ktime_add(kt, tmp); 278 return ktime_add(kt, tmp);
258} 279}
259
260#else /* CONFIG_KTIME_SCALAR */
261
262# endif /* !CONFIG_KTIME_SCALAR */ 280# endif /* !CONFIG_KTIME_SCALAR */
263 281
264/* 282/*
265 * Divide a ktime value by a nanosecond value 283 * Divide a ktime value by a nanosecond value
266 */ 284 */
267static unsigned long ktime_divns(const ktime_t kt, s64 div) 285unsigned long ktime_divns(const ktime_t kt, s64 div)
268{ 286{
269 u64 dclc, inc, dns; 287 u64 dclc, inc, dns;
270 int sft = 0; 288 int sft = 0;
@@ -281,18 +299,311 @@ static unsigned long ktime_divns(const ktime_t kt, s64 div)
281 299
282 return (unsigned long) dclc; 300 return (unsigned long) dclc;
283} 301}
284
285#else /* BITS_PER_LONG < 64 */
286# define ktime_divns(kt, div) (unsigned long)((kt).tv64 / (div))
287#endif /* BITS_PER_LONG >= 64 */ 302#endif /* BITS_PER_LONG >= 64 */
288 303
304/* High resolution timer related functions */
305#ifdef CONFIG_HIGH_RES_TIMERS
306
307/*
308 * High resolution timer enabled?
309 */
310static int hrtimer_hres_enabled __read_mostly = 1;
311
312/*
313 * Enable / Disable high resolution mode
314 */
315static int __init setup_hrtimer_hres(char *str)
316{
317 if (!strcmp(str, "off"))
318 hrtimer_hres_enabled = 0;
319 else if (!strcmp(str, "on"))
320 hrtimer_hres_enabled = 1;
321 else
322 return 0;
323 return 1;
324}
325
326__setup("highres=", setup_hrtimer_hres);
327
328/*
329 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
330 */
331static inline int hrtimer_is_hres_enabled(void)
332{
333 return hrtimer_hres_enabled;
334}
335
336/*
337 * Is the high resolution mode active?
338 */
339static inline int hrtimer_hres_active(void)
340{
341 return __get_cpu_var(hrtimer_bases).hres_active;
342}
343
344/*
345 * Reprogram the event source, checking both queues for the
346 * next event.
347 * Called with interrupts disabled and base->lock held
348 */
349static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
350{
351 int i;
352 struct hrtimer_clock_base *base = cpu_base->clock_base;
353 ktime_t expires;
354
355 cpu_base->expires_next.tv64 = KTIME_MAX;
356
357 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
358 struct hrtimer *timer;
359
360 if (!base->first)
361 continue;
362 timer = rb_entry(base->first, struct hrtimer, node);
363 expires = ktime_sub(timer->expires, base->offset);
364 if (expires.tv64 < cpu_base->expires_next.tv64)
365 cpu_base->expires_next = expires;
366 }
367
368 if (cpu_base->expires_next.tv64 != KTIME_MAX)
369 tick_program_event(cpu_base->expires_next, 1);
370}
371
372/*
373 * Shared reprogramming for clock_realtime and clock_monotonic
374 *
375 * When a timer is enqueued and expires earlier than the already enqueued
376 * timers, we have to check whether it expires earlier than the timer for
377 * which the clock event device was armed.
378 *
379 * Called with interrupts disabled and base->cpu_base.lock held
380 */
381static int hrtimer_reprogram(struct hrtimer *timer,
382 struct hrtimer_clock_base *base)
383{
384 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
385 ktime_t expires = ktime_sub(timer->expires, base->offset);
386 int res;
387
388 /*
389 * When the callback is running, we do not reprogram the clock event
390 * device. The timer callback is either running on a different CPU or
391 * the callback is executed in the hrtimer_interrupt context. The
392 * reprogramming is handled either by the softirq, which called the
393 * callback or at the end of the hrtimer_interrupt.
394 */
395 if (hrtimer_callback_running(timer))
396 return 0;
397
398 if (expires.tv64 >= expires_next->tv64)
399 return 0;
400
401 /*
402 * Clockevents returns -ETIME when the event was in the past.
403 */
404 res = tick_program_event(expires, 0);
405 if (!IS_ERR_VALUE(res))
406 *expires_next = expires;
407 return res;
408}
409
410
411/*
412 * Retrigger next event is called after clock was set
413 *
414 * Called with interrupts disabled via on_each_cpu()
415 */
416static void retrigger_next_event(void *arg)
417{
418 struct hrtimer_cpu_base *base;
419 struct timespec realtime_offset;
420 unsigned long seq;
421
422 if (!hrtimer_hres_active())
423 return;
424
425 do {
426 seq = read_seqbegin(&xtime_lock);
427 set_normalized_timespec(&realtime_offset,
428 -wall_to_monotonic.tv_sec,
429 -wall_to_monotonic.tv_nsec);
430 } while (read_seqretry(&xtime_lock, seq));
431
432 base = &__get_cpu_var(hrtimer_bases);
433
434 /* Adjust CLOCK_REALTIME offset */
435 spin_lock(&base->lock);
436 base->clock_base[CLOCK_REALTIME].offset =
437 timespec_to_ktime(realtime_offset);
438
439 hrtimer_force_reprogram(base);
440 spin_unlock(&base->lock);
441}
442
443/*
444 * Clock realtime was set
445 *
446 * Change the offset of the realtime clock vs. the monotonic
447 * clock.
448 *
449 * We might have to reprogram the high resolution timer interrupt. On
450 * SMP we call the architecture specific code to retrigger _all_ high
451 * resolution timer interrupts. On UP we just disable interrupts and
452 * call the high resolution interrupt code.
453 */
454void clock_was_set(void)
455{
456 /* Retrigger the CPU local events everywhere */
457 on_each_cpu(retrigger_next_event, NULL, 0, 1);
458}
459
460/*
461 * Check, whether the timer is on the callback pending list
462 */
463static inline int hrtimer_cb_pending(const struct hrtimer *timer)
464{
465 return timer->state & HRTIMER_STATE_PENDING;
466}
467
468/*
469 * Remove a timer from the callback pending list
470 */
471static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
472{
473 list_del_init(&timer->cb_entry);
474}
475
476/*
477 * Initialize the high resolution related parts of cpu_base
478 */
479static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
480{
481 base->expires_next.tv64 = KTIME_MAX;
482 base->hres_active = 0;
483 INIT_LIST_HEAD(&base->cb_pending);
484}
485
486/*
487 * Initialize the high resolution related parts of a hrtimer
488 */
489static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
490{
491 INIT_LIST_HEAD(&timer->cb_entry);
492}
493
494/*
495 * When high resolution timers are active, try to reprogram. Note that in case
496 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
497 * check happens. The timer gets enqueued into the rbtree. The reprogramming
498 * and expiry check is done in the hrtimer_interrupt or in the softirq.
499 */
500static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
501 struct hrtimer_clock_base *base)
502{
503 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
504
505 /* Timer is expired, act upon the callback mode */
506 switch(timer->cb_mode) {
507 case HRTIMER_CB_IRQSAFE_NO_RESTART:
508 /*
509 * We can call the callback from here. No restart
510 * happens, so no danger of recursion
511 */
512 BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
513 return 1;
514 case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
515 /*
516 * This is solely for the sched tick emulation with
517 * dynamic tick support to ensure that we do not
518 * restart the tick right on the edge and end up with
519 * the tick timer in the softirq! The calling site
520 * takes care of this.
521 */
522 return 1;
523 case HRTIMER_CB_IRQSAFE:
524 case HRTIMER_CB_SOFTIRQ:
525 /*
526 * Move everything else into the softirq pending list !
527 */
528 list_add_tail(&timer->cb_entry,
529 &base->cpu_base->cb_pending);
530 timer->state = HRTIMER_STATE_PENDING;
531 raise_softirq(HRTIMER_SOFTIRQ);
532 return 1;
533 default:
534 BUG();
535 }
536 }
537 return 0;
538}
539
540/*
541 * Switch to high resolution mode
542 */
543static void hrtimer_switch_to_hres(void)
544{
545 struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
546 unsigned long flags;
547
548 if (base->hres_active)
549 return;
550
551 local_irq_save(flags);
552
553 if (tick_init_highres()) {
554 local_irq_restore(flags);
555 return;
556 }
557 base->hres_active = 1;
558 base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
559 base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;
560
561 tick_setup_sched_timer();
562
563 /* "Retrigger" the interrupt to get things going */
564 retrigger_next_event(NULL);
565 local_irq_restore(flags);
566 printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
567 smp_processor_id());
568}
569
570#else
571
572static inline int hrtimer_hres_active(void) { return 0; }
573static inline int hrtimer_is_hres_enabled(void) { return 0; }
574static inline void hrtimer_switch_to_hres(void) { }
575static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
576static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
577 struct hrtimer_clock_base *base)
578{
579 return 0;
580}
581static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
582static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
583static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
584static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
585
586#endif /* CONFIG_HIGH_RES_TIMERS */
587
588#ifdef CONFIG_TIMER_STATS
589void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
590{
591 if (timer->start_site)
592 return;
593
594 timer->start_site = addr;
595 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
596 timer->start_pid = current->pid;
597}
598#endif
599
289/* 600/*
290 * Counterpart to lock_timer_base above: 601 * Counterpart to lock_timer_base above:
291 */ 602 */
292static inline 603static inline
293void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) 604void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
294{ 605{
295 spin_unlock_irqrestore(&timer->base->lock, *flags); 606 spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
296} 607}
297 608
298/** 609/**
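The callback-mode switch in hrtimer_enqueue_reprogram() above is driven by the new cb_mode field of struct hrtimer; a timer user selects at init time whether an expired timer runs straight from the event interrupt or is deferred to HRTIMER_SOFTIRQ. A hedged sketch of a user choosing the softirq mode (the callback and setup function are illustrative):

#include <linux/hrtimer.h>

static enum hrtimer_restart example_cb(struct hrtimer *t)
{
	/* Runs in softirq context because of HRTIMER_CB_SOFTIRQ below */
	return HRTIMER_NORESTART;
}

static void example_setup(struct hrtimer *t)
{
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = example_cb;
#ifdef CONFIG_HIGH_RES_TIMERS
	t->cb_mode = HRTIMER_CB_SOFTIRQ;	/* defer expiry to softirq */
#endif
}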
@@ -342,7 +653,8 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
342 * The timer is inserted in expiry order. Insertion into the 653 * The timer is inserted in expiry order. Insertion into the
343 * red black tree is O(log(n)). Must hold the base lock. 654 * red black tree is O(log(n)). Must hold the base lock.
344 */ 655 */
345static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) 656static void enqueue_hrtimer(struct hrtimer *timer,
657 struct hrtimer_clock_base *base, int reprogram)
346{ 658{
347 struct rb_node **link = &base->active.rb_node; 659 struct rb_node **link = &base->active.rb_node;
348 struct rb_node *parent = NULL; 660 struct rb_node *parent = NULL;
@@ -368,39 +680,85 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
368 * Insert the timer to the rbtree and check whether it 680 * Insert the timer to the rbtree and check whether it
369 * replaces the first pending timer 681 * replaces the first pending timer
370 */ 682 */
371 rb_link_node(&timer->node, parent, link);
372 rb_insert_color(&timer->node, &base->active);
373
374 if (!base->first || timer->expires.tv64 < 683 if (!base->first || timer->expires.tv64 <
375 rb_entry(base->first, struct hrtimer, node)->expires.tv64) 684 rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
685 /*
686 * Reprogram the clock event device. When the timer is already
687 * expired hrtimer_enqueue_reprogram has either called the
688 * callback or added it to the pending list and raised the
689 * softirq.
690 *
691 * This is a NOP for !HIGHRES
692 */
693 if (reprogram && hrtimer_enqueue_reprogram(timer, base))
694 return;
695
376 base->first = &timer->node; 696 base->first = &timer->node;
697 }
698
699 rb_link_node(&timer->node, parent, link);
700 rb_insert_color(&timer->node, &base->active);
701 /*
702 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
703 * state of a possibly running callback.
704 */
705 timer->state |= HRTIMER_STATE_ENQUEUED;
377} 706}
378 707
379/* 708/*
380 * __remove_hrtimer - internal function to remove a timer 709 * __remove_hrtimer - internal function to remove a timer
381 * 710 *
382 * Caller must hold the base lock. 711 * Caller must hold the base lock.
712 *
713 * High resolution timer mode reprograms the clock event device when the
714 * timer is the one which expires next. The caller can disable this by setting
715 * reprogram to zero. This is useful when the context does a reprogramming
716 * anyway (e.g. timer interrupt)
383 */ 717 */
384static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) 718static void __remove_hrtimer(struct hrtimer *timer,
719 struct hrtimer_clock_base *base,
720 unsigned long newstate, int reprogram)
385{ 721{
386 /* 722 /* High res. callback list. NOP for !HIGHRES */
387 * Remove the timer from the rbtree and replace the 723 if (hrtimer_cb_pending(timer))
388 * first entry pointer if necessary. 724 hrtimer_remove_cb_pending(timer);
389 */ 725 else {
390 if (base->first == &timer->node) 726 /*
391 base->first = rb_next(&timer->node); 727 * Remove the timer from the rbtree and replace the
392 rb_erase(&timer->node, &base->active); 728 * first entry pointer if necessary.
393 rb_set_parent(&timer->node, &timer->node); 729 */
730 if (base->first == &timer->node) {
731 base->first = rb_next(&timer->node);
732 /* Reprogram the clock event device, if enabled */
733 if (reprogram && hrtimer_hres_active())
734 hrtimer_force_reprogram(base->cpu_base);
735 }
736 rb_erase(&timer->node, &base->active);
737 }
738 timer->state = newstate;
394} 739}
395 740
396/* 741/*
397 * remove hrtimer, called with base lock held 742 * remove hrtimer, called with base lock held
398 */ 743 */
399static inline int 744static inline int
400remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) 745remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
401{ 746{
402 if (hrtimer_active(timer)) { 747 if (hrtimer_is_queued(timer)) {
403 __remove_hrtimer(timer, base); 748 int reprogram;
749
750 /*
751 * Remove the timer and force reprogramming when high
752 * resolution mode is active and the timer is on the current
753 * CPU. If we remove a timer on another CPU, reprogramming is
754 * skipped. The interrupt event on this CPU is fired and
755 * reprogramming happens in the interrupt handler. This is a
756 * rare case and less expensive than a smp call.
757 */
758 timer_stats_hrtimer_clear_start_info(timer);
759 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
760 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
761 reprogram);
404 return 1; 762 return 1;
405 } 763 }
406 return 0; 764 return 0;
@@ -419,7 +777,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
419int 777int
420hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) 778hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
421{ 779{
422 struct hrtimer_base *base, *new_base; 780 struct hrtimer_clock_base *base, *new_base;
423 unsigned long flags; 781 unsigned long flags;
424 int ret; 782 int ret;
425 783
@@ -431,7 +789,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
431 /* Switch the timer base, if necessary: */ 789 /* Switch the timer base, if necessary: */
432 new_base = switch_hrtimer_base(timer, base); 790 new_base = switch_hrtimer_base(timer, base);
433 791
434 if (mode == HRTIMER_REL) { 792 if (mode == HRTIMER_MODE_REL) {
435 tim = ktime_add(tim, new_base->get_time()); 793 tim = ktime_add(tim, new_base->get_time());
436 /* 794 /*
437 * CONFIG_TIME_LOW_RES is a temporary way for architectures 795 * CONFIG_TIME_LOW_RES is a temporary way for architectures
@@ -446,7 +804,9 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
446 } 804 }
447 timer->expires = tim; 805 timer->expires = tim;
448 806
449 enqueue_hrtimer(timer, new_base); 807 timer_stats_hrtimer_set_start_info(timer);
808
809 enqueue_hrtimer(timer, new_base, base == new_base);
450 810
451 unlock_hrtimer_base(timer, &flags); 811 unlock_hrtimer_base(timer, &flags);
452 812
@@ -466,13 +826,13 @@ EXPORT_SYMBOL_GPL(hrtimer_start);
466 */ 826 */
467int hrtimer_try_to_cancel(struct hrtimer *timer) 827int hrtimer_try_to_cancel(struct hrtimer *timer)
468{ 828{
469 struct hrtimer_base *base; 829 struct hrtimer_clock_base *base;
470 unsigned long flags; 830 unsigned long flags;
471 int ret = -1; 831 int ret = -1;
472 832
473 base = lock_hrtimer_base(timer, &flags); 833 base = lock_hrtimer_base(timer, &flags);
474 834
475 if (base->curr_timer != timer) 835 if (!hrtimer_callback_running(timer))
476 ret = remove_hrtimer(timer, base); 836 ret = remove_hrtimer(timer, base);
477 837
478 unlock_hrtimer_base(timer, &flags); 838 unlock_hrtimer_base(timer, &flags);
@@ -508,19 +868,19 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
508 */ 868 */
509ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 869ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
510{ 870{
511 struct hrtimer_base *base; 871 struct hrtimer_clock_base *base;
512 unsigned long flags; 872 unsigned long flags;
513 ktime_t rem; 873 ktime_t rem;
514 874
515 base = lock_hrtimer_base(timer, &flags); 875 base = lock_hrtimer_base(timer, &flags);
516 rem = ktime_sub(timer->expires, timer->base->get_time()); 876 rem = ktime_sub(timer->expires, base->get_time());
517 unlock_hrtimer_base(timer, &flags); 877 unlock_hrtimer_base(timer, &flags);
518 878
519 return rem; 879 return rem;
520} 880}
521EXPORT_SYMBOL_GPL(hrtimer_get_remaining); 881EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
522 882
523#ifdef CONFIG_NO_IDLE_HZ 883#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
524/** 884/**
525 * hrtimer_get_next_event - get the time until next expiry event 885 * hrtimer_get_next_event - get the time until next expiry event
526 * 886 *
@@ -529,26 +889,31 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
529 */ 889 */
530ktime_t hrtimer_get_next_event(void) 890ktime_t hrtimer_get_next_event(void)
531{ 891{
532 struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); 892 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
893 struct hrtimer_clock_base *base = cpu_base->clock_base;
533 ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; 894 ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
534 unsigned long flags; 895 unsigned long flags;
535 int i; 896 int i;
536 897
537 for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) { 898 spin_lock_irqsave(&cpu_base->lock, flags);
538 struct hrtimer *timer;
539 899
540 spin_lock_irqsave(&base->lock, flags); 900 if (!hrtimer_hres_active()) {
541 if (!base->first) { 901 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
542 spin_unlock_irqrestore(&base->lock, flags); 902 struct hrtimer *timer;
543 continue; 903
904 if (!base->first)
905 continue;
906
907 timer = rb_entry(base->first, struct hrtimer, node);
908 delta.tv64 = timer->expires.tv64;
909 delta = ktime_sub(delta, base->get_time());
910 if (delta.tv64 < mindelta.tv64)
911 mindelta.tv64 = delta.tv64;
544 } 912 }
545 timer = rb_entry(base->first, struct hrtimer, node);
546 delta.tv64 = timer->expires.tv64;
547 spin_unlock_irqrestore(&base->lock, flags);
548 delta = ktime_sub(delta, base->get_time());
549 if (delta.tv64 < mindelta.tv64)
550 mindelta.tv64 = delta.tv64;
551 } 913 }
914
915 spin_unlock_irqrestore(&cpu_base->lock, flags);
916
552 if (mindelta.tv64 < 0) 917 if (mindelta.tv64 < 0)
553 mindelta.tv64 = 0; 918 mindelta.tv64 = 0;
554 return mindelta; 919 return mindelta;
@@ -564,17 +929,23 @@ ktime_t hrtimer_get_next_event(void)
564void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 929void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
565 enum hrtimer_mode mode) 930 enum hrtimer_mode mode)
566{ 931{
567 struct hrtimer_base *bases; 932 struct hrtimer_cpu_base *cpu_base;
568 933
569 memset(timer, 0, sizeof(struct hrtimer)); 934 memset(timer, 0, sizeof(struct hrtimer));
570 935
571 bases = __raw_get_cpu_var(hrtimer_bases); 936 cpu_base = &__raw_get_cpu_var(hrtimer_bases);
572 937
573 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS) 938 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
574 clock_id = CLOCK_MONOTONIC; 939 clock_id = CLOCK_MONOTONIC;
575 940
576 timer->base = &bases[clock_id]; 941 timer->base = &cpu_base->clock_base[clock_id];
577 rb_set_parent(&timer->node, &timer->node); 942 hrtimer_init_timer_hres(timer);
943
944#ifdef CONFIG_TIMER_STATS
945 timer->start_site = NULL;
946 timer->start_pid = -1;
947 memset(timer->start_comm, 0, TASK_COMM_LEN);
948#endif
578} 949}
579EXPORT_SYMBOL_GPL(hrtimer_init); 950EXPORT_SYMBOL_GPL(hrtimer_init);
580 951
@@ -588,21 +959,159 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
588 */ 959 */
589int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) 960int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
590{ 961{
591 struct hrtimer_base *bases; 962 struct hrtimer_cpu_base *cpu_base;
592 963
593 bases = __raw_get_cpu_var(hrtimer_bases); 964 cpu_base = &__raw_get_cpu_var(hrtimer_bases);
594 *tp = ktime_to_timespec(bases[which_clock].resolution); 965 *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
595 966
596 return 0; 967 return 0;
597} 968}
598EXPORT_SYMBOL_GPL(hrtimer_get_res); 969EXPORT_SYMBOL_GPL(hrtimer_get_res);
599 970
971#ifdef CONFIG_HIGH_RES_TIMERS
972
973/*
974 * High resolution timer interrupt
975 * Called with interrupts disabled
976 */
977void hrtimer_interrupt(struct clock_event_device *dev)
978{
979 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
980 struct hrtimer_clock_base *base;
981 ktime_t expires_next, now;
982 int i, raise = 0;
983
984 BUG_ON(!cpu_base->hres_active);
985 cpu_base->nr_events++;
986 dev->next_event.tv64 = KTIME_MAX;
987
988 retry:
989 now = ktime_get();
990
991 expires_next.tv64 = KTIME_MAX;
992
993 base = cpu_base->clock_base;
994
995 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
996 ktime_t basenow;
997 struct rb_node *node;
998
999 spin_lock(&cpu_base->lock);
1000
1001 basenow = ktime_add(now, base->offset);
1002
1003 while ((node = base->first)) {
1004 struct hrtimer *timer;
1005
1006 timer = rb_entry(node, struct hrtimer, node);
1007
1008 if (basenow.tv64 < timer->expires.tv64) {
1009 ktime_t expires;
1010
1011 expires = ktime_sub(timer->expires,
1012 base->offset);
1013 if (expires.tv64 < expires_next.tv64)
1014 expires_next = expires;
1015 break;
1016 }
1017
1018 /* Move softirq callbacks to the pending list */
1019 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1020 __remove_hrtimer(timer, base,
1021 HRTIMER_STATE_PENDING, 0);
1022 list_add_tail(&timer->cb_entry,
1023 &base->cpu_base->cb_pending);
1024 raise = 1;
1025 continue;
1026 }
1027
1028 __remove_hrtimer(timer, base,
1029 HRTIMER_STATE_CALLBACK, 0);
1030 timer_stats_account_hrtimer(timer);
1031
1032 /*
1033 * Note: We clear the CALLBACK bit after
1034 * enqueue_hrtimer to avoid reprogramming of
1035 * the event hardware. This happens at the end
1036 * of this function anyway.
1037 */
1038 if (timer->function(timer) != HRTIMER_NORESTART) {
1039 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1040 enqueue_hrtimer(timer, base, 0);
1041 }
1042 timer->state &= ~HRTIMER_STATE_CALLBACK;
1043 }
1044 spin_unlock(&cpu_base->lock);
1045 base++;
1046 }
1047
1048 cpu_base->expires_next = expires_next;
1049
1050 /* Reprogramming necessary ? */
1051 if (expires_next.tv64 != KTIME_MAX) {
1052 if (tick_program_event(expires_next, 0))
1053 goto retry;
1054 }
1055
1056 /* Raise softirq ? */
1057 if (raise)
1058 raise_softirq(HRTIMER_SOFTIRQ);
1059}
1060
1061static void run_hrtimer_softirq(struct softirq_action *h)
1062{
1063 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1064
1065 spin_lock_irq(&cpu_base->lock);
1066
1067 while (!list_empty(&cpu_base->cb_pending)) {
1068 enum hrtimer_restart (*fn)(struct hrtimer *);
1069 struct hrtimer *timer;
1070 int restart;
1071
1072 timer = list_entry(cpu_base->cb_pending.next,
1073 struct hrtimer, cb_entry);
1074
1075 timer_stats_account_hrtimer(timer);
1076
1077 fn = timer->function;
1078 __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
1079 spin_unlock_irq(&cpu_base->lock);
1080
1081 restart = fn(timer);
1082
1083 spin_lock_irq(&cpu_base->lock);
1084
1085 timer->state &= ~HRTIMER_STATE_CALLBACK;
1086 if (restart == HRTIMER_RESTART) {
1087 BUG_ON(hrtimer_active(timer));
1088 /*
1089 * Enqueue the timer, allow reprogramming of the event
1090 * device
1091 */
1092 enqueue_hrtimer(timer, timer->base, 1);
1093 } else if (hrtimer_active(timer)) {
1094 /*
1095 * If the timer was rearmed on another CPU, reprogram
1096 * the event device.
1097 */
1098 if (timer->base->first == &timer->node)
1099 hrtimer_reprogram(timer, timer->base);
1100 }
1101 }
1102 spin_unlock_irq(&cpu_base->lock);
1103}
1104
1105#endif /* CONFIG_HIGH_RES_TIMERS */
1106
600/* 1107/*
601 * Expire the per base hrtimer-queue: 1108 * Expire the per base hrtimer-queue:
602 */ 1109 */
603static inline void run_hrtimer_queue(struct hrtimer_base *base) 1110static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
1111 int index)
604{ 1112{
605 struct rb_node *node; 1113 struct rb_node *node;
1114 struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
606 1115
607 if (!base->first) 1116 if (!base->first)
608 return; 1117 return;
@@ -610,53 +1119,72 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
610 if (base->get_softirq_time) 1119 if (base->get_softirq_time)
611 base->softirq_time = base->get_softirq_time(); 1120 base->softirq_time = base->get_softirq_time();
612 1121
613 spin_lock_irq(&base->lock); 1122 spin_lock_irq(&cpu_base->lock);
614 1123
615 while ((node = base->first)) { 1124 while ((node = base->first)) {
616 struct hrtimer *timer; 1125 struct hrtimer *timer;
617 int (*fn)(struct hrtimer *); 1126 enum hrtimer_restart (*fn)(struct hrtimer *);
618 int restart; 1127 int restart;
619 1128
620 timer = rb_entry(node, struct hrtimer, node); 1129 timer = rb_entry(node, struct hrtimer, node);
621 if (base->softirq_time.tv64 <= timer->expires.tv64) 1130 if (base->softirq_time.tv64 <= timer->expires.tv64)
622 break; 1131 break;
623 1132
1133 timer_stats_account_hrtimer(timer);
1134
624 fn = timer->function; 1135 fn = timer->function;
625 set_curr_timer(base, timer); 1136 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
626 __remove_hrtimer(timer, base); 1137 spin_unlock_irq(&cpu_base->lock);
627 spin_unlock_irq(&base->lock);
628 1138
629 restart = fn(timer); 1139 restart = fn(timer);
630 1140
631 spin_lock_irq(&base->lock); 1141 spin_lock_irq(&cpu_base->lock);
632 1142
1143 timer->state &= ~HRTIMER_STATE_CALLBACK;
633 if (restart != HRTIMER_NORESTART) { 1144 if (restart != HRTIMER_NORESTART) {
634 BUG_ON(hrtimer_active(timer)); 1145 BUG_ON(hrtimer_active(timer));
635 enqueue_hrtimer(timer, base); 1146 enqueue_hrtimer(timer, base, 0);
636 } 1147 }
637 } 1148 }
638 set_curr_timer(base, NULL); 1149 spin_unlock_irq(&cpu_base->lock);
639 spin_unlock_irq(&base->lock);
640} 1150}
641 1151
642/* 1152/*
643 * Called from timer softirq every jiffy, expire hrtimers: 1153 * Called from timer softirq every jiffy, expire hrtimers:
1154 *
1155 * For HRT it's the fallback code to run the softirq in the timer
1156 * softirq context in case the hrtimer initialization failed or has
1157 * not been done yet.
644 */ 1158 */
645void hrtimer_run_queues(void) 1159void hrtimer_run_queues(void)
646{ 1160{
647 struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); 1161 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
648 int i; 1162 int i;
649 1163
650 hrtimer_get_softirq_time(base); 1164 if (hrtimer_hres_active())
1165 return;
1166
1167 /*
1168 * This _is_ ugly: We have to check in the softirq context,
1169 * whether we can switch to highres and/or nohz mode. The
1170 * clocksource switch happens in the timer interrupt with
1171 * xtime_lock held. Notification from there only sets the
1172 * check bit in the tick_oneshot code, otherwise we might
1173 * deadlock vs. xtime_lock.
1174 */
1175 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1176 hrtimer_switch_to_hres();
651 1177
652 for (i = 0; i < MAX_HRTIMER_BASES; i++) 1178 hrtimer_get_softirq_time(cpu_base);
653 run_hrtimer_queue(&base[i]); 1179
1180 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1181 run_hrtimer_queue(cpu_base, i);
654} 1182}
655 1183
656/* 1184/*
657 * Sleep related functions: 1185 * Sleep related functions:
658 */ 1186 */
659static int hrtimer_wakeup(struct hrtimer *timer) 1187static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
660{ 1188{
661 struct hrtimer_sleeper *t = 1189 struct hrtimer_sleeper *t =
662 container_of(timer, struct hrtimer_sleeper, timer); 1190 container_of(timer, struct hrtimer_sleeper, timer);
@@ -673,6 +1201,9 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
673{ 1201{
674 sl->timer.function = hrtimer_wakeup; 1202 sl->timer.function = hrtimer_wakeup;
675 sl->task = task; 1203 sl->task = task;
1204#ifdef CONFIG_HIGH_RES_TIMERS
1205 sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
1206#endif
676} 1207}
677 1208
678static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) 1209static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -683,10 +1214,11 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
683 set_current_state(TASK_INTERRUPTIBLE); 1214 set_current_state(TASK_INTERRUPTIBLE);
684 hrtimer_start(&t->timer, t->timer.expires, mode); 1215 hrtimer_start(&t->timer, t->timer.expires, mode);
685 1216
686 schedule(); 1217 if (likely(t->task))
1218 schedule();
687 1219
688 hrtimer_cancel(&t->timer); 1220 hrtimer_cancel(&t->timer);
689 mode = HRTIMER_ABS; 1221 mode = HRTIMER_MODE_ABS;
690 1222
691 } while (t->task && !signal_pending(current)); 1223 } while (t->task && !signal_pending(current));
692 1224
@@ -702,10 +1234,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
702 1234
703 restart->fn = do_no_restart_syscall; 1235 restart->fn = do_no_restart_syscall;
704 1236
705 hrtimer_init(&t.timer, restart->arg0, HRTIMER_ABS); 1237 hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
706 t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; 1238 t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;
707 1239
708 if (do_nanosleep(&t, HRTIMER_ABS)) 1240 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
709 return 0; 1241 return 0;
710 1242
711 rmtp = (struct timespec __user *) restart->arg1; 1243 rmtp = (struct timespec __user *) restart->arg1;
@@ -738,7 +1270,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
738 return 0; 1270 return 0;
739 1271
740 /* Absolute timers do not update the rmtp value and restart: */ 1272 /* Absolute timers do not update the rmtp value and restart: */
741 if (mode == HRTIMER_ABS) 1273 if (mode == HRTIMER_MODE_ABS)
742 return -ERESTARTNOHAND; 1274 return -ERESTARTNOHAND;
743 1275
744 if (rmtp) { 1276 if (rmtp) {
@@ -771,7 +1303,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
771 if (!timespec_valid(&tu)) 1303 if (!timespec_valid(&tu))
772 return -EINVAL; 1304 return -EINVAL;
773 1305
774 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_REL, CLOCK_MONOTONIC); 1306 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
775} 1307}
776 1308
777/* 1309/*
@@ -779,56 +1311,60 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
779 */ 1311 */
780static void __devinit init_hrtimers_cpu(int cpu) 1312static void __devinit init_hrtimers_cpu(int cpu)
781{ 1313{
782 struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu); 1314 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
783 int i; 1315 int i;
784 1316
785 for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) { 1317 spin_lock_init(&cpu_base->lock);
786 spin_lock_init(&base->lock); 1318 lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
787 lockdep_set_class(&base->lock, &base->lock_key); 1319
788 } 1320 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1321 cpu_base->clock_base[i].cpu_base = cpu_base;
1322
1323 hrtimer_init_hres(cpu_base);
789} 1324}
790 1325
791#ifdef CONFIG_HOTPLUG_CPU 1326#ifdef CONFIG_HOTPLUG_CPU
792 1327
793static void migrate_hrtimer_list(struct hrtimer_base *old_base, 1328static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
794 struct hrtimer_base *new_base) 1329 struct hrtimer_clock_base *new_base)
795{ 1330{
796 struct hrtimer *timer; 1331 struct hrtimer *timer;
797 struct rb_node *node; 1332 struct rb_node *node;
798 1333
799 while ((node = rb_first(&old_base->active))) { 1334 while ((node = rb_first(&old_base->active))) {
800 timer = rb_entry(node, struct hrtimer, node); 1335 timer = rb_entry(node, struct hrtimer, node);
801 __remove_hrtimer(timer, old_base); 1336 BUG_ON(hrtimer_callback_running(timer));
1337 __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
802 timer->base = new_base; 1338 timer->base = new_base;
803 enqueue_hrtimer(timer, new_base); 1339 /*
1340 * Enqueue the timer. Allow reprogramming of the event device
1341 */
1342 enqueue_hrtimer(timer, new_base, 1);
804 } 1343 }
805} 1344}
806 1345
807static void migrate_hrtimers(int cpu) 1346static void migrate_hrtimers(int cpu)
808{ 1347{
809 struct hrtimer_base *old_base, *new_base; 1348 struct hrtimer_cpu_base *old_base, *new_base;
810 int i; 1349 int i;
811 1350
812 BUG_ON(cpu_online(cpu)); 1351 BUG_ON(cpu_online(cpu));
813 old_base = per_cpu(hrtimer_bases, cpu); 1352 old_base = &per_cpu(hrtimer_bases, cpu);
814 new_base = get_cpu_var(hrtimer_bases); 1353 new_base = &get_cpu_var(hrtimer_bases);
815
816 local_irq_disable();
817 1354
818 for (i = 0; i < MAX_HRTIMER_BASES; i++) { 1355 tick_cancel_sched_timer(cpu);
819 1356
820 spin_lock(&new_base->lock); 1357 local_irq_disable();
821 spin_lock(&old_base->lock);
822
823 BUG_ON(old_base->curr_timer);
824 1358
825 migrate_hrtimer_list(old_base, new_base); 1359 spin_lock(&new_base->lock);
1360 spin_lock(&old_base->lock);
826 1361
827 spin_unlock(&old_base->lock); 1362 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
828 spin_unlock(&new_base->lock); 1363 migrate_hrtimer_list(&old_base->clock_base[i],
829 old_base++; 1364 &new_base->clock_base[i]);
830 new_base++;
831 } 1365 }
1366 spin_unlock(&old_base->lock);
1367 spin_unlock(&new_base->lock);
832 1368
833 local_irq_enable(); 1369 local_irq_enable();
834 put_cpu_var(hrtimer_bases); 1370 put_cpu_var(hrtimer_bases);
@@ -848,6 +1384,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
848 1384
849#ifdef CONFIG_HOTPLUG_CPU 1385#ifdef CONFIG_HOTPLUG_CPU
850 case CPU_DEAD: 1386 case CPU_DEAD:
1387 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
851 migrate_hrtimers(cpu); 1388 migrate_hrtimers(cpu);
852 break; 1389 break;
853#endif 1390#endif
@@ -868,5 +1405,8 @@ void __init hrtimers_init(void)
868 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, 1405 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
869 (void *)(long)smp_processor_id()); 1406 (void *)(long)smp_processor_id());
870 register_cpu_notifier(&hrtimers_nb); 1407 register_cpu_notifier(&hrtimers_nb);
1408#ifdef CONFIG_HIGH_RES_TIMERS
1409 open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
1410#endif
871} 1411}
872 1412
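Taken together, the hrtimer.c changes rename the mode constants (HRTIMER_REL/HRTIMER_ABS become HRTIMER_MODE_REL/HRTIMER_MODE_ABS) and change callbacks to return enum hrtimer_restart. A minimal self-rearming user of the updated API, for illustration only:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define EXAMPLE_PERIOD_NS	(100 * 1000 * 1000)	/* 100 ms */

static struct hrtimer example_timer;

static enum hrtimer_restart example_fn(struct hrtimer *t)
{
	/* Push the expiry one period into the future and re-arm */
	hrtimer_forward(t, t->base->get_time(),
			ktime_set(0, EXAMPLE_PERIOD_NS));
	return HRTIMER_RESTART;
}

static void example_start(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_fn;
	hrtimer_start(&example_timer, ktime_set(0, EXAMPLE_PERIOD_NS),
		      HRTIMER_MODE_REL);
}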
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 475e8a71bcdc..0133f4f9e9f0 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(set_irq_data);
168/** 168/**
169 * set_irq_data - set irq type data for an irq 169 * set_irq_data - set irq type data for an irq
170 * @irq: Interrupt number 170 * @irq: Interrupt number
171 * @data: Pointer to interrupt specific data 171 * @entry: Pointer to MSI descriptor data
172 * 172 *
173 * Set the hardware irq controller data for an irq 173 * Set the hardware irq controller data for an irq
174 */ 174 */
@@ -230,10 +230,6 @@ static void default_enable(unsigned int irq)
230 */ 230 */
231static void default_disable(unsigned int irq) 231static void default_disable(unsigned int irq)
232{ 232{
233 struct irq_desc *desc = irq_desc + irq;
234
235 if (!(desc->status & IRQ_DELAYED_DISABLE))
236 desc->chip->mask(irq);
237} 233}
238 234
239/* 235/*
@@ -298,13 +294,18 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
298 294
299 if (unlikely(desc->status & IRQ_INPROGRESS)) 295 if (unlikely(desc->status & IRQ_INPROGRESS))
300 goto out_unlock; 296 goto out_unlock;
301 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
302 kstat_cpu(cpu).irqs[irq]++; 297 kstat_cpu(cpu).irqs[irq]++;
303 298
304 action = desc->action; 299 action = desc->action;
305 if (unlikely(!action || (desc->status & IRQ_DISABLED))) 300 if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
301 if (desc->chip->mask)
302 desc->chip->mask(irq);
303 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
304 desc->status |= IRQ_PENDING;
306 goto out_unlock; 305 goto out_unlock;
306 }
307 307
308 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
308 desc->status |= IRQ_INPROGRESS; 309 desc->status |= IRQ_INPROGRESS;
309 spin_unlock(&desc->lock); 310 spin_unlock(&desc->lock);
310 311
@@ -396,11 +397,13 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
396 397
397 /* 398 /*
398 * If it's disabled or no action available 399
399 * keep it masked and get out of here 400 * then mask it and get out of here:
400 */ 401 */
401 action = desc->action; 402 action = desc->action;
402 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 403 if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
403 desc->status |= IRQ_PENDING; 404 desc->status |= IRQ_PENDING;
405 if (desc->chip->mask)
406 desc->chip->mask(irq);
404 goto out; 407 goto out;
405 } 408 }
406 409
@@ -562,10 +565,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
562 565
563 /* Uninstall? */ 566 /* Uninstall? */
564 if (handle == handle_bad_irq) { 567 if (handle == handle_bad_irq) {
565 if (desc->chip != &no_irq_chip) { 568 if (desc->chip != &no_irq_chip)
566 desc->chip->mask(irq); 569 mask_ack_irq(desc, irq);
567 desc->chip->ack(irq);
568 }
569 desc->status |= IRQ_DISABLED; 570 desc->status |= IRQ_DISABLED;
570 desc->depth = 1; 571 desc->depth = 1;
571 } 572 }
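For context, flow handlers such as handle_simple_irq() and handle_fasteoi_irq() are installed per interrupt line through the existing genirq API; the masking changes above only alter what those handlers do when a line is disabled or has no action. An illustrative wiring, assuming the usual set_irq_chip_and_handler() helper (my_chip is a stand-in for a real irq_chip):

#include <linux/irq.h>

static struct irq_chip my_chip;		/* .mask/.unmask/.ack as needed */

static void example_wire_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &my_chip, handle_simple_irq);
}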
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index acc5d9fe462b..5597c157442a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -38,6 +38,46 @@ void synchronize_irq(unsigned int irq)
38} 38}
39EXPORT_SYMBOL(synchronize_irq); 39EXPORT_SYMBOL(synchronize_irq);
40 40
41/**
42 * irq_can_set_affinity - Check if the affinity of a given irq can be set
43 * @irq: Interrupt to check
44 *
45 */
46int irq_can_set_affinity(unsigned int irq)
47{
48 struct irq_desc *desc = irq_desc + irq;
49
50 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
51 !desc->chip->set_affinity)
52 return 0;
53
54 return 1;
55}
56
57/**
58 * irq_set_affinity - Set the irq affinity of a given irq
59 * @irq: Interrupt to set affinity
60 * @cpumask: cpumask
61 *
62 */
63int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
64{
65 struct irq_desc *desc = irq_desc + irq;
66
67 if (!desc->chip->set_affinity)
68 return -EINVAL;
69
70 set_balance_irq_affinity(irq, cpumask);
71
72#ifdef CONFIG_GENERIC_PENDING_IRQ
73 set_pending_irq(irq, cpumask);
74#else
75 desc->affinity = cpumask;
76 desc->chip->set_affinity(irq, cpumask);
77#endif
78 return 0;
79}
80
41#endif 81#endif
42 82
43/** 83/**
@@ -281,6 +321,10 @@ int setup_irq(unsigned int irq, struct irqaction *new)
281 if (new->flags & IRQF_PERCPU) 321 if (new->flags & IRQF_PERCPU)
282 desc->status |= IRQ_PER_CPU; 322 desc->status |= IRQ_PER_CPU;
283#endif 323#endif
324 /* Exclude IRQ from balancing */
325 if (new->flags & IRQF_NOBALANCING)
326 desc->status |= IRQ_NO_BALANCING;
327
284 if (!shared) { 328 if (!shared) {
285 irq_chip_set_defaults(desc->chip); 329 irq_chip_set_defaults(desc->chip);
286 330
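
The new irq_can_set_affinity()/irq_set_affinity() pair gives in-kernel users a first-class way to steer an interrupt instead of detouring through the /proc write path. A minimal sketch of a caller; the function name, the IRQ argument and the choice of CPU 0 are illustrative only:

#include <linux/cpumask.h>
#include <linux/irq.h>

static int pin_irq_to_cpu0(unsigned int irq)
{
	/* Per-CPU IRQs and chips without ->set_affinity() cannot move */
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* cpumask_of_cpu() builds a cpumask_t containing exactly one CPU */
	return irq_set_affinity(irq, cpumask_of_cpu(0));
}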
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6d3be06e8ce6..2db91eb54ad8 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -16,26 +16,6 @@ static struct proc_dir_entry *root_irq_dir;
16 16
17#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
18 18
19#ifdef CONFIG_GENERIC_PENDING_IRQ
20void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
21{
22 set_balance_irq_affinity(irq, mask_val);
23
24 /*
25 * Save these away for later use. Re-program when the
26 * interrupt is pending
27 */
28 set_pending_irq(irq, mask_val);
29}
30#else
31void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
32{
33 set_balance_irq_affinity(irq, mask_val);
34 irq_desc[irq].affinity = mask_val;
35 irq_desc[irq].chip->set_affinity(irq, mask_val);
36}
37#endif
38
39static int irq_affinity_read_proc(char *page, char **start, off_t off, 19static int irq_affinity_read_proc(char *page, char **start, off_t off,
40 int count, int *eof, void *data) 20 int count, int *eof, void *data)
41{ 21{
@@ -55,7 +35,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
55 cpumask_t new_value, tmp; 35 cpumask_t new_value, tmp;
56 36
57 if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || 37 if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
58 CHECK_IRQ_PER_CPU(irq_desc[irq].status)) 38 irq_balancing_disabled(irq))
59 return -EIO; 39 return -EIO;
60 40
61 err = cpumask_parse_user(buffer, count, new_value); 41 err = cpumask_parse_user(buffer, count, new_value);
@@ -73,7 +53,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
73 code to set default SMP affinity. */ 53 code to set default SMP affinity. */
74 return select_smp_affinity(irq) ? -EINVAL : full_count; 54 return select_smp_affinity(irq) ? -EINVAL : full_count;
75 55
76 proc_set_irq_affinity(irq, new_value); 56 irq_set_affinity(irq, new_value);
77 57
78 return full_count; 58 return full_count;
79} 59}
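
Userspace keeps the interface it always had: a hex CPU mask written to /proc/irq/<N>/smp_affinity, which now simply lands in irq_set_affinity() above. A small illustration; IRQ 19 and mask 0x2 (CPU 1) are arbitrary examples, and the write fails with EIO when balancing is disabled for that line:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "2\n");	/* bit 1 set: route IRQ 19 to CPU 1 */
	return fclose(f) ? 1 : 0;
}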
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 204ed7939e75..307c6a632ef6 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -128,18 +128,13 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
128/* 128/*
129 * The timer is automagically restarted when interval != 0 129 * The timer is automagically restarted when interval != 0
130 */ 130 */
131int it_real_fn(struct hrtimer *timer) 131enum hrtimer_restart it_real_fn(struct hrtimer *timer)
132{ 132{
133 struct signal_struct *sig = 133 struct signal_struct *sig =
134 container_of(timer, struct signal_struct, real_timer); 134 container_of(timer, struct signal_struct, real_timer);
135 135
136 send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk); 136 send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
137 137
138 if (sig->it_real_incr.tv64 != 0) {
139 hrtimer_forward(timer, timer->base->softirq_time,
140 sig->it_real_incr);
141 return HRTIMER_RESTART;
142 }
143 return HRTIMER_NORESTART; 138 return HRTIMER_NORESTART;
144} 139}
145 140
@@ -231,11 +226,14 @@ again:
231 spin_unlock_irq(&tsk->sighand->siglock); 226 spin_unlock_irq(&tsk->sighand->siglock);
232 goto again; 227 goto again;
233 } 228 }
234 tsk->signal->it_real_incr =
235 timeval_to_ktime(value->it_interval);
236 expires = timeval_to_ktime(value->it_value); 229 expires = timeval_to_ktime(value->it_value);
237 if (expires.tv64 != 0) 230 if (expires.tv64 != 0) {
238 hrtimer_start(timer, expires, HRTIMER_REL); 231 tsk->signal->it_real_incr =
232 timeval_to_ktime(value->it_interval);
233 hrtimer_start(timer, expires, HRTIMER_MODE_REL);
234 } else
235 tsk->signal->it_real_incr.tv64 = 0;
236
239 spin_unlock_irq(&tsk->sighand->siglock); 237 spin_unlock_irq(&tsk->sighand->siglock);
240 break; 238 break;
241 case ITIMER_VIRTUAL: 239 case ITIMER_VIRTUAL:
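
The itimer rework stops it_real_fn() from re-arming itself; a periodic ITIMER_REAL is now restarted when the SIGALRM is dequeued (see the kernel/signal.c hunk further below). The behaviour visible to programs is unchanged, as this plain userspace check illustrates; the 100 ms interval is an arbitrary example:

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	ticks++;			/* async-signal-safe: just count */
}

int main(void)
{
	struct itimerval it = {
		.it_interval	= { 0, 100000 },	/* 100 ms period */
		.it_value	= { 0, 100000 },	/* first expiry  */
	};

	signal(SIGALRM, on_alarm);
	setitimer(ITIMER_REAL, &it, NULL);

	while (ticks < 10)
		pause();	/* each dequeued SIGALRM re-arms the timer */

	printf("got %d alarms\n", (int)ticks);
	return 0;
}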
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 7c3e1e6dfb5b..657f77697415 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -304,7 +304,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
304 * should be able to see it. 304 * should be able to see it.
305 */ 305 */
306 struct task_struct *p; 306 struct task_struct *p;
307 read_lock(&tasklist_lock); 307 rcu_read_lock();
308 p = find_task_by_pid(pid); 308 p = find_task_by_pid(pid);
309 if (p) { 309 if (p) {
310 if (CPUCLOCK_PERTHREAD(which_clock)) { 310 if (CPUCLOCK_PERTHREAD(which_clock)) {
@@ -312,12 +312,17 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
312 error = cpu_clock_sample(which_clock, 312 error = cpu_clock_sample(which_clock,
313 p, &rtn); 313 p, &rtn);
314 } 314 }
315 } else if (p->tgid == pid && p->signal) { 315 } else {
316 error = cpu_clock_sample_group(which_clock, 316 read_lock(&tasklist_lock);
317 p, &rtn); 317 if (p->tgid == pid && p->signal) {
318 error =
319 cpu_clock_sample_group(which_clock,
320 p, &rtn);
321 }
322 read_unlock(&tasklist_lock);
318 } 323 }
319 } 324 }
320 read_unlock(&tasklist_lock); 325 rcu_read_unlock();
321 } 326 }
322 327
323 if (error) 328 if (error)
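
The hunk above shrinks the tasklist_lock critical section: RCU alone is enough to keep the task_struct from disappearing during the lookup, and the reader lock is only taken around the group-wide sample. The lookup idiom in isolation, as a hedged sketch against the 2.6.21-era API (the demo_ name is invented):

#include <linux/rcupdate.h>
#include <linux/sched.h>

static struct task_struct *demo_find_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_pid(pid);
	if (p)
		get_task_struct(p);	/* pin it beyond the RCU section */
	rcu_read_unlock();

	return p;			/* caller does put_task_struct() */
}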
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a1bf61617839..44318ca71978 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -145,7 +145,7 @@ static int common_timer_set(struct k_itimer *, int,
145 struct itimerspec *, struct itimerspec *); 145 struct itimerspec *, struct itimerspec *);
146static int common_timer_del(struct k_itimer *timer); 146static int common_timer_del(struct k_itimer *timer);
147 147
148static int posix_timer_fn(struct hrtimer *data); 148static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
149 149
150static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags); 150static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
151 151
@@ -334,12 +334,12 @@ EXPORT_SYMBOL_GPL(posix_timer_event);
334 334
335 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. 335 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
336 */ 336 */
337static int posix_timer_fn(struct hrtimer *timer) 337static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
338{ 338{
339 struct k_itimer *timr; 339 struct k_itimer *timr;
340 unsigned long flags; 340 unsigned long flags;
341 int si_private = 0; 341 int si_private = 0;
342 int ret = HRTIMER_NORESTART; 342 enum hrtimer_restart ret = HRTIMER_NORESTART;
343 343
344 timr = container_of(timer, struct k_itimer, it.real.timer); 344 timr = container_of(timer, struct k_itimer, it.real.timer);
345 spin_lock_irqsave(&timr->it_lock, flags); 345 spin_lock_irqsave(&timr->it_lock, flags);
@@ -356,7 +356,7 @@ static int posix_timer_fn(struct hrtimer *timer)
356 if (timr->it.real.interval.tv64 != 0) { 356 if (timr->it.real.interval.tv64 != 0) {
357 timr->it_overrun += 357 timr->it_overrun +=
358 hrtimer_forward(timer, 358 hrtimer_forward(timer,
359 timer->base->softirq_time, 359 hrtimer_cb_get_time(timer),
360 timr->it.real.interval); 360 timr->it.real.interval);
361 ret = HRTIMER_RESTART; 361 ret = HRTIMER_RESTART;
362 ++timr->it_requeue_pending; 362 ++timr->it_requeue_pending;
@@ -722,7 +722,7 @@ common_timer_set(struct k_itimer *timr, int flags,
722 if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) 722 if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
723 return 0; 723 return 0;
724 724
725 mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL; 725 mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
726 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); 726 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
727 timr->it.real.timer.function = posix_timer_fn; 727 timr->it.real.timer.function = posix_timer_fn;
728 728
@@ -734,7 +734,7 @@ common_timer_set(struct k_itimer *timr, int flags,
734 /* SIGEV_NONE timers are not queued ! See common_timer_get */ 734 /* SIGEV_NONE timers are not queued ! See common_timer_get */
735 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { 735 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
736 /* Setup correct expiry time for relative timers */ 736 /* Setup correct expiry time for relative timers */
737 if (mode == HRTIMER_REL) 737 if (mode == HRTIMER_MODE_REL)
738 timer->expires = ktime_add(timer->expires, 738 timer->expires = ktime_add(timer->expires,
739 timer->base->get_time()); 739 timer->base->get_time());
740 return 0; 740 return 0;
@@ -950,7 +950,8 @@ static int common_nsleep(const clockid_t which_clock, int flags,
950 struct timespec *tsave, struct timespec __user *rmtp) 950 struct timespec *tsave, struct timespec __user *rmtp)
951{ 951{
952 return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ? 952 return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
953 HRTIMER_ABS : HRTIMER_REL, which_clock); 953 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
954 which_clock);
954} 955}
955 956
956asmlinkage long 957asmlinkage long
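
Beyond the mechanical HRTIMER_ABS/HRTIMER_REL to HRTIMER_MODE_ABS/HRTIMER_MODE_REL rename, callbacks now return enum hrtimer_restart and advance their expiry via hrtimer_cb_get_time(), exactly as posix_timer_fn() does above. The resulting pattern as a minimal sketch; the 100 ms period and the demo_ names are invented here:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static ktime_t demo_period;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* Advance the expiry by whole periods past the callback time */
	hrtimer_forward(timer, hrtimer_cb_get_time(timer), demo_period);
	return HRTIMER_RESTART;
}

static void demo_timer_setup(void)
{
	demo_period = ktime_set(0, 100000000);	/* 100 ms */
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, demo_period, HRTIMER_MODE_REL);
}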
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 4ab17da46fd8..180978cb2f75 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -625,7 +625,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
625 /* Setup the timer, when timeout != NULL */ 625 /* Setup the timer, when timeout != NULL */
626 if (unlikely(timeout)) 626 if (unlikely(timeout))
627 hrtimer_start(&timeout->timer, timeout->timer.expires, 627 hrtimer_start(&timeout->timer, timeout->timer.expires,
628 HRTIMER_ABS); 628 HRTIMER_MODE_ABS);
629 629
630 for (;;) { 630 for (;;) {
631 /* Try to acquire the lock: */ 631 /* Try to acquire the lock: */
diff --git a/kernel/signal.c b/kernel/signal.c
index 8072e568bbe0..e2a7d4bf7d57 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,26 +456,50 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
456int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 456int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
457{ 457{
458 int signr = __dequeue_signal(&tsk->pending, mask, info); 458 int signr = __dequeue_signal(&tsk->pending, mask, info);
459 if (!signr) 459 if (!signr) {
460 signr = __dequeue_signal(&tsk->signal->shared_pending, 460 signr = __dequeue_signal(&tsk->signal->shared_pending,
461 mask, info); 461 mask, info);
462 /*
463 * itimer signal ?
464 *
465 * itimers are process shared and we restart periodic
466 * itimers in the signal delivery path to prevent DoS
467 * attacks in the high resolution timer case. This is
468 * compliant with the old way of self-restarting
469 * itimers, as the SIGALRM is a legacy signal and only
470 * queued once. Changing the restart behaviour to
471 * restart the timer in the signal dequeue path is
472 * reducing the timer noise on heavily loaded !highres
473 * systems too.
474 */
475 if (unlikely(signr == SIGALRM)) {
476 struct hrtimer *tmr = &tsk->signal->real_timer;
477
478 if (!hrtimer_is_queued(tmr) &&
479 tsk->signal->it_real_incr.tv64 != 0) {
480 hrtimer_forward(tmr, tmr->base->get_time(),
481 tsk->signal->it_real_incr);
482 hrtimer_restart(tmr);
483 }
484 }
485 }
462 recalc_sigpending_tsk(tsk); 486 recalc_sigpending_tsk(tsk);
463 if (signr && unlikely(sig_kernel_stop(signr))) { 487 if (signr && unlikely(sig_kernel_stop(signr))) {
464 /* 488 /*
465 * Set a marker that we have dequeued a stop signal. Our 489 * Set a marker that we have dequeued a stop signal. Our
466 * caller might release the siglock and then the pending 490 * caller might release the siglock and then the pending
467 * stop signal it is about to process is no longer in the 491 * stop signal it is about to process is no longer in the
468 * pending bitmasks, but must still be cleared by a SIGCONT 492 * pending bitmasks, but must still be cleared by a SIGCONT
469 * (and overruled by a SIGKILL). So those cases clear this 493 * (and overruled by a SIGKILL). So those cases clear this
470 * shared flag after we've set it. Note that this flag may 494 * shared flag after we've set it. Note that this flag may
471 * remain set after the signal we return is ignored or 495 * remain set after the signal we return is ignored or
472 * handled. That doesn't matter because its only purpose 496 * handled. That doesn't matter because its only purpose
473 * is to alert stop-signal processing code when another 497 * is to alert stop-signal processing code when another
474 * processor has come along and cleared the flag. 498 * processor has come along and cleared the flag.
475 */ 499 */
476 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) 500 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
477 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; 501 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
478 } 502 }
479 if ( signr && 503 if ( signr &&
480 ((info->si_code & __SI_MASK) == __SI_TIMER) && 504 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
481 info->si_sys_private){ 505 info->si_sys_private){
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 918e52df090e..8b75008e2bd8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -17,6 +17,7 @@
17#include <linux/kthread.h> 17#include <linux/kthread.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/tick.h>
20 21
21#include <asm/irq.h> 22#include <asm/irq.h>
22/* 23/*
@@ -273,6 +274,18 @@ EXPORT_SYMBOL(do_softirq);
273 274
274#endif 275#endif
275 276
277/*
278 * Enter an interrupt context.
279 */
280void irq_enter(void)
281{
282 __irq_enter();
283#ifdef CONFIG_NO_HZ
284 if (idle_cpu(smp_processor_id()))
285 tick_nohz_update_jiffies();
286#endif
287}
288
276#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 289#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
277# define invoke_softirq() __do_softirq() 290# define invoke_softirq() __do_softirq()
278#else 291#else
@@ -289,6 +302,12 @@ void irq_exit(void)
289 sub_preempt_count(IRQ_EXIT_OFFSET); 302 sub_preempt_count(IRQ_EXIT_OFFSET);
290 if (!in_interrupt() && local_softirq_pending()) 303 if (!in_interrupt() && local_softirq_pending())
291 invoke_softirq(); 304 invoke_softirq();
305
306#ifdef CONFIG_NO_HZ
307 /* Make sure that timer wheel updates are propagated */
308 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
309 tick_nohz_stop_sched_tick();
310#endif
292 preempt_enable_no_resched(); 311 preempt_enable_no_resched();
293} 312}
294 313
diff --git a/kernel/time.c b/kernel/time.c
index 0e017bff4c19..c6c80ea5d0ea 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -470,6 +470,260 @@ struct timeval ns_to_timeval(const s64 nsec)
470 return tv; 470 return tv;
471} 471}
472 472
473/*
474 * Convert jiffies to milliseconds and back.
475 *
476 * Avoid unnecessary multiplications/divisions in the
477 * two most common HZ cases:
478 */
479unsigned int jiffies_to_msecs(const unsigned long j)
480{
481#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
482 return (MSEC_PER_SEC / HZ) * j;
483#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
484 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
485#else
486 return (j * MSEC_PER_SEC) / HZ;
487#endif
488}
489EXPORT_SYMBOL(jiffies_to_msecs);
490
491unsigned int jiffies_to_usecs(const unsigned long j)
492{
493#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
494 return (USEC_PER_SEC / HZ) * j;
495#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
496 return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
497#else
498 return (j * USEC_PER_SEC) / HZ;
499#endif
500}
501EXPORT_SYMBOL(jiffies_to_usecs);
502
503/*
504 * When we convert to jiffies then we interpret incoming values
505 * the following way:
506 *
507 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
508 *
509 * - 'too large' values [that would result in larger than
510 * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
511 *
512 * - all other values are converted to jiffies by either multiplying
513 * the input value by a factor or dividing it with a factor
514 *
515 * We must also be careful about 32-bit overflows.
516 */
517unsigned long msecs_to_jiffies(const unsigned int m)
518{
519 /*
520 * Negative value, means infinite timeout:
521 */
522 if ((int)m < 0)
523 return MAX_JIFFY_OFFSET;
524
525#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
526 /*
527 * HZ is equal to or smaller than 1000, and 1000 is a nice
528 * round multiple of HZ, divide with the factor between them,
529 * but round upwards:
530 */
531 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
532#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
533 /*
534 * HZ is larger than 1000, and HZ is a nice round multiple of
535 * 1000 - simply multiply with the factor between them.
536 *
537 * But first make sure the multiplication result cannot
538 * overflow:
539 */
540 if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
541 return MAX_JIFFY_OFFSET;
542
543 return m * (HZ / MSEC_PER_SEC);
544#else
545 /*
546 * Generic case - multiply, round and divide. But first
547 * check that, if we are doing a net multiplication,
548 * we wouldn't overflow:
549 */
550 if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
551 return MAX_JIFFY_OFFSET;
552
553 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
554#endif
555}
556EXPORT_SYMBOL(msecs_to_jiffies);
557
558unsigned long usecs_to_jiffies(const unsigned int u)
559{
560 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
561 return MAX_JIFFY_OFFSET;
562#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
563 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
564#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
565 return u * (HZ / USEC_PER_SEC);
566#else
567 return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
568#endif
569}
570EXPORT_SYMBOL(usecs_to_jiffies);
571
572/*
573 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
574 * that a remainder subtract here would not do the right thing as the
575 * resolution values don't fall on second boundaries. I.e. the line:
576 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
577 *
578 * Rather, we just shift the bits off the right.
579 *
580 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
581 * value to a scaled second value.
582 */
583unsigned long
584timespec_to_jiffies(const struct timespec *value)
585{
586 unsigned long sec = value->tv_sec;
587 long nsec = value->tv_nsec + TICK_NSEC - 1;
588
589 if (sec >= MAX_SEC_IN_JIFFIES){
590 sec = MAX_SEC_IN_JIFFIES;
591 nsec = 0;
592 }
593 return (((u64)sec * SEC_CONVERSION) +
594 (((u64)nsec * NSEC_CONVERSION) >>
595 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
596
597}
598EXPORT_SYMBOL(timespec_to_jiffies);
599
600void
601jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
602{
603 /*
604 * Convert jiffies to nanoseconds and separate with
605 * one divide.
606 */
607 u64 nsec = (u64)jiffies * TICK_NSEC;
608 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
609}
610EXPORT_SYMBOL(jiffies_to_timespec);
611
612/* Same for "timeval"
613 *
614 * Well, almost. The problem here is that the real system resolution is
615 * in nanoseconds and the value being converted is in microseconds.
616 * Also for some machines (those that use HZ = 1024, in particular),
617 * there is a LARGE error in the tick size in microseconds.
618 *
619 * The solution we use is to do the rounding AFTER we convert the
620 * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
621 * Instruction-wise, this should cost only an additional add-with-carry
622 * instruction compared with the way it was done above.
623 */
624unsigned long
625timeval_to_jiffies(const struct timeval *value)
626{
627 unsigned long sec = value->tv_sec;
628 long usec = value->tv_usec;
629
630 if (sec >= MAX_SEC_IN_JIFFIES){
631 sec = MAX_SEC_IN_JIFFIES;
632 usec = 0;
633 }
634 return (((u64)sec * SEC_CONVERSION) +
635 (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
636 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
637}
638
639void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
640{
641 /*
642 * Convert jiffies to nanoseconds and separate with
643 * one divide.
644 */
645 u64 nsec = (u64)jiffies * TICK_NSEC;
646 long tv_usec;
647
648 value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
649 tv_usec /= NSEC_PER_USEC;
650 value->tv_usec = tv_usec;
651}
652
653/*
654 * Convert jiffies/jiffies_64 to clock_t and back.
655 */
656clock_t jiffies_to_clock_t(long x)
657{
658#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
659 return x / (HZ / USER_HZ);
660#else
661 u64 tmp = (u64)x * TICK_NSEC;
662 do_div(tmp, (NSEC_PER_SEC / USER_HZ));
663 return (long)tmp;
664#endif
665}
666EXPORT_SYMBOL(jiffies_to_clock_t);
667
668unsigned long clock_t_to_jiffies(unsigned long x)
669{
670#if (HZ % USER_HZ)==0
671 if (x >= ~0UL / (HZ / USER_HZ))
672 return ~0UL;
673 return x * (HZ / USER_HZ);
674#else
675 u64 jif;
676
677 /* Don't worry about loss of precision here .. */
678 if (x >= ~0UL / HZ * USER_HZ)
679 return ~0UL;
680
681 /* .. but do try to contain it here */
682 jif = x * (u64) HZ;
683 do_div(jif, USER_HZ);
684 return jif;
685#endif
686}
687EXPORT_SYMBOL(clock_t_to_jiffies);
688
689u64 jiffies_64_to_clock_t(u64 x)
690{
691#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
692 do_div(x, HZ / USER_HZ);
693#else
694 /*
695 * There are better ways that don't overflow early,
696 * but even this doesn't overflow in hundreds of years
697 * in 64 bits, so..
698 */
699 x *= TICK_NSEC;
700 do_div(x, (NSEC_PER_SEC / USER_HZ));
701#endif
702 return x;
703}
704
705EXPORT_SYMBOL(jiffies_64_to_clock_t);
706
707u64 nsec_to_clock_t(u64 x)
708{
709#if (NSEC_PER_SEC % USER_HZ) == 0
710 do_div(x, (NSEC_PER_SEC / USER_HZ));
711#elif (USER_HZ % 512) == 0
712 x *= USER_HZ/512;
713 do_div(x, (NSEC_PER_SEC / 512));
714#else
715 /*
716 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
717 * overflow after 64.99 years.
718 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
719 */
720 x *= 9;
721 do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
722 USER_HZ));
723#endif
724 return x;
725}
726
473#if (BITS_PER_LONG < 64) 727#if (BITS_PER_LONG < 64)
474u64 get_jiffies_64(void) 728u64 get_jiffies_64(void)
475{ 729{
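
The conversion helpers above always round timeouts up, never down, so a sleep can only come out longer than requested. A standalone rendering of the two msecs_to_jiffies() branches with worked numbers; the HZ values are examples, and the kernel selects one branch at compile time:

#include <stdio.h>

#define MSEC_PER_SEC 1000UL

/* Fast branch for HZ dividing 1000 evenly: divide, rounding up */
static unsigned long m2j_div(unsigned int m, unsigned long hz)
{
	return (m + (MSEC_PER_SEC / hz) - 1) / (MSEC_PER_SEC / hz);
}

/* Generic branch: multiply first, then round up and divide */
static unsigned long m2j_generic(unsigned int m, unsigned long hz)
{
	return (m * hz + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
}

int main(void)
{
	/* HZ=250: 4 ms per tick, so 9 ms rounds up to 3 ticks (12 ms) */
	printf("HZ=250: 9 ms -> %lu jiffies\n", m2j_div(9, 250));

	/* HZ=300 does not divide 1000: (9*300 + 999)/1000 = 3 ticks */
	printf("HZ=300: 9 ms -> %lu jiffies\n", m2j_generic(9, 300));
	return 0;
}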
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
new file mode 100644
index 000000000000..f66351126544
--- /dev/null
+++ b/kernel/time/Kconfig
@@ -0,0 +1,25 @@
1#
2# Timer subsystem related configuration options
3#
4config TICK_ONESHOT
5 bool
6 default n
7
8config NO_HZ
9 bool "Tickless System (Dynamic Ticks)"
10 depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
11 select TICK_ONESHOT
12 help
13 This option enables a tickless system: timer interrupts will
14 only trigger on an as-needed basis both when the system is
15 busy and when the system is idle.
16
17config HIGH_RES_TIMERS
18 bool "High Resolution Timer Support"
19 depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
20 select TICK_ONESHOT
21 help
22 This option enables high resolution timer support. If your
23 hardware is not capable, then this option only increases
24 the size of the kernel image.
25
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 61a3907d16fb..93bccba1f265 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1 +1,8 @@
1obj-y += ntp.o clocksource.o jiffies.o 1obj-y += ntp.o clocksource.o jiffies.o timer_list.o
2
3obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
5obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
6obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
7obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o
8obj-$(CONFIG_TIMER_STATS) += timer_stats.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
new file mode 100644
index 000000000000..67932ea78c17
--- /dev/null
+++ b/kernel/time/clockevents.c
@@ -0,0 +1,345 @@
1/*
2 * linux/kernel/time/clockevents.c
3 *
4 * This file contains functions which manage clock event devices.
5 *
6 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
7 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
8 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
9 *
10 * This code is licensed under the GPL version 2. For details see
11 * kernel-base/COPYING.
12 */
13
14#include <linux/clockchips.h>
15#include <linux/hrtimer.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/notifier.h>
19#include <linux/smp.h>
20#include <linux/sysdev.h>
21
22/* The registered clock event devices */
23static LIST_HEAD(clockevent_devices);
24static LIST_HEAD(clockevents_released);
25
26/* Notification for clock events */
27static RAW_NOTIFIER_HEAD(clockevents_chain);
28
29/* Protection for the above */
30static DEFINE_SPINLOCK(clockevents_lock);
31
32/**
33 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
34 * @latch: value to convert
35 * @evt: pointer to clock event device descriptor
36 *
37 * Math helper, returns latch value converted to nanoseconds (bounds checked)
38 */
39unsigned long clockevent_delta2ns(unsigned long latch,
40 struct clock_event_device *evt)
41{
42 u64 clc = ((u64) latch << evt->shift);
43
44 do_div(clc, evt->mult);
45 if (clc < 1000)
46 clc = 1000;
47 if (clc > LONG_MAX)
48 clc = LONG_MAX;
49
50 return (unsigned long) clc;
51}
52
53/**
54 * clockevents_set_mode - set the operating mode of a clock event device
55 * @dev: device to modify
56 * @mode: new mode
57 *
58 * Must be called with interrupts disabled!
59 */
60void clockevents_set_mode(struct clock_event_device *dev,
61 enum clock_event_mode mode)
62{
63 if (dev->mode != mode) {
64 dev->set_mode(mode, dev);
65 dev->mode = mode;
66 }
67}
68
69/**
70 * clockevents_program_event - Reprogram the clock event device.
71 * @expires: absolute expiry time (monotonic clock)
72 *
73 * Returns 0 on success, -ETIME when the event is in the past.
74 */
75int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
76 ktime_t now)
77{
78 unsigned long long clc;
79 int64_t delta;
80
81 delta = ktime_to_ns(ktime_sub(expires, now));
82
83 if (delta <= 0)
84 return -ETIME;
85
86 dev->next_event = expires;
87
88 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
89 return 0;
90
91 if (delta > dev->max_delta_ns)
92 delta = dev->max_delta_ns;
93 if (delta < dev->min_delta_ns)
94 delta = dev->min_delta_ns;
95
96 clc = delta * dev->mult;
97 clc >>= dev->shift;
98
99 return dev->set_next_event((unsigned long) clc, dev);
100}
101
102/**
103 * clockevents_register_notifier - register a clock events change listener
104 */
105int clockevents_register_notifier(struct notifier_block *nb)
106{
107 int ret;
108
109 spin_lock(&clockevents_lock);
110 ret = raw_notifier_chain_register(&clockevents_chain, nb);
111 spin_unlock(&clockevents_lock);
112
113 return ret;
114}
115
116/**
117 * clockevents_unregister_notifier - unregister a clock events change listener
118 */
119void clockevents_unregister_notifier(struct notifier_block *nb)
120{
121 spin_lock(&clockevents_lock);
122 raw_notifier_chain_unregister(&clockevents_chain, nb);
123 spin_unlock(&clockevents_lock);
124}
125
126/*
127 * Notify about a clock event change. Called with clockevents_lock
128 * held.
129 */
130static void clockevents_do_notify(unsigned long reason, void *dev)
131{
132 raw_notifier_call_chain(&clockevents_chain, reason, dev);
133}
134
135/*
136 * Called after a notify add to make devices available which were
137 * released from the notifier call.
138 */
139static void clockevents_notify_released(void)
140{
141 struct clock_event_device *dev;
142
143 while (!list_empty(&clockevents_released)) {
144 dev = list_entry(clockevents_released.next,
145 struct clock_event_device, list);
146 list_del(&dev->list);
147 list_add(&dev->list, &clockevent_devices);
148 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
149 }
150}
151
152/**
153 * clockevents_register_device - register a clock event device
154 * @dev: device to register
155 */
156void clockevents_register_device(struct clock_event_device *dev)
157{
158 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
159
160 spin_lock(&clockevents_lock);
161
162 list_add(&dev->list, &clockevent_devices);
163 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
164 clockevents_notify_released();
165
166 spin_unlock(&clockevents_lock);
167}
168
169/*
170 * Noop handler when we shut down an event device
171 */
172static void clockevents_handle_noop(struct clock_event_device *dev)
173{
174}
175
176/**
177 * clockevents_exchange_device - release and request clock devices
178 * @old: device to release (can be NULL)
179 * @new: device to request (can be NULL)
180 *
181 * Called from the notifier chain. clockevents_lock is held already
182 */
183void clockevents_exchange_device(struct clock_event_device *old,
184 struct clock_event_device *new)
185{
186 unsigned long flags;
187
188 local_irq_save(flags);
189 /*
190 * Caller releases a clock event device. We queue it into the
191 * released list and do a notify add later.
192 */
193 if (old) {
194 old->event_handler = clockevents_handle_noop;
195 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
196 list_del(&old->list);
197 list_add(&old->list, &clockevents_released);
198 }
199
200 if (new) {
201 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
202 clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
203 }
204 local_irq_restore(flags);
205}
206
207/**
208 * clockevents_request_device
209 */
210struct clock_event_device *clockevents_request_device(unsigned int features,
211 cpumask_t cpumask)
212{
213 struct clock_event_device *cur, *dev = NULL;
214 struct list_head *tmp;
215
216 spin_lock(&clockevents_lock);
217
218 list_for_each(tmp, &clockevent_devices) {
219 cur = list_entry(tmp, struct clock_event_device, list);
220
221 if ((cur->features & features) == features &&
222 cpus_equal(cpumask, cur->cpumask)) {
223 if (!dev || dev->rating < cur->rating)
224 dev = cur;
225 }
226 }
227
228 clockevents_exchange_device(NULL, dev);
229
230 spin_unlock(&clockevents_lock);
231
232 return dev;
233}
234
235/**
236 * clockevents_release_device
237 */
238void clockevents_release_device(struct clock_event_device *dev)
239{
240 spin_lock(&clockevents_lock);
241
242 clockevents_exchange_device(dev, NULL);
243 clockevents_notify_released();
244
245 spin_unlock(&clockevents_lock);
246}
247
248/**
249 * clockevents_notify - notification about relevant events
250 */
251void clockevents_notify(unsigned long reason, void *arg)
252{
253 spin_lock(&clockevents_lock);
254 clockevents_do_notify(reason, arg);
255
256 switch (reason) {
257 case CLOCK_EVT_NOTIFY_CPU_DEAD:
258 /*
259 * Unregister the clock event devices which were
260 * released from the users in the notify chain.
261 */
262 while (!list_empty(&clockevents_released)) {
263 struct clock_event_device *dev;
264
265 dev = list_entry(clockevents_released.next,
266 struct clock_event_device, list);
267 list_del(&dev->list);
268 }
269 break;
270 default:
271 break;
272 }
273 spin_unlock(&clockevents_lock);
274}
275EXPORT_SYMBOL_GPL(clockevents_notify);
276
277#ifdef CONFIG_SYSFS
278
279/**
280 * clockevents_show_registered - sysfs interface for listing clockevents
281 * @dev: unused
282 * @buf: char buffer to be filled with clock events list
283 *
284 * Provides sysfs interface for listing registered clock event devices
285 */
286static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
287{
288 struct list_head *tmp;
289 char *p = buf;
290 int cpu;
291
292 spin_lock(&clockevents_lock);
293
294 list_for_each(tmp, &clockevent_devices) {
295 struct clock_event_device *ce;
296
297 ce = list_entry(tmp, struct clock_event_device, list);
298 p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
299 ce->features, ce->mode);
300 p += sprintf(p, " C:");
301 if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
302 for_each_cpu_mask(cpu, ce->cpumask)
303 p += sprintf(p, " %d", cpu);
304 } else {
305 /*
306 * FIXME: Add the cpu which is handling this sucker
307 */
308 }
309 p += sprintf(p, "\n");
310 }
311
312 spin_unlock(&clockevents_lock);
313
314 return p - buf;
315}
316
317/*
318 * Sysfs setup bits:
319 */
320static SYSDEV_ATTR(registered, 0600,
321 clockevents_show_registered, NULL);
322
323static struct sysdev_class clockevents_sysclass = {
324 set_kset_name("clockevents"),
325};
326
327static struct sys_device clockevents_sys_device = {
328 .id = 0,
329 .cls = &clockevents_sysclass,
330};
331
332static int __init clockevents_sysfs_init(void)
333{
334 int error = sysdev_class_register(&clockevents_sysclass);
335
336 if (!error)
337 error = sysdev_register(&clockevents_sys_device);
338 if (!error)
339 error = sysdev_create_file(
340 &clockevents_sys_device,
341 &attr_registered);
342 return error;
343}
344device_initcall(clockevents_sysfs_init);
345#endif
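
clockevent_delta2ns() and clockevents_program_event() above convert between nanoseconds and device ticks with a mult/shift fixed-point scale instead of a division in the hot path. A userspace arithmetic check of that scheme; the 1 MHz device and the shift of 32 are assumed purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define SHIFT 32

int main(void)
{
	/* ns -> ticks scale: ticks = (ns * mult) >> shift, 1 tick per us */
	uint64_t mult = (1ULL << SHIFT) / 1000;

	/* clockevents_program_event(): a 1 ms delta in device ticks */
	uint64_t delta_ns = 1000000;
	uint64_t clc = (delta_ns * mult) >> SHIFT;
	printf("1 ms -> %llu ticks\n", (unsigned long long)clc);

	/* clockevent_delta2ns(): a latch value back to nanoseconds */
	uint64_t latch = 999;
	uint64_t ns = (latch << SHIFT) / mult;
	printf("latch %llu -> %llu ns\n",
	       (unsigned long long)latch, (unsigned long long)ns);
	return 0;
}

The truncated mult makes the round trip come out a tick short (999 instead of 1000), which is one reason the kernel code clamps delta2ns results and bounds the programmed delta between min_delta_ns and max_delta_ns.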
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index d9ef176c4e09..193a0793af95 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
32#include <linux/tick.h>
32 33
33/* XXX - Would like a better way for initializing curr_clocksource */ 34/* XXX - Would like a better way for initializing curr_clocksource */
34extern struct clocksource clocksource_jiffies; 35extern struct clocksource clocksource_jiffies;
@@ -48,6 +49,7 @@ extern struct clocksource clocksource_jiffies;
48 */ 49 */
49static struct clocksource *curr_clocksource = &clocksource_jiffies; 50static struct clocksource *curr_clocksource = &clocksource_jiffies;
50static struct clocksource *next_clocksource; 51static struct clocksource *next_clocksource;
52static struct clocksource *clocksource_override;
51static LIST_HEAD(clocksource_list); 53static LIST_HEAD(clocksource_list);
52static DEFINE_SPINLOCK(clocksource_lock); 54static DEFINE_SPINLOCK(clocksource_lock);
53static char override_name[32]; 55static char override_name[32];
@@ -62,9 +64,123 @@ static int __init clocksource_done_booting(void)
62 finished_booting = 1; 64 finished_booting = 1;
63 return 0; 65 return 0;
64} 66}
65
66late_initcall(clocksource_done_booting); 67late_initcall(clocksource_done_booting);
67 68
69#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
70static LIST_HEAD(watchdog_list);
71static struct clocksource *watchdog;
72static struct timer_list watchdog_timer;
73static DEFINE_SPINLOCK(watchdog_lock);
74static cycle_t watchdog_last;
75/*
76 * Interval: 0.5sec, threshold: 0.0625s
77 */
78#define WATCHDOG_INTERVAL (HZ >> 1)
79#define WATCHDOG_TRESHOLD (NSEC_PER_SEC >> 4)
80
81static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
82{
83 if (delta > -WATCHDOG_TRESHOLD && delta < WATCHDOG_TRESHOLD)
84 return;
85
86 printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
87 cs->name, delta);
88 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
89 clocksource_change_rating(cs, 0);
90 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
91 list_del(&cs->wd_list);
92}
93
94static void clocksource_watchdog(unsigned long data)
95{
96 struct clocksource *cs, *tmp;
97 cycle_t csnow, wdnow;
98 int64_t wd_nsec, cs_nsec;
99
100 spin_lock(&watchdog_lock);
101
102 wdnow = watchdog->read();
103 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
104 watchdog_last = wdnow;
105
106 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
107 csnow = cs->read();
108 /* Initialized ? */
109 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
110 if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
111 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
112 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
113 /*
114 * We just marked the clocksource as
115 * highres-capable, notify the rest of the
116 * system as well so that we transition
117 * into high-res mode:
118 */
119 tick_clock_notify();
120 }
121 cs->flags |= CLOCK_SOURCE_WATCHDOG;
122 cs->wd_last = csnow;
123 } else {
124 cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
125 cs->wd_last = csnow;
126 /* Check the delta. Might remove from the list ! */
127 clocksource_ratewd(cs, cs_nsec - wd_nsec);
128 }
129 }
130
131 if (!list_empty(&watchdog_list)) {
132 __mod_timer(&watchdog_timer,
133 watchdog_timer.expires + WATCHDOG_INTERVAL);
134 }
135 spin_unlock(&watchdog_lock);
136}
137static void clocksource_check_watchdog(struct clocksource *cs)
138{
139 struct clocksource *cse;
140 unsigned long flags;
141
142 spin_lock_irqsave(&watchdog_lock, flags);
143 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
144 int started = !list_empty(&watchdog_list);
145
146 list_add(&cs->wd_list, &watchdog_list);
147 if (!started && watchdog) {
148 watchdog_last = watchdog->read();
149 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
150 add_timer(&watchdog_timer);
151 }
152 } else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
153 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
154
155 if (!watchdog || cs->rating > watchdog->rating) {
156 if (watchdog)
157 del_timer(&watchdog_timer);
158 watchdog = cs;
159 init_timer(&watchdog_timer);
160 watchdog_timer.function = clocksource_watchdog;
161
162 /* Reset watchdog cycles */
163 list_for_each_entry(cse, &watchdog_list, wd_list)
164 cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
165 /* Start if list is not empty */
166 if (!list_empty(&watchdog_list)) {
167 watchdog_last = watchdog->read();
168 watchdog_timer.expires =
169 jiffies + WATCHDOG_INTERVAL;
170 add_timer(&watchdog_timer);
171 }
172 }
173 }
174 spin_unlock_irqrestore(&watchdog_lock, flags);
175}
176#else
177static void clocksource_check_watchdog(struct clocksource *cs)
178{
179 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
180 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
181}
182#endif
183
68/** 184/**
69 * clocksource_get_next - Returns the selected clocksource 185 * clocksource_get_next - Returns the selected clocksource
70 * 186 *
@@ -84,60 +200,54 @@ struct clocksource *clocksource_get_next(void)
84} 200}
85 201
86/** 202/**
87 * select_clocksource - Finds the best registered clocksource. 203 * select_clocksource - Selects the best registered clocksource.
88 * 204 *
89 * Private function. Must hold clocksource_lock when called. 205 * Private function. Must hold clocksource_lock when called.
90 * 206 *
91 * Looks through the list of registered clocksources, returning 207 * Select the clocksource with the best rating, or the clocksource,
92 * the one with the highest rating value. If there is a clocksource 208 * which is selected by userspace override.
93 * name that matches the override string, it returns that clocksource.
94 */ 209 */
95static struct clocksource *select_clocksource(void) 210static struct clocksource *select_clocksource(void)
96{ 211{
97 struct clocksource *best = NULL; 212 struct clocksource *next;
98 struct list_head *tmp;
99 213
100 list_for_each(tmp, &clocksource_list) { 214 if (list_empty(&clocksource_list))
101 struct clocksource *src; 215 return NULL;
102 216
103 src = list_entry(tmp, struct clocksource, list); 217 if (clocksource_override)
104 if (!best) 218 next = clocksource_override;
105 best = src; 219 else
106 220 next = list_entry(clocksource_list.next, struct clocksource,
107 /* check for override: */ 221 list);
108 if (strlen(src->name) == strlen(override_name) && 222
109 !strcmp(src->name, override_name)) { 223 if (next == curr_clocksource)
110 best = src; 224 return NULL;
111 break;
112 }
113 /* pick the highest rating: */
114 if (src->rating > best->rating)
115 best = src;
116 }
117 225
118 return best; 226 return next;
119} 227}
120 228
121/** 229/*
122 * is_registered_source - Checks if clocksource is registered 230 * Enqueue the clocksource sorted by rating
123 * @c: pointer to a clocksource
124 *
125 * Private helper function. Must hold clocksource_lock when called.
126 *
127 * Returns one if the clocksource is already registered, zero otherwise.
128 */ 231 */
129static int is_registered_source(struct clocksource *c) 232static int clocksource_enqueue(struct clocksource *c)
130{ 233{
131 int len = strlen(c->name); 234 struct list_head *tmp, *entry = &clocksource_list;
132 struct list_head *tmp;
133 235
134 list_for_each(tmp, &clocksource_list) { 236 list_for_each(tmp, &clocksource_list) {
135 struct clocksource *src; 237 struct clocksource *cs;
136 238
137 src = list_entry(tmp, struct clocksource, list); 239 cs = list_entry(tmp, struct clocksource, list);
138 if (strlen(src->name) == len && !strcmp(src->name, c->name)) 240 if (cs == c)
139 return 1; 241 return -EBUSY;
242 /* Keep track of where to insert */
243 if (cs->rating >= c->rating)
244 entry = tmp;
140 } 245 }
246 list_add(&c->list, entry);
247
248 if (strlen(c->name) == strlen(override_name) &&
249 !strcmp(c->name, override_name))
250 clocksource_override = c;
141 251
142 return 0; 252 return 0;
143} 253}
@@ -150,42 +260,35 @@ static int is_registered_source(struct clocksource *c)
150 */ 260 */
151int clocksource_register(struct clocksource *c) 261int clocksource_register(struct clocksource *c)
152{ 262{
153 int ret = 0;
154 unsigned long flags; 263 unsigned long flags;
264 int ret;
155 265
156 spin_lock_irqsave(&clocksource_lock, flags); 266 spin_lock_irqsave(&clocksource_lock, flags);
157 /* check if clocksource is already registered */ 267 ret = clocksource_enqueue(c);
158 if (is_registered_source(c)) { 268 if (!ret)
159 printk("register_clocksource: Cannot register %s. "
160 "Already registered!", c->name);
161 ret = -EBUSY;
162 } else {
163 /* register it */
164 list_add(&c->list, &clocksource_list);
165 /* scan the registered clocksources, and pick the best one */
166 next_clocksource = select_clocksource(); 269 next_clocksource = select_clocksource();
167 }
168 spin_unlock_irqrestore(&clocksource_lock, flags); 270 spin_unlock_irqrestore(&clocksource_lock, flags);
271 if (!ret)
272 clocksource_check_watchdog(c);
169 return ret; 273 return ret;
170} 274}
171EXPORT_SYMBOL(clocksource_register); 275EXPORT_SYMBOL(clocksource_register);
172 276
173/** 277/**
174 * clocksource_reselect - Rescan list for next clocksource 278 * clocksource_change_rating - Change the rating of a registered clocksource
175 * 279 *
176 * A quick helper function to be used if a clocksource changes its
177 * rating. Forces the clocksource list to be re-scanned for the best
178 * clocksource.
179 */ 280 */
180void clocksource_reselect(void) 281void clocksource_change_rating(struct clocksource *cs, int rating)
181{ 282{
182 unsigned long flags; 283 unsigned long flags;
183 284
184 spin_lock_irqsave(&clocksource_lock, flags); 285 spin_lock_irqsave(&clocksource_lock, flags);
286 list_del(&cs->list);
287 cs->rating = rating;
288 clocksource_enqueue(cs);
185 next_clocksource = select_clocksource(); 289 next_clocksource = select_clocksource();
186 spin_unlock_irqrestore(&clocksource_lock, flags); 290 spin_unlock_irqrestore(&clocksource_lock, flags);
187} 291}
188EXPORT_SYMBOL(clocksource_reselect);
189 292
190#ifdef CONFIG_SYSFS 293#ifdef CONFIG_SYSFS
191/** 294/**
@@ -221,7 +324,11 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
221static ssize_t sysfs_override_clocksource(struct sys_device *dev, 324static ssize_t sysfs_override_clocksource(struct sys_device *dev,
222 const char *buf, size_t count) 325 const char *buf, size_t count)
223{ 326{
327 struct clocksource *ovr = NULL;
328 struct list_head *tmp;
224 size_t ret = count; 329 size_t ret = count;
330 int len;
331
225 /* strings from sysfs write are not 0 terminated! */ 332 /* strings from sysfs write are not 0 terminated! */
226 if (count >= sizeof(override_name)) 333 if (count >= sizeof(override_name))
227 return -EINVAL; 334 return -EINVAL;
@@ -229,17 +336,32 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
229 /* strip off \n: */ 336 /* strip off \n: */
230 if (buf[count-1] == '\n') 337 if (buf[count-1] == '\n')
231 count--; 338 count--;
232 if (count < 1)
233 return -EINVAL;
234 339
235 spin_lock_irq(&clocksource_lock); 340 spin_lock_irq(&clocksource_lock);
236 341
237 /* copy the name given: */ 342 if (count > 0)
238 memcpy(override_name, buf, count); 343 memcpy(override_name, buf, count);
239 override_name[count] = 0; 344 override_name[count] = 0;
240 345
241 /* try to select it: */ 346 len = strlen(override_name);
242 next_clocksource = select_clocksource(); 347 if (len) {
348 ovr = clocksource_override;
349 /* try to select it: */
350 list_for_each(tmp, &clocksource_list) {
351 struct clocksource *cs;
352
353 cs = list_entry(tmp, struct clocksource, list);
354 if (strlen(cs->name) == len &&
355 !strcmp(cs->name, override_name))
356 ovr = cs;
357 }
358 }
359
360 /* Reselect when the override name has changed */
361 if (ovr != clocksource_override) {
362 clocksource_override = ovr;
363 next_clocksource = select_clocksource();
364 }
243 365
244 spin_unlock_irq(&clocksource_lock); 366 spin_unlock_irq(&clocksource_lock);
245 367
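
The watchdog above measures the same half-second interval with two clocksources via the usual (cycles * mult) >> shift conversion and discards the suspect one when they disagree by more than 62.5 ms. A userspace sketch of that comparison; the ACPI PM timer constants are approximate and the drifting reading is invented:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000LL
#define TRESHOLD	(NSEC_PER_SEC >> 4)	/* 62.5 ms, as in the patch */

static int64_t cyc2ns(uint64_t cycles, uint64_t mult, unsigned int shift)
{
	return (int64_t)((cycles * mult) >> shift);
}

int main(void)
{
	/* 0.5 s on a 3.579545 MHz PM timer; mult ~ hz2mult(3579545, 22) */
	int64_t wd_nsec = cyc2ns(1789772, 1171742000, 22);

	/* A TSC-like clocksource that lost 80 ms in a C-state (invented) */
	int64_t cs_nsec = wd_nsec - 80000000;

	int64_t delta = cs_nsec - wd_nsec;
	if (delta < -TRESHOLD || delta > TRESHOLD)
		printf("unstable (delta = %lld ns)\n", (long long)delta);
	return 0;
}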
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a99b2a6e6a07..3be8da8fed7e 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -62,7 +62,6 @@ struct clocksource clocksource_jiffies = {
62 .mask = 0xffffffff, /*32bits*/ 62 .mask = 0xffffffff, /*32bits*/
63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ 63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
64 .shift = JIFFIES_SHIFT, 64 .shift = JIFFIES_SHIFT,
65 .is_continuous = 0, /* tick based, not free running */
66}; 65};
67 66
68static int __init init_jiffies_clocksource(void) 67static int __init init_jiffies_clocksource(void)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 3afeaa3a73f9..eb12509e00bd 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -24,7 +24,7 @@ static u64 tick_length, tick_length_base;
24 24
25#define MAX_TICKADJ 500 /* microsecs */ 25#define MAX_TICKADJ 500 /* microsecs */
26#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ 26#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
27 TICK_LENGTH_SHIFT) / HZ) 27 TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
28 28
29/* 29/*
30 * phase-lock loop variables 30 * phase-lock loop variables
@@ -46,13 +46,17 @@ long time_adjust;
46 46
47static void ntp_update_frequency(void) 47static void ntp_update_frequency(void)
48{ 48{
49 tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; 49 u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
50 tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; 50 << TICK_LENGTH_SHIFT;
51 tick_length_base += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); 51 second_length += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT;
52 second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
52 53
53 do_div(tick_length_base, HZ); 54 tick_length_base = second_length;
54 55
55 tick_nsec = tick_length_base >> TICK_LENGTH_SHIFT; 56 do_div(second_length, HZ);
57 tick_nsec = second_length >> TICK_LENGTH_SHIFT;
58
59 do_div(tick_length_base, NTP_INTERVAL_FREQ);
56} 60}
57 61
58/** 62/**
@@ -162,7 +166,7 @@ void second_overflow(void)
162 tick_length -= MAX_TICKADJ_SCALED; 166 tick_length -= MAX_TICKADJ_SCALED;
163 } else { 167 } else {
164 tick_length += (s64)(time_adjust * NSEC_PER_USEC / 168 tick_length += (s64)(time_adjust * NSEC_PER_USEC /
165 HZ) << TICK_LENGTH_SHIFT; 169 NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
166 time_adjust = 0; 170 time_adjust = 0;
167 } 171 }
168 } 172 }
@@ -239,7 +243,8 @@ int do_adjtimex(struct timex *txc)
239 result = -EINVAL; 243 result = -EINVAL;
240 goto leave; 244 goto leave;
241 } 245 }
242 time_freq = ((s64)txc->freq * NSEC_PER_USEC) >> (SHIFT_USEC - SHIFT_NSEC); 246 time_freq = ((s64)txc->freq * NSEC_PER_USEC)
247 >> (SHIFT_USEC - SHIFT_NSEC);
243 } 248 }
244 249
245 if (txc->modes & ADJ_MAXERROR) { 250 if (txc->modes & ADJ_MAXERROR) {
@@ -309,7 +314,8 @@ int do_adjtimex(struct timex *txc)
309 freq_adj += time_freq; 314 freq_adj += time_freq;
310 freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); 315 freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
311 time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); 316 time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
312 time_offset = (time_offset / HZ) << SHIFT_UPDATE; 317 time_offset = (time_offset / NTP_INTERVAL_FREQ)
318 << SHIFT_UPDATE;
313 } /* STA_PLL */ 319 } /* STA_PLL */
314 } /* txc->modes & ADJ_OFFSET */ 320 } /* txc->modes & ADJ_OFFSET */
315 if (txc->modes & ADJ_TICK) 321 if (txc->modes & ADJ_TICK)
@@ -324,8 +330,10 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
324 if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) 330 if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
325 txc->offset = save_adjust; 331 txc->offset = save_adjust;
326 else 332 else
327 txc->offset = shift_right(time_offset, SHIFT_UPDATE) * HZ / 1000; 333 txc->offset = shift_right(time_offset, SHIFT_UPDATE)
328 txc->freq = (time_freq / NSEC_PER_USEC) << (SHIFT_USEC - SHIFT_NSEC); 334 * NTP_INTERVAL_FREQ / 1000;
335 txc->freq = (time_freq / NSEC_PER_USEC)
336 << (SHIFT_USEC - SHIFT_NSEC);
329 txc->maxerror = time_maxerror; 337 txc->maxerror = time_maxerror;
330 txc->esterror = time_esterror; 338 txc->esterror = time_esterror;
331 txc->status = time_status; 339 txc->status = time_status;
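
ntp_update_frequency() now computes the length of a full second in TICK_LENGTH_SHIFT fixed point and derives both tick_nsec (per HZ tick) and tick_length_base (per NTP interval) from that one value, which is what allows NTP_INTERVAL_FREQ to differ from HZ. The arithmetic with assumed values (HZ = NTP_INTERVAL_FREQ = 1000, USER_HZ = 100, tick_usec = 10000, TICK_LENGTH_SHIFT = 32, all adjustments zero):

#include <stdio.h>
#include <stdint.h>

#define TICK_LENGTH_SHIFT 32

int main(void)
{
	uint64_t tick_usec = 10000, user_hz = 100, hz = 1000;

	/* One second in nanoseconds, as a 32.32 fixed-point value */
	uint64_t second_length =
		(tick_usec * 1000 * user_hz) << TICK_LENGTH_SHIFT;

	uint64_t tick_nsec = (second_length / hz) >> TICK_LENGTH_SHIFT;
	uint64_t tick_length_base = second_length / hz;	/* NTP_INTERVAL_FREQ == HZ here */

	/* 1000000 ns per tick; the base keeps the full fractional part */
	printf("tick_nsec = %llu\n", (unsigned long long)tick_nsec);
	printf("tick_length_base = %llu (= 1000000 << 32)\n",
	       (unsigned long long)tick_length_base);
	return 0;
}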
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
new file mode 100644
index 000000000000..12b3efeb9f6f
--- /dev/null
+++ b/kernel/time/tick-broadcast.c
@@ -0,0 +1,480 @@
1/*
2 * linux/kernel/time/tick-broadcast.c
3 *
4 * This file contains functions which emulate a local clock-event
5 * device via a broadcast event source.
6 *
7 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10 *
11 * This code is licensed under the GPL version 2. For details see
12 * kernel-base/COPYING.
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/irq.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/tick.h>
22
23#include "tick-internal.h"
24
25/*
26 * Broadcast support for broken x86 hardware, where the local apic
27 * timer stops in C3 state.
28 */
29
30struct tick_device tick_broadcast_device;
31static cpumask_t tick_broadcast_mask;
32static DEFINE_SPINLOCK(tick_broadcast_lock);
33
34/*
35 * Debugging: see timer_list.c
36 */
37struct tick_device *tick_get_broadcast_device(void)
38{
39 return &tick_broadcast_device;
40}
41
42cpumask_t *tick_get_broadcast_mask(void)
43{
44 return &tick_broadcast_mask;
45}
46
47/*
48 * Start the device in periodic mode
49 */
50static void tick_broadcast_start_periodic(struct clock_event_device *bc)
51{
52 if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
53 tick_setup_periodic(bc, 1);
54}
55
56/*
57 * Check if the device can be utilized as a broadcast device:
58 */
59int tick_check_broadcast_device(struct clock_event_device *dev)
60{
61 if (tick_broadcast_device.evtdev ||
62 (dev->features & CLOCK_EVT_FEAT_C3STOP))
63 return 0;
64
65 clockevents_exchange_device(NULL, dev);
66 tick_broadcast_device.evtdev = dev;
67 if (!cpus_empty(tick_broadcast_mask))
68 tick_broadcast_start_periodic(dev);
69 return 1;
70}
71
72/*
73 * Check if the device is the broadcast device
74 */
75int tick_is_broadcast_device(struct clock_event_device *dev)
76{
77 return (dev && tick_broadcast_device.evtdev == dev);
78}
79
80/*
81 * Check if the device is dysfunctional and a placeholder, which
82 * needs to be handled by the broadcast device.
83 */
84int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
85{
86 unsigned long flags;
87 int ret = 0;
88
89 spin_lock_irqsave(&tick_broadcast_lock, flags);
90
91 /*
92 * Devices might be registered with both periodic and oneshot
93 * mode disabled. This signals that the device needs to be
94 * operated from the broadcast device and is a placeholder for
95 * the cpu local device.
96 */
97 if (!tick_device_is_functional(dev)) {
98 dev->event_handler = tick_handle_periodic;
99 cpu_set(cpu, tick_broadcast_mask);
100 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
101 ret = 1;
102 }
103
104 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
105 return ret;
106}
107
108/*
109 * Broadcast the event to the cpus, which are set in the mask
110 */
111int tick_do_broadcast(cpumask_t mask)
112{
113 int ret = 0, cpu = smp_processor_id();
114 struct tick_device *td;
115
116 /*
117 * Check if the current cpu is in the mask
118 */
119 if (cpu_isset(cpu, mask)) {
120 cpu_clear(cpu, mask);
121 td = &per_cpu(tick_cpu_device, cpu);
122 td->evtdev->event_handler(td->evtdev);
123 ret = 1;
124 }
125
126 if (!cpus_empty(mask)) {
127 /*
128 * It might be necessary to actually check whether the devices
128 * have different broadcast functions. For now, just use that
129 * of the first device. This works as long as we have this
131 * misfeature only on x86 (lapic)
132 */
133 cpu = first_cpu(mask);
134 td = &per_cpu(tick_cpu_device, cpu);
135 td->evtdev->broadcast(mask);
136 ret = 1;
137 }
138 return ret;
139}
140
141/*
142 * Periodic broadcast:
143 * - invoke the broadcast handlers
144 */
145static void tick_do_periodic_broadcast(void)
146{
147 cpumask_t mask;
148
149 spin_lock(&tick_broadcast_lock);
150
151 cpus_and(mask, cpu_online_map, tick_broadcast_mask);
152 tick_do_broadcast(mask);
153
154 spin_unlock(&tick_broadcast_lock);
155}
156
157/*
158 * Event handler for periodic broadcast ticks
159 */
160static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
161{
162 dev->next_event.tv64 = KTIME_MAX;
163
164 tick_do_periodic_broadcast();
165
166 /*
167 * The device is in periodic mode. No reprogramming necessary:
168 */
169 if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
170 return;
171
172 /*
173 * Setup the next period for devices, which do not have
174 * periodic mode:
175 */
176 for (;;) {
177 ktime_t next = ktime_add(dev->next_event, tick_period);
178
179 if (!clockevents_program_event(dev, next, ktime_get()))
180 return;
181 tick_do_periodic_broadcast();
182 }
183}
184
185/*
186 * Powerstate information: The system enters/leaves a state, where
187 * affected devices might stop
188 */
189static void tick_do_broadcast_on_off(void *why)
190{
191 struct clock_event_device *bc, *dev;
192 struct tick_device *td;
193 unsigned long flags, *reason = why;
194 int cpu;
195
196 spin_lock_irqsave(&tick_broadcast_lock, flags);
197
198 cpu = smp_processor_id();
199 td = &per_cpu(tick_cpu_device, cpu);
200 dev = td->evtdev;
201 bc = tick_broadcast_device.evtdev;
202
203 /*
204 * Is the device in broadcast mode forever or is it not
205 * affected by the powerstate ?
206 */
207 if (!dev || !tick_device_is_functional(dev) ||
208 !(dev->features & CLOCK_EVT_FEAT_C3STOP))
209 goto out;
210
211 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
212 if (!cpu_isset(cpu, tick_broadcast_mask)) {
213 cpu_set(cpu, tick_broadcast_mask);
214 if (td->mode == TICKDEV_MODE_PERIODIC)
215 clockevents_set_mode(dev,
216 CLOCK_EVT_MODE_SHUTDOWN);
217 }
218 } else {
219 if (cpu_isset(cpu, tick_broadcast_mask)) {
220 cpu_clear(cpu, tick_broadcast_mask);
221 if (td->mode == TICKDEV_MODE_PERIODIC)
222 tick_setup_periodic(dev, 0);
223 }
224 }
225
226 if (cpus_empty(tick_broadcast_mask))
227 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
228 else {
229 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
230 tick_broadcast_start_periodic(bc);
231 else
232 tick_broadcast_setup_oneshot(bc);
233 }
234out:
235 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
236}
237
238/*
239 * Powerstate information: The system enters/leaves a state, where
240 * affected devices might stop.
241 */
242void tick_broadcast_on_off(unsigned long reason, int *oncpu)
243{
244 int cpu = get_cpu();
245
246 if (cpu == *oncpu)
247 tick_do_broadcast_on_off(&reason);
248 else
249 smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
250 &reason, 1, 1);
251 put_cpu();
252}
253
254/*
255 * Set the periodic handler depending on broadcast on/off
256 */
257void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
258{
259 if (!broadcast)
260 dev->event_handler = tick_handle_periodic;
261 else
262 dev->event_handler = tick_handle_periodic_broadcast;
263}
264
265/*
266 * Remove a CPU from broadcasting
267 */
268void tick_shutdown_broadcast(unsigned int *cpup)
269{
270 struct clock_event_device *bc;
271 unsigned long flags;
272 unsigned int cpu = *cpup;
273
274 spin_lock_irqsave(&tick_broadcast_lock, flags);
275
276 bc = tick_broadcast_device.evtdev;
277 cpu_clear(cpu, tick_broadcast_mask);
278
279 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
280 if (bc && cpus_empty(tick_broadcast_mask))
281 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
282 }
283
284 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
285}
286
287#ifdef CONFIG_TICK_ONESHOT
288
289static cpumask_t tick_broadcast_oneshot_mask;
290
291/*
292 * Debugging: see timer_list.c
293 */
294cpumask_t *tick_get_broadcast_oneshot_mask(void)
295{
296 return &tick_broadcast_oneshot_mask;
297}
298
299static int tick_broadcast_set_event(ktime_t expires, int force)
300{
301 struct clock_event_device *bc = tick_broadcast_device.evtdev;
302 ktime_t now = ktime_get();
303 int res;
304
305 for (;;) {
306 res = clockevents_program_event(bc, expires, now);
307 if (!res || !force)
308 return res;
309 now = ktime_get();
310 expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
311 }
312}
313
314/*
315 * Reprogram the broadcast device:
316 *
317 * Called with tick_broadcast_lock held and interrupts disabled.
318 */
319static int tick_broadcast_reprogram(void)
320{
321 ktime_t expires = { .tv64 = KTIME_MAX };
322 struct tick_device *td;
323 int cpu;
324
325 /*
326 * Find the event which expires next:
327 */
328 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
329 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
330 td = &per_cpu(tick_cpu_device, cpu);
331 if (td->evtdev->next_event.tv64 < expires.tv64)
332 expires = td->evtdev->next_event;
333 }
334
335 if (expires.tv64 == KTIME_MAX)
336 return 0;
337
338 return tick_broadcast_set_event(expires, 0);
339}
340
341/*
342 * Handle oneshot mode broadcasting
343 */
344static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
345{
346 struct tick_device *td;
347 cpumask_t mask;
348 ktime_t now;
349 int cpu;
350
351 spin_lock(&tick_broadcast_lock);
352again:
353 dev->next_event.tv64 = KTIME_MAX;
354 mask = CPU_MASK_NONE;
355 now = ktime_get();
356 /* Find all expired events */
357 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
358 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
359 td = &per_cpu(tick_cpu_device, cpu);
360 if (td->evtdev->next_event.tv64 <= now.tv64)
361 cpu_set(cpu, mask);
362 }
363
364 /*
365 * Wakeup the cpus which have an expired event. The broadcast
366 * device is reprogrammed in the return from idle code.
367 */
368 if (!tick_do_broadcast(mask)) {
369 /*
370 * The global event did not expire any CPU local
371 * events. This happens in dyntick mode, as the
372 * maximum PIT delta is quite small.
373 */
374 if (tick_broadcast_reprogram())
375 goto again;
376 }
377 spin_unlock(&tick_broadcast_lock);
378}
379
380/*
381 * Powerstate information: The system enters/leaves a state where
382 * affected devices might stop.
383 */
384void tick_broadcast_oneshot_control(unsigned long reason)
385{
386 struct clock_event_device *bc, *dev;
387 struct tick_device *td;
388 unsigned long flags;
389 int cpu;
390
391 spin_lock_irqsave(&tick_broadcast_lock, flags);
392
393 /*
394 * Periodic mode does not care about the enter/exit of power
395 * states
396 */
397 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
398 goto out;
399
400 bc = tick_broadcast_device.evtdev;
401 cpu = smp_processor_id();
402 td = &per_cpu(tick_cpu_device, cpu);
403 dev = td->evtdev;
404
405 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
406 goto out;
407
408 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
409 if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
410 cpu_set(cpu, tick_broadcast_oneshot_mask);
411 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
412 if (dev->next_event.tv64 < bc->next_event.tv64)
413 tick_broadcast_set_event(dev->next_event, 1);
414 }
415 } else {
416 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
417 cpu_clear(cpu, tick_broadcast_oneshot_mask);
418 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
419 if (dev->next_event.tv64 != KTIME_MAX)
420 tick_program_event(dev->next_event, 1);
421 }
422 }
423
424out:
425 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
426}
427
428/**
429 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot mode
430 */
431void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
432{
433 if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
434 bc->event_handler = tick_handle_oneshot_broadcast;
435 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
436 bc->next_event.tv64 = KTIME_MAX;
437 }
438}
439
440/*
441 * Select oneshot operating mode for the broadcast device
442 */
443void tick_broadcast_switch_to_oneshot(void)
444{
445 struct clock_event_device *bc;
446 unsigned long flags;
447
448 spin_lock_irqsave(&tick_broadcast_lock, flags);
449
450 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
451 bc = tick_broadcast_device.evtdev;
452 if (bc)
453 tick_broadcast_setup_oneshot(bc);
454 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
455}
456
457
458/*
459 * Remove a dead CPU from broadcasting
460 */
461void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
462{
463 struct clock_event_device *bc;
464 unsigned long flags;
465 unsigned int cpu = *cpup;
466
467 spin_lock_irqsave(&tick_broadcast_lock, flags);
468
469 bc = tick_broadcast_device.evtdev;
470 cpu_clear(cpu, tick_broadcast_oneshot_mask);
471
472 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
473 if (bc && cpus_empty(tick_broadcast_oneshot_mask))
474 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
475 }
476
477 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
478}
479
480#endif
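
The oneshot path above reduces to simple bookkeeping: scan every cpu parked in tick_broadcast_oneshot_mask, wake the ones whose local expiry has passed, and reprogram the broadcast device to the earliest remaining expiry. The stand-alone sketch below models exactly that (an illustration only, not part of the patch: NR_CPUS, next_event[] and broadcast_scan() are made-up stand-ins, and ktime is reduced to a plain signed 64-bit nanosecond value):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS   4
#define KTIME_MAX INT64_MAX

static int64_t next_event[NR_CPUS];	/* per-cpu local expiry, in ns */

static int64_t broadcast_scan(int64_t now)
{
	int64_t earliest = KTIME_MAX;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (next_event[cpu] <= now) {
			/* expired: what tick_do_broadcast() delivers */
			printf("wake cpu %d\n", cpu);
			next_event[cpu] = KTIME_MAX;
		} else if (next_event[cpu] < earliest) {
			/* candidate for tick_broadcast_reprogram() */
			earliest = next_event[cpu];
		}
	}
	return earliest;	/* program the broadcast device to this */
}

int main(void)
{
	next_event[0] = 100;
	next_event[1] = 250;
	next_event[2] = 90;
	next_event[3] = KTIME_MAX;	/* cpu 3 has no pending event */

	printf("reprogram to %lld\n", (long long)broadcast_scan(100));
	return 0;
}
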
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
new file mode 100644
index 000000000000..4500e347f1bb
--- /dev/null
+++ b/kernel/time/tick-common.c
@@ -0,0 +1,346 @@
1/*
2 * linux/kernel/time/tick-common.c
3 *
4 * This file contains the base functions to manage periodic tick
5 * related events.
6 *
7 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10 *
11 * This code is licenced under the GPL version 2. For details see
12 * kernel-base/COPYING.
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/irq.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/tick.h>
22
23#include "tick-internal.h"
24
25/*
26 * Tick devices
27 */
28DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
29/*
30 * Tick next event: keeps track of the tick time
31 */
32ktime_t tick_next_period;
33ktime_t tick_period;
34static int tick_do_timer_cpu = -1;
35DEFINE_SPINLOCK(tick_device_lock);
36
37/*
38 * Debugging: see timer_list.c
39 */
40struct tick_device *tick_get_device(int cpu)
41{
42 return &per_cpu(tick_cpu_device, cpu);
43}
44
45/**
46 * tick_is_oneshot_available - check for a oneshot capable event device
47 */
48int tick_is_oneshot_available(void)
49{
50 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
51
52 return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
53}
54
55/*
56 * Periodic tick
57 */
58static void tick_periodic(int cpu)
59{
60 if (tick_do_timer_cpu == cpu) {
61 write_seqlock(&xtime_lock);
62
63 /* Keep track of the next tick event */
64 tick_next_period = ktime_add(tick_next_period, tick_period);
65
66 do_timer(1);
67 write_sequnlock(&xtime_lock);
68 }
69
70 update_process_times(user_mode(get_irq_regs()));
71 profile_tick(CPU_PROFILING);
72}
73
74/*
75 * Event handler for periodic ticks
76 */
77void tick_handle_periodic(struct clock_event_device *dev)
78{
79 int cpu = smp_processor_id();
80
81 tick_periodic(cpu);
82
83 if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
84 return;
85 /*
86 * Set up the next period for devices which do not have
87 * periodic mode:
88 */
89 for (;;) {
90 ktime_t next = ktime_add(dev->next_event, tick_period);
91
92 if (!clockevents_program_event(dev, next, ktime_get()))
93 return;
94 tick_periodic(cpu);
95 }
96}
97
98/*
99 * Setup the device for a periodic tick
100 */
101void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
102{
103 tick_set_periodic_handler(dev, broadcast);
104
105 /* Broadcast setup? */
106 if (!tick_device_is_functional(dev))
107 return;
108
109 if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
110 clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
111 } else {
112 unsigned long seq;
113 ktime_t next;
114
115 do {
116 seq = read_seqbegin(&xtime_lock);
117 next = tick_next_period;
118 } while (read_seqretry(&xtime_lock, seq));
119
120 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
121
122 for (;;) {
123 if (!clockevents_program_event(dev, next, ktime_get()))
124 return;
125 next = ktime_add(next, tick_period);
126 }
127 }
128}
129
130/*
131 * Setup the tick device
132 */
133static void tick_setup_device(struct tick_device *td,
134 struct clock_event_device *newdev, int cpu,
135 cpumask_t cpumask)
136{
137 ktime_t next_event;
138 void (*handler)(struct clock_event_device *) = NULL;
139
140 /*
141 * First device setup?
142 */
143 if (!td->evtdev) {
144 /*
145 * If no cpu took the do_timer update, assign it to
146 * this cpu:
147 */
148 if (tick_do_timer_cpu == -1) {
149 tick_do_timer_cpu = cpu;
150 tick_next_period = ktime_get();
151 tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
152 }
153
154 /*
155 * Startup in periodic mode first.
156 */
157 td->mode = TICKDEV_MODE_PERIODIC;
158 } else {
159 handler = td->evtdev->event_handler;
160 next_event = td->evtdev->next_event;
161 }
162
163 td->evtdev = newdev;
164
165 /*
166 * When the device is not per cpu, pin the interrupt to the
167 * current cpu:
168 */
169 if (!cpus_equal(newdev->cpumask, cpumask))
170 irq_set_affinity(newdev->irq, cpumask);
171
172 /*
173 * When global broadcasting is active, check if the current
174 * device is registered as a placeholder for broadcast mode.
175 * This allows us to handle this x86 misfeature in a generic
176 * way.
177 */
178 if (tick_device_uses_broadcast(newdev, cpu))
179 return;
180
181 if (td->mode == TICKDEV_MODE_PERIODIC)
182 tick_setup_periodic(newdev, 0);
183 else
184 tick_setup_oneshot(newdev, handler, next_event);
185}
186
187/*
188 * Check if the newly registered device should be used.
189 */
190static int tick_check_new_device(struct clock_event_device *newdev)
191{
192 struct clock_event_device *curdev;
193 struct tick_device *td;
194 int cpu, ret = NOTIFY_OK;
195 unsigned long flags;
196 cpumask_t cpumask;
197
198 spin_lock_irqsave(&tick_device_lock, flags);
199
200 cpu = smp_processor_id();
201 if (!cpu_isset(cpu, newdev->cpumask))
202 goto out;
203
204 td = &per_cpu(tick_cpu_device, cpu);
205 curdev = td->evtdev;
206 cpumask = cpumask_of_cpu(cpu);
207
208 /* cpu local device? */
209 if (!cpus_equal(newdev->cpumask, cpumask)) {
210
211 /*
212 * If the cpu affinity of the device interrupt can not
213 * be set, ignore it.
214 */
215 if (!irq_can_set_affinity(newdev->irq))
216 goto out_bc;
217
218 /*
219 * If we have a cpu local device already, do not replace it
220 * by a non cpu local device
221 */
222 if (curdev && cpus_equal(curdev->cpumask, cpumask))
223 goto out_bc;
224 }
225
226 /*
227 * If we have an active device, then check the rating and the oneshot
228 * feature.
229 */
230 if (curdev) {
231 /*
232 * Prefer oneshot capable devices!
233 */
234 if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
235 !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
236 goto out_bc;
237 /*
238 * Check the rating
239 */
240 if (curdev->rating >= newdev->rating)
241 goto out_bc;
242 }
243
244 /*
245 * Replace the existing device, if any, by the new
246 * device. If the current device is the broadcast device, do
247 * not give it back to the clockevents layer!
248 */
249 if (tick_is_broadcast_device(curdev)) {
250 clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
251 curdev = NULL;
252 }
253 clockevents_exchange_device(curdev, newdev);
254 tick_setup_device(td, newdev, cpu, cpumask);
255 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
256 tick_oneshot_notify();
257
258 spin_unlock_irqrestore(&tick_device_lock, flags);
259 return NOTIFY_STOP;
260
261out_bc:
262 /*
263 * Can the new device be used as a broadcast device?
264 */
265 if (tick_check_broadcast_device(newdev))
266 ret = NOTIFY_STOP;
267out:
268 spin_unlock_irqrestore(&tick_device_lock, flags);
269
270 return ret;
271}
272
273/*
274 * Shutdown an event device on a given cpu:
275 *
276 * This is called on a live CPU, when a CPU is dead. So we cannot
277 * access the hardware device itself.
278 * We just set the mode and remove it from the lists.
279 */
280static void tick_shutdown(unsigned int *cpup)
281{
282 struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
283 struct clock_event_device *dev = td->evtdev;
284 unsigned long flags;
285
286 spin_lock_irqsave(&tick_device_lock, flags);
287 td->mode = TICKDEV_MODE_PERIODIC;
288 if (dev) {
289 /*
290 * Prevent that the clock events layer tries to call
291 * the set mode function!
292 */
293 dev->mode = CLOCK_EVT_MODE_UNUSED;
294 clockevents_exchange_device(dev, NULL);
295 td->evtdev = NULL;
296 }
297 spin_unlock_irqrestore(&tick_device_lock, flags);
298}
299
300/*
301 * Notification about clock event devices
302 */
303static int tick_notify(struct notifier_block *nb, unsigned long reason,
304 void *dev)
305{
306 switch (reason) {
307
308 case CLOCK_EVT_NOTIFY_ADD:
309 return tick_check_new_device(dev);
310
311 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
312 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
313 tick_broadcast_on_off(reason, dev);
314 break;
315
316 case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
317 case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
318 tick_broadcast_oneshot_control(reason);
319 break;
320
321 case CLOCK_EVT_NOTIFY_CPU_DEAD:
322 tick_shutdown_broadcast_oneshot(dev);
323 tick_shutdown_broadcast(dev);
324 tick_shutdown(dev);
325 break;
326
327 default:
328 break;
329 }
330
331 return NOTIFY_OK;
332}
333
334static struct notifier_block tick_notifier = {
335 .notifier_call = tick_notify,
336};
337
338/**
339 * tick_init - initialize the tick control
340 *
341 * Register the notifier with the clockevents framework
342 */
343void __init tick_init(void)
344{
345 clockevents_register_notifier(&tick_notifier);
346}
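
tick_check_new_device() above encodes a two-step preference: a oneshot capable tick device is never traded for a periodic-only one, and among otherwise comparable devices the higher rating wins. A minimal sketch of just that rule (an illustration only, not part of the patch: struct ev_dev, FEAT_ONESHOT and the sample ratings are invented):

#include <stdio.h>

#define FEAT_ONESHOT 0x01

struct ev_dev {
	const char	*name;
	unsigned int	features;	/* like clock_event_device->features */
	int		rating;
};

/* Nonzero when newdev should replace curdev as the per-cpu tick device. */
static int prefer_new(const struct ev_dev *curdev, const struct ev_dev *newdev)
{
	if (!curdev)
		return 1;
	/* Never trade a oneshot capable device for one that is not. */
	if ((curdev->features & FEAT_ONESHOT) &&
	    !(newdev->features & FEAT_ONESHOT))
		return 0;
	/* Otherwise the higher rated device wins. */
	return newdev->rating > curdev->rating;
}

int main(void)
{
	struct ev_dev pit  = { "pit",  FEAT_ONESHOT, 110 };
	struct ev_dev hpet = { "hpet", FEAT_ONESHOT, 250 };

	printf("replace pit by hpet: %d\n", prefer_new(&pit, &hpet)); /* 1 */
	return 0;
}
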
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
new file mode 100644
index 000000000000..54861a0f29ff
--- /dev/null
+++ b/kernel/time/tick-internal.h
@@ -0,0 +1,110 @@
1/*
2 * tick internal variables and functions used by low/high res code
3 */
4DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
5extern spinlock_t tick_device_lock;
6extern ktime_t tick_next_period;
7extern ktime_t tick_period;
8
9extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
10extern void tick_handle_periodic(struct clock_event_device *dev);
11
12/*
13 * NO_HZ / high resolution timer shared code
14 */
15#ifdef CONFIG_TICK_ONESHOT
16extern void tick_setup_oneshot(struct clock_event_device *newdev,
17 void (*handler)(struct clock_event_device *),
18 ktime_t nextevt);
19extern int tick_program_event(ktime_t expires, int force);
20extern void tick_oneshot_notify(void);
21extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
22
23# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
24extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
25extern void tick_broadcast_oneshot_control(unsigned long reason);
26extern void tick_broadcast_switch_to_oneshot(void);
27extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
28# else /* BROADCAST */
29static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
30{
31 BUG();
32}
33static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
34static inline void tick_broadcast_switch_to_oneshot(void) { }
35static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
36# endif /* !BROADCAST */
37
38#else /* !ONESHOT */
39static inline
40void tick_setup_oneshot(struct clock_event_device *newdev,
41 void (*handler)(struct clock_event_device *),
42 ktime_t nextevt)
43{
44 BUG();
45}
46static inline int tick_program_event(ktime_t expires, int force)
47{
48 return 0;
49}
50static inline void tick_oneshot_notify(void) { }
51static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
52{
53 BUG();
54}
55static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
56static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
57#endif /* !TICK_ONESHOT */
58
59/*
60 * Broadcasting support
61 */
62#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
63extern int tick_do_broadcast(cpumask_t mask);
64
65extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
66extern int tick_check_broadcast_device(struct clock_event_device *dev);
67extern int tick_is_broadcast_device(struct clock_event_device *dev);
68extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
69extern void tick_shutdown_broadcast(unsigned int *cpup);
70
71extern void
72tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
73
74#else /* !BROADCAST */
75
76static inline int tick_check_broadcast_device(struct clock_event_device *dev)
77{
78 return 0;
79}
80
81static inline int tick_is_broadcast_device(struct clock_event_device *dev)
82{
83 return 0;
84}
85static inline int tick_device_uses_broadcast(struct clock_event_device *dev,
86 int cpu)
87{
88 return 0;
89}
90static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
91static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
92static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
93
94/*
95 * Set the periodic handler in non broadcast mode
96 */
97static inline void tick_set_periodic_handler(struct clock_event_device *dev,
98 int broadcast)
99{
100 dev->event_handler = tick_handle_periodic;
101}
102#endif /* !BROADCAST */
103
104/*
105 * Check if the device is functional or a dummy for broadcast
106 */
107static inline int tick_device_is_functional(struct clock_event_device *dev)
108{
109 return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
110}
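
The header relies on the usual config-stub pattern: when a feature is compiled out, the extern declarations are replaced by empty (or BUG()) inlines, so call sites stay free of #ifdefs and the calls vanish at compile time. A hypothetical stand-alone example of the same pattern (not part of the patch; CONFIG_FEATURE_X and feature_x_notify() are invented):

#include <stdio.h>

/* #define CONFIG_FEATURE_X */	/* flip to compile the real implementation */

#ifdef CONFIG_FEATURE_X
static void feature_x_notify(int cpu)
{
	printf("feature X notified for cpu %d\n", cpu);
}
#else
static inline void feature_x_notify(int cpu) { }	/* compiles away */
#endif

int main(void)
{
	/* Always legal to call; no #ifdef needed at the call site. */
	feature_x_notify(0);
	return 0;
}
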
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
new file mode 100644
index 000000000000..2e8b7ff863cc
--- /dev/null
+++ b/kernel/time/tick-oneshot.c
@@ -0,0 +1,84 @@
1/*
2 * linux/kernel/time/tick-oneshot.c
3 *
4 * This file contains functions which manage high resolution tick
5 * related events.
6 *
7 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
8 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
9 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
10 *
11 * This code is licenced under the GPL version 2. For details see
12 * kernel-base/COPYING.
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/irq.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/tick.h>
22
23#include "tick-internal.h"
24
25/**
26 * tick_program_event - program the next event on the cpu local device
27 */
28int tick_program_event(ktime_t expires, int force)
29{
30 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
31 ktime_t now = ktime_get();
32
33 while (1) {
34 int ret = clockevents_program_event(dev, expires, now);
35
36 if (!ret || !force)
37 return ret;
38 now = ktime_get();
39 expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
40 }
41}
42
43/**
44 * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
45 */
46void tick_setup_oneshot(struct clock_event_device *newdev,
47 void (*handler)(struct clock_event_device *),
48 ktime_t next_event)
49{
50 newdev->event_handler = handler;
51 clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
52 clockevents_program_event(newdev, next_event, ktime_get());
53}
54
55/**
56 * tick_switch_to_oneshot - switch to oneshot mode
57 */
58int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
59{
60 struct tick_device *td = &__get_cpu_var(tick_cpu_device);
61 struct clock_event_device *dev = td->evtdev;
62
63 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
64 !tick_device_is_functional(dev))
65 return -EINVAL;
66
67 td->mode = TICKDEV_MODE_ONESHOT;
68 dev->event_handler = handler;
69 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
70 tick_broadcast_switch_to_oneshot();
71 return 0;
72}
73
74#ifdef CONFIG_HIGH_RES_TIMERS
75/**
76 * tick_init_highres - switch to high resolution mode
77 *
78 * Called with interrupts disabled.
79 */
80int tick_init_highres(void)
81{
82 return tick_switch_to_oneshot(hrtimer_interrupt);
83}
84#endif
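
tick_program_event() retries on purpose: clockevents_program_event() fails when the requested expiry is no longer in the future, and with force set the expiry is pushed out to now + min_delta_ns until the hardware accepts it. tick_broadcast_set_event() above uses the same idiom. A stand-alone model of the loop (an illustration only, not part of the patch: program(), fake_now and the sample min_delta_ns are invented stand-ins):

#include <stdio.h>
#include <stdint.h>

static int64_t fake_now = 1000;			/* stand-in for ktime_get() */
static const int64_t min_delta_ns = 50;

static int program(int64_t expires, int64_t now)
{
	return expires <= now ? -1 : 0;	/* hardware cannot fire in the past */
}

static int program_event(int64_t expires, int force)
{
	int64_t now = fake_now;

	for (;;) {
		int ret = program(expires, now);

		if (!ret || !force)
			return ret;
		now = fake_now;			/* re-read the clock */
		expires = now + min_delta_ns;	/* earliest the hardware allows */
	}
}

int main(void)
{
	printf("%d\n", program_event(900, 1));	/* in the past, forced: 0 */
	printf("%d\n", program_event(900, 0));	/* in the past, not forced: -1 */
	return 0;
}
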
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
new file mode 100644
index 000000000000..95e41f7f850b
--- /dev/null
+++ b/kernel/time/tick-sched.c
@@ -0,0 +1,563 @@
1/*
2 * linux/kernel/time/tick-sched.c
3 *
4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 *
8 * No idle tick implementation for low and high resolution timers
9 *
10 * Started by: Thomas Gleixner and Ingo Molnar
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/interrupt.h>
18#include <linux/kernel_stat.h>
19#include <linux/percpu.h>
20#include <linux/profile.h>
21#include <linux/sched.h>
22#include <linux/tick.h>
23
24#include "tick-internal.h"
25
26/*
27 * Per cpu nohz control structure
28 */
29static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
30
31/*
32 * The time, when the last jiffy update happened. Protected by xtime_lock.
33 */
34static ktime_t last_jiffies_update;
35
36struct tick_sched *tick_get_tick_sched(int cpu)
37{
38 return &per_cpu(tick_cpu_sched, cpu);
39}
40
41/*
42 * Must be called with interrupts disabled!
43 */
44static void tick_do_update_jiffies64(ktime_t now)
45{
46 unsigned long ticks = 0;
47 ktime_t delta;
48
49 /* Reevaluate with xtime_lock held */
50 write_seqlock(&xtime_lock);
51
52 delta = ktime_sub(now, last_jiffies_update);
53 if (delta.tv64 >= tick_period.tv64) {
54
55 delta = ktime_sub(delta, tick_period);
56 last_jiffies_update = ktime_add(last_jiffies_update,
57 tick_period);
58
59 /* Slow path for long timeouts */
60 if (unlikely(delta.tv64 >= tick_period.tv64)) {
61 s64 incr = ktime_to_ns(tick_period);
62
63 ticks = ktime_divns(delta, incr);
64
65 last_jiffies_update = ktime_add_ns(last_jiffies_update,
66 incr * ticks);
67 }
68 do_timer(++ticks);
69 }
70 write_sequnlock(&xtime_lock);
71}
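
The catch-up logic above can be checked with plain integers: one tick is always accounted, and when more than one full tick_period still fits into the remaining delta, the slow path advances by delta / tick_period in a single step instead of looping once per lost tick. A stand-alone sketch with made-up values (an illustration only, not part of the patch; assumes a 1 ms tick):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t tick_period = 1000000;	/* 1 ms in ns */
	int64_t last_update = 0, now = 5300000;	/* woke up 5.3 ms later */
	int64_t delta = now - last_update;
	int64_t ticks = 0;

	if (delta >= tick_period) {
		delta -= tick_period;		/* the tick we always account */
		last_update += tick_period;
		if (delta >= tick_period) {	/* slow path: long timeout */
			ticks = delta / tick_period;
			last_update += ticks * tick_period;
		}
		ticks++;			/* matches do_timer(++ticks) */
	}
	printf("advance jiffies by %lld, last_update = %lld ns\n",
	       (long long)ticks, (long long)last_update);
	/* prints: advance jiffies by 5, last_update = 5000000 ns */
	return 0;
}
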
72
73/*
74 * Initialize and retrieve the last jiffies update.
75 */
76static ktime_t tick_init_jiffy_update(void)
77{
78 ktime_t period;
79
80 write_seqlock(&xtime_lock);
81 /* Did we start the jiffies update yet? */
82 if (last_jiffies_update.tv64 == 0)
83 last_jiffies_update = tick_next_period;
84 period = last_jiffies_update;
85 write_sequnlock(&xtime_lock);
86 return period;
87}
88
89/*
90 * NOHZ - aka dynamic tick functionality
91 */
92#ifdef CONFIG_NO_HZ
93/*
94 * NO HZ enabled ?
95 */
96static int tick_nohz_enabled __read_mostly = 1;
97
98/*
99 * Enable / Disable tickless mode
100 */
101static int __init setup_tick_nohz(char *str)
102{
103 if (!strcmp(str, "off"))
104 tick_nohz_enabled = 0;
105 else if (!strcmp(str, "on"))
106 tick_nohz_enabled = 1;
107 else
108 return 0;
109 return 1;
110}
111
112__setup("nohz=", setup_tick_nohz);
113
114/**
115 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
116 *
117 * Called from interrupt entry when the CPU was idle
118 *
119 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
120 * must be updated. Otherwise an interrupt handler could use a stale jiffy
121 * value. We do this unconditionally on any cpu, as we don't know whether the
122 * cpu which has the update task assigned is in a long sleep.
123 */
124void tick_nohz_update_jiffies(void)
125{
126 int cpu = smp_processor_id();
127 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
128 unsigned long flags;
129 ktime_t now;
130
131 if (!ts->tick_stopped)
132 return;
133
134 cpu_clear(cpu, nohz_cpu_mask);
135 now = ktime_get();
136
137 local_irq_save(flags);
138 tick_do_update_jiffies64(now);
139 local_irq_restore(flags);
140}
141
142/**
143 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
144 *
145 * When the next event is more than a tick into the future, stop the idle tick.
146 * Called either from the idle loop or from irq_exit() when an idle period was
147 * just interrupted by an interrupt which did not cause a reschedule.
148 */
149void tick_nohz_stop_sched_tick(void)
150{
151 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
152 struct tick_sched *ts;
153 ktime_t last_update, expires, now, delta;
154 int cpu;
155
156 local_irq_save(flags);
157
158 cpu = smp_processor_id();
159 ts = &per_cpu(tick_cpu_sched, cpu);
160
161 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
162 goto end;
163
164 if (need_resched())
165 goto end;
166
167 cpu = smp_processor_id();
168 BUG_ON(local_softirq_pending());
169
170 now = ktime_get();
171 /*
172 * When called from irq_exit we need to account the idle sleep time
173 * correctly.
174 */
175 if (ts->tick_stopped) {
176 delta = ktime_sub(now, ts->idle_entrytime);
177 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
178 }
179
180 ts->idle_entrytime = now;
181 ts->idle_calls++;
182
183 /* Read jiffies and the time when jiffies were updated last */
184 do {
185 seq = read_seqbegin(&xtime_lock);
186 last_update = last_jiffies_update;
187 last_jiffies = jiffies;
188 } while (read_seqretry(&xtime_lock, seq));
189
190 /* Get the next timer wheel timer */
191 next_jiffies = get_next_timer_interrupt(last_jiffies);
192 delta_jiffies = next_jiffies - last_jiffies;
193
194 /*
195 * Do not stop the tick if we are only one jiffy off,
196 * or if the cpu is required for RCU
197 */
198 if (!ts->tick_stopped && (delta_jiffies == 1 || rcu_needs_cpu(cpu)))
199 goto out;
200
201 /* Schedule the tick if we are at least one jiffy off */
202 if ((long)delta_jiffies >= 1) {
203
204 if (rcu_needs_cpu(cpu))
205 delta_jiffies = 1;
206 else
207 cpu_set(cpu, nohz_cpu_mask);
208 /*
209 * tick_nohz_stop_sched_tick can be called several times before
210 * tick_nohz_restart_sched_tick is called. This happens when
211 * interrupts arrive which do not cause a reschedule. In the
212 * first call we save the current tick time, so we can restart
213 * the scheduler tick in nohz_restart_sched_tick.
214 */
215 if (!ts->tick_stopped) {
216 ts->idle_tick = ts->sched_timer.expires;
217 ts->tick_stopped = 1;
218 ts->idle_jiffies = last_jiffies;
219 }
220 /*
221 * calculate the expiry time for the next timer wheel
222 * timer
223 */
224 expires = ktime_add_ns(last_update, tick_period.tv64 *
225 delta_jiffies);
226 ts->idle_expires = expires;
227 ts->idle_sleeps++;
228
229 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
230 hrtimer_start(&ts->sched_timer, expires,
231 HRTIMER_MODE_ABS);
232 /* Check if the timer was already in the past */
233 if (hrtimer_active(&ts->sched_timer))
234 goto out;
235 } else if (!tick_program_event(expires, 0))
236 goto out;
237 /*
238 * We are past the event already. So we crossed a
239 * jiffy boundary. Update jiffies and raise the
240 * softirq.
241 */
242 tick_do_update_jiffies64(ktime_get());
243 cpu_clear(cpu, nohz_cpu_mask);
244 }
245 raise_softirq_irqoff(TIMER_SOFTIRQ);
246out:
247 ts->next_jiffies = next_jiffies;
248 ts->last_jiffies = last_jiffies;
249end:
250 local_irq_restore(flags);
251}
252
253/**
254 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
255 *
256 * Restart the idle tick when the CPU is woken up from idle
257 */
258void tick_nohz_restart_sched_tick(void)
259{
260 int cpu = smp_processor_id();
261 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
262 unsigned long ticks;
263 ktime_t now, delta;
264
265 if (!ts->tick_stopped)
266 return;
267
268 /* Update jiffies first */
269 now = ktime_get();
270
271 local_irq_disable();
272 tick_do_update_jiffies64(now);
273 cpu_clear(cpu, nohz_cpu_mask);
274
275 /* Account the idle time */
276 delta = ktime_sub(now, ts->idle_entrytime);
277 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
278
279 /*
280 * We stopped the tick in idle. update_process_times() would miss the
281 * time we slept, as it does only a single tick of
282 * accounting per call. Enforce that this is accounted to idle!
283 */
284 ticks = jiffies - ts->idle_jiffies;
285 /*
286 * We might be one off. Do not randomly account a huge number of ticks!
287 */
288 if (ticks && ticks < LONG_MAX) {
289 add_preempt_count(HARDIRQ_OFFSET);
290 account_system_time(current, HARDIRQ_OFFSET,
291 jiffies_to_cputime(ticks));
292 sub_preempt_count(HARDIRQ_OFFSET);
293 }
294
295 /*
296 * Cancel the scheduled timer and restore the tick
297 */
298 ts->tick_stopped = 0;
299 hrtimer_cancel(&ts->sched_timer);
300 ts->sched_timer.expires = ts->idle_tick;
301
302 while (1) {
303 /* Forward the time to expire in the future */
304 hrtimer_forward(&ts->sched_timer, now, tick_period);
305
306 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
307 hrtimer_start(&ts->sched_timer,
308 ts->sched_timer.expires,
309 HRTIMER_MODE_ABS);
310 /* Check if the timer was already in the past */
311 if (hrtimer_active(&ts->sched_timer))
312 break;
313 } else {
314 if (!tick_program_event(ts->sched_timer.expires, 0))
315 break;
316 }
317 /* Update jiffies and reread time */
318 tick_do_update_jiffies64(now);
319 now = ktime_get();
320 }
321 local_irq_enable();
322}
323
324static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
325{
326 hrtimer_forward(&ts->sched_timer, now, tick_period);
327 return tick_program_event(ts->sched_timer.expires, 0);
328}
329
330/*
331 * The nohz low res interrupt handler
332 */
333static void tick_nohz_handler(struct clock_event_device *dev)
334{
335 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
336 struct pt_regs *regs = get_irq_regs();
337 ktime_t now = ktime_get();
338
339 dev->next_event.tv64 = KTIME_MAX;
340
341 /* Check if the jiffies need an update */
342 tick_do_update_jiffies64(now);
343
344 /*
345 * When we are idle and the tick is stopped, we have to touch
346 * the watchdog as we might not schedule for a really long
347 * time. This happens on complete idle SMP systems while
348 * waiting on the login prompt. We also increment the "start
349 * of idle" jiffy stamp so the idle accounting adjustment we
351 * do when we go busy again does not account too many ticks.
351 */
352 if (ts->tick_stopped) {
353 touch_softlockup_watchdog();
354 ts->idle_jiffies++;
355 }
356
357 update_process_times(user_mode(regs));
358 profile_tick(CPU_PROFILING);
359
360 /* Do not restart, when we are in the idle loop */
361 if (ts->tick_stopped)
362 return;
363
364 while (tick_nohz_reprogram(ts, now)) {
365 now = ktime_get();
366 tick_do_update_jiffies64(now);
367 }
368}
369
370/**
371 * tick_nohz_switch_to_nohz - switch to nohz mode
372 */
373static void tick_nohz_switch_to_nohz(void)
374{
375 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
376 ktime_t next;
377
378 if (!tick_nohz_enabled)
379 return;
380
381 local_irq_disable();
382 if (tick_switch_to_oneshot(tick_nohz_handler)) {
383 local_irq_enable();
384 return;
385 }
386
387 ts->nohz_mode = NOHZ_MODE_LOWRES;
388
389 /*
390 * Recycle the hrtimer in ts, so we can share the
391 * hrtimer_forward with the highres code.
392 */
393 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
394 /* Get the next period */
395 next = tick_init_jiffy_update();
396
397 for (;;) {
398 ts->sched_timer.expires = next;
399 if (!tick_program_event(next, 0))
400 break;
401 next = ktime_add(next, tick_period);
402 }
403 local_irq_enable();
404
405 printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
406 smp_processor_id());
407}
408
409#else
410
411static inline void tick_nohz_switch_to_nohz(void) { }
412
413#endif /* NO_HZ */
414
415/*
416 * High resolution timer specific code
417 */
418#ifdef CONFIG_HIGH_RES_TIMERS
419/*
420 * We rearm the timer until we get disabled by the idle code
421 * Called with interrupts disabled and timer->base->cpu_base->lock held.
422 */
423static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
424{
425 struct tick_sched *ts =
426 container_of(timer, struct tick_sched, sched_timer);
427 struct hrtimer_cpu_base *base = timer->base->cpu_base;
428 struct pt_regs *regs = get_irq_regs();
429 ktime_t now = ktime_get();
430
431 /* Check if the jiffies need an update */
432 tick_do_update_jiffies64(now);
433
434 /*
435 * Do not call when we are not in irq context and have
436 * no valid regs pointer.
437 */
438 if (regs) {
439 /*
440 * When we are idle and the tick is stopped, we have to touch
441 * the watchdog as we might not schedule for a really long
442 * time. This happens on complete idle SMP systems while
443 * waiting on the login prompt. We also increment the "start of
444 * idle" jiffy stamp so the idle accounting adjustment we do
445 * when we go busy again does not account too many ticks.
446 */
447 if (ts->tick_stopped) {
448 touch_softlockup_watchdog();
449 ts->idle_jiffies++;
450 }
451 /*
452 * update_process_times() might take tasklist_lock, hence
453 * drop the base lock. sched-tick hrtimers are per-CPU and
454 * never accessible by userspace APIs, so this is safe to do.
455 */
456 spin_unlock(&base->lock);
457 update_process_times(user_mode(regs));
458 profile_tick(CPU_PROFILING);
459 spin_lock(&base->lock);
460 }
461
462 /* Do not restart, when we are in the idle loop */
463 if (ts->tick_stopped)
464 return HRTIMER_NORESTART;
465
466 hrtimer_forward(timer, now, tick_period);
467
468 return HRTIMER_RESTART;
469}
470
471/**
472 * tick_setup_sched_timer - setup the tick emulation timer
473 */
474void tick_setup_sched_timer(void)
475{
476 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
477 ktime_t now = ktime_get();
478
479 /*
480 * Emulate tick processing via per-CPU hrtimers:
481 */
482 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
483 ts->sched_timer.function = tick_sched_timer;
484 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
485
486 /* Get the next period */
487 ts->sched_timer.expires = tick_init_jiffy_update();
488
489 for (;;) {
490 hrtimer_forward(&ts->sched_timer, now, tick_period);
491 hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
492 HRTIMER_MODE_ABS);
493 /* Check if the timer was already in the past */
494 if (hrtimer_active(&ts->sched_timer))
495 break;
496 now = ktime_get();
497 }
498
499#ifdef CONFIG_NO_HZ
500 if (tick_nohz_enabled)
501 ts->nohz_mode = NOHZ_MODE_HIGHRES;
502#endif
503}
504
505void tick_cancel_sched_timer(int cpu)
506{
507 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
508
509 if (ts->sched_timer.base)
510 hrtimer_cancel(&ts->sched_timer);
511 ts->tick_stopped = 0;
512 ts->nohz_mode = NOHZ_MODE_INACTIVE;
513}
514#endif /* HIGH_RES_TIMERS */
515
516/**
517 * Async notification about clocksource changes
518 */
519void tick_clock_notify(void)
520{
521 int cpu;
522
523 for_each_possible_cpu(cpu)
524 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
525}
526
527/*
528 * Async notification about clock event changes
529 */
530void tick_oneshot_notify(void)
531{
532 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
533
534 set_bit(0, &ts->check_clocks);
535}
536
537/**
538 * Check if a change happened which makes oneshot possible.
539 *
540 * Called cyclically from the hrtimer softirq (driven by the timer
541 * softirq). allow_nohz signals that we can switch into low-res nohz
542 * mode, because high resolution timers are disabled (either at
543 * compile time or at runtime).
544 */
545int tick_check_oneshot_change(int allow_nohz)
546{
547 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
548
549 if (!test_and_clear_bit(0, &ts->check_clocks))
550 return 0;
551
552 if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
553 return 0;
554
555 if (!timekeeping_is_continuous() || !tick_is_oneshot_available())
556 return 0;
557
558 if (!allow_nohz)
559 return 1;
560
561 tick_nohz_switch_to_nohz();
562 return 0;
563}
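
Most of tick_nohz_stop_sched_tick() is bookkeeping around one small computation: delta_jiffies = next_jiffies - last_jiffies decides whether stopping the tick is worthwhile at all, and if so the oneshot expiry is placed exactly on the jiffy boundary of the next timer wheel timer, last_update + tick_period * delta_jiffies. Reduced to a stand-alone sketch (an illustration only, not part of the patch; HZ and the sample values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t tick_period = 4000000;	/* 4 ms, i.e. HZ = 250 */
	int64_t last_update = 80000000;		/* time of last jiffy update, ns */
	unsigned long last_jiffies = 20, next_jiffies = 37;
	unsigned long delta_jiffies = next_jiffies - last_jiffies;

	if (delta_jiffies <= 1) {
		printf("keep the periodic tick\n");
	} else {
		int64_t expires = last_update + tick_period * delta_jiffies;

		printf("stop the tick, next event at %lld ns (%lu jiffies away)\n",
		       (long long)expires, delta_jiffies);
	}
	return 0;
}
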
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
new file mode 100644
index 000000000000..f82c635c3d5c
--- /dev/null
+++ b/kernel/time/timer_list.c
@@ -0,0 +1,287 @@
1/*
2 * kernel/time/timer_list.c
3 *
4 * List pending timers
5 *
6 * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/proc_fs.h>
14#include <linux/module.h>
15#include <linux/spinlock.h>
16#include <linux/sched.h>
17#include <linux/seq_file.h>
18#include <linux/kallsyms.h>
19#include <linux/tick.h>
20
21#include <asm/uaccess.h>
22
23typedef void (*print_fn_t)(struct seq_file *m, unsigned int *classes);
24
25DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
26
27/*
28 * This allows printing both to /proc/timer_list and
29 * to the console (on SysRq-Q):
30 */
31#define SEQ_printf(m, x...) \
32 do { \
33 if (m) \
34 seq_printf(m, x); \
35 else \
36 printk(x); \
37 } while (0)
38
39static void print_name_offset(struct seq_file *m, void *sym)
40{
41 unsigned long addr = (unsigned long)sym;
42 char namebuf[KSYM_NAME_LEN+1];
43 unsigned long size, offset;
44 const char *sym_name;
45 char *modname;
46
47 sym_name = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
48 if (sym_name)
49 SEQ_printf(m, "%s", sym_name);
50 else
51 SEQ_printf(m, "<%p>", sym);
52}
53
54static void
55print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
56{
57#ifdef CONFIG_TIMER_STATS
58 char tmp[TASK_COMM_LEN + 1];
59#endif
60 SEQ_printf(m, " #%d: ", idx);
61 print_name_offset(m, timer);
62 SEQ_printf(m, ", ");
63 print_name_offset(m, timer->function);
64 SEQ_printf(m, ", S:%02lx", timer->state);
65#ifdef CONFIG_TIMER_STATS
66 SEQ_printf(m, ", ");
67 print_name_offset(m, timer->start_site);
68 memcpy(tmp, timer->start_comm, TASK_COMM_LEN);
69 tmp[TASK_COMM_LEN] = 0;
70 SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
71#endif
72 SEQ_printf(m, "\n");
73 SEQ_printf(m, " # expires at %Ld nsecs [in %Ld nsecs]\n",
74 (unsigned long long)ktime_to_ns(timer->expires),
75 (unsigned long long)(ktime_to_ns(timer->expires) - now));
76}
77
78static void
79print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
80 u64 now)
81{
82 struct hrtimer *timer, tmp;
83 unsigned long next = 0, i;
84 struct rb_node *curr;
85 unsigned long flags;
86
87next_one:
88 i = 0;
89 spin_lock_irqsave(&base->cpu_base->lock, flags);
90
91 curr = base->first;
92 /*
93 * Crude, but we have to do this O(N*N) thing because
94 * we have to unlock the base when printing:
95 */
96 while (curr && i < next) {
97 curr = rb_next(curr);
98 i++;
99 }
100
101 if (curr) {
102
103 timer = rb_entry(curr, struct hrtimer, node);
104 tmp = *timer;
105 spin_unlock_irqrestore(&base->cpu_base->lock, flags);
106
107 print_timer(m, &tmp, i, now);
108 next++;
109 goto next_one;
110 }
111 spin_unlock_irqrestore(&base->cpu_base->lock, flags);
112}
113
114static void
115print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
116{
117 SEQ_printf(m, " .index: %d\n",
118 base->index);
119 SEQ_printf(m, " .resolution: %Ld nsecs\n",
120 (unsigned long long)ktime_to_ns(base->resolution));
121 SEQ_printf(m, " .get_time: ");
122 print_name_offset(m, base->get_time);
123 SEQ_printf(m, "\n");
124#ifdef CONFIG_HIGH_RES_TIMERS
125 SEQ_printf(m, " .offset: %Ld nsecs\n",
126 ktime_to_ns(base->offset));
127#endif
128 SEQ_printf(m, "active timers:\n");
129 print_active_timers(m, base, now);
130}
131
132static void print_cpu(struct seq_file *m, int cpu, u64 now)
133{
134 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
135 int i;
136
137 SEQ_printf(m, "\ncpu: %d\n", cpu);
138 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
139 SEQ_printf(m, " clock %d:\n", i);
140 print_base(m, cpu_base->clock_base + i, now);
141 }
142#define P(x) \
143 SEQ_printf(m, " .%-15s: %Ld\n", #x, (u64)(cpu_base->x))
144#define P_ns(x) \
145 SEQ_printf(m, " .%-15s: %Ld nsecs\n", #x, \
146 (u64)(ktime_to_ns(cpu_base->x)))
147
148#ifdef CONFIG_HIGH_RES_TIMERS
149 P_ns(expires_next);
150 P(hres_active);
151 P(nr_events);
152#endif
153#undef P
154#undef P_ns
155
156#ifdef CONFIG_TICK_ONESHOT
157# define P(x) \
158 SEQ_printf(m, " .%-15s: %Ld\n", #x, (u64)(ts->x))
159# define P_ns(x) \
160 SEQ_printf(m, " .%-15s: %Ld nsecs\n", #x, \
161 (u64)(ktime_to_ns(ts->x)))
162 {
163 struct tick_sched *ts = tick_get_tick_sched(cpu);
164 P(nohz_mode);
165 P_ns(idle_tick);
166 P(tick_stopped);
167 P(idle_jiffies);
168 P(idle_calls);
169 P(idle_sleeps);
170 P_ns(idle_entrytime);
171 P_ns(idle_sleeptime);
172 P(last_jiffies);
173 P(next_jiffies);
174 P_ns(idle_expires);
175 SEQ_printf(m, "jiffies: %Ld\n", (u64)jiffies);
176 }
177#endif
178
179#undef P
180#undef P_ns
181}
182
183#ifdef CONFIG_GENERIC_CLOCKEVENTS
184static void
185print_tickdevice(struct seq_file *m, struct tick_device *td)
186{
187 struct clock_event_device *dev = td->evtdev;
188
189 SEQ_printf(m, "\nTick Device: mode: %d\n", td->mode);
190
191 SEQ_printf(m, "Clock Event Device: ");
192 if (!dev) {
193 SEQ_printf(m, "<NULL>\n");
194 return;
195 }
196 SEQ_printf(m, "%s\n", dev->name);
197 SEQ_printf(m, " max_delta_ns: %ld\n", dev->max_delta_ns);
198 SEQ_printf(m, " min_delta_ns: %ld\n", dev->min_delta_ns);
199 SEQ_printf(m, " mult: %ld\n", dev->mult);
200 SEQ_printf(m, " shift: %d\n", dev->shift);
201 SEQ_printf(m, " mode: %d\n", dev->mode);
202 SEQ_printf(m, " next_event: %Ld nsecs\n",
203 (unsigned long long) ktime_to_ns(dev->next_event));
204
205 SEQ_printf(m, " set_next_event: ");
206 print_name_offset(m, dev->set_next_event);
207 SEQ_printf(m, "\n");
208
209 SEQ_printf(m, " set_mode: ");
210 print_name_offset(m, dev->set_mode);
211 SEQ_printf(m, "\n");
212
213 SEQ_printf(m, " event_handler: ");
214 print_name_offset(m, dev->event_handler);
215 SEQ_printf(m, "\n");
216}
217
218static void timer_list_show_tickdevices(struct seq_file *m)
219{
220 int cpu;
221
222#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
223 print_tickdevice(m, tick_get_broadcast_device());
224 SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
225 tick_get_broadcast_mask()->bits[0]);
226#ifdef CONFIG_TICK_ONESHOT
227 SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
228 tick_get_broadcast_oneshot_mask()->bits[0]);
229#endif
230 SEQ_printf(m, "\n");
231#endif
232 for_each_online_cpu(cpu)
233 print_tickdevice(m, tick_get_device(cpu));
234 SEQ_printf(m, "\n");
235}
236#else
237static void timer_list_show_tickdevices(struct seq_file *m) { }
238#endif
239
240static int timer_list_show(struct seq_file *m, void *v)
241{
242 u64 now = ktime_to_ns(ktime_get());
243 int cpu;
244
245 SEQ_printf(m, "Timer List Version: v0.3\n");
246 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
247 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
248
249 for_each_online_cpu(cpu)
250 print_cpu(m, cpu, now);
251
252 SEQ_printf(m, "\n");
253 timer_list_show_tickdevices(m);
254
255 return 0;
256}
257
258void sysrq_timer_list_show(void)
259{
260 timer_list_show(NULL, NULL);
261}
262
263static int timer_list_open(struct inode *inode, struct file *filp)
264{
265 return single_open(filp, timer_list_show, NULL);
266}
267
268static struct file_operations timer_list_fops = {
269 .open = timer_list_open,
270 .read = seq_read,
271 .llseek = seq_lseek,
272 .release = seq_release,
273};
274
275static int __init init_timer_list_procfs(void)
276{
277 struct proc_dir_entry *pe;
278
279 pe = create_proc_entry("timer_list", 0644, NULL);
280 if (!pe)
281 return -ENOMEM;
282
283 pe->proc_fops = &timer_list_fops;
284
285 return 0;
286}
287__initcall(init_timer_list_procfs);
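
From user space the new file is plain text, so a minimal reader is enough to dump it (an illustration only, not part of the patch; assumes a kernel with this patch applied and /proc mounted):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/timer_list", "r");

	if (!f) {
		perror("/proc/timer_list");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
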
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
new file mode 100644
index 000000000000..1bc4882e28e0
--- /dev/null
+++ b/kernel/time/timer_stats.c
@@ -0,0 +1,411 @@
1/*
2 * kernel/time/timer_stats.c
3 *
4 * Collect timer usage statistics.
5 *
6 * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
7 * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 *
9 * timer_stats is based on timer_top, a similar facility which was part of
10 * Con Kolivas' dyntick patch set. It was developed by Daniel Petrini at the
11 * Instituto Nokia de Tecnologia - INdT - Manaus. timer_top's design was based
12 * on dynamic allocation of the statistics entries and linear search based
13 * lookup combined with a global lock, rather than the static array, hash
14 * and per-CPU locking which is used by timer_stats. It was written for the
15 * pre-hrtimer kernel code and therefore did not take hrtimers into account.
16 * Nevertheless it provided the base for the timer_stats implementation and
17 * was a helpful source of inspiration. Kudos to Daniel and the Nokia folks
18 * for this effort.
19 *
20 * timer_top.c is
21 * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus
22 * Written by Daniel Petrini <d.pensator@gmail.com>
23 * timer_top.c was released under the GNU General Public License version 2
24 *
25 * We export the addresses and counting of timer functions being called,
26 * the pid and cmdline from the owner process if applicable.
27 *
28 * Start/stop data collection:
29 * # echo 1 >/proc/timer_stats   (echo 0 to stop)
30 *
31 * Display the information collected so far:
32 * # cat /proc/timer_stats
33 *
34 * This program is free software; you can redistribute it and/or modify
35 * it under the terms of the GNU General Public License version 2 as
36 * published by the Free Software Foundation.
37 */
38
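
The start/read/stop cycle described above, driven from a small user-space program (an illustration only, not part of the patch; assumes a kernel with CONFIG_TIMER_STATS and this patch applied). Note that tstats_write() below accepts exactly two bytes, so "1\n" and "0\n" are written:

#include <stdio.h>
#include <unistd.h>

static int poke(const char *val)
{
	FILE *f = fopen("/proc/timer_stats", "w");

	if (!f)
		return -1;
	fputs(val, f);			/* "1\n" or "0\n": exactly two bytes */
	fclose(f);
	return 0;
}

int main(void)
{
	char line[256];
	FILE *f;

	if (poke("1\n"))		/* start collection */
		return 1;
	sleep(5);			/* sample for five seconds */

	f = fopen("/proc/timer_stats", "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return poke("0\n") ? 1 : 0;	/* stop collection */
}
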
39#include <linux/proc_fs.h>
40#include <linux/module.h>
41#include <linux/spinlock.h>
42#include <linux/sched.h>
43#include <linux/seq_file.h>
44#include <linux/kallsyms.h>
45
46#include <asm/uaccess.h>
47
48/*
49 * This is our basic unit of interest: a timer expiry event identified
50 * by the timer, its start/expire functions and the PID of the task that
51 * started the timer. We count the number of times an event happens:
52 */
53struct entry {
54 /*
55 * Hash list:
56 */
57 struct entry *next;
58
59 /*
60 * Hash keys:
61 */
62 void *timer;
63 void *start_func;
64 void *expire_func;
65 pid_t pid;
66
67 /*
68 * Number of timeout events:
69 */
70 unsigned long count;
71
72 /*
73 * We save the command-line string to preserve
74 * this information past task exit:
75 */
76 char comm[TASK_COMM_LEN + 1];
77
78} ____cacheline_aligned_in_smp;
79
80/*
81 * Spinlock protecting the tables - not taken during lookup:
82 */
83static DEFINE_SPINLOCK(table_lock);
84
85/*
86 * Per-CPU lookup locks for fast hash lookup:
87 */
88static DEFINE_PER_CPU(spinlock_t, lookup_lock);
89
90/*
91 * Mutex to serialize state changes with show-stats activities:
92 */
93static DEFINE_MUTEX(show_mutex);
94
95/*
96 * Collection status, active/inactive:
97 */
98static int __read_mostly active;
99
100/*
101 * Beginning/end timestamps of measurement:
102 */
103static ktime_t time_start, time_stop;
104
105/*
106 * tstat entry structs only get allocated while collection is
107 * active and never freed during that time - this simplifies
108 * things quite a bit.
109 *
110 * They get freed when a new collection period is started.
111 */
112#define MAX_ENTRIES_BITS 10
113#define MAX_ENTRIES (1UL << MAX_ENTRIES_BITS)
114
115static unsigned long nr_entries;
116static struct entry entries[MAX_ENTRIES];
117
118static atomic_t overflow_count;
119
120static void reset_entries(void)
121{
122 nr_entries = 0;
123 memset(entries, 0, sizeof(entries));
124 atomic_set(&overflow_count, 0);
125}
126
127static struct entry *alloc_entry(void)
128{
129 if (nr_entries >= MAX_ENTRIES)
130 return NULL;
131
132 return entries + nr_entries++;
133}
134
135/*
136 * The entries are in a hash-table, for fast lookup:
137 */
138#define TSTAT_HASH_BITS (MAX_ENTRIES_BITS - 1)
139#define TSTAT_HASH_SIZE (1UL << TSTAT_HASH_BITS)
140#define TSTAT_HASH_MASK (TSTAT_HASH_SIZE - 1)
141
142#define __tstat_hashfn(entry) \
143 (((unsigned long)(entry)->timer ^ \
144 (unsigned long)(entry)->start_func ^ \
145 (unsigned long)(entry)->expire_func ^ \
146 (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK)
147
148#define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry))
149
150static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
151
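
The hash is a plain XOR fold of the four key fields, masked down to TSTAT_HASH_SIZE buckets (512 with MAX_ENTRIES_BITS = 10), so collisions are possible but cheap to chain. A stand-alone evaluation with made-up key values (an illustration only, not part of the patch):

#include <stdio.h>

#define TSTAT_HASH_MASK ((1UL << 9) - 1)	/* TSTAT_HASH_BITS = 10 - 1 */

int main(void)
{
	unsigned long timer       = 0xc1200468UL;	/* &timer object */
	unsigned long start_func  = 0xc0105a10UL;	/* who started it */
	unsigned long expire_func = 0xc0105b40UL;	/* timer callback */
	unsigned long pid         = 4711;

	printf("bucket = %lu\n",
	       (timer ^ start_func ^ expire_func ^ pid) & TSTAT_HASH_MASK);
	return 0;
}
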
152static int match_entries(struct entry *entry1, struct entry *entry2)
153{
154 return entry1->timer == entry2->timer &&
155 entry1->start_func == entry2->start_func &&
156 entry1->expire_func == entry2->expire_func &&
157 entry1->pid == entry2->pid;
158}
159
160/*
161 * Look up whether an entry matching this item is present
162 * in the hash already. Must be called with irqs off and the
163 * lookup lock held:
164 */
165static struct entry *tstat_lookup(struct entry *entry, char *comm)
166{
167 struct entry **head, *curr, *prev;
168
169 head = tstat_hashentry(entry);
170 curr = *head;
171
172 /*
173 * The fastpath is when the entry is already hashed;
174 * we do this with the lookup lock held, but with the
175 * table lock not held:
176 */
177 while (curr) {
178 if (match_entries(curr, entry))
179 return curr;
180
181 curr = curr->next;
182 }
183 /*
184 * Slowpath: allocate, set up and link a new hash entry:
185 */
186 prev = NULL;
187 curr = *head;
188
189 spin_lock(&table_lock);
190 /*
191 * Make sure we have not raced with another CPU:
192 */
193 while (curr) {
194 if (match_entries(curr, entry))
195 goto out_unlock;
196
197 prev = curr;
198 curr = curr->next;
199 }
200
201 curr = alloc_entry();
202 if (curr) {
203 *curr = *entry;
204 curr->count = 0;
205 memcpy(curr->comm, comm, TASK_COMM_LEN);
206 if (prev)
207 prev->next = curr;
208 else
209 *head = curr;
210 curr->next = NULL;
211 }
212 out_unlock:
213 spin_unlock(&table_lock);
214
215 return curr;
216}
217
218/**
219 * timer_stats_update_stats - Update the statistics for a timer.
220 * @timer: pointer to either a timer_list or a hrtimer
221 * @pid: the pid of the task which set up the timer
222 * @startf: pointer to the function which did the timer setup
223 * @timerf: pointer to the timer callback function of the timer
224 * @comm: name of the process which set up the timer
225 *
226 * When the timer is already registered, then the event counter is
227 * incremented. Otherwise the timer is registered in a free slot.
228 */
229void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
230 void *timerf, char * comm)
231{
232 /*
233 * It doesn't matter which lock we take:
234 */
235 spinlock_t *lock = &per_cpu(lookup_lock, raw_smp_processor_id());
236 struct entry *entry, input;
237 unsigned long flags;
238
239 input.timer = timer;
240 input.start_func = startf;
241 input.expire_func = timerf;
242 input.pid = pid;
243
244 spin_lock_irqsave(lock, flags);
245 if (!active)
246 goto out_unlock;
247
248 entry = tstat_lookup(&input, comm);
249 if (likely(entry))
250 entry->count++;
251 else
252 atomic_inc(&overflow_count);
253
254 out_unlock:
255 spin_unlock_irqrestore(lock, flags);
256}
257
258static void print_name_offset(struct seq_file *m, unsigned long addr)
259{
260 char namebuf[KSYM_NAME_LEN+1];
261 unsigned long size, offset;
262 const char *sym_name;
263 char *modname;
264
265 sym_name = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
266 if (sym_name)
267 seq_printf(m, "%s", sym_name);
268 else
269 seq_printf(m, "<%p>", (void *)addr);
270}
271
272static int tstats_show(struct seq_file *m, void *v)
273{
274 struct timespec period;
275 struct entry *entry;
276 unsigned long ms;
277 long events = 0;
278 ktime_t time;
279 int i;
280
281 mutex_lock(&show_mutex);
282 /*
283 * If still active then calculate up to now:
284 */
285 if (active)
286 time_stop = ktime_get();
287
288 time = ktime_sub(time_stop, time_start);
289
290 period = ktime_to_timespec(time);
291 ms = period.tv_nsec / 1000000;
292
293 seq_puts(m, "Timer Stats Version: v0.1\n");
294 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
295 if (atomic_read(&overflow_count))
296 seq_printf(m, "Overflow: %d entries\n",
297 atomic_read(&overflow_count));
298
299 for (i = 0; i < nr_entries; i++) {
300 entry = entries + i;
301 seq_printf(m, "%4lu, %5d %-16s ",
302 entry->count, entry->pid, entry->comm);
303
304 print_name_offset(m, (unsigned long)entry->start_func);
305 seq_puts(m, " (");
306 print_name_offset(m, (unsigned long)entry->expire_func);
307 seq_puts(m, ")\n");
308
309 events += entry->count;
310 }
311
312 ms += period.tv_sec * 1000;
313 if (!ms)
314 ms = 1;
315
316 if (events && period.tv_sec)
317 seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events,
318 events / period.tv_sec, events * 1000 / ms);
319 else
320 seq_printf(m, "%ld total events\n", events);
321
322 mutex_unlock(&show_mutex);
323
324 return 0;
325}
326
327/*
328 * After a state change, make sure all concurrent lookup/update
329 * activities have stopped:
330 */
331static void sync_access(void)
332{
333 unsigned long flags;
334 int cpu;
335
336 for_each_online_cpu(cpu) {
337 spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
338 /* nothing */
339 spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
340 }
341}
342
343static ssize_t tstats_write(struct file *file, const char __user *buf,
344 size_t count, loff_t *offs)
345{
346 char ctl[2];
347
348 if (count != 2 || *offs)
349 return -EINVAL;
350
351 if (copy_from_user(ctl, buf, count))
352 return -EFAULT;
353
354 mutex_lock(&show_mutex);
355 switch (ctl[0]) {
356 case '0':
357 if (active) {
358 active = 0;
359 time_stop = ktime_get();
360 sync_access();
361 }
362 break;
363 case '1':
364 if (!active) {
365 reset_entries();
366 time_start = ktime_get();
367 active = 1;
368 }
369 break;
370 default:
371 count = -EINVAL;
372 }
373 mutex_unlock(&show_mutex);
374
375 return count;
376}
377
378static int tstats_open(struct inode *inode, struct file *filp)
379{
380 return single_open(filp, tstats_show, NULL);
381}
382
383static struct file_operations tstats_fops = {
384 .open = tstats_open,
385 .read = seq_read,
386 .write = tstats_write,
387 .llseek = seq_lseek,
388 .release = seq_release,
389};
390
391void __init init_timer_stats(void)
392{
393 int cpu;
394
395 for_each_possible_cpu(cpu)
396 spin_lock_init(&per_cpu(lookup_lock, cpu));
397}
398
399static int __init init_tstats_procfs(void)
400{
401 struct proc_dir_entry *pe;
402
403 pe = create_proc_entry("timer_stats", 0644, NULL);
404 if (!pe)
405 return -ENOMEM;
406
407 pe->proc_fops = &tstats_fops;
408
409 return 0;
410}
411__initcall(init_tstats_procfs);
diff --git a/kernel/timer.c b/kernel/timer.c
index 4902181e10e6..cb1b86a9c52f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -34,6 +34,8 @@
34#include <linux/cpu.h> 34#include <linux/cpu.h>
35#include <linux/syscalls.h> 35#include <linux/syscalls.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/tick.h>
38#include <linux/kallsyms.h>
37 39
38#include <asm/uaccess.h> 40#include <asm/uaccess.h>
39#include <asm/unistd.h> 41#include <asm/unistd.h>
@@ -262,6 +264,18 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
262 list_add_tail(&timer->entry, vec); 264 list_add_tail(&timer->entry, vec);
263} 265}
264 266
267#ifdef CONFIG_TIMER_STATS
268void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
269{
270 if (timer->start_site)
271 return;
272
273 timer->start_site = addr;
274 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
275 timer->start_pid = current->pid;
276}
277#endif
278
265/** 279/**
266 * init_timer - initialize a timer. 280 * init_timer - initialize a timer.
267 * @timer: the timer to be initialized 281 * @timer: the timer to be initialized
@@ -273,11 +287,16 @@ void fastcall init_timer(struct timer_list *timer)
273{ 287{
274 timer->entry.next = NULL; 288 timer->entry.next = NULL;
275 timer->base = __raw_get_cpu_var(tvec_bases); 289 timer->base = __raw_get_cpu_var(tvec_bases);
290#ifdef CONFIG_TIMER_STATS
291 timer->start_site = NULL;
292 timer->start_pid = -1;
293 memset(timer->start_comm, 0, TASK_COMM_LEN);
294#endif
276} 295}
277EXPORT_SYMBOL(init_timer); 296EXPORT_SYMBOL(init_timer);
278 297
279static inline void detach_timer(struct timer_list *timer, 298static inline void detach_timer(struct timer_list *timer,
280 int clear_pending) 299 int clear_pending)
281{ 300{
282 struct list_head *entry = &timer->entry; 301 struct list_head *entry = &timer->entry;
283 302
@@ -324,6 +343,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
324 unsigned long flags; 343 unsigned long flags;
325 int ret = 0; 344 int ret = 0;
326 345
346 timer_stats_timer_set_start_info(timer);
327 BUG_ON(!timer->function); 347 BUG_ON(!timer->function);
328 348
329 base = lock_timer_base(timer, &flags); 349 base = lock_timer_base(timer, &flags);
@@ -374,6 +394,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
374 tvec_base_t *base = per_cpu(tvec_bases, cpu); 394 tvec_base_t *base = per_cpu(tvec_bases, cpu);
375 unsigned long flags; 395 unsigned long flags;
376 396
397 timer_stats_timer_set_start_info(timer);
377 BUG_ON(timer_pending(timer) || !timer->function); 398 BUG_ON(timer_pending(timer) || !timer->function);
378 spin_lock_irqsave(&base->lock, flags); 399 spin_lock_irqsave(&base->lock, flags);
379 timer->base = base; 400 timer->base = base;
@@ -406,6 +427,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
406{ 427{
407 BUG_ON(!timer->function); 428 BUG_ON(!timer->function);
408 429
430 timer_stats_timer_set_start_info(timer);
409 /* 431 /*
410 * This is a common optimization triggered by the 432 * This is a common optimization triggered by the
411 * networking code - if the timer is re-modified 433 * networking code - if the timer is re-modified
@@ -436,6 +458,7 @@ int del_timer(struct timer_list *timer)
436 unsigned long flags; 458 unsigned long flags;
437 int ret = 0; 459 int ret = 0;
438 460
461 timer_stats_timer_clear_start_info(timer);
439 if (timer_pending(timer)) { 462 if (timer_pending(timer)) {
440 base = lock_timer_base(timer, &flags); 463 base = lock_timer_base(timer, &flags);
441 if (timer_pending(timer)) { 464 if (timer_pending(timer)) {
@@ -569,6 +592,8 @@ static inline void __run_timers(tvec_base_t *base)
569 fn = timer->function; 592 fn = timer->function;
570 data = timer->data; 593 data = timer->data;
571 594
595 timer_stats_account_timer(timer);
596
572 set_running_timer(base, timer); 597 set_running_timer(base, timer);
573 detach_timer(timer, 1); 598 detach_timer(timer, 1);
574 spin_unlock_irq(&base->lock); 599 spin_unlock_irq(&base->lock);
@@ -591,105 +616,124 @@ static inline void __run_timers(tvec_base_t *base)
591 spin_unlock_irq(&base->lock); 616 spin_unlock_irq(&base->lock);
592} 617}
593 618
594#ifdef CONFIG_NO_IDLE_HZ 619#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
595/* 620/*
596 * Find out when the next timer event is due to happen. This 621 * Find out when the next timer event is due to happen. This
597 * is used on S/390 to stop all activity when a cpu is idle. 622 * is used on S/390 to stop all activity when a cpu is idle.
598 * This function needs to be called with interrupts disabled. 623 * This function needs to be called with interrupts disabled.
599 */ 624 */
600unsigned long next_timer_interrupt(void) 625static unsigned long __next_timer_interrupt(tvec_base_t *base)
601{ 626{
602 tvec_base_t *base; 627 unsigned long timer_jiffies = base->timer_jiffies;
603 struct list_head *list; 628 unsigned long expires = timer_jiffies + (LONG_MAX >> 1);
629 int index, slot, array, found = 0;
604 struct timer_list *nte; 630 struct timer_list *nte;
605 unsigned long expires;
606 unsigned long hr_expires = MAX_JIFFY_OFFSET;
607 ktime_t hr_delta;
608 tvec_t *varray[4]; 631 tvec_t *varray[4];
609 int i, j;
610
611 hr_delta = hrtimer_get_next_event();
612 if (hr_delta.tv64 != KTIME_MAX) {
613 struct timespec tsdelta;
614 tsdelta = ktime_to_timespec(hr_delta);
615 hr_expires = timespec_to_jiffies(&tsdelta);
616 if (hr_expires < 3)
617 return hr_expires + jiffies;
618 }
619 hr_expires += jiffies;
620
621 base = __get_cpu_var(tvec_bases);
622 spin_lock(&base->lock);
623 expires = base->timer_jiffies + (LONG_MAX >> 1);
624 list = NULL;
625 632
626 /* Look for timer events in tv1. */ 633 /* Look for timer events in tv1. */
627 j = base->timer_jiffies & TVR_MASK; 634 index = slot = timer_jiffies & TVR_MASK;
628 do { 635 do {
629 list_for_each_entry(nte, base->tv1.vec + j, entry) { 636 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
637 found = 1;
630 expires = nte->expires; 638 expires = nte->expires;
631 if (j < (base->timer_jiffies & TVR_MASK)) 639 /* Look at the cascade bucket(s)? */
632 list = base->tv2.vec + (INDEX(0)); 640 if (!index || slot < index)
633 goto found; 641 goto cascade;
642 return expires;
634 } 643 }
635 j = (j + 1) & TVR_MASK; 644 slot = (slot + 1) & TVR_MASK;
636 } while (j != (base->timer_jiffies & TVR_MASK)); 645 } while (slot != index);
646
647cascade:
648 /* Calculate the next cascade event */
649 if (index)
650 timer_jiffies += TVR_SIZE - index;
651 timer_jiffies >>= TVR_BITS;
637 652
638 /* Check tv2-tv5. */ 653 /* Check tv2-tv5. */
639 varray[0] = &base->tv2; 654 varray[0] = &base->tv2;
640 varray[1] = &base->tv3; 655 varray[1] = &base->tv3;
641 varray[2] = &base->tv4; 656 varray[2] = &base->tv4;
642 varray[3] = &base->tv5; 657 varray[3] = &base->tv5;
643 for (i = 0; i < 4; i++) { 658
644 j = INDEX(i); 659 for (array = 0; array < 4; array++) {
660 tvec_t *varp = varray[array];
661
662 index = slot = timer_jiffies & TVN_MASK;
645 do { 663 do {
646 if (list_empty(varray[i]->vec + j)) { 664 list_for_each_entry(nte, varp->vec + slot, entry) {
647 j = (j + 1) & TVN_MASK; 665 found = 1;
648 continue;
649 }
650 list_for_each_entry(nte, varray[i]->vec + j, entry)
651 if (time_before(nte->expires, expires)) 666 if (time_before(nte->expires, expires))
652 expires = nte->expires; 667 expires = nte->expires;
653 if (j < (INDEX(i)) && i < 3) 668 }
654 list = varray[i + 1]->vec + (INDEX(i + 1)); 669 /*
655 goto found; 670 * Do we still search for the first timer or are
656 } while (j != (INDEX(i))); 671 * we looking up the cascade buckets?
657 } 672 */
658found: 673 if (found) {
659 if (list) { 674 /* Look at the cascade bucket(s)? */
660 /* 675 if (!index || slot < index)
661 * The search wrapped. We need to look at the next list 676 break;
662 * from next tv element that would cascade into tv element 677 return expires;
663 * where we found the timer element. 678 }
664 */ 679 slot = (slot + 1) & TVN_MASK;
665 list_for_each_entry(nte, list, entry) { 680 } while (slot != index);
666 if (time_before(nte->expires, expires)) 681
667 expires = nte->expires; 682 if (index)
668 } 683 timer_jiffies += TVN_SIZE - index;
684 timer_jiffies >>= TVN_BITS;
669 } 685 }
670 spin_unlock(&base->lock); 686 return expires;
687}
671 688
672 /* 689/*
673 * It can happen that other CPUs service timer IRQs and increment 690 * Check whether the next hrtimer event is before the next timer wheel
674 * jiffies, but we have not yet got a local timer tick to process 691 * event:
675 * the timer wheels. In that case, the expiry time can be before 692 */
676 * jiffies, but since the high-resolution timer here is relative to 693static unsigned long cmp_next_hrtimer_event(unsigned long now,
677 * jiffies, the default expression when high-resolution timers are 694 unsigned long expires)
678 * not active, 695{
679 * 696 ktime_t hr_delta = hrtimer_get_next_event();
680 * time_before(MAX_JIFFY_OFFSET + jiffies, expires) 697 struct timespec tsdelta;
681 * 698
682 * would falsely evaluate to true. If that is the case, just 699 if (hr_delta.tv64 == KTIME_MAX)
683 * return jiffies so that we can immediately fire the local timer 700 return expires;
684 */
685 if (time_before(expires, jiffies))
686 return jiffies;
687 701
688 if (time_before(hr_expires, expires)) 702 if (hr_delta.tv64 <= TICK_NSEC)
689 return hr_expires; 703 return now;
690 704
705 tsdelta = ktime_to_timespec(hr_delta);
706 now += timespec_to_jiffies(&tsdelta);
707 if (time_before(now, expires))
708 return now;
691 return expires; 709 return expires;
692} 710}
711
712/**
713 * get_next_timer_interrupt - return the time (in jiffies) of the next pending timer
714 */
715unsigned long get_next_timer_interrupt(unsigned long now)
716{
717 tvec_base_t *base = __get_cpu_var(tvec_bases);
718 unsigned long expires;
719
720 spin_lock(&base->lock);
721 expires = __next_timer_interrupt(base);
722 spin_unlock(&base->lock);
723
724 if (time_before_eq(expires, now))
725 return now;
726
727 return cmp_next_hrtimer_event(now, expires);
728}
729
730#ifdef CONFIG_NO_IDLE_HZ
731unsigned long next_timer_interrupt(void)
732{
733 return get_next_timer_interrupt(jiffies);
734}
735#endif
736
693#endif 737#endif
694 738
695/******************************************************************/ 739/******************************************************************/
@@ -832,32 +876,35 @@ EXPORT_SYMBOL(do_settimeofday);
832 * 876 *
833 * Accumulates current time interval and initializes new clocksource 877 * Accumulates current time interval and initializes new clocksource
834 */ 878 */
835static int change_clocksource(void) 879static void change_clocksource(void)
836{ 880{
837 struct clocksource *new; 881 struct clocksource *new;
838 cycle_t now; 882 cycle_t now;
839 u64 nsec; 883 u64 nsec;
884
840 new = clocksource_get_next(); 885 new = clocksource_get_next();
841 if (clock != new) { 886
842 now = clocksource_read(new); 887 if (clock == new)
843 nsec = __get_nsec_offset(); 888 return;
844 timespec_add_ns(&xtime, nsec); 889
845 890 now = clocksource_read(new);
846 clock = new; 891 nsec = __get_nsec_offset();
847 clock->cycle_last = now; 892 timespec_add_ns(&xtime, nsec);
848 printk(KERN_INFO "Time: %s clocksource has been installed.\n", 893
849 clock->name); 894 clock = new;
850 return 1; 895 clock->cycle_last = now;
851 } else if (clock->update_callback) { 896
852 return clock->update_callback(); 897 clock->error = 0;
853 } 898 clock->xtime_nsec = 0;
854 return 0; 899 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
900
901 tick_clock_notify();
902
903 printk(KERN_INFO "Time: %s clocksource has been installed.\n",
904 clock->name);
855} 905}
856#else 906#else
857static inline int change_clocksource(void) 907static inline void change_clocksource(void) { }
858{
859 return 0;
860}
861#endif 908#endif
862 909
863/** 910/**
@@ -871,33 +918,56 @@ int timekeeping_is_continuous(void)
871 do { 918 do {
872 seq = read_seqbegin(&xtime_lock); 919 seq = read_seqbegin(&xtime_lock);
873 920
874 ret = clock->is_continuous; 921 ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
875 922
876 } while (read_seqretry(&xtime_lock, seq)); 923 } while (read_seqretry(&xtime_lock, seq));
877 924
878 return ret; 925 return ret;
879} 926}
880 927
928/**
929 * read_persistent_clock - Return time in seconds from the persistent clock.
930 *
931 * Weak dummy function for arches that do not yet support it.
932 * Returns seconds from the epoch using the battery-backed persistent clock.
933 * Returns zero if unsupported.
934 *
935 * XXX - Be sure to remove this once all arches implement it.
936 */
937unsigned long __attribute__((weak)) read_persistent_clock(void)
938{
939 return 0;
940}
941
881/* 942/*
882 * timekeeping_init - Initializes the clocksource and common timekeeping values 943 * timekeeping_init - Initializes the clocksource and common timekeeping values
883 */ 944 */
884void __init timekeeping_init(void) 945void __init timekeeping_init(void)
885{ 946{
886 unsigned long flags; 947 unsigned long flags;
948 unsigned long sec = read_persistent_clock();
887 949
888 write_seqlock_irqsave(&xtime_lock, flags); 950 write_seqlock_irqsave(&xtime_lock, flags);
889 951
890 ntp_clear(); 952 ntp_clear();
891 953
892 clock = clocksource_get_next(); 954 clock = clocksource_get_next();
893 clocksource_calculate_interval(clock, tick_nsec); 955 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
894 clock->cycle_last = clocksource_read(clock); 956 clock->cycle_last = clocksource_read(clock);
895 957
958 xtime.tv_sec = sec;
959 xtime.tv_nsec = 0;
960 set_normalized_timespec(&wall_to_monotonic,
961 -xtime.tv_sec, -xtime.tv_nsec);
962
896 write_sequnlock_irqrestore(&xtime_lock, flags); 963 write_sequnlock_irqrestore(&xtime_lock, flags);
897} 964}
898 965
899 966/* flag set when timekeeping is suspended */
900static int timekeeping_suspended; 967static int timekeeping_suspended;
968/* time in seconds when suspend began */
969static unsigned long timekeeping_suspend_time;
970
901/** 971/**
902 * timekeeping_resume - Resumes the generic timekeeping subsystem. 972 * timekeeping_resume - Resumes the generic timekeeping subsystem.
903 * @dev: unused 973 * @dev: unused
@@ -909,13 +979,26 @@ static int timekeeping_suspended;
909static int timekeeping_resume(struct sys_device *dev) 979static int timekeeping_resume(struct sys_device *dev)
910{ 980{
911 unsigned long flags; 981 unsigned long flags;
982 unsigned long now = read_persistent_clock();
912 983
913 write_seqlock_irqsave(&xtime_lock, flags); 984 write_seqlock_irqsave(&xtime_lock, flags);
914 /* restart the last cycle value */ 985
986 if (now && (now > timekeeping_suspend_time)) {
987 unsigned long sleep_length = now - timekeeping_suspend_time;
988
989 xtime.tv_sec += sleep_length;
990 wall_to_monotonic.tv_sec -= sleep_length;
991 }
992 /* re-base the last cycle value */
915 clock->cycle_last = clocksource_read(clock); 993 clock->cycle_last = clocksource_read(clock);
916 clock->error = 0; 994 clock->error = 0;
917 timekeeping_suspended = 0; 995 timekeeping_suspended = 0;
918 write_sequnlock_irqrestore(&xtime_lock, flags); 996 write_sequnlock_irqrestore(&xtime_lock, flags);
997
998 touch_softlockup_watchdog();
999 /* Resume hrtimers */
1000 clock_was_set();
1001
919 return 0; 1002 return 0;
920} 1003}
921 1004
@@ -925,6 +1008,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
925 1008
926 write_seqlock_irqsave(&xtime_lock, flags); 1009 write_seqlock_irqsave(&xtime_lock, flags);
927 timekeeping_suspended = 1; 1010 timekeeping_suspended = 1;
1011 timekeeping_suspend_time = read_persistent_clock();
928 write_sequnlock_irqrestore(&xtime_lock, flags); 1012 write_sequnlock_irqrestore(&xtime_lock, flags);
929 return 0; 1013 return 0;
930} 1014}
@@ -1089,11 +1173,8 @@ static void update_wall_time(void)
1089 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; 1173 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
1090 1174
1091 /* check to see if there is a new clocksource to use */ 1175 /* check to see if there is a new clocksource to use */
1092 if (change_clocksource()) { 1176 change_clocksource();
1093 clock->error = 0; 1177 update_vsyscall(&xtime, clock);
1094 clock->xtime_nsec = 0;
1095 clocksource_calculate_interval(clock, tick_nsec);
1096 }
1097} 1178}
1098 1179
1099/* 1180/*
@@ -1173,7 +1254,8 @@ static void run_timer_softirq(struct softirq_action *h)
1173{ 1254{
1174 tvec_base_t *base = __get_cpu_var(tvec_bases); 1255 tvec_base_t *base = __get_cpu_var(tvec_bases);
1175 1256
1176 hrtimer_run_queues(); 1257 hrtimer_run_queues();
1258
1177 if (time_after_eq(jiffies, base->timer_jiffies)) 1259 if (time_after_eq(jiffies, base->timer_jiffies))
1178 __run_timers(base); 1260 __run_timers(base);
1179} 1261}
@@ -1619,6 +1701,8 @@ void __init init_timers(void)
1619 int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, 1701 int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1620 (void *)(long)smp_processor_id()); 1702 (void *)(long)smp_processor_id());
1621 1703
1704 init_timer_stats();
1705
1622 BUG_ON(err == NOTIFY_BAD); 1706 BUG_ON(err == NOTIFY_BAD);
1623 register_cpu_notifier(&timers_nb); 1707 register_cpu_notifier(&timers_nb);
1624 open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); 1708 open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
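The hooks added above record the first arming site of a timer (__timer_stats_timer_set_start_info() returns early once start_site is set, and only del_timer() clears it), while __run_timers() charges each expiry to that site via timer_stats_account_timer(). A hedged sketch of an ordinary timer user as seen by these hooks; the module and identifier names are hypothetical:

	/* sketch: a plain kernel timer as accounted by CONFIG_TIMER_STATS */
	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		/*
		 * start_site is still set from the first arming, so this
		 * re-arm keeps the original attribution until del_timer().
		 */
		mod_timer(&demo_timer, jiffies + HZ);
	}

	static int __init demo_init(void)
	{
		init_timer(&demo_timer);	/* clears start_site/start_pid/start_comm */
		demo_timer.function = demo_timer_fn;
		demo_timer.data = 0;
		mod_timer(&demo_timer, jiffies + HZ);	/* records this arming site */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);	/* make sure the timer is gone */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");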
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index baacc3691415..658f638c402c 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -22,8 +22,6 @@
22#include <linux/acct.h> 22#include <linux/acct.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24 24
25
26#define USEC_PER_TICK (USEC_PER_SEC/HZ)
27/* 25/*
28 * fill in basic accounting fields 26 * fill in basic accounting fields
29 */ 27 */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 020d1fff57dc..b6fa5e63085d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -218,7 +218,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
218} 218}
219EXPORT_SYMBOL_GPL(queue_work); 219EXPORT_SYMBOL_GPL(queue_work);
220 220
221static void delayed_work_timer_fn(unsigned long __data) 221void delayed_work_timer_fn(unsigned long __data)
222{ 222{
223 struct delayed_work *dwork = (struct delayed_work *)__data; 223 struct delayed_work *dwork = (struct delayed_work *)__data;
224 struct workqueue_struct *wq = get_wq_data(&dwork->work); 224 struct workqueue_struct *wq = get_wq_data(&dwork->work);
@@ -245,6 +245,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
245 struct timer_list *timer = &dwork->timer; 245 struct timer_list *timer = &dwork->timer;
246 struct work_struct *work = &dwork->work; 246 struct work_struct *work = &dwork->work;
247 247
248 timer_stats_timer_set_start_info(timer);
248 if (delay == 0) 249 if (delay == 0)
249 return queue_work(wq, work); 250 return queue_work(wq, work);
250 251
@@ -593,8 +594,10 @@ EXPORT_SYMBOL(schedule_work);
593 * After waiting for a given time this puts a job in the kernel-global 594 * After waiting for a given time this puts a job in the kernel-global
594 * workqueue. 595 * workqueue.
595 */ 596 */
596int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) 597int fastcall schedule_delayed_work(struct delayed_work *dwork,
598 unsigned long delay)
597{ 599{
600 timer_stats_timer_set_start_info(&dwork->timer);
598 return queue_delayed_work(keventd_wq, dwork, delay); 601 return queue_delayed_work(keventd_wq, dwork, delay);
599} 602}
600EXPORT_SYMBOL(schedule_delayed_work); 603EXPORT_SYMBOL(schedule_delayed_work);
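With timer_stats_timer_set_start_info() called in both queue_delayed_work() and schedule_delayed_work(), the timer embedded in a delayed_work is attributed to the code that queued it rather than to delayed_work_timer_fn(). A sketch under that assumption; all names are hypothetical:

	/* sketch: delayed work whose timer shows up in /proc/timer_stats */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void demo_work_fn(struct work_struct *work)
	{
		/* deferred processing goes here */
	}

	static DECLARE_DELAYED_WORK(demo_work, demo_work_fn);

	static int __init demo_wq_init(void)
	{
		/* the expiry is charged to this queueing site */
		schedule_delayed_work(&demo_work, 5 * HZ);
		return 0;
	}

	static void __exit demo_wq_exit(void)
	{
		cancel_delayed_work(&demo_work);
		flush_scheduled_work();
	}

	module_init(demo_wq_init);
	module_exit(demo_wq_exit);
	MODULE_LICENSE("GPL");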
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 63f04c15e6f5..4448f91b865c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -134,6 +134,17 @@ config SCHEDSTATS
134 application, you can say N to avoid the very slight overhead 134 application, you can say N to avoid the very slight overhead
135 this adds. 135 this adds.
136 136
137config TIMER_STATS
138 bool "Collect kernel timers statistics"
139 depends on DEBUG_KERNEL && PROC_FS
140 help
141 If you say Y here, additional code will be inserted into the
142 timer routines to collect statistics about kernel timers being
143 reprogrammed. The statistics can be read from /proc/timer_stats.
144 The statistics collection is started by writing 1 to /proc/timer_stats
145 and stopped by writing 0. This feature is useful for collecting
146 information about timer usage patterns in the kernel and in userspace.
147
137config DEBUG_SLAB 148config DEBUG_SLAB
138 bool "Debug slab memory allocations" 149 bool "Debug slab memory allocations"
139 depends on DEBUG_KERNEL && SLAB 150 depends on DEBUG_KERNEL && SLAB
diff --git a/lib/devres.c b/lib/devres.c
index 2a668dd7cac7..eb38849aa717 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -274,21 +274,21 @@ int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
274 274
275 rc = pci_request_region(pdev, i, name); 275 rc = pci_request_region(pdev, i, name);
276 if (rc) 276 if (rc)
277 goto err_region; 277 goto err_inval;
278 278
279 rc = -ENOMEM; 279 rc = -ENOMEM;
280 if (!pcim_iomap(pdev, i, 0)) 280 if (!pcim_iomap(pdev, i, 0))
281 goto err_iomap; 281 goto err_region;
282 } 282 }
283 283
284 return 0; 284 return 0;
285 285
286 err_iomap:
287 pcim_iounmap(pdev, iomap[i]);
288 err_region: 286 err_region:
289 pci_release_region(pdev, i); 287 pci_release_region(pdev, i);
290 err_inval: 288 err_inval:
291 while (--i >= 0) { 289 while (--i >= 0) {
290 if (!(mask & (1 << i)))
291 continue;
292 pcim_iounmap(pdev, iomap[i]); 292 pcim_iounmap(pdev, iomap[i]);
293 pci_release_region(pdev, i); 293 pci_release_region(pdev, i);
294 } 294 }
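The relabeled error path means a failed pci_request_region() no longer releases a region it never claimed, and the unwind loop now skips BARs outside the caller's mask. A sketch of typical managed use in a probe() routine; the driver name, function names and BAR number are hypothetical:

	/* sketch: pcim_iomap_regions() in a managed PCI probe path */
	#include <linux/module.h>
	#include <linux/pci.h>

	#define DEMO_BAR 0

	static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		void __iomem *mmio;
		int rc;

		rc = pcim_enable_device(pdev);
		if (rc)
			return rc;

		/* on failure, exactly the BARs claimed so far are released */
		rc = pcim_iomap_regions(pdev, 1 << DEMO_BAR, "demo");
		if (rc)
			return rc;

		mmio = pcim_iomap_table(pdev)[DEMO_BAR];
		/* ... program the device through mmio ... */
		return 0;
	}

No remove() callback is needed for these resources: the devres core unmaps and releases them automatically when the driver detaches.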
diff --git a/mm/filemap.c b/mm/filemap.c
index 00414849a867..d1060b8d3cd6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2079,21 +2079,27 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2079 /* Limit the size of the copy to the caller's write size */ 2079 /* Limit the size of the copy to the caller's write size */
2080 bytes = min(bytes, count); 2080 bytes = min(bytes, count);
2081 2081
2082 /* 2082 /* We only need to worry about prefaulting when writes are from
2083 * Limit the size of the copy to that of the current segment, 2083 * user-space. NFSd uses vfs_writev with several non-aligned
2084 * because fault_in_pages_readable() doesn't know how to walk 2084 * segments in the vector, and limiting to one segment at a time is
2085 * segments. 2085 * a noticeable performance loss for rewrites
2086 */ 2086 */
2087 bytes = min(bytes, cur_iov->iov_len - iov_base); 2087 if (!segment_eq(get_fs(), KERNEL_DS)) {
2088 2088 /*
2089 /* 2089 * Limit the size of the copy to that of the current
2090 * Bring in the user page that we will copy from _first_. 2090 * segment, because fault_in_pages_readable() doesn't
2091 * Otherwise there's a nasty deadlock on copying from the 2091 * know how to walk segments.
2092 * same page as we're writing to, without it being marked 2092 */
2093 * up-to-date. 2093 bytes = min(bytes, cur_iov->iov_len - iov_base);
2094 */
2095 fault_in_pages_readable(buf, bytes);
2096 2094
2095 /*
2096 * Bring in the user page that we will copy from
2097 * _first_. Otherwise there's a nasty deadlock on
2098 * copying from the same page as we're writing to,
2099 * without it being marked up-to-date.
2100 */
2101 fault_in_pages_readable(buf, bytes);
2102 }
2097 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); 2103 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
2098 if (!page) { 2104 if (!page) {
2099 status = -ENOMEM; 2105 status = -ENOMEM;
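The rewritten block prefaults the source pages only for user-space writers; KERNEL_DS callers such as nfsd doing vfs_writev() skip both the prefault and the per-segment clamping. The deadlock the comment guards against arises when the copy source is a not-yet-uptodate page-cache page of the very file being written, as in this userspace sketch (the file name is hypothetical, a 4 KiB page size is assumed, and the file is assumed to be at least one page long):

	/* sketch: write a file page from an mmap of that same page */
	#define _XOPEN_SOURCE 500
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("demo.dat", O_RDWR);
		char *map;

		if (fd < 0)
			return 1;
		map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;

		/*
		 * Source and destination are the same page of the same file;
		 * without fault_in_pages_readable() the copy could fault on a
		 * page that is locked and not yet marked up-to-date.
		 */
		pwrite(fd, map, 4096, 0);

		munmap(map, 4096);
		close(fd);
		return 0;
	}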
diff --git a/mm/mincore.c b/mm/mincore.c
index 95c5f49f0a1a..5efe0ded69b1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -77,8 +77,16 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
77 * PTE array for our address. 77 * PTE array for our address.
78 */ 78 */
79 nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)); 79 nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1));
80 if (nr > pages) 80
81 nr = pages; 81 /*
82 * Don't overrun this vma
83 */
84 nr = min(nr, (vma->vm_end - addr) >> PAGE_SHIFT);
85
86 /*
87 * Don't return more than the caller asked for
88 */
89 nr = min(nr, pages);
82 90
83 pgd = pgd_offset(vma->vm_mm, addr); 91 pgd = pgd_offset(vma->vm_mm, addr);
84 if (pgd_none_or_clear_bad(pgd)) 92 if (pgd_none_or_clear_bad(pgd))
@@ -116,10 +124,17 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
116 /* migration entries are always uptodate */ 124 /* migration entries are always uptodate */
117 present = 1; 125 present = 1;
118 } else { 126 } else {
127#ifdef CONFIG_SWAP
119 pgoff = entry.val; 128 pgoff = entry.val;
120 present = mincore_page(&swapper_space, pgoff); 129 present = mincore_page(&swapper_space, pgoff);
130#else
131 WARN_ON(1);
132 present = 1;
133#endif
121 } 134 }
122 } 135 }
136
137 vec[i] = present;
123 } 138 }
124 pte_unmap_unlock(ptep-1, ptl); 139 pte_unmap_unlock(ptep-1, ptl);
125 140
@@ -130,6 +145,9 @@ none_mapped:
130 pgoff = linear_page_index(vma, addr); 145 pgoff = linear_page_index(vma, addr);
131 for (i = 0; i < nr; i++, pgoff++) 146 for (i = 0; i < nr; i++, pgoff++)
132 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); 147 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
148 } else {
149 for (i = 0; i < nr; i++)
150 vec[i] = 0;
133 } 151 }
134 152
135 return nr; 153 return nr;
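do_mincore() now clamps each iteration to both the end of the vma and the length of the caller's vector, and fills vec[] explicitly for non-present and unmapped entries. A userspace sketch exercising the syscall; the page count and output format are arbitrary:

	/* sketch: mincore() residency check over an anonymous mapping */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		size_t npages = 16;
		unsigned char *vec = malloc(npages);
		char *map = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		size_t i;

		if (!vec || map == MAP_FAILED)
			return 1;

		map[0] = 1;	/* fault in the first page only */

		if (mincore(map, npages * page, vec))	/* one byte per page */
			return 1;
		for (i = 0; i < npages; i++)
			printf("page %2zu: %sresident\n", i, (vec[i] & 1) ? "" : "not ");

		munmap(map, npages * page);
		free(vec);
		return 0;
	}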