Diffstat (limited to 'arch')
-rw-r--r--arch/arm/mach-davinci/time.c2
-rw-r--r--arch/arm/mach-imx/time.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-omap1/time.c1
-rw-r--r--arch/arm/mm/fault.c6
-rw-r--r--arch/arm/plat-omap/timer32k.c2
-rw-r--r--arch/avr32/boards/atngw100/setup.c31
-rw-r--r--arch/avr32/configs/atngw100_defconfig16
-rw-r--r--arch/i386/Kconfig21
-rw-r--r--arch/i386/boot/compressed/relocs.c1
-rw-r--r--arch/i386/defconfig264
-rw-r--r--arch/i386/kernel/Makefile1
-rw-r--r--arch/i386/kernel/acpi/boot.c44
-rw-r--r--arch/i386/kernel/alternative.c69
-rw-r--r--arch/i386/kernel/apic.c10
-rw-r--r--arch/i386/kernel/cpu/Makefile1
-rw-r--r--arch/i386/kernel/cpu/amd.c11
-rw-r--r--arch/i386/kernel/cpu/common.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/gx-suspmod.c2
-rw-r--r--arch/i386/kernel/cpu/cyrix.c2
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c79
-rw-r--r--arch/i386/kernel/cpu/mcheck/mce.c14
-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c4
-rw-r--r--arch/i386/kernel/cpu/mtrr/cyrix.c1
-rw-r--r--arch/i386/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c2
-rw-r--r--arch/i386/kernel/cpu/mtrr/state.c1
-rw-r--r--arch/i386/kernel/cpu/perfctr-watchdog.c10
-rw-r--r--arch/i386/kernel/cpu/rise.c52
-rw-r--r--arch/i386/kernel/e820.c32
-rw-r--r--arch/i386/kernel/geode.c155
-rw-r--r--arch/i386/kernel/hpet.c98
-rw-r--r--arch/i386/kernel/i8253.c32
-rw-r--r--arch/i386/kernel/io_apic.c26
-rw-r--r--arch/i386/kernel/irq.c8
-rw-r--r--arch/i386/kernel/kprobes.c9
-rw-r--r--arch/i386/kernel/nmi.c2
-rw-r--r--arch/i386/kernel/paravirt.c18
-rw-r--r--arch/i386/kernel/process.c12
-rw-r--r--arch/i386/kernel/reboot.c9
-rw-r--r--arch/i386/kernel/setup.c11
-rw-r--r--arch/i386/kernel/signal.c7
-rw-r--r--arch/i386/kernel/smpboot.c2
-rw-r--r--arch/i386/kernel/sysenter.c4
-rw-r--r--arch/i386/kernel/time.c50
-rw-r--r--arch/i386/kernel/traps.c27
-rw-r--r--arch/i386/kernel/vmiclock.c2
-rw-r--r--arch/i386/kernel/vsyscall-note.S15
-rw-r--r--arch/i386/lib/Makefile2
-rw-r--r--arch/i386/lib/string.c257
-rw-r--r--arch/i386/mm/fault.c10
-rw-r--r--arch/i386/mm/init.c21
-rw-r--r--arch/i386/mm/ioremap.c2
-rw-r--r--arch/i386/mm/pageattr.c20
-rw-r--r--arch/i386/mm/pgtable.c6
-rw-r--r--arch/i386/pci/acpi.c32
-rw-r--r--arch/i386/pci/common.c13
-rw-r--r--arch/i386/pci/mmconfig-shared.c48
-rw-r--r--arch/i386/xen/events.c1
-rw-r--r--arch/i386/xen/setup.c15
-rw-r--r--arch/i386/xen/time.c3
-rw-r--r--arch/i386/xen/vdso.h4
-rw-r--r--arch/i386/xen/xen-head.S2
-rw-r--r--arch/ia64/Kconfig6
-rw-r--r--arch/ia64/configs/bigsur_defconfig2
-rw-r--r--arch/ia64/configs/gensparse_defconfig2
-rw-r--r--arch/ia64/configs/sim_defconfig2
-rw-r--r--arch/ia64/configs/sn2_defconfig2
-rw-r--r--arch/ia64/configs/tiger_defconfig322
-rw-r--r--arch/ia64/configs/zx1_defconfig2
-rw-r--r--arch/ia64/defconfig338
-rw-r--r--arch/ia64/ia32/binfmt_elf32.c2
-rw-r--r--arch/ia64/kernel/asm-offsets.c35
-rw-r--r--arch/ia64/kernel/cyclone.c46
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/fsys.S179
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h23
-rw-r--r--arch/ia64/kernel/iosapic.c652
-rw-r--r--arch/ia64/kernel/irq.c2
-rw-r--r--arch/ia64/kernel/irq_ia64.c317
-rw-r--r--arch/ia64/kernel/msi_ia64.c23
-rw-r--r--arch/ia64/kernel/smpboot.c4
-rw-r--r--arch/ia64/kernel/time.c96
-rw-r--r--arch/ia64/sn/kernel/sn2/timer.c29
-rw-r--r--arch/m68k/Kconfig7
-rw-r--r--arch/m68k/apollo/config.c4
-rw-r--r--arch/m68k/apollo/dn_ints.c2
-rw-r--r--arch/m68k/atari/atakeyb.c9
-rw-r--r--arch/m68k/bvme6000/config.c2
-rw-r--r--arch/m68k/kernel/head.S2
-rw-r--r--arch/m68k/kernel/setup.c1
-rw-r--r--arch/m68k/kernel/sun3-head.S2
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds2
-rw-r--r--arch/m68k/mac/config.c7
-rw-r--r--arch/m68k/mac/macints.c4
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68k/mm/sun3kmap.c2
-rw-r--r--arch/m68k/mvme147/config.c2
-rw-r--r--arch/m68k/mvme16x/config.c2
-rw-r--r--arch/m68k/q40/q40ints.c2
-rw-r--r--arch/m68k/sun3/sun3ints.c2
-rw-r--r--arch/m68k/sun3x/prom.c2
-rw-r--r--arch/m68knommu/kernel/setup.c41
-rw-r--r--arch/mips/Kconfig11
-rw-r--r--arch/mips/kernel/cpu-probe.c26
-rw-r--r--arch/mips/kernel/process.c14
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/boot/ps3-head.S2
-rw-r--r--arch/powerpc/boot/ps3-hvcall.S2
-rw-r--r--arch/powerpc/configs/cell_defconfig3
-rw-r--r--arch/powerpc/configs/prpmc2800_defconfig2
-rw-r--r--arch/powerpc/kernel/crash.c67
-rw-r--r--arch/powerpc/kernel/of_device.c122
-rw-r--r--arch/powerpc/kernel/of_platform.c82
-rw-r--r--arch/powerpc/kernel/pci-common.c2
-rw-r--r--arch/powerpc/kernel/prom.c252
-rw-r--r--arch/powerpc/kernel/smp.c7
-rw-r--r--arch/powerpc/kernel/time.c1
-rw-r--r--arch/powerpc/mm/fault.c8
-rw-r--r--arch/powerpc/mm/hash_utils_64.c4
-rw-r--r--arch/powerpc/mm/tlb_32.c2
-rw-r--r--arch/powerpc/oprofile/Kconfig7
-rw-r--r--arch/powerpc/oprofile/Makefile4
-rw-r--r--arch/powerpc/oprofile/cell/pr_util.h97
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c221
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c484
-rw-r--r--arch/powerpc/oprofile/cell/vma_map.c287
-rw-r--r--arch/powerpc/oprofile/common.c51
-rw-r--r--arch/powerpc/oprofile/op_model_7450.c14
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c607
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_booke.c11
-rw-r--r--arch/powerpc/oprofile/op_model_pa6t.c12
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c11
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c10
-rw-r--r--arch/powerpc/platforms/Kconfig10
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype2
-rw-r--r--arch/powerpc/platforms/cell/Kconfig10
-rw-r--r--arch/powerpc/platforms/cell/Makefile6
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c445
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.c217
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.h24
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c115
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c148
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c7
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c25
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c295
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c17
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c42
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/fault.c8
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c108
-rw-r--r--arch/powerpc/platforms/cell/spufs/gang.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c132
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c36
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c377
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped480
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h99
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c72
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c34
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig36
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c19
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c17
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/axonram.c381
-rw-r--r--arch/powerpc/sysdev/mpic.c32
-rw-r--r--arch/powerpc/sysdev/pmi.c51
-rw-r--r--arch/powerpc/xmon/nonstdio.c5
-rw-r--r--arch/powerpc/xmon/nonstdio.h3
-rw-r--r--arch/powerpc/xmon/start.c2
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/ppc/syslib/mv64x60.c15
-rw-r--r--arch/sh/Kconfig10
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/mpc1211/pci.c2
-rw-r--r--arch/sh/boards/renesas/r7780rp/setup.c54
-rw-r--r--arch/sh/boards/renesas/rts7751r2d/setup.c3
-rw-r--r--arch/sh/boards/se/7722/irq.c96
-rw-r--r--arch/sh/boards/se/7722/setup.c5
-rw-r--r--arch/sh/cchips/hd6446x/Makefile2
-rw-r--r--arch/sh/cchips/hd6446x/hd64461.c (renamed from arch/sh/cchips/hd6446x/hd64461/setup.c)1
-rw-r--r--arch/sh/cchips/hd6446x/hd64461/Makefile6
-rw-r--r--arch/sh/cchips/hd6446x/hd64461/io.c150
-rw-r--r--arch/sh/configs/landisk_defconfig2
-rw-r--r--arch/sh/configs/lboxre2_defconfig2
-rw-r--r--arch/sh/configs/r7780mp_defconfig2
-rw-r--r--arch/sh/configs/r7780rp_defconfig2
-rw-r--r--arch/sh/configs/rts7751r2d_defconfig8
-rw-r--r--arch/sh/configs/se7722_defconfig4
-rw-r--r--arch/sh/configs/se7750_defconfig2
-rw-r--r--arch/sh/configs/se7780_defconfig1
-rw-r--r--arch/sh/drivers/dma/Kconfig3
-rw-r--r--arch/sh/drivers/heartbeat.c2
-rw-r--r--arch/sh/drivers/pci/Makefile1
-rw-r--r--arch/sh/drivers/pci/ops-sh4.c2
-rw-r--r--arch/sh/drivers/pci/pci-st40.c2
-rw-r--r--arch/sh/drivers/pci/pci.c2
-rw-r--r--arch/sh/drivers/push-switch.c2
-rw-r--r--arch/sh/kernel/cpu/clock.c16
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile1
-rw-r--r--arch/sh/kernel/cpu/irq/intc.c405
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c2
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7709.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c255
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c8
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7722.c15
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c178
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c221
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c2
-rw-r--r--arch/sh/kernel/cpufreq.c215
-rw-r--r--arch/sh/kernel/head.S3
-rw-r--r--arch/sh/kernel/irq.c9
-rw-r--r--arch/sh/kernel/setup.c7
-rw-r--r--arch/sh/kernel/sh_bios.c3
-rw-r--r--arch/sh/kernel/sh_ksyms.c35
-rw-r--r--arch/sh/kernel/syscalls.S1
-rw-r--r--arch/sh/kernel/timers/timer-tmu.c1
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/mm/Kconfig16
-rw-r--r--arch/sh64/configs/cayman_defconfig158
-rw-r--r--arch/sh64/kernel/head.S2
-rw-r--r--arch/sh64/kernel/pci_sh5.c4
-rw-r--r--arch/sh64/kernel/syscalls.S1
-rw-r--r--arch/sh64/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh64/mm/ioremap.c2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/kernel/ebus.c5
-rw-r--r--arch/sparc/kernel/entry.S14
-rw-r--r--arch/sparc/kernel/irq.c27
-rw-r--r--arch/sparc/kernel/irq.h68
-rw-r--r--arch/sparc/kernel/of_device.c227
-rw-r--r--arch/sparc/kernel/pcic.c1
-rw-r--r--arch/sparc/kernel/process.c8
-rw-r--r--arch/sparc/kernel/prom.c304
-rw-r--r--arch/sparc/kernel/setup.c65
-rw-r--r--arch/sparc/kernel/smp.c2
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c2
-rw-r--r--arch/sparc/kernel/sun4c_irq.c15
-rw-r--r--arch/sparc/kernel/sun4d_irq.c6
-rw-r--r--arch/sparc/kernel/sun4d_smp.c1
-rw-r--r--arch/sparc/kernel/sun4m_irq.c74
-rw-r--r--arch/sparc/kernel/sun4m_smp.c2
-rw-r--r--arch/sparc/kernel/systbls.S9
-rw-r--r--arch/sparc/kernel/tick14.c6
-rw-r--r--arch/sparc/kernel/time.c4
-rw-r--r--arch/sparc/mm/init.c3
-rw-r--r--arch/sparc/mm/srmmu.c2
-rw-r--r--arch/sparc/mm/sun4c.c2
-rw-r--r--arch/sparc/prom/console.c116
-rw-r--r--arch/sparc/prom/misc.c4
-rw-r--r--arch/sparc64/Kconfig7
-rw-r--r--arch/sparc64/defconfig24
-rw-r--r--arch/sparc64/kernel/auxio.c2
-rw-r--r--arch/sparc64/kernel/ds.c255
-rw-r--r--arch/sparc64/kernel/ebus.c5
-rw-r--r--arch/sparc64/kernel/head.S1
-rw-r--r--arch/sparc64/kernel/irq.c76
-rw-r--r--arch/sparc64/kernel/isa.c5
-rw-r--r--arch/sparc64/kernel/mdesc.c62
-rw-r--r--arch/sparc64/kernel/of_device.c243
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c4
-rw-r--r--arch/sparc64/kernel/power.c68
-rw-r--r--arch/sparc64/kernel/process.c6
-rw-r--r--arch/sparc64/kernel/prom.c229
-rw-r--r--arch/sparc64/kernel/setup.c70
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c2
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c12
-rw-r--r--arch/sparc64/kernel/systbls.S11
-rw-r--r--arch/sparc64/kernel/time.c135
-rw-r--r--arch/sparc64/kernel/vio.c32
-rw-r--r--arch/sparc64/prom/console.c85
-rw-r--r--arch/sparc64/prom/misc.c4
-rw-r--r--arch/sparc64/prom/tree.c8
-rw-r--r--arch/x86_64/Kconfig12
-rw-r--r--arch/x86_64/Makefile3
-rw-r--r--arch/x86_64/boot/compressed/Makefile2
-rw-r--r--arch/x86_64/defconfig288
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c1
-rw-r--r--arch/x86_64/ia32/ia32entry.S5
-rw-r--r--arch/x86_64/kernel/aperture.c4
-rw-r--r--arch/x86_64/kernel/apic.c77
-rw-r--r--arch/x86_64/kernel/e820.c138
-rw-r--r--arch/x86_64/kernel/early-quirks.c1
-rw-r--r--arch/x86_64/kernel/entry.S6
-rw-r--r--arch/x86_64/kernel/head.S8
-rw-r--r--arch/x86_64/kernel/hpet.c8
-rw-r--r--arch/x86_64/kernel/i8259.c18
-rw-r--r--arch/x86_64/kernel/io_apic.c58
-rw-r--r--arch/x86_64/kernel/kprobes.c10
-rw-r--r--arch/x86_64/kernel/mce.c255
-rw-r--r--arch/x86_64/kernel/mce_amd.c6
-rw-r--r--arch/x86_64/kernel/mpparse.c21
-rw-r--r--arch/x86_64/kernel/nmi.c17
-rw-r--r--arch/x86_64/kernel/pci-calgary.c570
-rw-r--r--arch/x86_64/kernel/pci-dma.c7
-rw-r--r--arch/x86_64/kernel/pci-gart.c27
-rw-r--r--arch/x86_64/kernel/pci-nommu.c8
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/process.c21
-rw-r--r--arch/x86_64/kernel/reboot.c4
-rw-r--r--arch/x86_64/kernel/setup.c12
-rw-r--r--arch/x86_64/kernel/signal.c9
-rw-r--r--arch/x86_64/kernel/smp.c6
-rw-r--r--arch/x86_64/kernel/suspend.c20
-rw-r--r--arch/x86_64/kernel/tce.c12
-rw-r--r--arch/x86_64/kernel/time.c158
-rw-r--r--arch/x86_64/kernel/traps.c6
-rw-r--r--arch/x86_64/kernel/tsc.c39
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S27
-rw-r--r--arch/x86_64/kernel/vsyscall.c22
-rw-r--r--arch/x86_64/mm/fault.c23
-rw-r--r--arch/x86_64/mm/init.c58
-rw-r--r--arch/x86_64/mm/k8topology.c13
-rw-r--r--arch/x86_64/mm/numa.c15
-rw-r--r--arch/x86_64/mm/pageattr.c25
-rw-r--r--arch/x86_64/mm/srat.c97
-rw-r--r--arch/x86_64/pci/k8-bus.c6
-rw-r--r--arch/x86_64/vdso/Makefile49
-rw-r--r--arch/x86_64/vdso/vclock_gettime.c120
-rw-r--r--arch/x86_64/vdso/vdso-note.S12
-rw-r--r--arch/x86_64/vdso/vdso-start.S2
-rw-r--r--arch/x86_64/vdso/vdso.S2
-rw-r--r--arch/x86_64/vdso/vdso.lds.S77
-rw-r--r--arch/x86_64/vdso/vextern.h16
-rw-r--r--arch/x86_64/vdso/vgetcpu.c50
-rw-r--r--arch/x86_64/vdso/vma.c139
-rw-r--r--arch/x86_64/vdso/voffset.h1
-rw-r--r--arch/x86_64/vdso/vvar.c12
337 files changed, 10531 insertions, 5847 deletions
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 4d8425de6922..e96a3dcdc1a7 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -285,6 +285,8 @@ static void davinci_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		t->opts = TIMER_OPTS_DISABLED;
 		break;
+	case CLOCK_EVT_MODE_RESUME:
+		break;
 	}
 }
 
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index 010f6fa984a6..d86d124aea22 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -159,6 +159,7 @@ static void imx_set_mode(enum clock_event_mode mode, struct clock_event_device *
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_RESUME:
 		/* Left event sources disabled, no more interrupts appears */
 		break;
 	}
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 4c54a86eda3a..c1271c449246 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -459,6 +459,8 @@ static void ixp4xx_set_mode(enum clock_event_mode mode,
 	default:
 		osrt = opts = 0;
 		break;
+	case CLOCK_EVT_MODE_RESUME:
+		break;
 	}
 
 	*IXP4XX_OSRT1 = osrt | opts;
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 3705d20c4e5c..237651ebae5d 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -156,6 +156,7 @@ static void omap_mpu_set_mode(enum clock_event_mode mode,
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_RESUME:
 		break;
 	}
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c04124a095cf..846cce48e2b7 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -145,8 +145,8 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	__do_kernel_fault(mm, addr, fsr, regs);
 }
 
-#define VM_FAULT_BADMAP (-20)
-#define VM_FAULT_BADACCESS (-21)
+#define VM_FAULT_BADMAP 0x010000
+#define VM_FAULT_BADACCESS 0x020000
 
 static int
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
@@ -249,7 +249,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
 	 */
-	if (likely(!(fault & VM_FAULT_ERROR)))
+	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
 	/*
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
index 2feceec8eccd..b0af014b0e2c 100644
--- a/arch/arm/plat-omap/timer32k.c
+++ b/arch/arm/plat-omap/timer32k.c
@@ -156,6 +156,8 @@ static void omap_32k_timer_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		omap_32k_timer_stop();
 		break;
+	case CLOCK_EVT_MODE_RESUME:
+		break;
 	}
 }
 
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 6c4dc0a00e9f..2edcecdea8bd 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -13,6 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/leds.h>
16#include <linux/spi/spi.h> 17#include <linux/spi/spi.h>
17 18
18#include <asm/io.h> 19#include <asm/io.h>
@@ -21,6 +22,7 @@
21#include <asm/arch/at32ap7000.h> 22#include <asm/arch/at32ap7000.h>
22#include <asm/arch/board.h> 23#include <asm/arch/board.h>
23#include <asm/arch/init.h> 24#include <asm/arch/init.h>
25#include <asm/arch/portmux.h>
24 26
25/* Initialized by bootloader-specific startup code. */ 27/* Initialized by bootloader-specific startup code. */
26struct tag *bootloader_tags __initdata; 28struct tag *bootloader_tags __initdata;
@@ -100,8 +102,31 @@ void __init setup_board(void)
100 at32_setup_serial_console(0); 102 at32_setup_serial_console(0);
101} 103}
102 104
105static const struct gpio_led ngw_leds[] = {
106 { .name = "sys", .gpio = GPIO_PIN_PA(16), .active_low = 1,
107 .default_trigger = "heartbeat",
108 },
109 { .name = "a", .gpio = GPIO_PIN_PA(19), .active_low = 1, },
110 { .name = "b", .gpio = GPIO_PIN_PE(19), .active_low = 1, },
111};
112
113static const struct gpio_led_platform_data ngw_led_data = {
114 .num_leds = ARRAY_SIZE(ngw_leds),
115 .leds = (void *) ngw_leds,
116};
117
118static struct platform_device ngw_gpio_leds = {
119 .name = "leds-gpio",
120 .id = -1,
121 .dev = {
122 .platform_data = (void *) &ngw_led_data,
123 }
124};
125
103static int __init atngw100_init(void) 126static int __init atngw100_init(void)
104{ 127{
128 unsigned i;
129
105 /* 130 /*
106 * ATNGW100 uses 16-bit SDRAM interface, so we don't need to 131 * ATNGW100 uses 16-bit SDRAM interface, so we don't need to
107 * reserve any pins for it. 132 * reserve any pins for it.
@@ -116,6 +141,12 @@ static int __init atngw100_init(void)
116 141
117 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); 142 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
118 143
144 for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) {
145 at32_select_gpio(ngw_leds[i].gpio,
146 AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
147 }
148 platform_device_register(&ngw_gpio_leds);
149
119 return 0; 150 return 0;
120} 151}
121postcore_initcall(atngw100_init); 152postcore_initcall(atngw100_init);
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 49493ad3b5a9..b799a68ffd97 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -712,7 +712,21 @@ CONFIG_SPI_ATMEL=y
712# 712#
713# LED devices 713# LED devices
714# 714#
715# CONFIG_NEW_LEDS is not set 715CONFIG_NEW_LEDS=y
716CONFIG_LEDS_CLASS=y
717
718#
719# LED drivers
720#
721CONFIG_LEDS_GPIO=y
722
723#
724# LED Triggers
725#
726CONFIG_LEDS_TRIGGERS=y
727CONFIG_LEDS_TRIGGER_TIMER=y
728CONFIG_LEDS_TRIGGER_HEARTBEAT=y
729
716 730
717# 731#
718# LED drivers 732# LED drivers
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 7a11b905ef49..abb582bc218f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -18,6 +18,10 @@ config GENERIC_TIME
 	bool
 	default y
 
+config GENERIC_CMOS_UPDATE
+	bool
+	default y
+
 config CLOCKSOURCE_WATCHDOG
 	bool
 	default y
@@ -544,6 +548,7 @@ config HIGHMEM4G
 config HIGHMEM64G
 	bool "64GB"
 	depends on !M386 && !M486
+	select X86_PAE
 	help
 	  Select this if you have a 32-bit processor and more than 4
 	  gigabytes of physical RAM.
@@ -573,12 +578,12 @@ choice
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
 	config VMSPLIT_3G_OPT
-		depends on !HIGHMEM
+		depends on !X86_PAE
 		bool "3G/1G user/kernel split (for full 1G low memory)"
 	config VMSPLIT_2G
 		bool "2G/2G user/kernel split"
 	config VMSPLIT_2G_OPT
-		depends on !HIGHMEM
+		depends on !X86_PAE
 		bool "2G/2G user/kernel split (for full 2G low memory)"
 	config VMSPLIT_1G
 		bool "1G/3G user/kernel split"
@@ -598,10 +603,15 @@ config HIGHMEM
 	default y
 
 config X86_PAE
-	bool
-	depends on HIGHMEM64G
-	default y
+	bool "PAE (Physical Address Extension) Support"
+	default n
+	depends on !HIGHMEM4G
 	select RESOURCES_64BIT
+	help
+	  PAE is required for NX support, and furthermore enables
+	  larger swapspace support for non-overcommit purposes. It
+	  has the cost of more pagetable lookup overhead, and also
+	  consumes more pagetable space per process.
 
 # Common NUMA Features
 config NUMA
@@ -817,6 +827,7 @@ config CRASH_DUMP
 
 config PHYSICAL_START
 	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	default "0x1000000" if X86_NUMAQ
 	default "0x100000"
 	help
 	  This gives the physical address where the kernel is loaded.
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
index b0e21c3cee5c..2d77ee728f92 100644
--- a/arch/i386/boot/compressed/relocs.c
+++ b/arch/i386/boot/compressed/relocs.c
@@ -31,6 +31,7 @@ static const char* safe_abs_relocs[] = {
 	"__kernel_rt_sigreturn",
 	"__kernel_sigreturn",
 	"SYSENTER_RETURN",
+	"VDSO_NOTE_MASK",
 	"xen_irq_disable_direct_reloc",
 	"xen_save_fl_direct_reloc",
 };
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 0ac62cdcd3b7..54ee1764fdae 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc2 3# Linux kernel version: 2.6.22-git14
4# Mon May 21 13:23:44 2007 4# Fri Jul 20 09:53:15 2007
5# 5#
6CONFIG_X86_32=y 6CONFIG_X86_32=y
7CONFIG_GENERIC_TIME=y 7CONFIG_GENERIC_TIME=y
@@ -37,19 +37,18 @@ CONFIG_LOCALVERSION=""
37CONFIG_LOCALVERSION_AUTO=y 37CONFIG_LOCALVERSION_AUTO=y
38CONFIG_SWAP=y 38CONFIG_SWAP=y
39CONFIG_SYSVIPC=y 39CONFIG_SYSVIPC=y
40# CONFIG_IPC_NS is not set
41CONFIG_SYSVIPC_SYSCTL=y 40CONFIG_SYSVIPC_SYSCTL=y
42CONFIG_POSIX_MQUEUE=y 41CONFIG_POSIX_MQUEUE=y
43# CONFIG_BSD_PROCESS_ACCT is not set 42# CONFIG_BSD_PROCESS_ACCT is not set
44# CONFIG_TASKSTATS is not set 43# CONFIG_TASKSTATS is not set
45# CONFIG_UTS_NS is not set 44# CONFIG_USER_NS is not set
46# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
47CONFIG_IKCONFIG=y 46CONFIG_IKCONFIG=y
48CONFIG_IKCONFIG_PROC=y 47CONFIG_IKCONFIG_PROC=y
49CONFIG_LOG_BUF_SHIFT=18 48CONFIG_LOG_BUF_SHIFT=18
50# CONFIG_CPUSETS is not set 49# CONFIG_CPUSETS is not set
51CONFIG_SYSFS_DEPRECATED=y 50CONFIG_SYSFS_DEPRECATED=y
52# CONFIG_RELAY is not set 51CONFIG_RELAY=y
53CONFIG_BLK_DEV_INITRD=y 52CONFIG_BLK_DEV_INITRD=y
54CONFIG_INITRAMFS_SOURCE="" 53CONFIG_INITRAMFS_SOURCE=""
55CONFIG_CC_OPTIMIZE_FOR_SIZE=y 54CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -73,16 +72,13 @@ CONFIG_TIMERFD=y
73CONFIG_EVENTFD=y 72CONFIG_EVENTFD=y
74CONFIG_SHMEM=y 73CONFIG_SHMEM=y
75CONFIG_VM_EVENT_COUNTERS=y 74CONFIG_VM_EVENT_COUNTERS=y
76CONFIG_SLAB=y 75CONFIG_SLUB_DEBUG=y
77# CONFIG_SLUB is not set 76# CONFIG_SLAB is not set
77CONFIG_SLUB=y
78# CONFIG_SLOB is not set 78# CONFIG_SLOB is not set
79CONFIG_RT_MUTEXES=y 79CONFIG_RT_MUTEXES=y
80# CONFIG_TINY_SHMEM is not set 80# CONFIG_TINY_SHMEM is not set
81CONFIG_BASE_SMALL=0 81CONFIG_BASE_SMALL=0
82
83#
84# Loadable module support
85#
86CONFIG_MODULES=y 82CONFIG_MODULES=y
87CONFIG_MODULE_UNLOAD=y 83CONFIG_MODULE_UNLOAD=y
88CONFIG_MODULE_FORCE_UNLOAD=y 84CONFIG_MODULE_FORCE_UNLOAD=y
@@ -90,14 +86,11 @@ CONFIG_MODULE_FORCE_UNLOAD=y
90# CONFIG_MODULE_SRCVERSION_ALL is not set 86# CONFIG_MODULE_SRCVERSION_ALL is not set
91# CONFIG_KMOD is not set 87# CONFIG_KMOD is not set
92CONFIG_STOP_MACHINE=y 88CONFIG_STOP_MACHINE=y
93
94#
95# Block layer
96#
97CONFIG_BLOCK=y 89CONFIG_BLOCK=y
98CONFIG_LBD=y 90CONFIG_LBD=y
99# CONFIG_BLK_DEV_IO_TRACE is not set 91# CONFIG_BLK_DEV_IO_TRACE is not set
100# CONFIG_LSF is not set 92# CONFIG_LSF is not set
93# CONFIG_BLK_DEV_BSG is not set
101 94
102# 95#
103# IO Schedulers 96# IO Schedulers
@@ -201,6 +194,7 @@ CONFIG_X86_CPUID=y
201# CONFIG_EDD is not set 194# CONFIG_EDD is not set
202# CONFIG_DELL_RBU is not set 195# CONFIG_DELL_RBU is not set
203# CONFIG_DCDBAS is not set 196# CONFIG_DCDBAS is not set
197CONFIG_DMIID=y
204# CONFIG_NOHIGHMEM is not set 198# CONFIG_NOHIGHMEM is not set
205CONFIG_HIGHMEM4G=y 199CONFIG_HIGHMEM4G=y
206# CONFIG_HIGHMEM64G is not set 200# CONFIG_HIGHMEM64G is not set
@@ -217,7 +211,9 @@ CONFIG_FLAT_NODE_MEM_MAP=y
217CONFIG_SPLIT_PTLOCK_CPUS=4 211CONFIG_SPLIT_PTLOCK_CPUS=4
218CONFIG_RESOURCES_64BIT=y 212CONFIG_RESOURCES_64BIT=y
219CONFIG_ZONE_DMA_FLAG=1 213CONFIG_ZONE_DMA_FLAG=1
214CONFIG_BOUNCE=y
220CONFIG_NR_QUICK=1 215CONFIG_NR_QUICK=1
216CONFIG_VIRT_TO_BUS=y
221# CONFIG_HIGHPTE is not set 217# CONFIG_HIGHPTE is not set
222# CONFIG_MATH_EMULATION is not set 218# CONFIG_MATH_EMULATION is not set
223CONFIG_MTRR=y 219CONFIG_MTRR=y
@@ -244,7 +240,6 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
244CONFIG_PM=y 240CONFIG_PM=y
245CONFIG_PM_LEGACY=y 241CONFIG_PM_LEGACY=y
246# CONFIG_PM_DEBUG is not set 242# CONFIG_PM_DEBUG is not set
247# CONFIG_PM_SYSFS_DEPRECATED is not set
248 243
249# 244#
250# ACPI (Advanced Configuration and Power Interface) Support 245# ACPI (Advanced Configuration and Power Interface) Support
@@ -284,7 +279,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
284# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 279# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
285CONFIG_CPU_FREQ_GOV_USERSPACE=y 280CONFIG_CPU_FREQ_GOV_USERSPACE=y
286CONFIG_CPU_FREQ_GOV_ONDEMAND=y 281CONFIG_CPU_FREQ_GOV_ONDEMAND=y
287# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set 282CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
288 283
289# 284#
290# CPUFreq processor drivers 285# CPUFreq processor drivers
@@ -325,7 +320,7 @@ CONFIG_PCI_MMCONFIG=y
325CONFIG_ARCH_SUPPORTS_MSI=y 320CONFIG_ARCH_SUPPORTS_MSI=y
326CONFIG_PCI_MSI=y 321CONFIG_PCI_MSI=y
327# CONFIG_PCI_DEBUG is not set 322# CONFIG_PCI_DEBUG is not set
328CONFIG_HT_IRQ=y 323# CONFIG_HT_IRQ is not set
329CONFIG_ISA_DMA_API=y 324CONFIG_ISA_DMA_API=y
330# CONFIG_ISA is not set 325# CONFIG_ISA is not set
331# CONFIG_MCA is not set 326# CONFIG_MCA is not set
@@ -381,7 +376,7 @@ CONFIG_IP_PNP_DHCP=y
381CONFIG_INET_TUNNEL=y 376CONFIG_INET_TUNNEL=y
382CONFIG_INET_XFRM_MODE_TRANSPORT=y 377CONFIG_INET_XFRM_MODE_TRANSPORT=y
383CONFIG_INET_XFRM_MODE_TUNNEL=y 378CONFIG_INET_XFRM_MODE_TUNNEL=y
384CONFIG_INET_XFRM_MODE_BEET=y 379# CONFIG_INET_XFRM_MODE_BEET is not set
385CONFIG_INET_DIAG=y 380CONFIG_INET_DIAG=y
386CONFIG_INET_TCP_DIAG=y 381CONFIG_INET_TCP_DIAG=y
387# CONFIG_TCP_CONG_ADVANCED is not set 382# CONFIG_TCP_CONG_ADVANCED is not set
@@ -400,27 +395,15 @@ CONFIG_IPV6=y
400# CONFIG_INET6_TUNNEL is not set 395# CONFIG_INET6_TUNNEL is not set
401CONFIG_INET6_XFRM_MODE_TRANSPORT=y 396CONFIG_INET6_XFRM_MODE_TRANSPORT=y
402CONFIG_INET6_XFRM_MODE_TUNNEL=y 397CONFIG_INET6_XFRM_MODE_TUNNEL=y
403CONFIG_INET6_XFRM_MODE_BEET=y 398# CONFIG_INET6_XFRM_MODE_BEET is not set
404# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 399# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
405CONFIG_IPV6_SIT=y 400CONFIG_IPV6_SIT=y
406# CONFIG_IPV6_TUNNEL is not set 401# CONFIG_IPV6_TUNNEL is not set
407# CONFIG_IPV6_MULTIPLE_TABLES is not set 402# CONFIG_IPV6_MULTIPLE_TABLES is not set
408# CONFIG_NETWORK_SECMARK is not set 403# CONFIG_NETWORK_SECMARK is not set
409# CONFIG_NETFILTER is not set 404# CONFIG_NETFILTER is not set
410
411#
412# DCCP Configuration (EXPERIMENTAL)
413#
414# CONFIG_IP_DCCP is not set 405# CONFIG_IP_DCCP is not set
415
416#
417# SCTP Configuration (EXPERIMENTAL)
418#
419# CONFIG_IP_SCTP is not set 406# CONFIG_IP_SCTP is not set
420
421#
422# TIPC Configuration (EXPERIMENTAL)
423#
424# CONFIG_TIPC is not set 407# CONFIG_TIPC is not set
425# CONFIG_ATM is not set 408# CONFIG_ATM is not set
426# CONFIG_BRIDGE is not set 409# CONFIG_BRIDGE is not set
@@ -457,6 +440,7 @@ CONFIG_IPV6_SIT=y
457# CONFIG_MAC80211 is not set 440# CONFIG_MAC80211 is not set
458# CONFIG_IEEE80211 is not set 441# CONFIG_IEEE80211 is not set
459# CONFIG_RFKILL is not set 442# CONFIG_RFKILL is not set
443# CONFIG_NET_9P is not set
460 444
461# 445#
462# Device Drivers 446# Device Drivers
@@ -471,21 +455,9 @@ CONFIG_FW_LOADER=y
471# CONFIG_DEBUG_DRIVER is not set 455# CONFIG_DEBUG_DRIVER is not set
472# CONFIG_DEBUG_DEVRES is not set 456# CONFIG_DEBUG_DEVRES is not set
473# CONFIG_SYS_HYPERVISOR is not set 457# CONFIG_SYS_HYPERVISOR is not set
474
475#
476# Connector - unified userspace <-> kernelspace linker
477#
478# CONFIG_CONNECTOR is not set 458# CONFIG_CONNECTOR is not set
479# CONFIG_MTD is not set 459# CONFIG_MTD is not set
480
481#
482# Parallel port support
483#
484# CONFIG_PARPORT is not set 460# CONFIG_PARPORT is not set
485
486#
487# Plug and Play support
488#
489CONFIG_PNP=y 461CONFIG_PNP=y
490# CONFIG_PNP_DEBUG is not set 462# CONFIG_PNP_DEBUG is not set
491 463
@@ -493,10 +465,7 @@ CONFIG_PNP=y
493# Protocols 465# Protocols
494# 466#
495CONFIG_PNPACPI=y 467CONFIG_PNPACPI=y
496 468CONFIG_BLK_DEV=y
497#
498# Block devices
499#
500CONFIG_BLK_DEV_FD=y 469CONFIG_BLK_DEV_FD=y
501# CONFIG_BLK_CPQ_DA is not set 470# CONFIG_BLK_CPQ_DA is not set
502# CONFIG_BLK_CPQ_CISS_DA is not set 471# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -514,17 +483,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
514CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 483CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
515# CONFIG_CDROM_PKTCDVD is not set 484# CONFIG_CDROM_PKTCDVD is not set
516# CONFIG_ATA_OVER_ETH is not set 485# CONFIG_ATA_OVER_ETH is not set
517 486CONFIG_MISC_DEVICES=y
518#
519# Misc devices
520#
521# CONFIG_IBM_ASM is not set 487# CONFIG_IBM_ASM is not set
522# CONFIG_PHANTOM is not set 488# CONFIG_PHANTOM is not set
489# CONFIG_EEPROM_93CX6 is not set
523# CONFIG_SGI_IOC4 is not set 490# CONFIG_SGI_IOC4 is not set
524# CONFIG_TIFM_CORE is not set 491# CONFIG_TIFM_CORE is not set
525# CONFIG_SONY_LAPTOP is not set 492# CONFIG_SONY_LAPTOP is not set
526# CONFIG_THINKPAD_ACPI is not set 493# CONFIG_THINKPAD_ACPI is not set
527# CONFIG_BLINK is not set
528CONFIG_IDE=y 494CONFIG_IDE=y
529CONFIG_BLK_DEV_IDE=y 495CONFIG_BLK_DEV_IDE=y
530 496
@@ -596,6 +562,7 @@ CONFIG_BLK_DEV_IDEDMA=y
596# 562#
597# CONFIG_RAID_ATTRS is not set 563# CONFIG_RAID_ATTRS is not set
598CONFIG_SCSI=y 564CONFIG_SCSI=y
565CONFIG_SCSI_DMA=y
599# CONFIG_SCSI_TGT is not set 566# CONFIG_SCSI_TGT is not set
600CONFIG_SCSI_NETLINK=y 567CONFIG_SCSI_NETLINK=y
601# CONFIG_SCSI_PROC_FS is not set 568# CONFIG_SCSI_PROC_FS is not set
@@ -606,8 +573,9 @@ CONFIG_SCSI_NETLINK=y
606CONFIG_BLK_DEV_SD=y 573CONFIG_BLK_DEV_SD=y
607# CONFIG_CHR_DEV_ST is not set 574# CONFIG_CHR_DEV_ST is not set
608# CONFIG_CHR_DEV_OSST is not set 575# CONFIG_CHR_DEV_OSST is not set
609# CONFIG_BLK_DEV_SR is not set 576CONFIG_BLK_DEV_SR=y
610# CONFIG_CHR_DEV_SG is not set 577# CONFIG_BLK_DEV_SR_VENDOR is not set
578CONFIG_CHR_DEV_SG=y
611# CONFIG_CHR_DEV_SCH is not set 579# CONFIG_CHR_DEV_SCH is not set
612 580
613# 581#
@@ -667,6 +635,7 @@ CONFIG_AIC79XX_DEBUG_MASK=0
667# CONFIG_SCSI_INIA100 is not set 635# CONFIG_SCSI_INIA100 is not set
668# CONFIG_SCSI_STEX is not set 636# CONFIG_SCSI_STEX is not set
669# CONFIG_SCSI_SYM53C8XX_2 is not set 637# CONFIG_SCSI_SYM53C8XX_2 is not set
638# CONFIG_SCSI_IPR is not set
670# CONFIG_SCSI_QLOGIC_1280 is not set 639# CONFIG_SCSI_QLOGIC_1280 is not set
671# CONFIG_SCSI_QLA_FC is not set 640# CONFIG_SCSI_QLA_FC is not set
672# CONFIG_SCSI_QLA_ISCSI is not set 641# CONFIG_SCSI_QLA_ISCSI is not set
@@ -675,14 +644,73 @@ CONFIG_AIC79XX_DEBUG_MASK=0
675# CONFIG_SCSI_DC390T is not set 644# CONFIG_SCSI_DC390T is not set
676# CONFIG_SCSI_NSP32 is not set 645# CONFIG_SCSI_NSP32 is not set
677# CONFIG_SCSI_DEBUG is not set 646# CONFIG_SCSI_DEBUG is not set
678# CONFIG_SCSI_ESP_CORE is not set
679# CONFIG_SCSI_SRP is not set 647# CONFIG_SCSI_SRP is not set
680# CONFIG_ATA is not set 648CONFIG_ATA=y
681 649# CONFIG_ATA_NONSTANDARD is not set
682# 650CONFIG_ATA_ACPI=y
683# Multi-device support (RAID and LVM) 651CONFIG_SATA_AHCI=y
684# 652CONFIG_SATA_SVW=y
685# CONFIG_MD is not set 653CONFIG_ATA_PIIX=y
654# CONFIG_SATA_MV is not set
655CONFIG_SATA_NV=y
656# CONFIG_PDC_ADMA is not set
657# CONFIG_SATA_QSTOR is not set
658# CONFIG_SATA_PROMISE is not set
659# CONFIG_SATA_SX4 is not set
660CONFIG_SATA_SIL=y
661# CONFIG_SATA_SIL24 is not set
662# CONFIG_SATA_SIS is not set
663# CONFIG_SATA_ULI is not set
664CONFIG_SATA_VIA=y
665# CONFIG_SATA_VITESSE is not set
666# CONFIG_SATA_INIC162X is not set
667# CONFIG_PATA_ALI is not set
668# CONFIG_PATA_AMD is not set
669# CONFIG_PATA_ARTOP is not set
670# CONFIG_PATA_ATIIXP is not set
671# CONFIG_PATA_CMD640_PCI is not set
672# CONFIG_PATA_CMD64X is not set
673# CONFIG_PATA_CS5520 is not set
674# CONFIG_PATA_CS5530 is not set
675# CONFIG_PATA_CS5535 is not set
676# CONFIG_PATA_CYPRESS is not set
677# CONFIG_PATA_EFAR is not set
678# CONFIG_ATA_GENERIC is not set
679# CONFIG_PATA_HPT366 is not set
680# CONFIG_PATA_HPT37X is not set
681# CONFIG_PATA_HPT3X2N is not set
682# CONFIG_PATA_HPT3X3 is not set
683# CONFIG_PATA_IT821X is not set
684# CONFIG_PATA_IT8213 is not set
685# CONFIG_PATA_JMICRON is not set
686# CONFIG_PATA_TRIFLEX is not set
687# CONFIG_PATA_MARVELL is not set
688# CONFIG_PATA_MPIIX is not set
689# CONFIG_PATA_OLDPIIX is not set
690# CONFIG_PATA_NETCELL is not set
691# CONFIG_PATA_NS87410 is not set
692# CONFIG_PATA_OPTI is not set
693# CONFIG_PATA_OPTIDMA is not set
694# CONFIG_PATA_PDC_OLD is not set
695# CONFIG_PATA_RADISYS is not set
696# CONFIG_PATA_RZ1000 is not set
697# CONFIG_PATA_SC1200 is not set
698# CONFIG_PATA_SERVERWORKS is not set
699# CONFIG_PATA_PDC2027X is not set
700# CONFIG_PATA_SIL680 is not set
701# CONFIG_PATA_SIS is not set
702# CONFIG_PATA_VIA is not set
703# CONFIG_PATA_WINBOND is not set
704CONFIG_MD=y
705# CONFIG_BLK_DEV_MD is not set
706CONFIG_BLK_DEV_DM=y
707# CONFIG_DM_DEBUG is not set
708# CONFIG_DM_CRYPT is not set
709# CONFIG_DM_SNAPSHOT is not set
710# CONFIG_DM_MIRROR is not set
711# CONFIG_DM_ZERO is not set
712# CONFIG_DM_MULTIPATH is not set
713# CONFIG_DM_DELAY is not set
686 714
687# 715#
688# Fusion MPT device support 716# Fusion MPT device support
@@ -723,42 +751,27 @@ CONFIG_IEEE1394_OHCI1394=y
723# CONFIG_IEEE1394_ETH1394 is not set 751# CONFIG_IEEE1394_ETH1394 is not set
724# CONFIG_IEEE1394_DV1394 is not set 752# CONFIG_IEEE1394_DV1394 is not set
725CONFIG_IEEE1394_RAWIO=y 753CONFIG_IEEE1394_RAWIO=y
726
727#
728# I2O device support
729#
730# CONFIG_I2O is not set 754# CONFIG_I2O is not set
731# CONFIG_MACINTOSH_DRIVERS is not set 755CONFIG_MACINTOSH_DRIVERS=y
732 756# CONFIG_MAC_EMUMOUSEBTN is not set
733#
734# Network device support
735#
736CONFIG_NETDEVICES=y 757CONFIG_NETDEVICES=y
758CONFIG_NETDEVICES_MULTIQUEUE=y
737# CONFIG_DUMMY is not set 759# CONFIG_DUMMY is not set
738# CONFIG_BONDING is not set 760# CONFIG_BONDING is not set
761# CONFIG_MACVLAN is not set
739# CONFIG_EQUALIZER is not set 762# CONFIG_EQUALIZER is not set
740# CONFIG_TUN is not set 763# CONFIG_TUN is not set
741# CONFIG_NET_SB1000 is not set 764# CONFIG_NET_SB1000 is not set
742
743#
744# ARCnet devices
745#
746# CONFIG_ARCNET is not set 765# CONFIG_ARCNET is not set
747# CONFIG_PHYLIB is not set 766# CONFIG_PHYLIB is not set
748
749#
750# Ethernet (10 or 100Mbit)
751#
752CONFIG_NET_ETHERNET=y 767CONFIG_NET_ETHERNET=y
753CONFIG_MII=y 768CONFIG_MII=y
754# CONFIG_HAPPYMEAL is not set 769# CONFIG_HAPPYMEAL is not set
755# CONFIG_SUNGEM is not set 770# CONFIG_SUNGEM is not set
756# CONFIG_CASSINI is not set 771# CONFIG_CASSINI is not set
757# CONFIG_NET_VENDOR_3COM is not set 772CONFIG_NET_VENDOR_3COM=y
758 773CONFIG_VORTEX=y
759# 774# CONFIG_TYPHOON is not set
760# Tulip family network device support
761#
762CONFIG_NET_TULIP=y 775CONFIG_NET_TULIP=y
763# CONFIG_DE2104X is not set 776# CONFIG_DE2104X is not set
764CONFIG_TULIP=y 777CONFIG_TULIP=y
@@ -809,7 +822,6 @@ CONFIG_R8169=y
809# CONFIG_SIS190 is not set 822# CONFIG_SIS190 is not set
810# CONFIG_SKGE is not set 823# CONFIG_SKGE is not set
811CONFIG_SKY2=y 824CONFIG_SKY2=y
812# CONFIG_SK98LIN is not set
813# CONFIG_VIA_VELOCITY is not set 825# CONFIG_VIA_VELOCITY is not set
814CONFIG_TIGON3=y 826CONFIG_TIGON3=y
815CONFIG_BNX2=y 827CONFIG_BNX2=y
@@ -823,10 +835,6 @@ CONFIG_NETDEV_10000=y
823# CONFIG_MYRI10GE is not set 835# CONFIG_MYRI10GE is not set
824# CONFIG_NETXEN_NIC is not set 836# CONFIG_NETXEN_NIC is not set
825# CONFIG_MLX4_CORE is not set 837# CONFIG_MLX4_CORE is not set
826
827#
828# Token Ring devices
829#
830# CONFIG_TR is not set 838# CONFIG_TR is not set
831 839
832# 840#
@@ -855,15 +863,7 @@ CONFIG_NETCONSOLE=y
855CONFIG_NETPOLL=y 863CONFIG_NETPOLL=y
856# CONFIG_NETPOLL_TRAP is not set 864# CONFIG_NETPOLL_TRAP is not set
857CONFIG_NET_POLL_CONTROLLER=y 865CONFIG_NET_POLL_CONTROLLER=y
858
859#
860# ISDN subsystem
861#
862# CONFIG_ISDN is not set 866# CONFIG_ISDN is not set
863
864#
865# Telephony Support
866#
867# CONFIG_PHONE is not set 867# CONFIG_PHONE is not set
868 868
869# 869#
@@ -871,6 +871,7 @@ CONFIG_NET_POLL_CONTROLLER=y
871# 871#
872CONFIG_INPUT=y 872CONFIG_INPUT=y
873# CONFIG_INPUT_FF_MEMLESS is not set 873# CONFIG_INPUT_FF_MEMLESS is not set
874# CONFIG_INPUT_POLLDEV is not set
874 875
875# 876#
876# Userland interfaces 877# Userland interfaces
@@ -936,6 +937,7 @@ CONFIG_HW_CONSOLE=y
936# 937#
937CONFIG_SERIAL_8250=y 938CONFIG_SERIAL_8250=y
938CONFIG_SERIAL_8250_CONSOLE=y 939CONFIG_SERIAL_8250_CONSOLE=y
940CONFIG_FIX_EARLYCON_MEM=y
939CONFIG_SERIAL_8250_PCI=y 941CONFIG_SERIAL_8250_PCI=y
940CONFIG_SERIAL_8250_PNP=y 942CONFIG_SERIAL_8250_PNP=y
941CONFIG_SERIAL_8250_NR_UARTS=4 943CONFIG_SERIAL_8250_NR_UARTS=4
@@ -951,10 +953,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
951CONFIG_UNIX98_PTYS=y 953CONFIG_UNIX98_PTYS=y
952CONFIG_LEGACY_PTYS=y 954CONFIG_LEGACY_PTYS=y
953CONFIG_LEGACY_PTY_COUNT=256 955CONFIG_LEGACY_PTY_COUNT=256
954
955#
956# IPMI
957#
958# CONFIG_IPMI_HANDLER is not set 956# CONFIG_IPMI_HANDLER is not set
959# CONFIG_WATCHDOG is not set 957# CONFIG_WATCHDOG is not set
960CONFIG_HW_RANDOM=y 958CONFIG_HW_RANDOM=y
@@ -988,11 +986,7 @@ CONFIG_MAX_RAW_DEVS=256
988CONFIG_HPET=y 986CONFIG_HPET=y
989# CONFIG_HPET_RTC_IRQ is not set 987# CONFIG_HPET_RTC_IRQ is not set
990CONFIG_HPET_MMAP=y 988CONFIG_HPET_MMAP=y
991CONFIG_HANGCHECK_TIMER=y 989# CONFIG_HANGCHECK_TIMER is not set
992
993#
994# TPM devices
995#
996# CONFIG_TCG_TPM is not set 990# CONFIG_TCG_TPM is not set
997# CONFIG_TELCLOCK is not set 991# CONFIG_TELCLOCK is not set
998CONFIG_DEVPORT=y 992CONFIG_DEVPORT=y
@@ -1003,11 +997,8 @@ CONFIG_DEVPORT=y
1003# 997#
1004# CONFIG_SPI is not set 998# CONFIG_SPI is not set
1005# CONFIG_SPI_MASTER is not set 999# CONFIG_SPI_MASTER is not set
1006
1007#
1008# Dallas's 1-wire bus
1009#
1010# CONFIG_W1 is not set 1000# CONFIG_W1 is not set
1001# CONFIG_POWER_SUPPLY is not set
1011# CONFIG_HWMON is not set 1002# CONFIG_HWMON is not set
1012 1003
1013# 1004#
@@ -1041,7 +1032,7 @@ CONFIG_DAB=y
1041CONFIG_VGA_CONSOLE=y 1032CONFIG_VGA_CONSOLE=y
1042CONFIG_VGACON_SOFT_SCROLLBACK=y 1033CONFIG_VGACON_SOFT_SCROLLBACK=y
1043CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 1034CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128
1044# CONFIG_VIDEO_SELECT is not set 1035CONFIG_VIDEO_SELECT=y
1045CONFIG_DUMMY_CONSOLE=y 1036CONFIG_DUMMY_CONSOLE=y
1046 1037
1047# 1038#
@@ -1058,15 +1049,11 @@ CONFIG_SOUND=y
1058# Open Sound System 1049# Open Sound System
1059# 1050#
1060CONFIG_SOUND_PRIME=y 1051CONFIG_SOUND_PRIME=y
1061# CONFIG_OSS_OBSOLETE is not set
1062# CONFIG_SOUND_TRIDENT is not set 1052# CONFIG_SOUND_TRIDENT is not set
1063# CONFIG_SOUND_MSNDCLAS is not set 1053# CONFIG_SOUND_MSNDCLAS is not set
1064# CONFIG_SOUND_MSNDPIN is not set 1054# CONFIG_SOUND_MSNDPIN is not set
1065# CONFIG_SOUND_OSS is not set 1055# CONFIG_SOUND_OSS is not set
1066 1056CONFIG_HID_SUPPORT=y
1067#
1068# HID Devices
1069#
1070CONFIG_HID=y 1057CONFIG_HID=y
1071# CONFIG_HID_DEBUG is not set 1058# CONFIG_HID_DEBUG is not set
1072 1059
@@ -1077,10 +1064,7 @@ CONFIG_USB_HID=y
1077# CONFIG_USB_HIDINPUT_POWERBOOK is not set 1064# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1078# CONFIG_HID_FF is not set 1065# CONFIG_HID_FF is not set
1079# CONFIG_USB_HIDDEV is not set 1066# CONFIG_USB_HIDDEV is not set
1080 1067CONFIG_USB_SUPPORT=y
1081#
1082# USB support
1083#
1084CONFIG_USB_ARCH_HAS_HCD=y 1068CONFIG_USB_ARCH_HAS_HCD=y
1085CONFIG_USB_ARCH_HAS_OHCI=y 1069CONFIG_USB_ARCH_HAS_OHCI=y
1086CONFIG_USB_ARCH_HAS_EHCI=y 1070CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1094,6 +1078,7 @@ CONFIG_USB_DEVICEFS=y
1094# CONFIG_USB_DEVICE_CLASS is not set 1078# CONFIG_USB_DEVICE_CLASS is not set
1095# CONFIG_USB_DYNAMIC_MINORS is not set 1079# CONFIG_USB_DYNAMIC_MINORS is not set
1096# CONFIG_USB_SUSPEND is not set 1080# CONFIG_USB_SUSPEND is not set
1081# CONFIG_USB_PERSIST is not set
1097# CONFIG_USB_OTG is not set 1082# CONFIG_USB_OTG is not set
1098 1083
1099# 1084#
@@ -1103,7 +1088,6 @@ CONFIG_USB_EHCI_HCD=y
1103# CONFIG_USB_EHCI_SPLIT_ISO is not set 1088# CONFIG_USB_EHCI_SPLIT_ISO is not set
1104# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1089# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1105# CONFIG_USB_EHCI_TT_NEWSCHED is not set 1090# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1106# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1107# CONFIG_USB_ISP116X_HCD is not set 1091# CONFIG_USB_ISP116X_HCD is not set
1108CONFIG_USB_OHCI_HCD=y 1092CONFIG_USB_OHCI_HCD=y
1109# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1093# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1111,6 +1095,7 @@ CONFIG_USB_OHCI_HCD=y
1111CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1095CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1112CONFIG_USB_UHCI_HCD=y 1096CONFIG_USB_UHCI_HCD=y
1113# CONFIG_USB_SL811_HCD is not set 1097# CONFIG_USB_SL811_HCD is not set
1098# CONFIG_USB_R8A66597_HCD is not set
1114 1099
1115# 1100#
1116# USB Device Class drivers 1101# USB Device Class drivers
@@ -1201,15 +1186,7 @@ CONFIG_USB_MON=y
1201# 1186#
1202# LED Triggers 1187# LED Triggers
1203# 1188#
1204
1205#
1206# InfiniBand support
1207#
1208# CONFIG_INFINIBAND is not set 1189# CONFIG_INFINIBAND is not set
1209
1210#
1211# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1212#
1213# CONFIG_EDAC is not set 1190# CONFIG_EDAC is not set
1214 1191
1215# 1192#
@@ -1229,11 +1206,13 @@ CONFIG_USB_MON=y
1229# 1206#
1230# DMA Devices 1207# DMA Devices
1231# 1208#
1209CONFIG_VIRTUALIZATION=y
1210# CONFIG_KVM is not set
1232 1211
1233# 1212#
1234# Virtualization 1213# Userspace I/O
1235# 1214#
1236# CONFIG_KVM is not set 1215# CONFIG_UIO is not set
1237 1216
1238# 1217#
1239# File systems 1218# File systems
@@ -1271,6 +1250,7 @@ CONFIG_DNOTIFY=y
1271# CONFIG_AUTOFS_FS is not set 1250# CONFIG_AUTOFS_FS is not set
1272CONFIG_AUTOFS4_FS=y 1251CONFIG_AUTOFS4_FS=y
1273# CONFIG_FUSE_FS is not set 1252# CONFIG_FUSE_FS is not set
1253CONFIG_GENERIC_ACL=y
1274 1254
1275# 1255#
1276# CD-ROM/DVD Filesystems 1256# CD-ROM/DVD Filesystems
@@ -1298,7 +1278,7 @@ CONFIG_PROC_KCORE=y
1298CONFIG_PROC_SYSCTL=y 1278CONFIG_PROC_SYSCTL=y
1299CONFIG_SYSFS=y 1279CONFIG_SYSFS=y
1300CONFIG_TMPFS=y 1280CONFIG_TMPFS=y
1301# CONFIG_TMPFS_POSIX_ACL is not set 1281CONFIG_TMPFS_POSIX_ACL=y
1302CONFIG_HUGETLBFS=y 1282CONFIG_HUGETLBFS=y
1303CONFIG_HUGETLB_PAGE=y 1283CONFIG_HUGETLB_PAGE=y
1304CONFIG_RAMFS=y 1284CONFIG_RAMFS=y
@@ -1348,7 +1328,6 @@ CONFIG_SUNRPC=y
1348# CONFIG_NCP_FS is not set 1328# CONFIG_NCP_FS is not set
1349# CONFIG_CODA_FS is not set 1329# CONFIG_CODA_FS is not set
1350# CONFIG_AFS_FS is not set 1330# CONFIG_AFS_FS is not set
1351# CONFIG_9P_FS is not set
1352 1331
1353# 1332#
1354# Partition Types 1333# Partition Types
@@ -1404,10 +1383,7 @@ CONFIG_NLS_UTF8=y
1404# Distributed Lock Manager 1383# Distributed Lock Manager
1405# 1384#
1406# CONFIG_DLM is not set 1385# CONFIG_DLM is not set
1407 1386CONFIG_INSTRUMENTATION=y
1408#
1409# Instrumentation Support
1410#
1411CONFIG_PROFILING=y 1387CONFIG_PROFILING=y
1412CONFIG_OPROFILE=y 1388CONFIG_OPROFILE=y
1413CONFIG_KPROBES=y 1389CONFIG_KPROBES=y
@@ -1417,7 +1393,7 @@ CONFIG_KPROBES=y
1417# 1393#
1418CONFIG_TRACE_IRQFLAGS_SUPPORT=y 1394CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1419# CONFIG_PRINTK_TIME is not set 1395# CONFIG_PRINTK_TIME is not set
1420CONFIG_ENABLE_MUST_CHECK=y 1396# CONFIG_ENABLE_MUST_CHECK is not set
1421CONFIG_MAGIC_SYSRQ=y 1397CONFIG_MAGIC_SYSRQ=y
1422CONFIG_UNUSED_SYMBOLS=y 1398CONFIG_UNUSED_SYMBOLS=y
1423# CONFIG_DEBUG_FS is not set 1399# CONFIG_DEBUG_FS is not set
@@ -1425,15 +1401,17 @@ CONFIG_UNUSED_SYMBOLS=y
1425CONFIG_DEBUG_KERNEL=y 1401CONFIG_DEBUG_KERNEL=y
1426# CONFIG_DEBUG_SHIRQ is not set 1402# CONFIG_DEBUG_SHIRQ is not set
1427CONFIG_DETECT_SOFTLOCKUP=y 1403CONFIG_DETECT_SOFTLOCKUP=y
1404# CONFIG_SCHED_DEBUG is not set
1428# CONFIG_SCHEDSTATS is not set 1405# CONFIG_SCHEDSTATS is not set
1429# CONFIG_TIMER_STATS is not set 1406CONFIG_TIMER_STATS=y
1430# CONFIG_DEBUG_SLAB is not set 1407# CONFIG_SLUB_DEBUG_ON is not set
1431# CONFIG_DEBUG_RT_MUTEXES is not set 1408# CONFIG_DEBUG_RT_MUTEXES is not set
1432# CONFIG_RT_MUTEX_TESTER is not set 1409# CONFIG_RT_MUTEX_TESTER is not set
1433# CONFIG_DEBUG_SPINLOCK is not set 1410# CONFIG_DEBUG_SPINLOCK is not set
1434# CONFIG_DEBUG_MUTEXES is not set 1411# CONFIG_DEBUG_MUTEXES is not set
1435# CONFIG_DEBUG_LOCK_ALLOC is not set 1412# CONFIG_DEBUG_LOCK_ALLOC is not set
1436# CONFIG_PROVE_LOCKING is not set 1413# CONFIG_PROVE_LOCKING is not set
1414# CONFIG_LOCK_STAT is not set
1437# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1415# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1438# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1416# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1439# CONFIG_DEBUG_KOBJECT is not set 1417# CONFIG_DEBUG_KOBJECT is not set
@@ -1443,7 +1421,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
1443# CONFIG_DEBUG_VM is not set 1421# CONFIG_DEBUG_VM is not set
1444# CONFIG_DEBUG_LIST is not set 1422# CONFIG_DEBUG_LIST is not set
1445# CONFIG_FRAME_POINTER is not set 1423# CONFIG_FRAME_POINTER is not set
1446# CONFIG_UNWIND_INFO is not set
1447# CONFIG_FORCED_INLINING is not set 1424# CONFIG_FORCED_INLINING is not set
1448# CONFIG_RCU_TORTURE_TEST is not set 1425# CONFIG_RCU_TORTURE_TEST is not set
1449# CONFIG_LKDTM is not set 1426# CONFIG_LKDTM is not set
@@ -1462,10 +1439,6 @@ CONFIG_DOUBLEFAULT=y
1462# 1439#
1463# CONFIG_KEYS is not set 1440# CONFIG_KEYS is not set
1464# CONFIG_SECURITY is not set 1441# CONFIG_SECURITY is not set
1465
1466#
1467# Cryptographic options
1468#
1469# CONFIG_CRYPTO is not set 1442# CONFIG_CRYPTO is not set
1470 1443
1471# 1444#
@@ -1476,6 +1449,7 @@ CONFIG_BITREVERSE=y
1476# CONFIG_CRC16 is not set 1449# CONFIG_CRC16 is not set
1477# CONFIG_CRC_ITU_T is not set 1450# CONFIG_CRC_ITU_T is not set
1478CONFIG_CRC32=y 1451CONFIG_CRC32=y
1452# CONFIG_CRC7 is not set
1479# CONFIG_LIBCRC32C is not set 1453# CONFIG_LIBCRC32C is not set
1480CONFIG_ZLIB_INFLATE=y 1454CONFIG_ZLIB_INFLATE=y
1481CONFIG_PLIST=y 1455CONFIG_PLIST=y
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 06da59f6f837..dbe5e87e0d66 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_VM86) += vm86.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_HPET_TIMER) += hpet.o
 obj-$(CONFIG_K8_NB) += k8.o
+obj-$(CONFIG_MGEODE_LX) += geode.o
 
 obj-$(CONFIG_VMI) += vmi.o vmiclock.o
 obj-$(CONFIG_PARAVIRT) += paravirt.o
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index a574cd2c8b61..cacdd883bf2b 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -618,6 +618,8 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
 #ifdef CONFIG_HPET_TIMER
 #include <asm/hpet.h>
 
+static struct __initdata resource *hpet_res;
+
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
 	struct acpi_table_hpet *hpet_tbl;
@@ -638,8 +640,42 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
 	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 		hpet_tbl->id, hpet_address);
 
+	/*
+	 * Allocate and initialize the HPET firmware resource for adding into
+	 * the resource tree during the lateinit timeframe.
+	 */
+#define HPET_RESOURCE_NAME_SIZE 9
+	hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
+
+	if (!hpet_res)
+		return 0;
+
+	memset(hpet_res, 0, sizeof(*hpet_res));
+	hpet_res->name = (void *)&hpet_res[1];
+	hpet_res->flags = IORESOURCE_MEM;
+	snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
+		 hpet_tbl->sequence);
+
+	hpet_res->start = hpet_address;
+	hpet_res->end = hpet_address + (1 * 1024) - 1;
+
 	return 0;
 }
+
+/*
+ * hpet_insert_resource inserts the HPET resources used into the resource
+ * tree.
+ */
+static __init int hpet_insert_resource(void)
+{
+	if (!hpet_res)
+		return 1;
+
+	return insert_resource(&iomem_resource, hpet_res);
+}
+
+late_initcall(hpet_insert_resource);
+
 #else
 #define acpi_parse_hpet NULL
 #endif
@@ -950,14 +986,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 	},
 	{
 		.callback = force_acpi_ht,
-		.ident = "DELL GX240",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
-			DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
 		.ident = "HP VISUALIZE NT Workstation",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index d8cda14fff8b..c3750c2c4113 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -2,12 +2,17 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/mce.h>
+#include <asm/nmi.h>
 
-static int noreplace_smp = 0;
-static int smp_alt_once = 0;
-static int debug_alternative = 0;
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_alt_once;
 
 static int __init bootonly(char *str)
 {
@@ -15,6 +20,11 @@ static int __init bootonly(char *str)
 	return 1;
 }
 __setup("smp-alt-boot", bootonly);
+#else
+#define smp_alt_once 1
+#endif
+
+static int debug_alternative;
 
 static int __init debug_alt(char *str)
 {
@@ -23,6 +33,8 @@ static int __init debug_alt(char *str)
 }
 __setup("debug-alternative", debug_alt);
 
+static int noreplace_smp;
+
 static int __init setup_noreplace_smp(char *str)
 {
 	noreplace_smp = 1;
@@ -144,7 +156,7 @@ static void nop_out(void *insns, unsigned int len)
 		unsigned int noplen = len;
 		if (noplen > ASM_NOP_MAX)
 			noplen = ASM_NOP_MAX;
-		memcpy(insns, noptable[noplen], noplen);
+		text_poke(insns, noptable[noplen], noplen);
 		insns += noplen;
 		len -= noplen;
 	}
@@ -196,7 +208,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		**ptr = 0xf0; /* lock prefix */
+		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
 	};
 }
 
@@ -354,10 +366,6 @@ void apply_paravirt(struct paravirt_patch_site *start,
 		/* Pad the rest with nops */
 		nop_out(p->instr + used, p->len - used);
 	}
-
-	/* Sync to be conservative, in case we patched following
-	 * instructions */
-	sync_core();
 }
 extern struct paravirt_patch_site __start_parainstructions[],
 	__stop_parainstructions[];
@@ -367,6 +375,14 @@ void __init alternative_instructions(void)
 {
 	unsigned long flags;
 
+	/* The patching is not fully atomic, so try to avoid local interruptions
+	   that might execute the to be patched code.
+	   Other CPUs are not running. */
+	stop_nmi();
+#ifdef CONFIG_MCE
+	stop_mce();
+#endif
+
 	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -376,8 +392,6 @@ void __init alternative_instructions(void)
 #ifdef CONFIG_HOTPLUG_CPU
 	if (num_possible_cpus() < 2)
 		smp_alt_once = 1;
-#else
-	smp_alt_once = 1;
 #endif
 
 #ifdef CONFIG_SMP
@@ -401,4 +415,37 @@ void __init alternative_instructions(void)
401#endif 415#endif
402 apply_paravirt(__parainstructions, __parainstructions_end); 416 apply_paravirt(__parainstructions, __parainstructions_end);
403 local_irq_restore(flags); 417 local_irq_restore(flags);
418
419 restart_nmi();
420#ifdef CONFIG_MCE
421 restart_mce();
422#endif
423}
424
425/*
426 * Warning:
427 * When you use this code to patch more than one byte of an instruction
428 * you need to make sure that other CPUs cannot execute this code in parallel.
429 * Also, no thread may be preempted in the middle of these instructions.
430 * And on the local CPU you need to be protected against NMI or MCE handlers
431 * seeing an inconsistent instruction while you patch.
432 */
433void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
434{
435 u8 *addr = oaddr;
436 if (!pte_write(*lookup_address((unsigned long)addr))) {
437 struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
438 addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
439 if (!addr)
440 return;
441 addr += ((unsigned long)oaddr) % PAGE_SIZE;
442 }
443 memcpy(addr, opcode, len);
444 sync_core();
445 /* Not strictly needed, but can speed up CPU recovery. Ignore the
446 cross-cache-line case. */
447 if (cpu_has_clflush)
448 asm("clflush (%0) " :: "r" (oaddr) : "memory");
449 if (addr != oaddr)
450 vunmap(addr);
404} 451}
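For reference, a minimal sketch of how a caller could use the new text_poke() helper under the constraints spelled out in the comment above; the function name and the int3 opcode are illustrative only and are not part of this patch, and <asm/alternative.h> is assumed to carry the text_poke() declaration:

	#include <asm/alternative.h>

	/* Sketch: overwrite the first byte of a kernel-text address with a
	 * breakpoint. The caller must ensure no other CPU can execute the
	 * code and that NMI/MCE on this CPU are quiesced, as required above. */
	static void example_patch_int3(void *kernel_text_addr)
	{
		unsigned char bkpt[1] = { 0xcc };	/* int3 */

		text_poke(kernel_text_addr, bkpt, sizeof(bkpt));
	}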
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 67824f3bb974..bfc6cb7df7e7 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -263,6 +263,9 @@ static void lapic_timer_setup(enum clock_event_mode mode,
263 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); 263 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
264 apic_write_around(APIC_LVTT, v); 264 apic_write_around(APIC_LVTT, v);
265 break; 265 break;
266 case CLOCK_EVT_MODE_RESUME:
267 /* Nothing to do here */
268 break;
266 } 269 }
267 270
268 local_irq_restore(flags); 271 local_irq_restore(flags);
@@ -315,7 +318,7 @@ static void __devinit setup_APIC_timer(void)
315 318
316#define LAPIC_CAL_LOOPS (HZ/10) 319#define LAPIC_CAL_LOOPS (HZ/10)
317 320
318static __initdata volatile int lapic_cal_loops = -1; 321static __initdata int lapic_cal_loops = -1;
319static __initdata long lapic_cal_t1, lapic_cal_t2; 322static __initdata long lapic_cal_t1, lapic_cal_t2;
320static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; 323static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
321static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; 324static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
@@ -485,7 +488,7 @@ void __init setup_boot_APIC_clock(void)
485 /* Let the interrupts run */ 488 /* Let the interrupts run */
486 local_irq_enable(); 489 local_irq_enable();
487 490
488 while(lapic_cal_loops <= LAPIC_CAL_LOOPS) 491 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
489 cpu_relax(); 492 cpu_relax();
490 493
491 local_irq_disable(); 494 local_irq_disable();
@@ -521,6 +524,9 @@ void __init setup_boot_APIC_clock(void)
521 */ 524 */
522 if (nmi_watchdog != NMI_IO_APIC) 525 if (nmi_watchdog != NMI_IO_APIC)
523 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; 526 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
527 else
528 printk(KERN_WARNING "APIC timer registered as dummy,"
529 " due to nmi_watchdog=1!\n");
524 } 530 }
525 531
526 /* Setup the lapic or request the broadcast */ 532 /* Setup the lapic or request the broadcast */
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
index 0b6a8551e9e2..778396c78d65 100644
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -9,7 +9,6 @@ obj-y += cyrix.o
9obj-y += centaur.o 9obj-y += centaur.o
10obj-y += transmeta.o 10obj-y += transmeta.o
11obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o 11obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
12obj-y += rise.o
13obj-y += nexgen.o 12obj-y += nexgen.o
14obj-y += umc.o 13obj-y += umc.o
15 14
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 6f47eeeb93ea..c7ba455d5ac7 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -231,6 +231,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
231 231
232 switch (c->x86) { 232 switch (c->x86) {
233 case 15: 233 case 15:
234 /* Use K8 tuning for Fam10h and Fam11h */
235 case 0x10:
236 case 0x11:
234 set_bit(X86_FEATURE_K8, c->x86_capability); 237 set_bit(X86_FEATURE_K8, c->x86_capability);
235 break; 238 break;
236 case 6: 239 case 6:
@@ -272,8 +275,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
272 } 275 }
273#endif 276#endif
274 277
275 if (cpuid_eax(0x80000000) >= 0x80000006) 278 if (cpuid_eax(0x80000000) >= 0x80000006) {
276 num_cache_leaves = 3; 279 if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
280 num_cache_leaves = 4;
281 else
282 num_cache_leaves = 3;
283 }
277 284
278 if (amd_apic_timer_broken()) 285 if (amd_apic_timer_broken())
279 set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability); 286 set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
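The 0xf000 mask used above picks out the L3 associativity field of CPUID 0x80000006 EDX; a non-zero value means the part has an L3 cache, so Fam10h CPUs get a fourth cache leaf. A stand-alone sketch of the same probe, assuming the kernel's cpuid_edx() helper from <asm/processor.h>:

	/* Sketch: does this AMD CPU report an L3 via extended CPUID?
	 * EDX[15:12] of leaf 0x80000006 encodes the L3 associativity;
	 * zero means no (or a disabled) L3. */
	static int example_amd_has_l3(void)
	{
		return (cpuid_edx(0x80000006) & 0xf000) != 0;
	}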
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e5419a9dec88..d506201d397c 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -606,7 +606,6 @@ extern int nsc_init_cpu(void);
606extern int amd_init_cpu(void); 606extern int amd_init_cpu(void);
607extern int centaur_init_cpu(void); 607extern int centaur_init_cpu(void);
608extern int transmeta_init_cpu(void); 608extern int transmeta_init_cpu(void);
609extern int rise_init_cpu(void);
610extern int nexgen_init_cpu(void); 609extern int nexgen_init_cpu(void);
611extern int umc_init_cpu(void); 610extern int umc_init_cpu(void);
612 611
@@ -618,7 +617,6 @@ void __init early_cpu_init(void)
618 amd_init_cpu(); 617 amd_init_cpu();
619 centaur_init_cpu(); 618 centaur_init_cpu();
620 transmeta_init_cpu(); 619 transmeta_init_cpu();
621 rise_init_cpu();
622 nexgen_init_cpu(); 620 nexgen_init_cpu();
623 umc_init_cpu(); 621 umc_init_cpu();
624 early_cpu_detect(); 622 early_cpu_detect();
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 18c8b67ea3a7..6f846bee2103 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -665,8 +665,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
665 data->max_freq = perf->states[0].core_frequency * 1000; 665 data->max_freq = perf->states[0].core_frequency * 1000;
666 /* table init */ 666 /* table init */
667 for (i=0; i<perf->state_count; i++) { 667 for (i=0; i<perf->state_count; i++) {
668 if (i>0 && perf->states[i].core_frequency == 668 if (i>0 && perf->states[i].core_frequency >=
669 perf->states[i-1].core_frequency) 669 data->freq_table[valid_states-1].frequency / 1000)
670 continue; 670 continue;
671 671
672 data->freq_table[valid_states].index = i; 672 data->freq_table[valid_states].index = i;
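The effect of the new comparison is to force the resulting cpufreq table to be strictly decreasing rather than only collapsing exact back-to-back duplicates. As a made-up example, ACPI P-states of 2000, 2000, 1800, 1800 and 1000 MHz now yield freq_table entries of 2000000, 1800000 and 1000000 kHz; the second 2000 MHz and second 1800 MHz states are skipped because their frequency is not lower than the last accepted entry.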
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 194144539a6f..461dabc4e495 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -79,7 +79,7 @@
79#include <linux/smp.h> 79#include <linux/smp.h>
80#include <linux/cpufreq.h> 80#include <linux/cpufreq.h>
81#include <linux/pci.h> 81#include <linux/pci.h>
82#include <asm/processor.h> 82#include <asm/processor-cyrix.h>
83#include <asm/errno.h> 83#include <asm/errno.h>
84 84
85/* PCI config registers, all at F0 */ 85/* PCI config registers, all at F0 */
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index e88d2fba156b..122d2d75aa9f 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -4,7 +4,7 @@
4#include <linux/pci.h> 4#include <linux/pci.h>
5#include <asm/dma.h> 5#include <asm/dma.h>
6#include <asm/io.h> 6#include <asm/io.h>
7#include <asm/processor.h> 7#include <asm/processor-cyrix.h>
8#include <asm/timer.h> 8#include <asm/timer.h>
9#include <asm/pci-direct.h> 9#include <asm/pci-direct.h>
10#include <asm/tsc.h> 10#include <asm/tsc.h>
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e5be819492ef..d5a456d27d82 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,7 +4,7 @@
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. 6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen : CPUID4 emulation on AMD. 7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
8 */ 8 */
9 9
10#include <linux/init.h> 10#include <linux/init.h>
@@ -135,7 +135,7 @@ unsigned short num_cache_leaves;
135 135
136/* AMD doesn't have CPUID4. Emulate it here to report the same 136/* AMD doesn't have CPUID4. Emulate it here to report the same
137 information to the user. This makes some assumptions about the machine: 137 information to the user. This makes some assumptions about the machine:
138 No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs. 138 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
139 139
140 In theory the TLBs could be reported as fake type (they are in "dummy"). 140 In theory the TLBs could be reported as fake type (they are in "dummy").
141 Maybe later */ 141 Maybe later */
@@ -159,13 +159,26 @@ union l2_cache {
159 unsigned val; 159 unsigned val;
160}; 160};
161 161
162union l3_cache {
163 struct {
164 unsigned line_size : 8;
165 unsigned lines_per_tag : 4;
166 unsigned assoc : 4;
167 unsigned res : 2;
168 unsigned size_encoded : 14;
169 };
170 unsigned val;
171};
172
162static const unsigned short assocs[] = { 173static const unsigned short assocs[] = {
163 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 174 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
164 [8] = 16, 175 [8] = 16, [0xa] = 32, [0xb] = 48,
176 [0xc] = 64,
165 [0xf] = 0xffff // ?? 177 [0xf] = 0xffff // ??
166 }; 178};
167static const unsigned char levels[] = { 1, 1, 2 }; 179
168static const unsigned char types[] = { 1, 2, 3 }; 180static const unsigned char levels[] = { 1, 1, 2, 3 };
181static const unsigned char types[] = { 1, 2, 3, 3 };
169 182
170static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 183static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
171 union _cpuid4_leaf_ebx *ebx, 184 union _cpuid4_leaf_ebx *ebx,
@@ -175,37 +188,58 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
175 unsigned line_size, lines_per_tag, assoc, size_in_kb; 188 unsigned line_size, lines_per_tag, assoc, size_in_kb;
176 union l1_cache l1i, l1d; 189 union l1_cache l1i, l1d;
177 union l2_cache l2; 190 union l2_cache l2;
191 union l3_cache l3;
192 union l1_cache *l1 = &l1d;
178 193
179 eax->full = 0; 194 eax->full = 0;
180 ebx->full = 0; 195 ebx->full = 0;
181 ecx->full = 0; 196 ecx->full = 0;
182 197
183 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); 198 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
184 cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy); 199 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
185 200
186 if (leaf > 2 || !l1d.val || !l1i.val || !l2.val) 201 switch (leaf) {
187 return; 202 case 1:
188 203 l1 = &l1i;
189 eax->split.is_self_initializing = 1; 204 case 0:
190 eax->split.type = types[leaf]; 205 if (!l1->val)
191 eax->split.level = levels[leaf]; 206 return;
192 eax->split.num_threads_sharing = 0;
193 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
194
195 if (leaf <= 1) {
196 union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
197 assoc = l1->assoc; 207 assoc = l1->assoc;
198 line_size = l1->line_size; 208 line_size = l1->line_size;
199 lines_per_tag = l1->lines_per_tag; 209 lines_per_tag = l1->lines_per_tag;
200 size_in_kb = l1->size_in_kb; 210 size_in_kb = l1->size_in_kb;
201 } else { 211 break;
212 case 2:
213 if (!l2.val)
214 return;
202 assoc = l2.assoc; 215 assoc = l2.assoc;
203 line_size = l2.line_size; 216 line_size = l2.line_size;
204 lines_per_tag = l2.lines_per_tag; 217 lines_per_tag = l2.lines_per_tag;
205 /* cpu_data has errata corrections for K7 applied */ 218 /* cpu_data has errata corrections for K7 applied */
206 size_in_kb = current_cpu_data.x86_cache_size; 219 size_in_kb = current_cpu_data.x86_cache_size;
220 break;
221 case 3:
222 if (!l3.val)
223 return;
224 assoc = l3.assoc;
225 line_size = l3.line_size;
226 lines_per_tag = l3.lines_per_tag;
227 size_in_kb = l3.size_encoded * 512;
228 break;
229 default:
230 return;
207 } 231 }
208 232
233 eax->split.is_self_initializing = 1;
234 eax->split.type = types[leaf];
235 eax->split.level = levels[leaf];
236 if (leaf == 3)
237 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
238 else
239 eax->split.num_threads_sharing = 0;
240 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
241
242
209 if (assoc == 0xf) 243 if (assoc == 0xf)
210 eax->split.is_fully_associative = 1; 244 eax->split.is_fully_associative = 1;
211 ebx->split.coherency_line_size = line_size - 1; 245 ebx->split.coherency_line_size = line_size - 1;
@@ -239,8 +273,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
239 return 0; 273 return 0;
240} 274}
241 275
242/* will only be called once; __init is safe here */ 276static int __cpuinit find_num_cache_leaves(void)
243static int __init find_num_cache_leaves(void)
244{ 277{
245 unsigned int eax, ebx, ecx, edx; 278 unsigned int eax, ebx, ecx, edx;
246 union _cpuid4_leaf_eax cache_eax; 279 union _cpuid4_leaf_eax cache_eax;
@@ -710,7 +743,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
710 return retval; 743 return retval;
711} 744}
712 745
713static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) 746static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
714{ 747{
715 unsigned int cpu = sys_dev->id; 748 unsigned int cpu = sys_dev->id;
716 unsigned long i; 749 unsigned long i;
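With the new l3_cache union, the L3 size comes from the size_encoded field of CPUID 0x80000006 EDX, which counts in 512 KB units; for example (value invented for illustration), size_encoded == 4 is reported as 4 * 512 = 2048 KB. Sharing is reported per package: the leaf-3 path sets num_threads_sharing to x86_max_cores - 1 because the L3 is shared by all cores on the die, while the L1/L2 leaves keep it at 0.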
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 56cd485b127c..34c781eddee4 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -60,6 +60,20 @@ void mcheck_init(struct cpuinfo_x86 *c)
60 } 60 }
61} 61}
62 62
63static unsigned long old_cr4 __initdata;
64
65void __init stop_mce(void)
66{
67 old_cr4 = read_cr4();
68 clear_in_cr4(X86_CR4_MCE);
69}
70
71void __init restart_mce(void)
72{
73 if (old_cr4 & X86_CR4_MCE)
74 set_in_cr4(X86_CR4_MCE);
75}
76
63static int __init mcheck_disable(char *str) 77static int __init mcheck_disable(char *str)
64{ 78{
65 mce_disabled = 1; 79 mce_disabled = 1;
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 6b5d3518a1c0..bf39409b3838 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -57,7 +57,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
57static void mce_work_fn(struct work_struct *work) 57static void mce_work_fn(struct work_struct *work)
58{ 58{
59 on_each_cpu(mce_checkregs, NULL, 1, 1); 59 on_each_cpu(mce_checkregs, NULL, 1, 1);
60 schedule_delayed_work(&mce_work, MCE_RATE); 60 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
61} 61}
62 62
63static int __init init_nonfatal_mce_checker(void) 63static int __init init_nonfatal_mce_checker(void)
@@ -82,7 +82,7 @@ static int __init init_nonfatal_mce_checker(void)
82 /* 82 /*
83 * Check for non-fatal errors every MCE_RATE s 83 * Check for non-fatal errors every MCE_RATE s
84 */ 84 */
85 schedule_delayed_work(&mce_work, MCE_RATE); 85 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
86 printk(KERN_INFO "Machine check exception polling timer started.\n"); 86 printk(KERN_INFO "Machine check exception polling timer started.\n");
87 return 0; 87 return 0;
88} 88}
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 1001f1e0fe6d..2287d4863a8a 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -3,6 +3,7 @@
3#include <asm/mtrr.h> 3#include <asm/mtrr.h>
4#include <asm/msr.h> 4#include <asm/msr.h>
5#include <asm/io.h> 5#include <asm/io.h>
6#include <asm/processor-cyrix.h>
6#include "mtrr.h" 7#include "mtrr.h"
7 8
8int arr3_protected; 9int arr3_protected;
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index f6e46943e6ef..56f64e34829f 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -79,7 +79,7 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
79} 79}
80 80
81/* Grab all of the MTRR state for this CPU into *state */ 81/* Grab all of the MTRR state for this CPU into *state */
82void get_mtrr_state(void) 82void __init get_mtrr_state(void)
83{ 83{
84 unsigned int i; 84 unsigned int i;
85 struct mtrr_var_range *vrs; 85 struct mtrr_var_range *vrs;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 75dc6d5214bc..c48b6fea5ab4 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -643,7 +643,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
643 * initialized (i.e. before smp_init()). 643 * initialized (i.e. before smp_init()).
644 * 644 *
645 */ 645 */
646__init void mtrr_bp_init(void) 646void __init mtrr_bp_init(void)
647{ 647{
648 init_ifs(); 648 init_ifs();
649 649
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
index 7b39a2f954d9..c9014ca4a575 100644
--- a/arch/i386/kernel/cpu/mtrr/state.c
+++ b/arch/i386/kernel/cpu/mtrr/state.c
@@ -3,6 +3,7 @@
3#include <asm/io.h> 3#include <asm/io.h>
4#include <asm/mtrr.h> 4#include <asm/mtrr.h>
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include <asm-i386/processor-cyrix.h>
6#include "mtrr.h" 7#include "mtrr.h"
7 8
8 9
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 4d26d514c56f..4be488e73bee 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -325,7 +325,7 @@ static struct wd_ops k7_wd_ops = {
325 .stop = single_msr_stop_watchdog, 325 .stop = single_msr_stop_watchdog,
326 .perfctr = MSR_K7_PERFCTR0, 326 .perfctr = MSR_K7_PERFCTR0,
327 .evntsel = MSR_K7_EVNTSEL0, 327 .evntsel = MSR_K7_EVNTSEL0,
328 .checkbit = 1ULL<<63, 328 .checkbit = 1ULL<<47,
329}; 329};
330 330
331/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */ 331/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
@@ -346,7 +346,9 @@ static int setup_p6_watchdog(unsigned nmi_hz)
346 perfctr_msr = MSR_P6_PERFCTR0; 346 perfctr_msr = MSR_P6_PERFCTR0;
347 evntsel_msr = MSR_P6_EVNTSEL0; 347 evntsel_msr = MSR_P6_EVNTSEL0;
348 348
349 wrmsrl(perfctr_msr, 0UL); 349 /* KVM doesn't implement this MSR */
350 if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
351 return 0;
350 352
351 evntsel = P6_EVNTSEL_INT 353 evntsel = P6_EVNTSEL_INT
352 | P6_EVNTSEL_OS 354 | P6_EVNTSEL_OS
@@ -599,8 +601,8 @@ static struct wd_ops intel_arch_wd_ops = {
599 .setup = setup_intel_arch_watchdog, 601 .setup = setup_intel_arch_watchdog,
600 .rearm = p6_rearm, 602 .rearm = p6_rearm,
601 .stop = single_msr_stop_watchdog, 603 .stop = single_msr_stop_watchdog,
602 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 604 .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
603 .evntsel = MSR_ARCH_PERFMON_EVENTSEL0, 605 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
604}; 606};
605 607
606static void probe_nmi_watchdog(void) 608static void probe_nmi_watchdog(void)
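The switch from wrmsrl() to wrmsr_safe() matters because wrmsr_safe() routes a faulting MSR write through the exception tables and returns a negative value instead of taking an unhandled #GP, which is what happened when KVM guests touched the then-unimplemented perfctr MSR. A minimal sketch of the pattern, with the MSR name taken from the hunk above:

	/* Sketch: probe whether the perfctr MSR is writable before using it. */
	if (wrmsr_safe(MSR_P6_PERFCTR0, 0, 0) < 0)
		return 0;	/* hypervisor does not implement it; skip this watchdog */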
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
deleted file mode 100644
index 50076f22e90f..000000000000
--- a/arch/i386/kernel/cpu/rise.c
+++ /dev/null
@@ -1,52 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/bitops.h>
4#include <asm/processor.h>
5
6#include "cpu.h"
7
8static void __cpuinit init_rise(struct cpuinfo_x86 *c)
9{
10 printk("CPU: Rise iDragon");
11 if (c->x86_model > 2)
12 printk(" II");
13 printk("\n");
14
15 /* Unhide possibly hidden capability flags
16 The mp6 iDragon family don't have MSRs.
17 We switch on extra features with this cpuid weirdness: */
18 __asm__ (
19 "movl $0x6363452a, %%eax\n\t"
20 "movl $0x3231206c, %%ecx\n\t"
21 "movl $0x2a32313a, %%edx\n\t"
22 "cpuid\n\t"
23 "movl $0x63634523, %%eax\n\t"
24 "movl $0x32315f6c, %%ecx\n\t"
25 "movl $0x2333313a, %%edx\n\t"
26 "cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
27 );
28 set_bit(X86_FEATURE_CX8, c->x86_capability);
29}
30
31static struct cpu_dev rise_cpu_dev __cpuinitdata = {
32 .c_vendor = "Rise",
33 .c_ident = { "RiseRiseRise" },
34 .c_models = {
35 { .vendor = X86_VENDOR_RISE, .family = 5, .model_names =
36 {
37 [0] = "iDragon",
38 [2] = "iDragon",
39 [8] = "iDragon II",
40 [9] = "iDragon II"
41 }
42 },
43 },
44 .c_init = init_rise,
45};
46
47int __init rise_init_cpu(void)
48{
49 cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
50 return 0;
51}
52
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
index fc822a46897a..e60cddbc4cfb 100644
--- a/arch/i386/kernel/e820.c
+++ b/arch/i386/kernel/e820.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/pfn.h> 11#include <linux/pfn.h>
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/suspend.h>
13 14
14#include <asm/pgtable.h> 15#include <asm/pgtable.h>
15#include <asm/page.h> 16#include <asm/page.h>
@@ -320,6 +321,37 @@ static int __init request_standard_resources(void)
320 321
321subsys_initcall(request_standard_resources); 322subsys_initcall(request_standard_resources);
322 323
324#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
325/**
326 * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
327 * correspond to e820 RAM areas and mark the corresponding pages as nosave for
328 * hibernation.
329 *
330 * This function requires the e820 map to be sorted and without any
331 * overlapping entries and assumes the first e820 area to be RAM.
332 */
333void __init e820_mark_nosave_regions(void)
334{
335 int i;
336 unsigned long pfn;
337
338 pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
339 for (i = 1; i < e820.nr_map; i++) {
340 struct e820entry *ei = &e820.map[i];
341
342 if (pfn < PFN_UP(ei->addr))
343 register_nosave_region(pfn, PFN_UP(ei->addr));
344
345 pfn = PFN_DOWN(ei->addr + ei->size);
346 if (ei->type != E820_RAM)
347 register_nosave_region(PFN_UP(ei->addr), pfn);
348
349 if (pfn >= max_low_pfn)
350 break;
351 }
352}
353#endif
354
323void __init add_memory_region(unsigned long long start, 355void __init add_memory_region(unsigned long long start,
324 unsigned long long size, int type) 356 unsigned long long size, int type)
325{ 357{
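A worked example of e820_mark_nosave_regions(), with an invented three-entry map of RAM 0x0-0x9f000, reserved 0x9f000-0x100000 and RAM from 0x100000 upward: the cursor pfn starts at 0x9f (end of the first RAM entry); the reserved entry starts at pfn 0x9f, so no gap is registered, but because its type is not E820_RAM the range 0x9f-0x100 is registered nosave; the cursor then moves to pfn 0x100 and the following RAM entry adds nothing. Hibernation therefore skips saving that non-RAM hole.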
diff --git a/arch/i386/kernel/geode.c b/arch/i386/kernel/geode.c
new file mode 100644
index 000000000000..41e8aec4c61d
--- /dev/null
+++ b/arch/i386/kernel/geode.c
@@ -0,0 +1,155 @@
1/*
2 * AMD Geode southbridge support code
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/ioport.h>
13#include <linux/io.h>
14#include <asm/msr.h>
15#include <asm/geode.h>
16
17static struct {
18 char *name;
19 u32 msr;
20 int size;
21 u32 base;
22} lbars[] = {
23 { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
24 { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
25 { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
26 { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
27};
28
29static void __init init_lbars(void)
30{
31 u32 lo, hi;
32 int i;
33
34 for (i = 0; i < ARRAY_SIZE(lbars); i++) {
35 rdmsr(lbars[i].msr, lo, hi);
36 if (hi & 0x01)
37 lbars[i].base = lo & 0x0000ffff;
38
39 if (lbars[i].base == 0)
40 printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
41 lbars[i].name);
42 }
43}
44
45int geode_get_dev_base(unsigned int dev)
46{
47 BUG_ON(dev >= ARRAY_SIZE(lbars));
48 return lbars[dev].base;
49}
50EXPORT_SYMBOL_GPL(geode_get_dev_base);
51
52/* === GPIO API === */
53
54void geode_gpio_set(unsigned int gpio, unsigned int reg)
55{
56 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
57
58 if (!base)
59 return;
60
61 if (gpio < 16)
62 outl(1 << gpio, base + reg);
63 else
64 outl(1 << (gpio - 16), base + 0x80 + reg);
65}
66EXPORT_SYMBOL_GPL(geode_gpio_set);
67
68void geode_gpio_clear(unsigned int gpio, unsigned int reg)
69{
70 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
71
72 if (!base)
73 return;
74
75 if (gpio < 16)
76 outl(1 << (gpio + 16), base + reg);
77 else
78 outl(1 << gpio, base + 0x80 + reg);
79}
80EXPORT_SYMBOL_GPL(geode_gpio_clear);
81
82int geode_gpio_isset(unsigned int gpio, unsigned int reg)
83{
84 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
85
86 if (!base)
87 return 0;
88
89 if (gpio < 16)
90 return (inl(base + reg) & (1 << gpio)) ? 1 : 0;
91 else
92 return (inl(base + 0x80 + reg) & (1 << (gpio - 16))) ? 1 : 0;
93}
94EXPORT_SYMBOL_GPL(geode_gpio_isset);
95
96void geode_gpio_set_irq(unsigned int group, unsigned int irq)
97{
98 u32 lo, hi;
99
100 if (group > 7 || irq > 15)
101 return;
102
103 rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
104
105 lo &= ~(0xF << (group * 4));
106 lo |= (irq & 0xF) << (group * 4);
107
108 wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
109}
110EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
111
112void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
113{
114 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
115 u32 offset, shift, val;
116
117 if (gpio >= 24)
118 offset = GPIO_MAP_W;
119 else if (gpio >= 16)
120 offset = GPIO_MAP_Z;
121 else if (gpio >= 8)
122 offset = GPIO_MAP_Y;
123 else
124 offset = GPIO_MAP_X;
125
126 shift = (gpio % 8) * 4;
127
128 val = inl(base + offset);
129
130 /* Clear whatever was there before */
131 val &= ~(0xF << shift);
132
133 /* And set the new value */
134
135 val |= ((pair & 7) << shift);
136
137 /* Set the PME bit if this is a PME event */
138
139 if (pme)
140 val |= (1 << (shift + 3));
141
142 outl(val, base + offset);
143}
144EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
145
146static int __init geode_southbridge_init(void)
147{
148 if (!is_geode())
149 return -ENODEV;
150
151 init_lbars();
152 return 0;
153}
154
155postcore_initcall(geode_southbridge_init);
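For completeness, a sketch of how a driver might use the GPIO helpers exported above; GPIO_OUTPUT_ENABLE and GPIO_OUTPUT_VAL are assumed to be register offsets provided by <asm/geode.h> and are not shown in this diff:

	#include <asm/geode.h>

	/* Sketch: configure a Geode GPIO pin as an output and drive it high.
	 * The register offset names are assumptions, not part of this patch. */
	static void example_gpio_high(unsigned int gpio)
	{
		geode_gpio_set(gpio, GPIO_OUTPUT_ENABLE);	/* enable the output driver */
		geode_gpio_set(gpio, GPIO_OUTPUT_VAL);		/* drive the pin high */
	}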
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 17d73459fc5f..533d4932bc79 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -5,6 +5,7 @@
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/sysdev.h> 6#include <linux/sysdev.h>
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <linux/delay.h>
8 9
9#include <asm/hpet.h> 10#include <asm/hpet.h>
10#include <asm/io.h> 11#include <asm/io.h>
@@ -187,6 +188,10 @@ static void hpet_set_mode(enum clock_event_mode mode,
187 cfg &= ~HPET_TN_ENABLE; 188 cfg &= ~HPET_TN_ENABLE;
188 hpet_writel(cfg, HPET_T0_CFG); 189 hpet_writel(cfg, HPET_T0_CFG);
189 break; 190 break;
191
192 case CLOCK_EVT_MODE_RESUME:
193 hpet_enable_int();
194 break;
190 } 195 }
191} 196}
192 197
@@ -217,6 +222,7 @@ static struct clocksource clocksource_hpet = {
217 .mask = HPET_MASK, 222 .mask = HPET_MASK,
218 .shift = HPET_SHIFT, 223 .shift = HPET_SHIFT,
219 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 224 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
225 .resume = hpet_start_counter,
220}; 226};
221 227
222/* 228/*
@@ -226,7 +232,8 @@ int __init hpet_enable(void)
226{ 232{
227 unsigned long id; 233 unsigned long id;
228 uint64_t hpet_freq; 234 uint64_t hpet_freq;
229 u64 tmp; 235 u64 tmp, start, now;
236 cycle_t t1;
230 237
231 if (!is_hpet_capable()) 238 if (!is_hpet_capable())
232 return 0; 239 return 0;
@@ -273,6 +280,27 @@ int __init hpet_enable(void)
273 /* Start the counter */ 280 /* Start the counter */
274 hpet_start_counter(); 281 hpet_start_counter();
275 282
283 /* Verify whether hpet counter works */
284 t1 = read_hpet();
285 rdtscll(start);
286
287 /*
288 * We don't know the TSC frequency yet, but waiting for
289 * 200000 TSC cycles is safe:
290 * 4 GHz == 50us
291 * 1 GHz == 200us
292 */
293 do {
294 rep_nop();
295 rdtscll(now);
296 } while ((now - start) < 200000UL);
297
298 if (t1 == read_hpet()) {
299 printk(KERN_WARNING
300 "HPET counter not counting. HPET disabled\n");
301 goto out_nohpet;
302 }
303
276 /* Initialize and register HPET clocksource 304 /* Initialize and register HPET clocksource
277 * 305 *
278 * hpet period is in femto seconds per cycle 306 * hpet period is in femto seconds per cycle
@@ -291,7 +319,6 @@ int __init hpet_enable(void)
291 319
292 clocksource_register(&clocksource_hpet); 320 clocksource_register(&clocksource_hpet);
293 321
294
295 if (id & HPET_ID_LEGSUP) { 322 if (id & HPET_ID_LEGSUP) {
296 hpet_enable_int(); 323 hpet_enable_int();
297 hpet_reserve_platform_timers(id); 324 hpet_reserve_platform_timers(id);
@@ -299,7 +326,7 @@ int __init hpet_enable(void)
299 * Start hpet with the boot cpu mask and make it 326 * Start hpet with the boot cpu mask and make it
300 * global after the IO_APIC has been initialized. 327 * global after the IO_APIC has been initialized.
301 */ 328 */
302 hpet_clockevent.cpumask =cpumask_of_cpu(0); 329 hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
303 clockevents_register_device(&hpet_clockevent); 330 clockevents_register_device(&hpet_clockevent);
304 global_clock_event = &hpet_clockevent; 331 global_clock_event = &hpet_clockevent;
305 return 1; 332 return 1;
@@ -524,68 +551,3 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
524 return IRQ_HANDLED; 551 return IRQ_HANDLED;
525} 552}
526#endif 553#endif
527
528
529/*
530 * Suspend/resume part
531 */
532
533#ifdef CONFIG_PM
534
535static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
536{
537 unsigned long cfg = hpet_readl(HPET_CFG);
538
539 cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
540 hpet_writel(cfg, HPET_CFG);
541
542 return 0;
543}
544
545static int hpet_resume(struct sys_device *sys_device)
546{
547 unsigned int id;
548
549 hpet_start_counter();
550
551 id = hpet_readl(HPET_ID);
552
553 if (id & HPET_ID_LEGSUP)
554 hpet_enable_int();
555
556 return 0;
557}
558
559static struct sysdev_class hpet_class = {
560 set_kset_name("hpet"),
561 .suspend = hpet_suspend,
562 .resume = hpet_resume,
563};
564
565static struct sys_device hpet_device = {
566 .id = 0,
567 .cls = &hpet_class,
568};
569
570
571static __init int hpet_register_sysfs(void)
572{
573 int err;
574
575 if (!is_hpet_capable())
576 return 0;
577
578 err = sysdev_class_register(&hpet_class);
579
580 if (!err) {
581 err = sysdev_register(&hpet_device);
582 if (err)
583 sysdev_class_unregister(&hpet_class);
584 }
585
586 return err;
587}
588
589device_initcall(hpet_register_sysfs);
590
591#endif
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index f8a3c4054c70..6d839f2f1b1a 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -3,18 +3,17 @@
3 * 3 *
4 */ 4 */
5#include <linux/clockchips.h> 5#include <linux/clockchips.h>
6#include <linux/spinlock.h> 6#include <linux/init.h>
7#include <linux/interrupt.h>
7#include <linux/jiffies.h> 8#include <linux/jiffies.h>
8#include <linux/sysdev.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/spinlock.h>
11 11
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include <asm/delay.h> 13#include <asm/delay.h>
14#include <asm/i8253.h> 14#include <asm/i8253.h>
15#include <asm/io.h> 15#include <asm/io.h>
16 16#include <asm/timer.h>
17#include "io_ports.h"
18 17
19DEFINE_SPINLOCK(i8253_lock); 18DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock); 19EXPORT_SYMBOL(i8253_lock);
@@ -41,26 +40,27 @@ static void init_pit_timer(enum clock_event_mode mode,
41 case CLOCK_EVT_MODE_PERIODIC: 40 case CLOCK_EVT_MODE_PERIODIC:
42 /* binary, mode 2, LSB/MSB, ch 0 */ 41 /* binary, mode 2, LSB/MSB, ch 0 */
43 outb_p(0x34, PIT_MODE); 42 outb_p(0x34, PIT_MODE);
44 udelay(10);
45 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */ 43 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
46 udelay(10);
47 outb(LATCH >> 8 , PIT_CH0); /* MSB */ 44 outb(LATCH >> 8 , PIT_CH0); /* MSB */
48 break; 45 break;
49 46
50 /*
51 * Avoid unnecessary state transitions, as it confuses
52 * Geode / Cyrix based boxen.
53 */
54 case CLOCK_EVT_MODE_SHUTDOWN: 47 case CLOCK_EVT_MODE_SHUTDOWN:
55 if (evt->mode == CLOCK_EVT_MODE_UNUSED)
56 break;
57 case CLOCK_EVT_MODE_UNUSED: 48 case CLOCK_EVT_MODE_UNUSED:
58 if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN) 49 if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
59 break; 50 evt->mode == CLOCK_EVT_MODE_ONESHOT) {
51 outb_p(0x30, PIT_MODE);
52 outb_p(0, PIT_CH0);
53 outb_p(0, PIT_CH0);
54 }
55 break;
56
60 case CLOCK_EVT_MODE_ONESHOT: 57 case CLOCK_EVT_MODE_ONESHOT:
61 /* One shot setup */ 58 /* One shot setup */
62 outb_p(0x38, PIT_MODE); 59 outb_p(0x38, PIT_MODE);
63 udelay(10); 60 break;
61
62 case CLOCK_EVT_MODE_RESUME:
63 /* Nothing to do here */
64 break; 64 break;
65 } 65 }
66 spin_unlock_irqrestore(&i8253_lock, flags); 66 spin_unlock_irqrestore(&i8253_lock, flags);
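For readers decoding the command bytes written to PIT_MODE above (standard 8253/8254 control-word layout: bits 7-6 select the counter, bits 5-4 the access mode, bits 3-1 the operating mode, bit 0 binary/BCD):

	0x34 - counter 0, LSB then MSB, mode 2 (rate generator), binary: the periodic tick.
	0x38 - counter 0, LSB then MSB, mode 4 (software-triggered strobe), binary: one-shot.
	0x30 - counter 0, LSB then MSB, mode 0 (interrupt on terminal count), binary: written
	       with a zero count by the new shutdown path so no further periodic interrupts fire.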
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 21db8f56c9a1..893df8280756 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -353,14 +353,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
353# include <linux/slab.h> /* kmalloc() */ 353# include <linux/slab.h> /* kmalloc() */
354# include <linux/timer.h> /* time_after() */ 354# include <linux/timer.h> /* time_after() */
355 355
356#ifdef CONFIG_BALANCED_IRQ_DEBUG
357# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
358# define Dprintk(x...) do { TDprintk(x); } while (0)
359# else
360# define TDprintk(x...)
361# define Dprintk(x...)
362# endif
363
364#define IRQBALANCE_CHECK_ARCH -999 356#define IRQBALANCE_CHECK_ARCH -999
365#define MAX_BALANCED_IRQ_INTERVAL (5*HZ) 357#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
366#define MIN_BALANCED_IRQ_INTERVAL (HZ/2) 358#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
@@ -443,7 +435,7 @@ static inline void balance_irq(int cpu, int irq)
443static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) 435static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
444{ 436{
445 int i, j; 437 int i, j;
446 Dprintk("Rotating IRQs among CPUs.\n"); 438
447 for_each_online_cpu(i) { 439 for_each_online_cpu(i) {
448 for (j = 0; j < NR_IRQS; j++) { 440 for (j = 0; j < NR_IRQS; j++) {
449 if (!irq_desc[j].action) 441 if (!irq_desc[j].action)
@@ -560,19 +552,11 @@ tryanothercpu:
560 max_loaded = tmp_loaded; /* processor */ 552 max_loaded = tmp_loaded; /* processor */
561 imbalance = (max_cpu_irq - min_cpu_irq) / 2; 553 imbalance = (max_cpu_irq - min_cpu_irq) / 2;
562 554
563 Dprintk("max_loaded cpu = %d\n", max_loaded);
564 Dprintk("min_loaded cpu = %d\n", min_loaded);
565 Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
566 Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
567 Dprintk("load imbalance = %lu\n", imbalance);
568
569 /* if imbalance is less than approx 10% of max load, then 555 /* if imbalance is less than approx 10% of max load, then
570 * observe diminishing returns action. - quit 556 * observe diminishing returns action. - quit
571 */ 557 */
572 if (imbalance < (max_cpu_irq >> 3)) { 558 if (imbalance < (max_cpu_irq >> 3))
573 Dprintk("Imbalance too trivial\n");
574 goto not_worth_the_effort; 559 goto not_worth_the_effort;
575 }
576 560
577tryanotherirq: 561tryanotherirq:
578 /* if we select an IRQ to move that can't go where we want, then 562 /* if we select an IRQ to move that can't go where we want, then
@@ -629,9 +613,6 @@ tryanotherirq:
629 cpus_and(tmp, target_cpu_mask, allowed_mask); 613 cpus_and(tmp, target_cpu_mask, allowed_mask);
630 614
631 if (!cpus_empty(tmp)) { 615 if (!cpus_empty(tmp)) {
632
633 Dprintk("irq = %d moved to cpu = %d\n",
634 selected_irq, min_loaded);
635 /* mark for change destination */ 616 /* mark for change destination */
636 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); 617 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
637 618
@@ -651,7 +632,6 @@ not_worth_the_effort:
651 */ 632 */
652 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, 633 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
653 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); 634 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
654 Dprintk("IRQ worth rotating not found\n");
655 return; 635 return;
656} 636}
657 637
@@ -1902,7 +1882,7 @@ __setup("no_timer_check", notimercheck);
1902 * - if this function detects that timer IRQs are defunct, then we fall 1882 * - if this function detects that timer IRQs are defunct, then we fall
1903 * back to ISA timer IRQs 1883 * back to ISA timer IRQs
1904 */ 1884 */
1905int __init timer_irq_works(void) 1885static int __init timer_irq_works(void)
1906{ 1886{
1907 unsigned long t1 = jiffies; 1887 unsigned long t1 = jiffies;
1908 1888
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ba44d40b066d..dd2b97fc00b2 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -149,15 +149,11 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
149 149
150#ifdef CONFIG_4KSTACKS 150#ifdef CONFIG_4KSTACKS
151 151
152/*
153 * These should really be __section__(".bss.page_aligned") as well, but
154 * gcc's 3.0 and earlier don't handle that correctly.
155 */
156static char softirq_stack[NR_CPUS * THREAD_SIZE] 152static char softirq_stack[NR_CPUS * THREAD_SIZE]
157 __attribute__((__aligned__(THREAD_SIZE))); 153 __attribute__((__section__(".bss.page_aligned")));
158 154
159static char hardirq_stack[NR_CPUS * THREAD_SIZE] 155static char hardirq_stack[NR_CPUS * THREAD_SIZE]
160 __attribute__((__aligned__(THREAD_SIZE))); 156 __attribute__((__section__(".bss.page_aligned")));
161 157
162/* 158/*
163 * allocate per-cpu stacks for hardirq and for softirq processing 159 * allocate per-cpu stacks for hardirq and for softirq processing
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index dde828a333c3..448a50b1324c 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -35,6 +35,7 @@
35#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
36#include <asm/desc.h> 36#include <asm/desc.h>
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38#include <asm/alternative.h>
38 39
39void jprobe_return_end(void); 40void jprobe_return_end(void);
40 41
@@ -169,16 +170,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
169 170
170void __kprobes arch_arm_kprobe(struct kprobe *p) 171void __kprobes arch_arm_kprobe(struct kprobe *p)
171{ 172{
172 *p->addr = BREAKPOINT_INSTRUCTION; 173 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
173 flush_icache_range((unsigned long) p->addr,
174 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
175} 174}
176 175
177void __kprobes arch_disarm_kprobe(struct kprobe *p) 176void __kprobes arch_disarm_kprobe(struct kprobe *p)
178{ 177{
179 *p->addr = p->opcode; 178 text_poke(p->addr, &p->opcode, 1);
180 flush_icache_range((unsigned long) p->addr,
181 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
182} 179}
183 180
184void __kprobes arch_remove_kprobe(struct kprobe *p) 181void __kprobes arch_remove_kprobe(struct kprobe *p)
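The ((unsigned char []){BREAKPOINT_INSTRUCTION}) construct above is a C99 compound literal: it creates an unnamed one-byte array so that a constant opcode can be passed where text_poke() expects a pointer. An equivalent, more verbose spelling (illustrative only):

	unsigned char bkpt = BREAKPOINT_INSTRUCTION;

	text_poke(p->addr, &bkpt, 1);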
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 03b7f5584d71..99beac7f96ce 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -353,7 +353,7 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
353 * Take the local apic timer and PIT/HPET into account. We don't 353 * Take the local apic timer and PIT/HPET into account. We don't
354 * know which one is active, when we have highres/dyntick on 354 * know which one is active, when we have highres/dyntick on
355 */ 355 */
356 sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_irqs(0); 356 sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0];
357 357
358 /* if the none of the timers isn't firing, this cpu isn't doing much */ 358 /* if the none of the timers isn't firing, this cpu isn't doing much */
359 if (!touched && last_irq_sums[cpu] == sum) { 359 if (!touched && last_irq_sums[cpu] == sum) {
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 53f07a8275e3..ea962c0667d5 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -124,20 +124,28 @@ unsigned paravirt_patch_ignore(unsigned len)
124 return len; 124 return len;
125} 125}
126 126
127struct branch {
128 unsigned char opcode;
129 u32 delta;
130} __attribute__((packed));
131
127unsigned paravirt_patch_call(void *target, u16 tgt_clobbers, 132unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
128 void *site, u16 site_clobbers, 133 void *site, u16 site_clobbers,
129 unsigned len) 134 unsigned len)
130{ 135{
131 unsigned char *call = site; 136 unsigned char *call = site;
132 unsigned long delta = (unsigned long)target - (unsigned long)(call+5); 137 unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
138 struct branch b;
133 139
134 if (tgt_clobbers & ~site_clobbers) 140 if (tgt_clobbers & ~site_clobbers)
135 return len; /* target would clobber too much for this site */ 141 return len; /* target would clobber too much for this site */
136 if (len < 5) 142 if (len < 5)
137 return len; /* call too long for patch site */ 143 return len; /* call too long for patch site */
138 144
139 *call++ = 0xe8; /* call */ 145 b.opcode = 0xe8; /* call */
140 *(unsigned long *)call = delta; 146 b.delta = delta;
147 BUILD_BUG_ON(sizeof(b) != 5);
148 text_poke(call, (unsigned char *)&b, 5);
141 149
142 return 5; 150 return 5;
143} 151}
@@ -146,12 +154,14 @@ unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
146{ 154{
147 unsigned char *jmp = site; 155 unsigned char *jmp = site;
148 unsigned long delta = (unsigned long)target - (unsigned long)(jmp+5); 156 unsigned long delta = (unsigned long)target - (unsigned long)(jmp+5);
157 struct branch b;
149 158
150 if (len < 5) 159 if (len < 5)
151 return len; /* call too long for patch site */ 160 return len; /* call too long for patch site */
152 161
153 *jmp++ = 0xe9; /* jmp */ 162 b.opcode = 0xe9; /* jmp */
154 *(unsigned long *)jmp = delta; 163 b.delta = delta;
164 text_poke(jmp, (unsigned char *)&b, 5);
155 165
156 return 5; 166 return 5;
157} 167}
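The packed struct branch mirrors the wire format of a 5-byte relative call/jmp: one opcode byte followed by a 32-bit displacement measured from the end of the instruction, which is why delta is computed as target - (site + 5). As a made-up example, a call patched in at 0xc0100000 targeting 0xc0100020 gets delta = 0x1b, and the bytes e8 1b 00 00 00 are written with a single text_poke() of sizeof(struct branch) == 5.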
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6c49acb96982..84664710b784 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -300,6 +300,7 @@ early_param("idle", idle_setup);
300void show_regs(struct pt_regs * regs) 300void show_regs(struct pt_regs * regs)
301{ 301{
302 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 302 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
303 unsigned long d0, d1, d2, d3, d6, d7;
303 304
304 printk("\n"); 305 printk("\n");
305 printk("Pid: %d, comm: %20s\n", current->pid, current->comm); 306 printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
@@ -324,6 +325,17 @@ void show_regs(struct pt_regs * regs)
324 cr3 = read_cr3(); 325 cr3 = read_cr3();
325 cr4 = read_cr4_safe(); 326 cr4 = read_cr4_safe();
326 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 327 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
328
329 get_debugreg(d0, 0);
330 get_debugreg(d1, 1);
331 get_debugreg(d2, 2);
332 get_debugreg(d3, 3);
333 printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
334 d0, d1, d2, d3);
335 get_debugreg(d6, 6);
336 get_debugreg(d7, 7);
337 printk("DR6: %08lx DR7: %08lx\n", d6, d7);
338
327 show_trace(NULL, regs, &regs->esp); 339 show_trace(NULL, regs, &regs->esp);
328} 340}
329 341
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 5513f8d5b5be..0d796248866c 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -113,6 +113,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
113 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), 113 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
114 }, 114 },
115 }, 115 },
116 { /* Handle problems with rebooting on Dell OptiPlex 745's SFF */
117 .callback = set_bios_reboot,
118 .ident = "Dell OptiPlex 745",
119 .matches = {
120 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
121 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
122 DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
123 },
124 },
116 { /* Handle problems with rebooting on Dell 2400's */ 125 { /* Handle problems with rebooting on Dell 2400's */
117 .callback = set_bios_reboot, 126 .callback = set_bios_reboot,
118 .ident = "Dell PowerEdge 2400", 127 .ident = "Dell PowerEdge 2400",
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 74871d066c2b..d474cd639bcb 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -273,18 +273,18 @@ unsigned long __init find_max_low_pfn(void)
273 printk(KERN_WARNING "Warning only %ldMB will be used.\n", 273 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
274 MAXMEM>>20); 274 MAXMEM>>20);
275 if (max_pfn > MAX_NONPAE_PFN) 275 if (max_pfn > MAX_NONPAE_PFN)
276 printk(KERN_WARNING "Use a PAE enabled kernel.\n"); 276 printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
277 else 277 else
278 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); 278 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
279 max_pfn = MAXMEM_PFN; 279 max_pfn = MAXMEM_PFN;
280#else /* !CONFIG_HIGHMEM */ 280#else /* !CONFIG_HIGHMEM */
281#ifndef CONFIG_X86_PAE 281#ifndef CONFIG_HIGHMEM64G
282 if (max_pfn > MAX_NONPAE_PFN) { 282 if (max_pfn > MAX_NONPAE_PFN) {
283 max_pfn = MAX_NONPAE_PFN; 283 max_pfn = MAX_NONPAE_PFN;
284 printk(KERN_WARNING "Warning only 4GB will be used.\n"); 284 printk(KERN_WARNING "Warning only 4GB will be used.\n");
285 printk(KERN_WARNING "Use a PAE enabled kernel.\n"); 285 printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
286 } 286 }
287#endif /* !CONFIG_X86_PAE */ 287#endif /* !CONFIG_HIGHMEM64G */
288#endif /* !CONFIG_HIGHMEM */ 288#endif /* !CONFIG_HIGHMEM */
289 } else { 289 } else {
290 if (highmem_pages == -1) 290 if (highmem_pages == -1)
@@ -466,7 +466,7 @@ void __init setup_bootmem_allocator(void)
466 * 466 *
467 * This should all compile down to nothing when NUMA is off. 467 * This should all compile down to nothing when NUMA is off.
468 */ 468 */
469void __init remapped_pgdat_init(void) 469static void __init remapped_pgdat_init(void)
470{ 470{
471 int nid; 471 int nid;
472 472
@@ -640,6 +640,7 @@ void __init setup_arch(char **cmdline_p)
640#endif 640#endif
641 641
642 e820_register_memory(); 642 e820_register_memory();
643 e820_mark_nosave_regions();
643 644
644#ifdef CONFIG_VT 645#ifdef CONFIG_VT
645#if defined(CONFIG_VGA_CONSOLE) 646#if defined(CONFIG_VGA_CONSOLE)
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index d574e38f0f77..f5dd85656c18 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -199,6 +199,13 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
199 return eax; 199 return eax;
200 200
201badframe: 201badframe:
202 if (show_unhandled_signals && printk_ratelimit())
203 printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
204 " esp:%lx oeax:%lx\n",
205 current->pid > 1 ? KERN_INFO : KERN_EMERG,
206 current->comm, current->pid, frame, regs->eip,
207 regs->esp, regs->orig_eax);
208
202 force_sig(SIGSEGV, current); 209 force_sig(SIGSEGV, current);
203 return 0; 210 return 0;
204} 211}
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 5910d3fac561..e4f61d1c6248 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -308,7 +308,7 @@ cpumask_t cpu_coregroup_map(int cpu)
308/* representing cpus for which sibling maps can be computed */ 308/* representing cpus for which sibling maps can be computed */
309static cpumask_t cpu_sibling_setup_map; 309static cpumask_t cpu_sibling_setup_map;
310 310
311void set_cpu_sibling_map(int cpu) 311void __cpuinit set_cpu_sibling_map(int cpu)
312{ 312{
313 int i; 313 int i;
314 struct cpuinfo_x86 *c = cpu_data; 314 struct cpuinfo_x86 *c = cpu_data;
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index ff4ee6f3326b..6deb159d08e0 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -336,7 +336,9 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
336 336
337int in_gate_area(struct task_struct *task, unsigned long addr) 337int in_gate_area(struct task_struct *task, unsigned long addr)
338{ 338{
339 return 0; 339 const struct vm_area_struct *vma = get_gate_vma(task);
340
341 return vma && addr >= vma->vm_start && addr < vma->vm_end;
340} 342}
341 343
342int in_gate_area_no_task(unsigned long addr) 344int in_gate_area_no_task(unsigned long addr)
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a665df61f08c..19a6c678d02e 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -207,55 +207,9 @@ unsigned long read_persistent_clock(void)
207 return retval; 207 return retval;
208} 208}
209 209
210static void sync_cmos_clock(unsigned long dummy); 210int update_persistent_clock(struct timespec now)
211
212static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
213int no_sync_cmos_clock;
214
215static void sync_cmos_clock(unsigned long dummy)
216{
217 struct timeval now, next;
218 int fail = 1;
219
220 /*
221 * If we have an externally synchronized Linux clock, then update
222 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
223 * called as close as possible to 500 ms before the new second starts.
224 * This code is run on a timer. If the clock is set, that timer
225 * may not expire at the correct time. Thus, we adjust...
226 */
227 if (!ntp_synced())
228 /*
229 * Not synced, exit, do not restart a timer (if one is
230 * running, let it run out).
231 */
232 return;
233
234 do_gettimeofday(&now);
235 if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
236 now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
237 fail = set_rtc_mmss(now.tv_sec);
238
239 next.tv_usec = USEC_AFTER - now.tv_usec;
240 if (next.tv_usec <= 0)
241 next.tv_usec += USEC_PER_SEC;
242
243 if (!fail)
244 next.tv_sec = 659;
245 else
246 next.tv_sec = 0;
247
248 if (next.tv_usec >= USEC_PER_SEC) {
249 next.tv_sec++;
250 next.tv_usec -= USEC_PER_SEC;
251 }
252 mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
253}
254
255void notify_arch_cmos_timer(void)
256{ 211{
257 if (!no_sync_cmos_clock) 212 return set_rtc_mmss(now.tv_sec);
258 mod_timer(&sync_cmos_timer, jiffies + 1);
259} 213}
260 214
261extern void (*late_time_init)(void); 215extern void (*late_time_init)(void);
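The ~11-minute CMOS resync loop deleted here is not lost; the equivalent logic now lives in the generic NTP code, which calls the architecture's update_persistent_clock() hook, and on i386 that hook simply forwards to set_rtc_mmss(). A hedged sketch of what such a generic caller looks like (not the actual generic code, shown only to illustrate the new hook):

	struct timespec now;

	getnstimeofday(&now);
	if (ntp_synced())
		update_persistent_clock(now);	/* ends up in set_rtc_mmss() on i386 */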
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 3e7753c78b9b..cfffe3dd9e83 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
152 if (!stack) { 152 if (!stack) {
153 unsigned long dummy; 153 unsigned long dummy;
154 stack = &dummy; 154 stack = &dummy;
155 if (task && task != current) 155 if (task != current)
156 stack = (unsigned long *)task->thread.esp; 156 stack = (unsigned long *)task->thread.esp;
157 } 157 }
158 158
@@ -211,6 +211,7 @@ static void print_trace_address(void *data, unsigned long addr)
211{ 211{
212 printk("%s [<%08lx>] ", (char *)data, addr); 212 printk("%s [<%08lx>] ", (char *)data, addr);
213 print_symbol("%s\n", addr); 213 print_symbol("%s\n", addr);
214 touch_nmi_watchdog();
214} 215}
215 216
216static struct stacktrace_ops print_trace_ops = { 217static struct stacktrace_ops print_trace_ops = {
@@ -617,6 +618,13 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
617 618
618 current->thread.error_code = error_code; 619 current->thread.error_code = error_code;
619 current->thread.trap_no = 13; 620 current->thread.trap_no = 13;
621 if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
622 printk_ratelimit())
623 printk(KERN_INFO
624 "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
625 current->comm, current->pid,
626 regs->eip, regs->esp, error_code);
627
620 force_sig(SIGSEGV, current); 628 force_sig(SIGSEGV, current);
621 return; 629 return;
622 630
@@ -767,6 +775,8 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
767 reassert_nmi(); 775 reassert_nmi();
768} 776}
769 777
778static int ignore_nmis;
779
770fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code) 780fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
771{ 781{
772 int cpu; 782 int cpu;
@@ -777,11 +787,24 @@ fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
777 787
778 ++nmi_count(cpu); 788 ++nmi_count(cpu);
779 789
780 default_do_nmi(regs); 790 if (!ignore_nmis)
791 default_do_nmi(regs);
781 792
782 nmi_exit(); 793 nmi_exit();
783} 794}
784 795
796void stop_nmi(void)
797{
798 acpi_nmi_disable();
799 ignore_nmis++;
800}
801
802void restart_nmi(void)
803{
804 ignore_nmis--;
805 acpi_nmi_enable();
806}
807
785#ifdef CONFIG_KPROBES 808#ifdef CONFIG_KPROBES
786fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code) 809fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
787{ 810{
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index f9b845f4e692..b1b5ab08b26e 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -32,6 +32,7 @@
32#include <asm/apicdef.h> 32#include <asm/apicdef.h>
33#include <asm/apic.h> 33#include <asm/apic.h>
34#include <asm/timer.h> 34#include <asm/timer.h>
35#include <asm/i8253.h>
35 36
36#include <irq_vectors.h> 37#include <irq_vectors.h>
37#include "io_ports.h" 38#include "io_ports.h"
@@ -142,6 +143,7 @@ static void vmi_timer_set_mode(enum clock_event_mode mode,
142 143
143 switch (mode) { 144 switch (mode) {
144 case CLOCK_EVT_MODE_ONESHOT: 145 case CLOCK_EVT_MODE_ONESHOT:
146 case CLOCK_EVT_MODE_RESUME:
145 break; 147 break;
146 case CLOCK_EVT_MODE_PERIODIC: 148 case CLOCK_EVT_MODE_PERIODIC:
147 cycles_per_hz = vmi_timer_ops.get_cycle_frequency(); 149 cycles_per_hz = vmi_timer_ops.get_cycle_frequency();
diff --git a/arch/i386/kernel/vsyscall-note.S b/arch/i386/kernel/vsyscall-note.S
index 271f16a8ca01..07c0daf78237 100644
--- a/arch/i386/kernel/vsyscall-note.S
+++ b/arch/i386/kernel/vsyscall-note.S
@@ -14,7 +14,6 @@ ELFNOTE_START(Linux, 0, "a")
14ELFNOTE_END 14ELFNOTE_END
15 15
16#ifdef CONFIG_XEN 16#ifdef CONFIG_XEN
17
18/* 17/*
19 * Add a special note telling glibc's dynamic linker a fake hardware 18 * Add a special note telling glibc's dynamic linker a fake hardware
20 * flavor that it will use to choose the search path for libraries in the 19 * flavor that it will use to choose the search path for libraries in the
@@ -28,15 +27,19 @@ ELFNOTE_END
28 * It should contain: 27 * It should contain:
29 * hwcap 1 nosegneg 28 * hwcap 1 nosegneg
30 * to match the mapping of bit to name that we give here. 29 * to match the mapping of bit to name that we give here.
30 *
31 * At runtime, the fake hardware feature will be considered to be present
32 * if its bit is set in the mask word. So, we start with the mask 0, and
33 * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
31 */ 34 */
32 35
33/* Bit used for the pseudo-hwcap for non-negative segments. We use 36#include "../xen/vdso.h" /* Defines VDSO_NOTE_NONEGSEG_BIT. */
34 bit 1 to avoid bugs in some versions of glibc when bit 0 is
35 used; the choice is otherwise arbitrary. */
36#define VDSO_NOTE_NONEGSEG_BIT 1
37 37
38 .globl VDSO_NOTE_MASK
38ELFNOTE_START(GNU, 2, "a") 39ELFNOTE_START(GNU, 2, "a")
39 .long 1, 1<<VDSO_NOTE_NONEGSEG_BIT /* ncaps, mask */ 40 .long 1 /* ncaps */
41VDSO_NOTE_MASK:
42 .long 0 /* mask */
40 .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */ 43 .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
41ELFNOTE_END 44ELFNOTE_END
42#endif 45#endif
diff --git a/arch/i386/lib/Makefile b/arch/i386/lib/Makefile
index 22d8ac5815f0..4d105fdfe817 100644
--- a/arch/i386/lib/Makefile
+++ b/arch/i386/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5 5
6lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \ 6lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
7 bitops.o semaphore.o 7 bitops.o semaphore.o string.o
8 8
9lib-$(CONFIG_X86_USE_3DNOW) += mmx.o 9lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
10 10
diff --git a/arch/i386/lib/string.c b/arch/i386/lib/string.c
new file mode 100644
index 000000000000..2c773fefa3dd
--- /dev/null
+++ b/arch/i386/lib/string.c
@@ -0,0 +1,257 @@
1/*
2 * Most of the string-functions are rather heavily hand-optimized,
3 * see especially strsep,strstr,str[c]spn. They should work, but are not
4 * very easy to understand. Everything is done entirely within the register
5 * set, making the functions fast and clean. String instructions have been
6 * used through-out, making for "slightly" unclear code :-)
7 *
8 * AK: On P4 and K7 using non string instruction implementations might be faster
9 * for large memory blocks. But most of them are unlikely to be used on large
10 * strings.
11 */
12
13#include <linux/string.h>
14#include <linux/module.h>
15
16#ifdef __HAVE_ARCH_STRCPY
17char *strcpy(char * dest,const char *src)
18{
19 int d0, d1, d2;
20 asm volatile( "1:\tlodsb\n\t"
21 "stosb\n\t"
22 "testb %%al,%%al\n\t"
23 "jne 1b"
24 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
25 :"0" (src),"1" (dest) : "memory");
26 return dest;
27}
28EXPORT_SYMBOL(strcpy);
29#endif
30
31#ifdef __HAVE_ARCH_STRNCPY
32char *strncpy(char * dest,const char *src,size_t count)
33{
34 int d0, d1, d2, d3;
35 asm volatile( "1:\tdecl %2\n\t"
36 "js 2f\n\t"
37 "lodsb\n\t"
38 "stosb\n\t"
39 "testb %%al,%%al\n\t"
40 "jne 1b\n\t"
41 "rep\n\t"
42 "stosb\n"
43 "2:"
44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
45 :"0" (src),"1" (dest),"2" (count) : "memory");
46 return dest;
47}
48EXPORT_SYMBOL(strncpy);
49#endif
50
51#ifdef __HAVE_ARCH_STRCAT
52char *strcat(char * dest,const char * src)
53{
54 int d0, d1, d2, d3;
55 asm volatile( "repne\n\t"
56 "scasb\n\t"
57 "decl %1\n"
58 "1:\tlodsb\n\t"
59 "stosb\n\t"
60 "testb %%al,%%al\n\t"
61 "jne 1b"
62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory");
64 return dest;
65}
66EXPORT_SYMBOL(strcat);
67#endif
68
69#ifdef __HAVE_ARCH_STRNCAT
70char *strncat(char * dest,const char * src,size_t count)
71{
72 int d0, d1, d2, d3;
73 asm volatile( "repne\n\t"
74 "scasb\n\t"
75 "decl %1\n\t"
76 "movl %8,%3\n"
77 "1:\tdecl %3\n\t"
78 "js 2f\n\t"
79 "lodsb\n\t"
80 "stosb\n\t"
81 "testb %%al,%%al\n\t"
82 "jne 1b\n"
83 "2:\txorl %2,%2\n\t"
84 "stosb"
85 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
86 : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
87 : "memory");
88 return dest;
89}
90EXPORT_SYMBOL(strncat);
91#endif
92
93#ifdef __HAVE_ARCH_STRCMP
94int strcmp(const char * cs,const char * ct)
95{
96 int d0, d1;
97 int res;
98 asm volatile( "1:\tlodsb\n\t"
99 "scasb\n\t"
100 "jne 2f\n\t"
101 "testb %%al,%%al\n\t"
102 "jne 1b\n\t"
103 "xorl %%eax,%%eax\n\t"
104 "jmp 3f\n"
105 "2:\tsbbl %%eax,%%eax\n\t"
106 "orb $1,%%al\n"
107 "3:"
108 :"=a" (res), "=&S" (d0), "=&D" (d1)
109 :"1" (cs),"2" (ct)
110 :"memory");
111 return res;
112}
113EXPORT_SYMBOL(strcmp);
114#endif
115
116#ifdef __HAVE_ARCH_STRNCMP
117int strncmp(const char * cs,const char * ct,size_t count)
118{
119 int res;
120 int d0, d1, d2;
121 asm volatile( "1:\tdecl %3\n\t"
122 "js 2f\n\t"
123 "lodsb\n\t"
124 "scasb\n\t"
125 "jne 3f\n\t"
126 "testb %%al,%%al\n\t"
127 "jne 1b\n"
128 "2:\txorl %%eax,%%eax\n\t"
129 "jmp 4f\n"
130 "3:\tsbbl %%eax,%%eax\n\t"
131 "orb $1,%%al\n"
132 "4:"
133 :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
134 :"1" (cs),"2" (ct),"3" (count)
135 :"memory");
136 return res;
137}
138EXPORT_SYMBOL(strncmp);
139#endif
140
141#ifdef __HAVE_ARCH_STRCHR
142char *strchr(const char * s, int c)
143{
144 int d0;
145 char * res;
146 asm volatile( "movb %%al,%%ah\n"
147 "1:\tlodsb\n\t"
148 "cmpb %%ah,%%al\n\t"
149 "je 2f\n\t"
150 "testb %%al,%%al\n\t"
151 "jne 1b\n\t"
152 "movl $1,%1\n"
153 "2:\tmovl %1,%0\n\t"
154 "decl %0"
155 :"=a" (res), "=&S" (d0)
156 :"1" (s),"0" (c)
157 :"memory");
158 return res;
159}
160EXPORT_SYMBOL(strchr);
161#endif
162
163#ifdef __HAVE_ARCH_STRRCHR
164char *strrchr(const char * s, int c)
165{
166 int d0, d1;
167 char * res;
168 asm volatile( "movb %%al,%%ah\n"
169 "1:\tlodsb\n\t"
170 "cmpb %%ah,%%al\n\t"
171 "jne 2f\n\t"
172 "leal -1(%%esi),%0\n"
173 "2:\ttestb %%al,%%al\n\t"
174 "jne 1b"
175 :"=g" (res), "=&S" (d0), "=&a" (d1)
176 :"0" (0),"1" (s),"2" (c)
177 :"memory");
178 return res;
179}
180EXPORT_SYMBOL(strrchr);
181#endif
182
183#ifdef __HAVE_ARCH_STRLEN
184size_t strlen(const char * s)
185{
186 int d0;
187 int res;
188 asm volatile( "repne\n\t"
189 "scasb\n\t"
190 "notl %0\n\t"
191 "decl %0"
192 :"=c" (res), "=&D" (d0)
193 :"1" (s),"a" (0), "0" (0xffffffffu)
194 :"memory");
195 return res;
196}
197EXPORT_SYMBOL(strlen);
198#endif
199
200#ifdef __HAVE_ARCH_MEMCHR
201void *memchr(const void *cs,int c,size_t count)
202{
203 int d0;
204 void *res;
205 if (!count)
206 return NULL;
207 asm volatile( "repne\n\t"
208 "scasb\n\t"
209 "je 1f\n\t"
210 "movl $1,%0\n"
211 "1:\tdecl %0"
212 :"=D" (res), "=&c" (d0)
213 :"a" (c),"0" (cs),"1" (count)
214 :"memory");
215 return res;
216}
217EXPORT_SYMBOL(memchr);
218#endif
219
220#ifdef __HAVE_ARCH_MEMSCAN
221void *memscan(void * addr, int c, size_t size)
222{
223 if (!size)
224 return addr;
225 asm volatile("repnz; scasb\n\t"
226 "jnz 1f\n\t"
227 "dec %%edi\n"
228 "1:"
229 : "=D" (addr), "=c" (size)
230 : "0" (addr), "1" (size), "a" (c)
231 : "memory");
232 return addr;
233}
234EXPORT_SYMBOL(memscan);
235#endif
236
237#ifdef __HAVE_ARCH_STRNLEN
238size_t strnlen(const char *s, size_t count)
239{
240 int d0;
241 int res;
242 asm volatile( "movl %2,%0\n\t"
243 "jmp 2f\n"
244 "1:\tcmpb $0,(%0)\n\t"
245 "je 3f\n\t"
246 "incl %0\n"
247 "2:\tdecl %1\n\t"
248 "cmpl $-1,%1\n\t"
249 "jne 1b\n"
250 "3:\tsubl %2,%0"
251 :"=a" (res), "=&d" (d0)
252 :"c" (s),"1" (count)
253 :"memory");
254 return res;
255}
256EXPORT_SYMBOL(strnlen);
257#endif
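Each function in the new file is compiled only when <asm/string.h> defines the matching __HAVE_ARCH_* macro; otherwise the generic lib/string.c implementation is used. A minimal sketch of the opt-in for one function (the i386 header already provides the full set; its exact contents are not reproduced here):

/* include/asm-i386/string.h, sketch of the relevant part only */
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);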
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index e92a10124935..01ffdd4964f0 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -283,6 +283,8 @@ static inline int vmalloc_fault(unsigned long address)
283 return 0; 283 return 0;
284} 284}
285 285
286int show_unhandled_signals = 1;
287
286/* 288/*
287 * This routine handles page faults. It determines the address, 289 * This routine handles page faults. It determines the address,
288 * and the problem, and then passes it off to one of the appropriate 290 * and the problem, and then passes it off to one of the appropriate
@@ -469,6 +471,14 @@ bad_area_nosemaphore:
469 if (is_prefetch(regs, address, error_code)) 471 if (is_prefetch(regs, address, error_code))
470 return; 472 return;
471 473
474 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
475 printk_ratelimit()) {
476 printk("%s%s[%d]: segfault at %08lx eip %08lx "
477 "esp %08lx error %lx\n",
478 tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
479 tsk->comm, tsk->pid, address, regs->eip,
480 regs->esp, error_code);
481 }
472 tsk->thread.cr2 = address; 482 tsk->thread.cr2 = address;
473 /* Kernel addresses are always protection faults */ 483 /* Kernel addresses are always protection faults */
474 tsk->thread.error_code = error_code | (address >= TASK_SIZE); 484 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 6e72f22e6bbd..c3b9905af2d5 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -471,6 +471,10 @@ void zap_low_mappings (void)
471 flush_tlb_all(); 471 flush_tlb_all();
472} 472}
473 473
474int nx_enabled = 0;
475
476#ifdef CONFIG_X86_PAE
477
474static int disable_nx __initdata = 0; 478static int disable_nx __initdata = 0;
475u64 __supported_pte_mask __read_mostly = ~_PAGE_NX; 479u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
476EXPORT_SYMBOL_GPL(__supported_pte_mask); 480EXPORT_SYMBOL_GPL(__supported_pte_mask);
@@ -500,9 +504,6 @@ static int __init noexec_setup(char *str)
500} 504}
501early_param("noexec", noexec_setup); 505early_param("noexec", noexec_setup);
502 506
503int nx_enabled = 0;
504#ifdef CONFIG_X86_PAE
505
506static void __init set_nx(void) 507static void __init set_nx(void)
507{ 508{
508 unsigned int v[4], l, h; 509 unsigned int v[4], l, h;
@@ -799,17 +800,9 @@ void mark_rodata_ro(void)
799 unsigned long start = PFN_ALIGN(_text); 800 unsigned long start = PFN_ALIGN(_text);
800 unsigned long size = PFN_ALIGN(_etext) - start; 801 unsigned long size = PFN_ALIGN(_etext) - start;
801 802
802#ifndef CONFIG_KPROBES 803 change_page_attr(virt_to_page(start),
803#ifdef CONFIG_HOTPLUG_CPU 804 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
804 /* It must still be possible to apply SMP alternatives. */ 805 printk("Write protecting the kernel text: %luk\n", size >> 10);
805 if (num_possible_cpus() <= 1)
806#endif
807 {
808 change_page_attr(virt_to_page(start),
809 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
810 printk("Write protecting the kernel text: %luk\n", size >> 10);
811 }
812#endif
813 start += size; 806 start += size;
814 size = (unsigned long)__end_rodata - start; 807 size = (unsigned long)__end_rodata - start;
815 change_page_attr(virt_to_page(start), 808 change_page_attr(virt_to_page(start),
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index fff08ae7b5ed..0b278315d737 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -196,7 +196,7 @@ void iounmap(volatile void __iomem *addr)
196 /* Reset the direct mapping. Can block */ 196 /* Reset the direct mapping. Can block */
197 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) { 197 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
198 change_page_attr(virt_to_page(__va(p->phys_addr)), 198 change_page_attr(virt_to_page(__va(p->phys_addr)),
199 p->size >> PAGE_SHIFT, 199 get_vm_area_size(p) >> PAGE_SHIFT,
200 PAGE_KERNEL); 200 PAGE_KERNEL);
201 global_flush_tlb(); 201 global_flush_tlb();
202 } 202 }
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 37992ffb1633..8927222b3ab2 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -82,7 +82,7 @@ static void flush_kernel_map(void *arg)
82 struct page *p; 82 struct page *p;
83 83
84 /* High level code is not ready for clflush yet */ 84 /* High level code is not ready for clflush yet */
85 if (0 && cpu_has_clflush) { 85 if (cpu_has_clflush) {
86 list_for_each_entry (p, lh, lru) 86 list_for_each_entry (p, lh, lru)
87 cache_flush_page(p); 87 cache_flush_page(p);
88 } else if (boot_cpu_data.x86_model >= 4) 88 } else if (boot_cpu_data.x86_model >= 4)
@@ -136,6 +136,12 @@ static inline void revert_page(struct page *kpte_page, unsigned long address)
136 ref_prot)); 136 ref_prot));
137} 137}
138 138
139static inline void save_page(struct page *kpte_page)
140{
141 if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
142 list_add(&kpte_page->lru, &df_list);
143}
144
139static int 145static int
140__change_page_attr(struct page *page, pgprot_t prot) 146__change_page_attr(struct page *page, pgprot_t prot)
141{ 147{
@@ -150,6 +156,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
150 if (!kpte) 156 if (!kpte)
151 return -EINVAL; 157 return -EINVAL;
152 kpte_page = virt_to_page(kpte); 158 kpte_page = virt_to_page(kpte);
159 BUG_ON(PageLRU(kpte_page));
160 BUG_ON(PageCompound(kpte_page));
161
153 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 162 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
154 if (!pte_huge(*kpte)) { 163 if (!pte_huge(*kpte)) {
155 set_pte_atomic(kpte, mk_pte(page, prot)); 164 set_pte_atomic(kpte, mk_pte(page, prot));
@@ -179,11 +188,11 @@ __change_page_attr(struct page *page, pgprot_t prot)
179 * time (not via split_large_page) and in turn we must not 188 * time (not via split_large_page) and in turn we must not
180 * replace it with a largepage. 189 * replace it with a largepage.
181 */ 190 */
191
192 save_page(kpte_page);
182 if (!PageReserved(kpte_page)) { 193 if (!PageReserved(kpte_page)) {
183 if (cpu_has_pse && (page_private(kpte_page) == 0)) { 194 if (cpu_has_pse && (page_private(kpte_page) == 0)) {
184 ClearPagePrivate(kpte_page);
185 paravirt_release_pt(page_to_pfn(kpte_page)); 195 paravirt_release_pt(page_to_pfn(kpte_page));
186 list_add(&kpte_page->lru, &df_list);
187 revert_page(kpte_page, address); 196 revert_page(kpte_page, address);
188 } 197 }
189 } 198 }
@@ -236,6 +245,11 @@ void global_flush_tlb(void)
236 spin_unlock_irq(&cpa_lock); 245 spin_unlock_irq(&cpa_lock);
237 flush_map(&l); 246 flush_map(&l);
238 list_for_each_entry_safe(pg, next, &l, lru) { 247 list_for_each_entry_safe(pg, next, &l, lru) {
248 list_del(&pg->lru);
249 clear_bit(PG_arch_1, &pg->flags);
250 if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
251 continue;
252 ClearPagePrivate(pg);
239 __free_page(pg); 253 __free_page(pg);
240 } 254 }
241} 255}
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 8d7c0864cc04..01437c46baae 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -235,7 +235,7 @@ static inline void pgd_list_del(pgd_t *pgd)
235 235
236#if (PTRS_PER_PMD == 1) 236#if (PTRS_PER_PMD == 1)
237/* Non-PAE pgd constructor */ 237/* Non-PAE pgd constructor */
238void pgd_ctor(void *pgd) 238static void pgd_ctor(void *pgd)
239{ 239{
240 unsigned long flags; 240 unsigned long flags;
241 241
@@ -257,7 +257,7 @@ void pgd_ctor(void *pgd)
257} 257}
258#else /* PTRS_PER_PMD > 1 */ 258#else /* PTRS_PER_PMD > 1 */
259/* PAE pgd constructor */ 259/* PAE pgd constructor */
260void pgd_ctor(void *pgd) 260static void pgd_ctor(void *pgd)
261{ 261{
262 /* PAE, kernel PMD may be shared */ 262 /* PAE, kernel PMD may be shared */
263 263
@@ -276,7 +276,7 @@ void pgd_ctor(void *pgd)
276} 276}
277#endif /* PTRS_PER_PMD */ 277#endif /* PTRS_PER_PMD */
278 278
279void pgd_dtor(void *pgd) 279static void pgd_dtor(void *pgd)
280{ 280{
281 unsigned long flags; /* can be called from interrupt context */ 281 unsigned long flags; /* can be called from interrupt context */
282 282
diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c
index b33aea845f58..bc8a44bddaa7 100644
--- a/arch/i386/pci/acpi.c
+++ b/arch/i386/pci/acpi.c
@@ -8,20 +8,42 @@
8struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum) 8struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
9{ 9{
10 struct pci_bus *bus; 10 struct pci_bus *bus;
11 struct pci_sysdata *sd;
12 int pxm;
13
14 /* Allocate per-root-bus (not per bus) arch-specific data.
15 * TODO: leak; this memory is never freed.
16 * It's arguable whether it's worth the trouble to care.
17 */
18 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
19 if (!sd) {
20 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
21 return NULL;
22 }
11 23
12 if (domain != 0) { 24 if (domain != 0) {
13 printk(KERN_WARNING "PCI: Multiple domains not supported\n"); 25 printk(KERN_WARNING "PCI: Multiple domains not supported\n");
26 kfree(sd);
14 return NULL; 27 return NULL;
15 } 28 }
16 29
17 bus = pcibios_scan_root(busnum); 30 sd->node = -1;
31
32 pxm = acpi_get_pxm(device->handle);
33#ifdef CONFIG_ACPI_NUMA
34 if (pxm >= 0)
35 sd->node = pxm_to_node(pxm);
36#endif
37
38 bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
39 if (!bus)
40 kfree(sd);
41
18#ifdef CONFIG_ACPI_NUMA 42#ifdef CONFIG_ACPI_NUMA
19 if (bus != NULL) { 43 if (bus != NULL) {
20 int pxm = acpi_get_pxm(device->handle);
21 if (pxm >= 0) { 44 if (pxm >= 0) {
22 bus->sysdata = (void *)(unsigned long)pxm_to_node(pxm); 45 printk("bus %d -> pxm %d -> node %d\n",
23 printk("bus %d -> pxm %d -> node %ld\n", 46 busnum, pxm, sd->node);
24 busnum, pxm, (long)(bus->sysdata));
25 } 47 }
26 } 48 }
27#endif 49#endif
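Both this file and arch/i386/pci/common.c below now pass a kzalloc'ed struct pci_sysdata to pci_scan_bus_parented() instead of hiding the NUMA node in bus->sysdata as a cast integer. A sketch of what the sysdata carries and how a consumer could read it back; the real struct lives in the i386 PCI header and may have more fields, and the helper name here is illustrative:

struct pci_sysdata {
	int node;	/* NUMA node of the root bus, -1 when unknown */
};

static inline int example_bus_to_node(struct pci_bus *bus)
{
	struct pci_sysdata *sd = bus->sysdata;

	return sd ? sd->node : -1;
}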
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 3f78d4d8ecf3..85503deeda46 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -293,6 +293,7 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
293struct pci_bus * __devinit pcibios_scan_root(int busnum) 293struct pci_bus * __devinit pcibios_scan_root(int busnum)
294{ 294{
295 struct pci_bus *bus = NULL; 295 struct pci_bus *bus = NULL;
296 struct pci_sysdata *sd;
296 297
297 dmi_check_system(pciprobe_dmi_table); 298 dmi_check_system(pciprobe_dmi_table);
298 299
@@ -303,9 +304,19 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
303 } 304 }
304 } 305 }
305 306
307 /* Allocate per-root-bus (not per bus) arch-specific data.
308 * TODO: leak; this memory is never freed.
309 * It's arguable whether it's worth the trouble to care.
310 */
311 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
312 if (!sd) {
313 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
314 return NULL;
315 }
316
306 printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum); 317 printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
307 318
308 return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, NULL); 319 return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
309} 320}
310 321
311extern u8 pci_cache_line_size; 322extern u8 pci_cache_line_size;
diff --git a/arch/i386/pci/mmconfig-shared.c b/arch/i386/pci/mmconfig-shared.c
index c7cabeed4d7b..4df637e34f81 100644
--- a/arch/i386/pci/mmconfig-shared.c
+++ b/arch/i386/pci/mmconfig-shared.c
@@ -24,6 +24,9 @@
24 24
25DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS); 25DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS);
26 26
27/* Indicate if the mmcfg resources have been placed into the resource table. */
28static int __initdata pci_mmcfg_resources_inserted;
29
27/* K8 systems have some devices (typically in the builtin northbridge) 30/* K8 systems have some devices (typically in the builtin northbridge)
28 that are only accessible using type1 31 that are only accessible using type1
29 Normally this can be expressed in the MCFG by not listing them 32 Normally this can be expressed in the MCFG by not listing them
@@ -170,7 +173,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
170 return name != NULL; 173 return name != NULL;
171} 174}
172 175
173static void __init pci_mmcfg_insert_resources(void) 176static void __init pci_mmcfg_insert_resources(unsigned long resource_flags)
174{ 177{
175#define PCI_MMCFG_RESOURCE_NAME_LEN 19 178#define PCI_MMCFG_RESOURCE_NAME_LEN 19
176 int i; 179 int i;
@@ -194,10 +197,13 @@ static void __init pci_mmcfg_insert_resources(void)
194 cfg->pci_segment); 197 cfg->pci_segment);
195 res->start = cfg->address; 198 res->start = cfg->address;
196 res->end = res->start + (num_buses << 20) - 1; 199 res->end = res->start + (num_buses << 20) - 1;
197 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 200 res->flags = IORESOURCE_MEM | resource_flags;
198 insert_resource(&iomem_resource, res); 201 insert_resource(&iomem_resource, res);
199 names += PCI_MMCFG_RESOURCE_NAME_LEN; 202 names += PCI_MMCFG_RESOURCE_NAME_LEN;
200 } 203 }
204
205 /* Mark that the resources have been inserted. */
206 pci_mmcfg_resources_inserted = 1;
201} 207}
202 208
203static void __init pci_mmcfg_reject_broken(int type) 209static void __init pci_mmcfg_reject_broken(int type)
@@ -267,7 +273,43 @@ void __init pci_mmcfg_init(int type)
267 if (type == 1) 273 if (type == 1)
268 unreachable_devices(); 274 unreachable_devices();
269 if (known_bridge) 275 if (known_bridge)
270 pci_mmcfg_insert_resources(); 276 pci_mmcfg_insert_resources(IORESOURCE_BUSY);
271 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 277 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
278 } else {
279 /*
280 * Signal not to attempt to insert mmcfg resources because
281 * the architecture mmcfg setup could not initialize.
282 */
283 pci_mmcfg_resources_inserted = 1;
272 } 284 }
273} 285}
286
287static int __init pci_mmcfg_late_insert_resources(void)
288{
289 /*
290 * If resources are already inserted or we are not using MMCONFIG,
291 * don't insert the resources.
292 */
293 if ((pci_mmcfg_resources_inserted == 1) ||
294 (pci_probe & PCI_PROBE_MMCONF) == 0 ||
295 (pci_mmcfg_config_num == 0) ||
296 (pci_mmcfg_config == NULL) ||
297 (pci_mmcfg_config[0].address == 0))
298 return 1;
299
300 /*
301 * Attempt to insert the mmcfg resources but not with the busy flag
302 * marked so it won't cause request errors when __request_region is
303 * called.
304 */
305 pci_mmcfg_insert_resources(0);
306
307 return 0;
308}
309
310/*
311 * Perform MMCONFIG resource insertion after PCI initialization to allow for
312 * misprogrammed MCFG tables that state larger sizes but actually conflict
313 * with other system resources.
314 */
315late_initcall(pci_mmcfg_late_insert_resources);
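The practical consequence of inserting the MMCONFIG ranges late and without IORESOURCE_BUSY: a driver whose BAR happens to fall inside an over-sized, misprogrammed MCFG window can still claim its region. A hedged sketch of that situation, with an illustrative driver that is not part of the patch:

static int example_probe(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);

	/*
	 * With the MMCONFIG resource inserted without IORESOURCE_BUSY,
	 * this succeeds even if the MCFG window overlaps BAR 0; with a
	 * BUSY parent it would fail and the driver could not bind.
	 */
	if (!request_mem_region(start, len, "example"))
		return -EBUSY;

	return 0;
}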
diff --git a/arch/i386/xen/events.c b/arch/i386/xen/events.c
index 8904acc20f8c..da1b173547a1 100644
--- a/arch/i386/xen/events.c
+++ b/arch/i386/xen/events.c
@@ -31,6 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/sync_bitops.h> 32#include <asm/sync_bitops.h>
33#include <asm/xen/hypercall.h> 33#include <asm/xen/hypercall.h>
34#include <asm/xen/hypervisor.h>
34 35
35#include <xen/events.h> 36#include <xen/events.h>
36#include <xen/interface/xen.h> 37#include <xen/interface/xen.h>
diff --git a/arch/i386/xen/setup.c b/arch/i386/xen/setup.c
index 2fe6eac510f0..f84e77226646 100644
--- a/arch/i386/xen/setup.c
+++ b/arch/i386/xen/setup.c
@@ -19,6 +19,7 @@
19#include <xen/features.h> 19#include <xen/features.h>
20 20
21#include "xen-ops.h" 21#include "xen-ops.h"
22#include "vdso.h"
22 23
23/* These are code, but not functions. Defined in entry.S */ 24/* These are code, but not functions. Defined in entry.S */
24extern const char xen_hypervisor_callback[]; 25extern const char xen_hypervisor_callback[];
@@ -55,6 +56,18 @@ static void xen_idle(void)
55 } 56 }
56} 57}
57 58
59/*
60 * Set the bit indicating "nosegneg" library variants should be used.
61 */
62static void fiddle_vdso(void)
63{
64 extern u32 VDSO_NOTE_MASK; /* See ../kernel/vsyscall-note.S. */
65 extern char vsyscall_int80_start;
66 u32 *mask = (u32 *) ((unsigned long) &VDSO_NOTE_MASK - VDSO_PRELINK +
67 &vsyscall_int80_start);
68 *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
69}
70
58void __init xen_arch_setup(void) 71void __init xen_arch_setup(void)
59{ 72{
60 struct physdev_set_iopl set_iopl; 73 struct physdev_set_iopl set_iopl;
@@ -93,4 +106,6 @@ void __init xen_arch_setup(void)
93#endif 106#endif
94 107
95 paravirt_disable_iospace(); 108 paravirt_disable_iospace();
109
110 fiddle_vdso();
96} 111}
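fiddle_vdso() ties together the vsyscall-note.S and xen/vdso.h changes earlier in this diff. A sketch of the runtime effect, assuming VDSO_NOTE_NONEGSEG_BIT == 1 as defined above:

u32 mask = 0;				/* value assembled into the note */
mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;	/* done by fiddle_vdso() under Xen */
/*
 * mask is now 0x2, so glibc's loader treats the fake hwcap "nosegneg"
 * as present and prefers library variants from the nosegneg search
 * path; on bare metal the mask stays 0 and the normal libraries win.
 */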
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
index 51fdabf1fd4d..dfd6db69ead5 100644
--- a/arch/i386/xen/time.c
+++ b/arch/i386/xen/time.c
@@ -412,6 +412,7 @@ static void xen_timerop_set_mode(enum clock_event_mode mode,
412 break; 412 break;
413 413
414 case CLOCK_EVT_MODE_ONESHOT: 414 case CLOCK_EVT_MODE_ONESHOT:
415 case CLOCK_EVT_MODE_RESUME:
415 break; 416 break;
416 417
417 case CLOCK_EVT_MODE_UNUSED: 418 case CLOCK_EVT_MODE_UNUSED:
@@ -474,6 +475,8 @@ static void xen_vcpuop_set_mode(enum clock_event_mode mode,
474 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) 475 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
475 BUG(); 476 BUG();
476 break; 477 break;
478 case CLOCK_EVT_MODE_RESUME:
479 break;
477 } 480 }
478} 481}
479 482
diff --git a/arch/i386/xen/vdso.h b/arch/i386/xen/vdso.h
new file mode 100644
index 000000000000..861fedfe5230
--- /dev/null
+++ b/arch/i386/xen/vdso.h
@@ -0,0 +1,4 @@
1/* Bit used for the pseudo-hwcap for non-negative segments. We use
2 bit 1 to avoid bugs in some versions of glibc when bit 0 is
3 used; the choice is otherwise arbitrary. */
4#define VDSO_NOTE_NONEGSEG_BIT 1
diff --git a/arch/i386/xen/xen-head.S b/arch/i386/xen/xen-head.S
index 2998d55a0017..bc71f3bc4014 100644
--- a/arch/i386/xen/xen-head.S
+++ b/arch/i386/xen/xen-head.S
@@ -7,6 +7,7 @@
7#include <asm/boot.h> 7#include <asm/boot.h>
8#include <xen/interface/elfnote.h> 8#include <xen/interface/elfnote.h>
9 9
10 .section .init.text
10ENTRY(startup_xen) 11ENTRY(startup_xen)
11 movl %esi,xen_start_info 12 movl %esi,xen_start_info
12 cld 13 cld
@@ -19,6 +20,7 @@ ENTRY(hypercall_page)
19 .skip 0x1000 20 .skip 0x1000
20.popsection 21.popsection
21 22
23 .section .text
22 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") 24 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
23 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") 25 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
24 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") 26 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 616c96e73483..36c7b9682aa6 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -62,7 +62,11 @@ config GENERIC_CALIBRATE_DELAY
62 bool 62 bool
63 default y 63 default y
64 64
65config TIME_INTERPOLATION 65config GENERIC_TIME
66 bool
67 default y
68
69config GENERIC_TIME_VSYSCALL
66 bool 70 bool
67 default y 71 default y
68 72
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index 90e9c2e61bf4..9eb48c0927b0 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -85,7 +85,7 @@ CONFIG_MMU=y
85CONFIG_SWIOTLB=y 85CONFIG_SWIOTLB=y
86CONFIG_RWSEM_XCHGADD_ALGORITHM=y 86CONFIG_RWSEM_XCHGADD_ALGORITHM=y
87CONFIG_GENERIC_CALIBRATE_DELAY=y 87CONFIG_GENERIC_CALIBRATE_DELAY=y
88CONFIG_TIME_INTERPOLATION=y 88CONFIG_GENERIC_TIME=y
89CONFIG_EFI=y 89CONFIG_EFI=y
90CONFIG_GENERIC_IOMAP=y 90CONFIG_GENERIC_IOMAP=y
91CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 91CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 0d29aa2066b3..3a9ed951db08 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -86,7 +86,7 @@ CONFIG_MMU=y
86CONFIG_SWIOTLB=y 86CONFIG_SWIOTLB=y
87CONFIG_RWSEM_XCHGADD_ALGORITHM=y 87CONFIG_RWSEM_XCHGADD_ALGORITHM=y
88CONFIG_GENERIC_CALIBRATE_DELAY=y 88CONFIG_GENERIC_CALIBRATE_DELAY=y
89CONFIG_TIME_INTERPOLATION=y 89CONFIG_GENERIC_TIME=y
90CONFIG_EFI=y 90CONFIG_EFI=y
91CONFIG_GENERIC_IOMAP=y 91CONFIG_GENERIC_IOMAP=y
92CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 92CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
index d9146c31ea13..c420d9f3df98 100644
--- a/arch/ia64/configs/sim_defconfig
+++ b/arch/ia64/configs/sim_defconfig
@@ -86,7 +86,7 @@ CONFIG_MMU=y
86CONFIG_SWIOTLB=y 86CONFIG_SWIOTLB=y
87CONFIG_RWSEM_XCHGADD_ALGORITHM=y 87CONFIG_RWSEM_XCHGADD_ALGORITHM=y
88CONFIG_GENERIC_CALIBRATE_DELAY=y 88CONFIG_GENERIC_CALIBRATE_DELAY=y
89CONFIG_TIME_INTERPOLATION=y 89CONFIG_GENERIC_TIME=y
90CONFIG_EFI=y 90CONFIG_EFI=y
91CONFIG_GENERIC_IOMAP=y 91CONFIG_GENERIC_IOMAP=y
92CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 92CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 64e951de4e57..4c9ffc47bc7a 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -93,7 +93,7 @@ CONFIG_SWIOTLB=y
93CONFIG_RWSEM_XCHGADD_ALGORITHM=y 93CONFIG_RWSEM_XCHGADD_ALGORITHM=y
94CONFIG_GENERIC_FIND_NEXT_BIT=y 94CONFIG_GENERIC_FIND_NEXT_BIT=y
95CONFIG_GENERIC_CALIBRATE_DELAY=y 95CONFIG_GENERIC_CALIBRATE_DELAY=y
96CONFIG_TIME_INTERPOLATION=y 96CONFIG_GENERIC_TIME=y
97CONFIG_DMI=y 97CONFIG_DMI=y
98CONFIG_EFI=y 98CONFIG_EFI=y
99CONFIG_GENERIC_IOMAP=y 99CONFIG_GENERIC_IOMAP=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a1446931b401..3dbb3987df27 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.21-rc3 3# Linux kernel version: 2.6.22
4# Thu Mar 8 11:07:09 2007 4# Thu Jul 19 13:54:47 2007
5# 5#
6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
7 7
@@ -19,15 +19,15 @@ CONFIG_LOCALVERSION=""
19CONFIG_LOCALVERSION_AUTO=y 19CONFIG_LOCALVERSION_AUTO=y
20CONFIG_SWAP=y 20CONFIG_SWAP=y
21CONFIG_SYSVIPC=y 21CONFIG_SYSVIPC=y
22# CONFIG_IPC_NS is not set
23CONFIG_SYSVIPC_SYSCTL=y 22CONFIG_SYSVIPC_SYSCTL=y
24CONFIG_POSIX_MQUEUE=y 23CONFIG_POSIX_MQUEUE=y
25# CONFIG_BSD_PROCESS_ACCT is not set 24# CONFIG_BSD_PROCESS_ACCT is not set
26# CONFIG_TASKSTATS is not set 25# CONFIG_TASKSTATS is not set
27# CONFIG_UTS_NS is not set 26# CONFIG_USER_NS is not set
28# CONFIG_AUDIT is not set 27# CONFIG_AUDIT is not set
29CONFIG_IKCONFIG=y 28CONFIG_IKCONFIG=y
30CONFIG_IKCONFIG_PROC=y 29CONFIG_IKCONFIG_PROC=y
30CONFIG_LOG_BUF_SHIFT=20
31# CONFIG_CPUSETS is not set 31# CONFIG_CPUSETS is not set
32CONFIG_SYSFS_DEPRECATED=y 32CONFIG_SYSFS_DEPRECATED=y
33# CONFIG_RELAY is not set 33# CONFIG_RELAY is not set
@@ -46,18 +46,19 @@ CONFIG_BUG=y
46CONFIG_ELF_CORE=y 46CONFIG_ELF_CORE=y
47CONFIG_BASE_FULL=y 47CONFIG_BASE_FULL=y
48CONFIG_FUTEX=y 48CONFIG_FUTEX=y
49CONFIG_ANON_INODES=y
49CONFIG_EPOLL=y 50CONFIG_EPOLL=y
51CONFIG_SIGNALFD=y
52CONFIG_TIMERFD=y
53CONFIG_EVENTFD=y
50CONFIG_SHMEM=y 54CONFIG_SHMEM=y
51CONFIG_SLAB=y
52CONFIG_VM_EVENT_COUNTERS=y 55CONFIG_VM_EVENT_COUNTERS=y
56CONFIG_SLAB=y
57# CONFIG_SLUB is not set
58# CONFIG_SLOB is not set
53CONFIG_RT_MUTEXES=y 59CONFIG_RT_MUTEXES=y
54# CONFIG_TINY_SHMEM is not set 60# CONFIG_TINY_SHMEM is not set
55CONFIG_BASE_SMALL=0 61CONFIG_BASE_SMALL=0
56# CONFIG_SLOB is not set
57
58#
59# Loadable module support
60#
61CONFIG_MODULES=y 62CONFIG_MODULES=y
62CONFIG_MODULE_UNLOAD=y 63CONFIG_MODULE_UNLOAD=y
63# CONFIG_MODULE_FORCE_UNLOAD is not set 64# CONFIG_MODULE_FORCE_UNLOAD is not set
@@ -65,12 +66,9 @@ CONFIG_MODVERSIONS=y
65CONFIG_MODULE_SRCVERSION_ALL=y 66CONFIG_MODULE_SRCVERSION_ALL=y
66CONFIG_KMOD=y 67CONFIG_KMOD=y
67CONFIG_STOP_MACHINE=y 68CONFIG_STOP_MACHINE=y
68
69#
70# Block layer
71#
72CONFIG_BLOCK=y 69CONFIG_BLOCK=y
73# CONFIG_BLK_DEV_IO_TRACE is not set 70# CONFIG_BLK_DEV_IO_TRACE is not set
71# CONFIG_BLK_DEV_BSG is not set
74 72
75# 73#
76# IO Schedulers 74# IO Schedulers
@@ -91,6 +89,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
91CONFIG_IA64=y 89CONFIG_IA64=y
92CONFIG_64BIT=y 90CONFIG_64BIT=y
93CONFIG_ZONE_DMA=y 91CONFIG_ZONE_DMA=y
92CONFIG_QUICKLIST=y
94CONFIG_MMU=y 93CONFIG_MMU=y
95CONFIG_SWIOTLB=y 94CONFIG_SWIOTLB=y
96CONFIG_RWSEM_XCHGADD_ALGORITHM=y 95CONFIG_RWSEM_XCHGADD_ALGORITHM=y
@@ -98,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
98# CONFIG_ARCH_HAS_ILOG2_U64 is not set 97# CONFIG_ARCH_HAS_ILOG2_U64 is not set
99CONFIG_GENERIC_FIND_NEXT_BIT=y 98CONFIG_GENERIC_FIND_NEXT_BIT=y
100CONFIG_GENERIC_CALIBRATE_DELAY=y 99CONFIG_GENERIC_CALIBRATE_DELAY=y
101CONFIG_TIME_INTERPOLATION=y 100CONFIG_GENERIC_TIME=y
102CONFIG_DMI=y 101CONFIG_DMI=y
103CONFIG_EFI=y 102CONFIG_EFI=y
104CONFIG_GENERIC_IOMAP=y 103CONFIG_GENERIC_IOMAP=y
@@ -114,8 +113,8 @@ CONFIG_IA64_DIG=y
114CONFIG_MCKINLEY=y 113CONFIG_MCKINLEY=y
115# CONFIG_IA64_PAGE_SIZE_4KB is not set 114# CONFIG_IA64_PAGE_SIZE_4KB is not set
116# CONFIG_IA64_PAGE_SIZE_8KB is not set 115# CONFIG_IA64_PAGE_SIZE_8KB is not set
117CONFIG_IA64_PAGE_SIZE_16KB=y 116# CONFIG_IA64_PAGE_SIZE_16KB is not set
118# CONFIG_IA64_PAGE_SIZE_64KB is not set 117CONFIG_IA64_PAGE_SIZE_64KB=y
119CONFIG_PGTABLE_3=y 118CONFIG_PGTABLE_3=y
120# CONFIG_PGTABLE_4 is not set 119# CONFIG_PGTABLE_4 is not set
121# CONFIG_HZ_100 is not set 120# CONFIG_HZ_100 is not set
@@ -145,6 +144,9 @@ CONFIG_FLAT_NODE_MEM_MAP=y
145CONFIG_SPLIT_PTLOCK_CPUS=4 144CONFIG_SPLIT_PTLOCK_CPUS=4
146CONFIG_RESOURCES_64BIT=y 145CONFIG_RESOURCES_64BIT=y
147CONFIG_ZONE_DMA_FLAG=1 146CONFIG_ZONE_DMA_FLAG=1
147CONFIG_BOUNCE=y
148CONFIG_NR_QUICK=1
149CONFIG_VIRT_TO_BUS=y
148CONFIG_ARCH_SELECT_MEMORY_MODEL=y 150CONFIG_ARCH_SELECT_MEMORY_MODEL=y
149CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 151CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
150CONFIG_ARCH_FLATMEM_ENABLE=y 152CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -152,11 +154,11 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
152CONFIG_ARCH_POPULATES_NODE_MAP=y 154CONFIG_ARCH_POPULATES_NODE_MAP=y
153CONFIG_VIRTUAL_MEM_MAP=y 155CONFIG_VIRTUAL_MEM_MAP=y
154CONFIG_HOLES_IN_ZONE=y 156CONFIG_HOLES_IN_ZONE=y
155CONFIG_IA32_SUPPORT=y 157# CONFIG_IA32_SUPPORT is not set
156CONFIG_COMPAT=y
157CONFIG_IA64_MCA_RECOVERY=y 158CONFIG_IA64_MCA_RECOVERY=y
158CONFIG_PERFMON=y 159CONFIG_PERFMON=y
159CONFIG_IA64_PALINFO=y 160CONFIG_IA64_PALINFO=y
161# CONFIG_IA64_MC_ERR_INJECT is not set
160# CONFIG_IA64_ESI is not set 162# CONFIG_IA64_ESI is not set
161CONFIG_KEXEC=y 163CONFIG_KEXEC=y
162# CONFIG_CRASH_DUMP is not set 164# CONFIG_CRASH_DUMP is not set
@@ -166,6 +168,7 @@ CONFIG_KEXEC=y
166# 168#
167CONFIG_EFI_VARS=y 169CONFIG_EFI_VARS=y
168CONFIG_EFI_PCDP=y 170CONFIG_EFI_PCDP=y
171CONFIG_DMIID=y
169CONFIG_BINFMT_ELF=y 172CONFIG_BINFMT_ELF=y
170CONFIG_BINFMT_MISC=m 173CONFIG_BINFMT_MISC=m
171 174
@@ -175,7 +178,6 @@ CONFIG_BINFMT_MISC=m
175CONFIG_PM=y 178CONFIG_PM=y
176CONFIG_PM_LEGACY=y 179CONFIG_PM_LEGACY=y
177# CONFIG_PM_DEBUG is not set 180# CONFIG_PM_DEBUG is not set
178# CONFIG_PM_SYSFS_DEPRECATED is not set
179 181
180# 182#
181# ACPI (Advanced Configuration and Power Interface) Support 183# ACPI (Advanced Configuration and Power Interface) Support
@@ -205,13 +207,11 @@ CONFIG_ACPI_CONTAINER=m
205# 207#
206CONFIG_PCI=y 208CONFIG_PCI=y
207CONFIG_PCI_DOMAINS=y 209CONFIG_PCI_DOMAINS=y
210CONFIG_PCI_SYSCALL=y
208# CONFIG_PCIEPORTBUS is not set 211# CONFIG_PCIEPORTBUS is not set
212CONFIG_ARCH_SUPPORTS_MSI=y
209# CONFIG_PCI_MSI is not set 213# CONFIG_PCI_MSI is not set
210# CONFIG_PCI_DEBUG is not set 214# CONFIG_PCI_DEBUG is not set
211
212#
213# PCI Hotplug Support
214#
215CONFIG_HOTPLUG_PCI=m 215CONFIG_HOTPLUG_PCI=m
216# CONFIG_HOTPLUG_PCI_FAKE is not set 216# CONFIG_HOTPLUG_PCI_FAKE is not set
217CONFIG_HOTPLUG_PCI_ACPI=m 217CONFIG_HOTPLUG_PCI_ACPI=m
@@ -232,7 +232,6 @@ CONFIG_NET=y
232# 232#
233# Networking options 233# Networking options
234# 234#
235# CONFIG_NETDEBUG is not set
236CONFIG_PACKET=y 235CONFIG_PACKET=y
237# CONFIG_PACKET_MMAP is not set 236# CONFIG_PACKET_MMAP is not set
238CONFIG_UNIX=y 237CONFIG_UNIX=y
@@ -270,20 +269,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
270# CONFIG_INET6_TUNNEL is not set 269# CONFIG_INET6_TUNNEL is not set
271# CONFIG_NETWORK_SECMARK is not set 270# CONFIG_NETWORK_SECMARK is not set
272# CONFIG_NETFILTER is not set 271# CONFIG_NETFILTER is not set
273
274#
275# DCCP Configuration (EXPERIMENTAL)
276#
277# CONFIG_IP_DCCP is not set 272# CONFIG_IP_DCCP is not set
278
279#
280# SCTP Configuration (EXPERIMENTAL)
281#
282# CONFIG_IP_SCTP is not set 273# CONFIG_IP_SCTP is not set
283
284#
285# TIPC Configuration (EXPERIMENTAL)
286#
287# CONFIG_TIPC is not set 274# CONFIG_TIPC is not set
288# CONFIG_ATM is not set 275# CONFIG_ATM is not set
289# CONFIG_BRIDGE is not set 276# CONFIG_BRIDGE is not set
@@ -309,7 +296,17 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
309# CONFIG_HAMRADIO is not set 296# CONFIG_HAMRADIO is not set
310# CONFIG_IRDA is not set 297# CONFIG_IRDA is not set
311# CONFIG_BT is not set 298# CONFIG_BT is not set
299# CONFIG_AF_RXRPC is not set
300
301#
302# Wireless
303#
304# CONFIG_CFG80211 is not set
305# CONFIG_WIRELESS_EXT is not set
306# CONFIG_MAC80211 is not set
312# CONFIG_IEEE80211 is not set 307# CONFIG_IEEE80211 is not set
308# CONFIG_RFKILL is not set
309# CONFIG_NET_9P is not set
313 310
314# 311#
315# Device Drivers 312# Device Drivers
@@ -324,25 +321,9 @@ CONFIG_FW_LOADER=m
324# CONFIG_DEBUG_DRIVER is not set 321# CONFIG_DEBUG_DRIVER is not set
325# CONFIG_DEBUG_DEVRES is not set 322# CONFIG_DEBUG_DEVRES is not set
326# CONFIG_SYS_HYPERVISOR is not set 323# CONFIG_SYS_HYPERVISOR is not set
327
328#
329# Connector - unified userspace <-> kernelspace linker
330#
331# CONFIG_CONNECTOR is not set 324# CONFIG_CONNECTOR is not set
332
333#
334# Memory Technology Devices (MTD)
335#
336# CONFIG_MTD is not set 325# CONFIG_MTD is not set
337
338#
339# Parallel port support
340#
341# CONFIG_PARPORT is not set 326# CONFIG_PARPORT is not set
342
343#
344# Plug and Play support
345#
346CONFIG_PNP=y 327CONFIG_PNP=y
347# CONFIG_PNP_DEBUG is not set 328# CONFIG_PNP_DEBUG is not set
348 329
@@ -350,10 +331,7 @@ CONFIG_PNP=y
350# Protocols 331# Protocols
351# 332#
352CONFIG_PNPACPI=y 333CONFIG_PNPACPI=y
353 334CONFIG_BLK_DEV=y
354#
355# Block devices
356#
357# CONFIG_BLK_CPQ_DA is not set 335# CONFIG_BLK_CPQ_DA is not set
358# CONFIG_BLK_CPQ_CISS_DA is not set 336# CONFIG_BLK_CPQ_CISS_DA is not set
359# CONFIG_BLK_DEV_DAC960 is not set 337# CONFIG_BLK_DEV_DAC960 is not set
@@ -370,16 +348,11 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
370CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 348CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
371# CONFIG_CDROM_PKTCDVD is not set 349# CONFIG_CDROM_PKTCDVD is not set
372# CONFIG_ATA_OVER_ETH is not set 350# CONFIG_ATA_OVER_ETH is not set
373 351CONFIG_MISC_DEVICES=y
374# 352# CONFIG_PHANTOM is not set
375# Misc devices 353# CONFIG_EEPROM_93CX6 is not set
376#
377# CONFIG_SGI_IOC4 is not set 354# CONFIG_SGI_IOC4 is not set
378# CONFIG_TIFM_CORE is not set 355# CONFIG_TIFM_CORE is not set
379
380#
381# ATA/ATAPI/MFM/RLL support
382#
383CONFIG_IDE=y 356CONFIG_IDE=y
384CONFIG_IDE_MAX_HWIFS=4 357CONFIG_IDE_MAX_HWIFS=4
385CONFIG_BLK_DEV_IDE=y 358CONFIG_BLK_DEV_IDE=y
@@ -396,6 +369,7 @@ CONFIG_BLK_DEV_IDEFLOPPY=y
396CONFIG_BLK_DEV_IDESCSI=m 369CONFIG_BLK_DEV_IDESCSI=m
397# CONFIG_BLK_DEV_IDEACPI is not set 370# CONFIG_BLK_DEV_IDEACPI is not set
398# CONFIG_IDE_TASK_IOCTL is not set 371# CONFIG_IDE_TASK_IOCTL is not set
372CONFIG_IDE_PROC_FS=y
399 373
400# 374#
401# IDE chipset support/bugfixes 375# IDE chipset support/bugfixes
@@ -404,12 +378,12 @@ CONFIG_BLK_DEV_IDESCSI=m
404# CONFIG_BLK_DEV_IDEPNP is not set 378# CONFIG_BLK_DEV_IDEPNP is not set
405CONFIG_BLK_DEV_IDEPCI=y 379CONFIG_BLK_DEV_IDEPCI=y
406# CONFIG_IDEPCI_SHARE_IRQ is not set 380# CONFIG_IDEPCI_SHARE_IRQ is not set
381CONFIG_IDEPCI_PCIBUS_ORDER=y
407# CONFIG_BLK_DEV_OFFBOARD is not set 382# CONFIG_BLK_DEV_OFFBOARD is not set
408CONFIG_BLK_DEV_GENERIC=y 383CONFIG_BLK_DEV_GENERIC=y
409# CONFIG_BLK_DEV_OPTI621 is not set 384# CONFIG_BLK_DEV_OPTI621 is not set
410CONFIG_BLK_DEV_IDEDMA_PCI=y 385CONFIG_BLK_DEV_IDEDMA_PCI=y
411# CONFIG_BLK_DEV_IDEDMA_FORCED is not set 386# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
412CONFIG_IDEDMA_PCI_AUTO=y
413# CONFIG_IDEDMA_ONLYDISK is not set 387# CONFIG_IDEDMA_ONLYDISK is not set
414# CONFIG_BLK_DEV_AEC62XX is not set 388# CONFIG_BLK_DEV_AEC62XX is not set
415# CONFIG_BLK_DEV_ALI15X3 is not set 389# CONFIG_BLK_DEV_ALI15X3 is not set
@@ -438,7 +412,6 @@ CONFIG_BLK_DEV_PIIX=y
438# CONFIG_IDE_ARM is not set 412# CONFIG_IDE_ARM is not set
439CONFIG_BLK_DEV_IDEDMA=y 413CONFIG_BLK_DEV_IDEDMA=y
440# CONFIG_IDEDMA_IVB is not set 414# CONFIG_IDEDMA_IVB is not set
441CONFIG_IDEDMA_AUTO=y
442# CONFIG_BLK_DEV_HD is not set 415# CONFIG_BLK_DEV_HD is not set
443 416
444# 417#
@@ -446,6 +419,7 @@ CONFIG_IDEDMA_AUTO=y
446# 419#
447# CONFIG_RAID_ATTRS is not set 420# CONFIG_RAID_ATTRS is not set
448CONFIG_SCSI=y 421CONFIG_SCSI=y
422CONFIG_SCSI_DMA=y
449# CONFIG_SCSI_TGT is not set 423# CONFIG_SCSI_TGT is not set
450CONFIG_SCSI_NETLINK=y 424CONFIG_SCSI_NETLINK=y
451CONFIG_SCSI_PROC_FS=y 425CONFIG_SCSI_PROC_FS=y
@@ -468,6 +442,7 @@ CONFIG_CHR_DEV_SG=m
468# CONFIG_SCSI_CONSTANTS is not set 442# CONFIG_SCSI_CONSTANTS is not set
469# CONFIG_SCSI_LOGGING is not set 443# CONFIG_SCSI_LOGGING is not set
470# CONFIG_SCSI_SCAN_ASYNC is not set 444# CONFIG_SCSI_SCAN_ASYNC is not set
445CONFIG_SCSI_WAIT_SCAN=m
471 446
472# 447#
473# SCSI Transports 448# SCSI Transports
@@ -514,15 +489,7 @@ CONFIG_SCSI_QLOGIC_1280=y
514# CONFIG_SCSI_DC390T is not set 489# CONFIG_SCSI_DC390T is not set
515# CONFIG_SCSI_DEBUG is not set 490# CONFIG_SCSI_DEBUG is not set
516# CONFIG_SCSI_SRP is not set 491# CONFIG_SCSI_SRP is not set
517
518#
519# Serial ATA (prod) and Parallel ATA (experimental) drivers
520#
521# CONFIG_ATA is not set 492# CONFIG_ATA is not set
522
523#
524# Multi-device support (RAID and LVM)
525#
526CONFIG_MD=y 493CONFIG_MD=y
527CONFIG_BLK_DEV_MD=m 494CONFIG_BLK_DEV_MD=m
528CONFIG_MD_LINEAR=m 495CONFIG_MD_LINEAR=m
@@ -539,6 +506,7 @@ CONFIG_DM_SNAPSHOT=m
539CONFIG_DM_MIRROR=m 506CONFIG_DM_MIRROR=m
540CONFIG_DM_ZERO=m 507CONFIG_DM_ZERO=m
541# CONFIG_DM_MULTIPATH is not set 508# CONFIG_DM_MULTIPATH is not set
509# CONFIG_DM_DELAY is not set
542 510
543# 511#
544# Fusion MPT device support 512# Fusion MPT device support
@@ -553,46 +521,25 @@ CONFIG_FUSION_CTL=y
553# 521#
554# IEEE 1394 (FireWire) support 522# IEEE 1394 (FireWire) support
555# 523#
524# CONFIG_FIREWIRE is not set
556# CONFIG_IEEE1394 is not set 525# CONFIG_IEEE1394 is not set
557
558#
559# I2O device support
560#
561# CONFIG_I2O is not set 526# CONFIG_I2O is not set
562
563#
564# Network device support
565#
566CONFIG_NETDEVICES=y 527CONFIG_NETDEVICES=y
528# CONFIG_NETDEVICES_MULTIQUEUE is not set
567CONFIG_DUMMY=m 529CONFIG_DUMMY=m
568# CONFIG_BONDING is not set 530# CONFIG_BONDING is not set
531# CONFIG_MACVLAN is not set
569# CONFIG_EQUALIZER is not set 532# CONFIG_EQUALIZER is not set
570# CONFIG_TUN is not set 533# CONFIG_TUN is not set
571# CONFIG_NET_SB1000 is not set 534# CONFIG_NET_SB1000 is not set
572
573#
574# ARCnet devices
575#
576# CONFIG_ARCNET is not set 535# CONFIG_ARCNET is not set
577
578#
579# PHY device support
580#
581# CONFIG_PHYLIB is not set 536# CONFIG_PHYLIB is not set
582
583#
584# Ethernet (10 or 100Mbit)
585#
586CONFIG_NET_ETHERNET=y 537CONFIG_NET_ETHERNET=y
587CONFIG_MII=m 538CONFIG_MII=m
588# CONFIG_HAPPYMEAL is not set 539# CONFIG_HAPPYMEAL is not set
589# CONFIG_SUNGEM is not set 540# CONFIG_SUNGEM is not set
590# CONFIG_CASSINI is not set 541# CONFIG_CASSINI is not set
591# CONFIG_NET_VENDOR_3COM is not set 542# CONFIG_NET_VENDOR_3COM is not set
592
593#
594# Tulip family network device support
595#
596CONFIG_NET_TULIP=y 543CONFIG_NET_TULIP=y
597# CONFIG_DE2104X is not set 544# CONFIG_DE2104X is not set
598CONFIG_TULIP=m 545CONFIG_TULIP=m
@@ -623,10 +570,7 @@ CONFIG_E100=m
623# CONFIG_SUNDANCE is not set 570# CONFIG_SUNDANCE is not set
624# CONFIG_VIA_RHINE is not set 571# CONFIG_VIA_RHINE is not set
625# CONFIG_SC92031 is not set 572# CONFIG_SC92031 is not set
626 573CONFIG_NETDEV_1000=y
627#
628# Ethernet (1000 Mbit)
629#
630# CONFIG_ACENIC is not set 574# CONFIG_ACENIC is not set
631# CONFIG_DL2K is not set 575# CONFIG_DL2K is not set
632CONFIG_E1000=y 576CONFIG_E1000=y
@@ -639,36 +583,36 @@ CONFIG_E1000=y
639# CONFIG_SIS190 is not set 583# CONFIG_SIS190 is not set
640# CONFIG_SKGE is not set 584# CONFIG_SKGE is not set
641# CONFIG_SKY2 is not set 585# CONFIG_SKY2 is not set
642# CONFIG_SK98LIN is not set
643# CONFIG_VIA_VELOCITY is not set 586# CONFIG_VIA_VELOCITY is not set
644CONFIG_TIGON3=y 587CONFIG_TIGON3=y
645# CONFIG_BNX2 is not set 588# CONFIG_BNX2 is not set
646# CONFIG_QLA3XXX is not set 589# CONFIG_QLA3XXX is not set
647# CONFIG_ATL1 is not set 590# CONFIG_ATL1 is not set
648 591CONFIG_NETDEV_10000=y
649#
650# Ethernet (10000 Mbit)
651#
652# CONFIG_CHELSIO_T1 is not set 592# CONFIG_CHELSIO_T1 is not set
653# CONFIG_CHELSIO_T3 is not set 593# CONFIG_CHELSIO_T3 is not set
654# CONFIG_IXGB is not set 594# CONFIG_IXGB is not set
655# CONFIG_S2IO is not set 595# CONFIG_S2IO is not set
656# CONFIG_MYRI10GE is not set 596# CONFIG_MYRI10GE is not set
657# CONFIG_NETXEN_NIC is not set 597# CONFIG_NETXEN_NIC is not set
658 598# CONFIG_MLX4_CORE is not set
659#
660# Token Ring devices
661#
662# CONFIG_TR is not set 599# CONFIG_TR is not set
663 600
664# 601#
665# Wireless LAN (non-hamradio) 602# Wireless LAN
666# 603#
667# CONFIG_NET_RADIO is not set 604# CONFIG_WLAN_PRE80211 is not set
605# CONFIG_WLAN_80211 is not set
668 606
669# 607#
670# Wan interfaces 608# USB Network Adapters
671# 609#
610# CONFIG_USB_CATC is not set
611# CONFIG_USB_KAWETH is not set
612# CONFIG_USB_PEGASUS is not set
613# CONFIG_USB_RTL8150 is not set
614# CONFIG_USB_USBNET_MII is not set
615# CONFIG_USB_USBNET is not set
672# CONFIG_WAN is not set 616# CONFIG_WAN is not set
673# CONFIG_FDDI is not set 617# CONFIG_FDDI is not set
674# CONFIG_HIPPI is not set 618# CONFIG_HIPPI is not set
@@ -678,18 +622,9 @@ CONFIG_TIGON3=y
678# CONFIG_SHAPER is not set 622# CONFIG_SHAPER is not set
679CONFIG_NETCONSOLE=y 623CONFIG_NETCONSOLE=y
680CONFIG_NETPOLL=y 624CONFIG_NETPOLL=y
681# CONFIG_NETPOLL_RX is not set
682# CONFIG_NETPOLL_TRAP is not set 625# CONFIG_NETPOLL_TRAP is not set
683CONFIG_NET_POLL_CONTROLLER=y 626CONFIG_NET_POLL_CONTROLLER=y
684
685#
686# ISDN subsystem
687#
688# CONFIG_ISDN is not set 627# CONFIG_ISDN is not set
689
690#
691# Telephony Support
692#
693# CONFIG_PHONE is not set 628# CONFIG_PHONE is not set
694 629
695# 630#
@@ -697,6 +632,7 @@ CONFIG_NET_POLL_CONTROLLER=y
697# 632#
698CONFIG_INPUT=y 633CONFIG_INPUT=y
699# CONFIG_INPUT_FF_MEMLESS is not set 634# CONFIG_INPUT_FF_MEMLESS is not set
635# CONFIG_INPUT_POLLDEV is not set
700 636
701# 637#
702# Userland interfaces 638# Userland interfaces
@@ -722,9 +658,17 @@ CONFIG_KEYBOARD_ATKBD=y
722# CONFIG_KEYBOARD_STOWAWAY is not set 658# CONFIG_KEYBOARD_STOWAWAY is not set
723CONFIG_INPUT_MOUSE=y 659CONFIG_INPUT_MOUSE=y
724CONFIG_MOUSE_PS2=y 660CONFIG_MOUSE_PS2=y
661CONFIG_MOUSE_PS2_ALPS=y
662CONFIG_MOUSE_PS2_LOGIPS2PP=y
663CONFIG_MOUSE_PS2_SYNAPTICS=y
664CONFIG_MOUSE_PS2_LIFEBOOK=y
665CONFIG_MOUSE_PS2_TRACKPOINT=y
666# CONFIG_MOUSE_PS2_TOUCHKIT is not set
725# CONFIG_MOUSE_SERIAL is not set 667# CONFIG_MOUSE_SERIAL is not set
668# CONFIG_MOUSE_APPLETOUCH is not set
726# CONFIG_MOUSE_VSXXXAA is not set 669# CONFIG_MOUSE_VSXXXAA is not set
727# CONFIG_INPUT_JOYSTICK is not set 670# CONFIG_INPUT_JOYSTICK is not set
671# CONFIG_INPUT_TABLET is not set
728# CONFIG_INPUT_TOUCHSCREEN is not set 672# CONFIG_INPUT_TOUCHSCREEN is not set
729# CONFIG_INPUT_MISC is not set 673# CONFIG_INPUT_MISC is not set
730 674
@@ -790,19 +734,10 @@ CONFIG_SERIAL_CORE_CONSOLE=y
790CONFIG_UNIX98_PTYS=y 734CONFIG_UNIX98_PTYS=y
791CONFIG_LEGACY_PTYS=y 735CONFIG_LEGACY_PTYS=y
792CONFIG_LEGACY_PTY_COUNT=256 736CONFIG_LEGACY_PTY_COUNT=256
793
794#
795# IPMI
796#
797# CONFIG_IPMI_HANDLER is not set 737# CONFIG_IPMI_HANDLER is not set
798
799#
800# Watchdog Cards
801#
802# CONFIG_WATCHDOG is not set 738# CONFIG_WATCHDOG is not set
803# CONFIG_HW_RANDOM is not set 739# CONFIG_HW_RANDOM is not set
804CONFIG_EFI_RTC=y 740CONFIG_EFI_RTC=y
805# CONFIG_DTLK is not set
806# CONFIG_R3964 is not set 741# CONFIG_R3964 is not set
807# CONFIG_APPLICOM is not set 742# CONFIG_APPLICOM is not set
808CONFIG_AGP=m 743CONFIG_AGP=m
@@ -821,15 +756,8 @@ CONFIG_HPET=y
821# CONFIG_HPET_RTC_IRQ is not set 756# CONFIG_HPET_RTC_IRQ is not set
822CONFIG_HPET_MMAP=y 757CONFIG_HPET_MMAP=y
823# CONFIG_HANGCHECK_TIMER is not set 758# CONFIG_HANGCHECK_TIMER is not set
824
825#
826# TPM devices
827#
828# CONFIG_TCG_TPM is not set 759# CONFIG_TCG_TPM is not set
829 760CONFIG_DEVPORT=y
830#
831# I2C support
832#
833# CONFIG_I2C is not set 761# CONFIG_I2C is not set
834 762
835# 763#
@@ -837,21 +765,17 @@ CONFIG_HPET_MMAP=y
837# 765#
838# CONFIG_SPI is not set 766# CONFIG_SPI is not set
839# CONFIG_SPI_MASTER is not set 767# CONFIG_SPI_MASTER is not set
840
841#
842# Dallas's 1-wire bus
843#
844# CONFIG_W1 is not set 768# CONFIG_W1 is not set
845 769# CONFIG_POWER_SUPPLY is not set
846#
847# Hardware Monitoring support
848#
849CONFIG_HWMON=y 770CONFIG_HWMON=y
850# CONFIG_HWMON_VID is not set 771# CONFIG_HWMON_VID is not set
851# CONFIG_SENSORS_ABITUGURU is not set 772# CONFIG_SENSORS_ABITUGURU is not set
852# CONFIG_SENSORS_F71805F is not set 773# CONFIG_SENSORS_F71805F is not set
853# CONFIG_SENSORS_PC87427 is not set 774# CONFIG_SENSORS_PC87427 is not set
775# CONFIG_SENSORS_SMSC47M1 is not set
776# CONFIG_SENSORS_SMSC47B397 is not set
854# CONFIG_SENSORS_VT1211 is not set 777# CONFIG_SENSORS_VT1211 is not set
778# CONFIG_SENSORS_W83627HF is not set
855# CONFIG_HWMON_DEBUG_CHIP is not set 779# CONFIG_HWMON_DEBUG_CHIP is not set
856 780
857# 781#
@@ -863,17 +787,20 @@ CONFIG_HWMON=y
863# Multimedia devices 787# Multimedia devices
864# 788#
865# CONFIG_VIDEO_DEV is not set 789# CONFIG_VIDEO_DEV is not set
866 790# CONFIG_DVB_CORE is not set
867# 791CONFIG_DAB=y
868# Digital Video Broadcasting Devices
869#
870# CONFIG_DVB is not set
871# CONFIG_USB_DABUSB is not set 792# CONFIG_USB_DABUSB is not set
872 793
873# 794#
874# Graphics support 795# Graphics support
875# 796#
876# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 797# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
798
799#
800# Display device support
801#
802# CONFIG_DISPLAY_SUPPORT is not set
803# CONFIG_VGASTATE is not set
877# CONFIG_FB is not set 804# CONFIG_FB is not set
878 805
879# 806#
@@ -887,16 +814,18 @@ CONFIG_DUMMY_CONSOLE=y
887# Sound 814# Sound
888# 815#
889# CONFIG_SOUND is not set 816# CONFIG_SOUND is not set
890 817CONFIG_HID_SUPPORT=y
891#
892# HID Devices
893#
894CONFIG_HID=y 818CONFIG_HID=y
895# CONFIG_HID_DEBUG is not set 819# CONFIG_HID_DEBUG is not set
896 820
897# 821#
898# USB support 822# USB Input Devices
899# 823#
824CONFIG_USB_HID=y
825# CONFIG_USB_HIDINPUT_POWERBOOK is not set
826# CONFIG_HID_FF is not set
827# CONFIG_USB_HIDDEV is not set
828CONFIG_USB_SUPPORT=y
900CONFIG_USB_ARCH_HAS_HCD=y 829CONFIG_USB_ARCH_HAS_HCD=y
901CONFIG_USB_ARCH_HAS_OHCI=y 830CONFIG_USB_ARCH_HAS_OHCI=y
902CONFIG_USB_ARCH_HAS_EHCI=y 831CONFIG_USB_ARCH_HAS_EHCI=y
@@ -907,8 +836,10 @@ CONFIG_USB=y
907# Miscellaneous USB options 836# Miscellaneous USB options
908# 837#
909CONFIG_USB_DEVICEFS=y 838CONFIG_USB_DEVICEFS=y
839CONFIG_USB_DEVICE_CLASS=y
910# CONFIG_USB_DYNAMIC_MINORS is not set 840# CONFIG_USB_DYNAMIC_MINORS is not set
911# CONFIG_USB_SUSPEND is not set 841# CONFIG_USB_SUSPEND is not set
842# CONFIG_USB_PERSIST is not set
912# CONFIG_USB_OTG is not set 843# CONFIG_USB_OTG is not set
913 844
914# 845#
@@ -918,7 +849,6 @@ CONFIG_USB_EHCI_HCD=m
918# CONFIG_USB_EHCI_SPLIT_ISO is not set 849# CONFIG_USB_EHCI_SPLIT_ISO is not set
919# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 850# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
920# CONFIG_USB_EHCI_TT_NEWSCHED is not set 851# CONFIG_USB_EHCI_TT_NEWSCHED is not set
921# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
922# CONFIG_USB_ISP116X_HCD is not set 852# CONFIG_USB_ISP116X_HCD is not set
923CONFIG_USB_OHCI_HCD=m 853CONFIG_USB_OHCI_HCD=m
924# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 854# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -926,6 +856,7 @@ CONFIG_USB_OHCI_HCD=m
926CONFIG_USB_OHCI_LITTLE_ENDIAN=y 856CONFIG_USB_OHCI_LITTLE_ENDIAN=y
927CONFIG_USB_UHCI_HCD=y 857CONFIG_USB_UHCI_HCD=y
928# CONFIG_USB_SL811_HCD is not set 858# CONFIG_USB_SL811_HCD is not set
859# CONFIG_USB_R8A66597_HCD is not set
929 860
930# 861#
931# USB Device Class drivers 862# USB Device Class drivers
@@ -955,41 +886,10 @@ CONFIG_USB_STORAGE=m
955# CONFIG_USB_LIBUSUAL is not set 886# CONFIG_USB_LIBUSUAL is not set
956 887
957# 888#
958# USB Input Devices
959#
960CONFIG_USB_HID=y
961# CONFIG_USB_HIDINPUT_POWERBOOK is not set
962# CONFIG_HID_FF is not set
963# CONFIG_USB_HIDDEV is not set
964# CONFIG_USB_AIPTEK is not set
965# CONFIG_USB_WACOM is not set
966# CONFIG_USB_ACECAD is not set
967# CONFIG_USB_KBTAB is not set
968# CONFIG_USB_POWERMATE is not set
969# CONFIG_USB_TOUCHSCREEN is not set
970# CONFIG_USB_YEALINK is not set
971# CONFIG_USB_XPAD is not set
972# CONFIG_USB_ATI_REMOTE is not set
973# CONFIG_USB_ATI_REMOTE2 is not set
974# CONFIG_USB_KEYSPAN_REMOTE is not set
975# CONFIG_USB_APPLETOUCH is not set
976# CONFIG_USB_GTCO is not set
977
978#
979# USB Imaging devices 889# USB Imaging devices
980# 890#
981# CONFIG_USB_MDC800 is not set 891# CONFIG_USB_MDC800 is not set
982# CONFIG_USB_MICROTEK is not set 892# CONFIG_USB_MICROTEK is not set
983
984#
985# USB Network Adapters
986#
987# CONFIG_USB_CATC is not set
988# CONFIG_USB_KAWETH is not set
989# CONFIG_USB_PEGASUS is not set
990# CONFIG_USB_RTL8150 is not set
991# CONFIG_USB_USBNET_MII is not set
992# CONFIG_USB_USBNET is not set
993# CONFIG_USB_MON is not set 893# CONFIG_USB_MON is not set
994 894
995# 895#
@@ -1033,10 +933,6 @@ CONFIG_USB_HID=y
1033# USB Gadget Support 933# USB Gadget Support
1034# 934#
1035# CONFIG_USB_GADGET is not set 935# CONFIG_USB_GADGET is not set
1036
1037#
1038# MMC/SD Card support
1039#
1040# CONFIG_MMC is not set 936# CONFIG_MMC is not set
1041 937
1042# 938#
@@ -1051,17 +947,9 @@ CONFIG_USB_HID=y
1051# 947#
1052# LED Triggers 948# LED Triggers
1053# 949#
1054
1055#
1056# InfiniBand support
1057#
1058# CONFIG_INFINIBAND is not set 950# CONFIG_INFINIBAND is not set
1059 951
1060# 952#
1061# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1062#
1063
1064#
1065# Real Time Clock 953# Real Time Clock
1066# 954#
1067# CONFIG_RTC_CLASS is not set 955# CONFIG_RTC_CLASS is not set
@@ -1080,12 +968,9 @@ CONFIG_USB_HID=y
1080# 968#
1081 969
1082# 970#
1083# Auxiliary Display support 971# Userspace I/O
1084#
1085
1086#
1087# Virtualization
1088# 972#
973# CONFIG_UIO is not set
1089# CONFIG_MSPEC is not set 974# CONFIG_MSPEC is not set
1090 975
1091# 976#
@@ -1200,7 +1085,8 @@ CONFIG_EXPORTFS=m
1200CONFIG_NFS_COMMON=y 1085CONFIG_NFS_COMMON=y
1201CONFIG_SUNRPC=m 1086CONFIG_SUNRPC=m
1202CONFIG_SUNRPC_GSS=m 1087CONFIG_SUNRPC_GSS=m
1203CONFIG_RPCSEC_GSS_KRB5=m 1088# CONFIG_SUNRPC_BIND34 is not set
1089CONFIG_RPCSEC_GSS_KRB5=y
1204# CONFIG_RPCSEC_GSS_SPKM3 is not set 1090# CONFIG_RPCSEC_GSS_SPKM3 is not set
1205CONFIG_SMB_FS=m 1091CONFIG_SMB_FS=m
1206CONFIG_SMB_NLS_DEFAULT=y 1092CONFIG_SMB_NLS_DEFAULT=y
@@ -1214,7 +1100,6 @@ CONFIG_CIFS=m
1214# CONFIG_NCP_FS is not set 1100# CONFIG_NCP_FS is not set
1215# CONFIG_CODA_FS is not set 1101# CONFIG_CODA_FS is not set
1216# CONFIG_AFS_FS is not set 1102# CONFIG_AFS_FS is not set
1217# CONFIG_9P_FS is not set
1218 1103
1219# 1104#
1220# Partition Types 1105# Partition Types
@@ -1236,6 +1121,7 @@ CONFIG_SGI_PARTITION=y
1236# CONFIG_SUN_PARTITION is not set 1121# CONFIG_SUN_PARTITION is not set
1237# CONFIG_KARMA_PARTITION is not set 1122# CONFIG_KARMA_PARTITION is not set
1238CONFIG_EFI_PARTITION=y 1123CONFIG_EFI_PARTITION=y
1124# CONFIG_SYSV68_PARTITION is not set
1239 1125
1240# 1126#
1241# Native Language Support 1127# Native Language Support
@@ -1292,11 +1178,14 @@ CONFIG_NLS_UTF8=m
1292CONFIG_BITREVERSE=y 1178CONFIG_BITREVERSE=y
1293# CONFIG_CRC_CCITT is not set 1179# CONFIG_CRC_CCITT is not set
1294# CONFIG_CRC16 is not set 1180# CONFIG_CRC16 is not set
1181# CONFIG_CRC_ITU_T is not set
1295CONFIG_CRC32=y 1182CONFIG_CRC32=y
1183# CONFIG_CRC7 is not set
1296# CONFIG_LIBCRC32C is not set 1184# CONFIG_LIBCRC32C is not set
1297CONFIG_PLIST=y 1185CONFIG_PLIST=y
1298CONFIG_HAS_IOMEM=y 1186CONFIG_HAS_IOMEM=y
1299CONFIG_HAS_IOPORT=y 1187CONFIG_HAS_IOPORT=y
1188CONFIG_HAS_DMA=y
1300CONFIG_GENERIC_HARDIRQS=y 1189CONFIG_GENERIC_HARDIRQS=y
1301CONFIG_GENERIC_IRQ_PROBE=y 1190CONFIG_GENERIC_IRQ_PROBE=y
1302CONFIG_GENERIC_PENDING_IRQ=y 1191CONFIG_GENERIC_PENDING_IRQ=y
@@ -1319,8 +1208,8 @@ CONFIG_MAGIC_SYSRQ=y
1319# CONFIG_HEADERS_CHECK is not set 1208# CONFIG_HEADERS_CHECK is not set
1320CONFIG_DEBUG_KERNEL=y 1209CONFIG_DEBUG_KERNEL=y
1321# CONFIG_DEBUG_SHIRQ is not set 1210# CONFIG_DEBUG_SHIRQ is not set
1322CONFIG_LOG_BUF_SHIFT=20
1323CONFIG_DETECT_SOFTLOCKUP=y 1211CONFIG_DETECT_SOFTLOCKUP=y
1212CONFIG_SCHED_DEBUG=y
1324# CONFIG_SCHEDSTATS is not set 1213# CONFIG_SCHEDSTATS is not set
1325# CONFIG_TIMER_STATS is not set 1214# CONFIG_TIMER_STATS is not set
1326# CONFIG_DEBUG_SLAB is not set 1215# CONFIG_DEBUG_SLAB is not set
@@ -1343,17 +1232,12 @@ CONFIG_IA64_GRANULE_16MB=y
1343# CONFIG_DISABLE_VHPT is not set 1232# CONFIG_DISABLE_VHPT is not set
1344# CONFIG_IA64_DEBUG_CMPXCHG is not set 1233# CONFIG_IA64_DEBUG_CMPXCHG is not set
1345# CONFIG_IA64_DEBUG_IRQ is not set 1234# CONFIG_IA64_DEBUG_IRQ is not set
1346CONFIG_SYSVIPC_COMPAT=y
1347 1235
1348# 1236#
1349# Security options 1237# Security options
1350# 1238#
1351# CONFIG_KEYS is not set 1239# CONFIG_KEYS is not set
1352# CONFIG_SECURITY is not set 1240# CONFIG_SECURITY is not set
1353
1354#
1355# Cryptographic options
1356#
1357CONFIG_CRYPTO=y 1241CONFIG_CRYPTO=y
1358CONFIG_CRYPTO_ALGAPI=y 1242CONFIG_CRYPTO_ALGAPI=y
1359CONFIG_CRYPTO_BLKCIPHER=m 1243CONFIG_CRYPTO_BLKCIPHER=m
@@ -1373,6 +1257,7 @@ CONFIG_CRYPTO_ECB=m
1373CONFIG_CRYPTO_CBC=m 1257CONFIG_CRYPTO_CBC=m
1374CONFIG_CRYPTO_PCBC=m 1258CONFIG_CRYPTO_PCBC=m
1375# CONFIG_CRYPTO_LRW is not set 1259# CONFIG_CRYPTO_LRW is not set
1260# CONFIG_CRYPTO_CRYPTD is not set
1376CONFIG_CRYPTO_DES=m 1261CONFIG_CRYPTO_DES=m
1377# CONFIG_CRYPTO_FCRYPT is not set 1262# CONFIG_CRYPTO_FCRYPT is not set
1378# CONFIG_CRYPTO_BLOWFISH is not set 1263# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1390,7 +1275,4 @@ CONFIG_CRYPTO_DES=m
1390# CONFIG_CRYPTO_CRC32C is not set 1275# CONFIG_CRYPTO_CRC32C is not set
1391# CONFIG_CRYPTO_CAMELLIA is not set 1276# CONFIG_CRYPTO_CAMELLIA is not set
1392# CONFIG_CRYPTO_TEST is not set 1277# CONFIG_CRYPTO_TEST is not set
1393 1278CONFIG_CRYPTO_HW=y
1394#
1395# Hardware crypto devices
1396#
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 1c7955c16358..4a060fc39934 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -96,7 +96,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
96# CONFIG_ARCH_HAS_ILOG2_U64 is not set 96# CONFIG_ARCH_HAS_ILOG2_U64 is not set
97CONFIG_GENERIC_FIND_NEXT_BIT=y 97CONFIG_GENERIC_FIND_NEXT_BIT=y
98CONFIG_GENERIC_CALIBRATE_DELAY=y 98CONFIG_GENERIC_CALIBRATE_DELAY=y
99CONFIG_TIME_INTERPOLATION=y 99CONFIG_GENERIC_TIME=y
100CONFIG_DMI=y 100CONFIG_DMI=y
101CONFIG_EFI=y 101CONFIG_EFI=y
102CONFIG_GENERIC_IOMAP=y 102CONFIG_GENERIC_IOMAP=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 90bd9601cdde..03172dc8c403 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.21-rc3 3# Linux kernel version: 2.6.22
4# Thu Mar 8 11:01:03 2007 4# Thu Jul 19 13:55:32 2007
5# 5#
6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
7 7
@@ -19,15 +19,15 @@ CONFIG_LOCALVERSION=""
19CONFIG_LOCALVERSION_AUTO=y 19CONFIG_LOCALVERSION_AUTO=y
20CONFIG_SWAP=y 20CONFIG_SWAP=y
21CONFIG_SYSVIPC=y 21CONFIG_SYSVIPC=y
22# CONFIG_IPC_NS is not set
23CONFIG_SYSVIPC_SYSCTL=y 22CONFIG_SYSVIPC_SYSCTL=y
24CONFIG_POSIX_MQUEUE=y 23CONFIG_POSIX_MQUEUE=y
25# CONFIG_BSD_PROCESS_ACCT is not set 24# CONFIG_BSD_PROCESS_ACCT is not set
26# CONFIG_TASKSTATS is not set 25# CONFIG_TASKSTATS is not set
27# CONFIG_UTS_NS is not set 26# CONFIG_USER_NS is not set
28# CONFIG_AUDIT is not set 27# CONFIG_AUDIT is not set
29CONFIG_IKCONFIG=y 28CONFIG_IKCONFIG=y
30CONFIG_IKCONFIG_PROC=y 29CONFIG_IKCONFIG_PROC=y
30CONFIG_LOG_BUF_SHIFT=20
31# CONFIG_CPUSETS is not set 31# CONFIG_CPUSETS is not set
32CONFIG_SYSFS_DEPRECATED=y 32CONFIG_SYSFS_DEPRECATED=y
33# CONFIG_RELAY is not set 33# CONFIG_RELAY is not set
@@ -46,18 +46,19 @@ CONFIG_BUG=y
46CONFIG_ELF_CORE=y 46CONFIG_ELF_CORE=y
47CONFIG_BASE_FULL=y 47CONFIG_BASE_FULL=y
48CONFIG_FUTEX=y 48CONFIG_FUTEX=y
49CONFIG_ANON_INODES=y
49CONFIG_EPOLL=y 50CONFIG_EPOLL=y
51CONFIG_SIGNALFD=y
52CONFIG_TIMERFD=y
53CONFIG_EVENTFD=y
50CONFIG_SHMEM=y 54CONFIG_SHMEM=y
51CONFIG_SLAB=y
52CONFIG_VM_EVENT_COUNTERS=y 55CONFIG_VM_EVENT_COUNTERS=y
56CONFIG_SLAB=y
57# CONFIG_SLUB is not set
58# CONFIG_SLOB is not set
53CONFIG_RT_MUTEXES=y 59CONFIG_RT_MUTEXES=y
54# CONFIG_TINY_SHMEM is not set 60# CONFIG_TINY_SHMEM is not set
55CONFIG_BASE_SMALL=0 61CONFIG_BASE_SMALL=0
56# CONFIG_SLOB is not set
57
58#
59# Loadable module support
60#
61CONFIG_MODULES=y 62CONFIG_MODULES=y
62CONFIG_MODULE_UNLOAD=y 63CONFIG_MODULE_UNLOAD=y
63# CONFIG_MODULE_FORCE_UNLOAD is not set 64# CONFIG_MODULE_FORCE_UNLOAD is not set
@@ -65,12 +66,9 @@ CONFIG_MODVERSIONS=y
65# CONFIG_MODULE_SRCVERSION_ALL is not set 66# CONFIG_MODULE_SRCVERSION_ALL is not set
66CONFIG_KMOD=y 67CONFIG_KMOD=y
67CONFIG_STOP_MACHINE=y 68CONFIG_STOP_MACHINE=y
68
69#
70# Block layer
71#
72CONFIG_BLOCK=y 69CONFIG_BLOCK=y
73# CONFIG_BLK_DEV_IO_TRACE is not set 70# CONFIG_BLK_DEV_IO_TRACE is not set
71# CONFIG_BLK_DEV_BSG is not set
74 72
75# 73#
76# IO Schedulers 74# IO Schedulers
@@ -91,6 +89,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
91CONFIG_IA64=y 89CONFIG_IA64=y
92CONFIG_64BIT=y 90CONFIG_64BIT=y
93CONFIG_ZONE_DMA=y 91CONFIG_ZONE_DMA=y
92CONFIG_QUICKLIST=y
94CONFIG_MMU=y 93CONFIG_MMU=y
95CONFIG_SWIOTLB=y 94CONFIG_SWIOTLB=y
96CONFIG_RWSEM_XCHGADD_ALGORITHM=y 95CONFIG_RWSEM_XCHGADD_ALGORITHM=y
@@ -98,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
98# CONFIG_ARCH_HAS_ILOG2_U64 is not set 97# CONFIG_ARCH_HAS_ILOG2_U64 is not set
99CONFIG_GENERIC_FIND_NEXT_BIT=y 98CONFIG_GENERIC_FIND_NEXT_BIT=y
100CONFIG_GENERIC_CALIBRATE_DELAY=y 99CONFIG_GENERIC_CALIBRATE_DELAY=y
101CONFIG_TIME_INTERPOLATION=y 100CONFIG_GENERIC_TIME=y
102CONFIG_DMI=y 101CONFIG_DMI=y
103CONFIG_EFI=y 102CONFIG_EFI=y
104CONFIG_GENERIC_IOMAP=y 103CONFIG_GENERIC_IOMAP=y
@@ -114,8 +113,8 @@ CONFIG_IA64_GENERIC=y
114CONFIG_MCKINLEY=y 113CONFIG_MCKINLEY=y
115# CONFIG_IA64_PAGE_SIZE_4KB is not set 114# CONFIG_IA64_PAGE_SIZE_4KB is not set
116# CONFIG_IA64_PAGE_SIZE_8KB is not set 115# CONFIG_IA64_PAGE_SIZE_8KB is not set
117CONFIG_IA64_PAGE_SIZE_16KB=y 116# CONFIG_IA64_PAGE_SIZE_16KB is not set
118# CONFIG_IA64_PAGE_SIZE_64KB is not set 117CONFIG_IA64_PAGE_SIZE_64KB=y
119CONFIG_PGTABLE_3=y 118CONFIG_PGTABLE_3=y
120# CONFIG_PGTABLE_4 is not set 119# CONFIG_PGTABLE_4 is not set
121# CONFIG_HZ_100 is not set 120# CONFIG_HZ_100 is not set
@@ -147,6 +146,9 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
147CONFIG_MIGRATION=y 146CONFIG_MIGRATION=y
148CONFIG_RESOURCES_64BIT=y 147CONFIG_RESOURCES_64BIT=y
149CONFIG_ZONE_DMA_FLAG=1 148CONFIG_ZONE_DMA_FLAG=1
149CONFIG_BOUNCE=y
150CONFIG_NR_QUICK=1
151CONFIG_VIRT_TO_BUS=y
150CONFIG_ARCH_SELECT_MEMORY_MODEL=y 152CONFIG_ARCH_SELECT_MEMORY_MODEL=y
151CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 153CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
152CONFIG_ARCH_FLATMEM_ENABLE=y 154CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -164,7 +166,7 @@ CONFIG_COMPAT=y
164CONFIG_IA64_MCA_RECOVERY=y 166CONFIG_IA64_MCA_RECOVERY=y
165CONFIG_PERFMON=y 167CONFIG_PERFMON=y
166CONFIG_IA64_PALINFO=y 168CONFIG_IA64_PALINFO=y
167# CONFIG_MC_ERR_INJECT is not set 169# CONFIG_IA64_MC_ERR_INJECT is not set
168CONFIG_SGI_SN=y 170CONFIG_SGI_SN=y
169# CONFIG_IA64_ESI is not set 171# CONFIG_IA64_ESI is not set
170 172
@@ -180,6 +182,7 @@ CONFIG_CRASH_DUMP=y
180# 182#
181CONFIG_EFI_VARS=y 183CONFIG_EFI_VARS=y
182CONFIG_EFI_PCDP=y 184CONFIG_EFI_PCDP=y
185CONFIG_DMIID=y
183CONFIG_BINFMT_ELF=y 186CONFIG_BINFMT_ELF=y
184CONFIG_BINFMT_MISC=m 187CONFIG_BINFMT_MISC=m
185 188
@@ -189,7 +192,6 @@ CONFIG_BINFMT_MISC=m
189CONFIG_PM=y 192CONFIG_PM=y
190CONFIG_PM_LEGACY=y 193CONFIG_PM_LEGACY=y
191# CONFIG_PM_DEBUG is not set 194# CONFIG_PM_DEBUG is not set
192# CONFIG_PM_SYSFS_DEPRECATED is not set
193 195
194# 196#
195# ACPI (Advanced Configuration and Power Interface) Support 197# ACPI (Advanced Configuration and Power Interface) Support
@@ -220,13 +222,11 @@ CONFIG_ACPI_CONTAINER=m
220# 222#
221CONFIG_PCI=y 223CONFIG_PCI=y
222CONFIG_PCI_DOMAINS=y 224CONFIG_PCI_DOMAINS=y
225CONFIG_PCI_SYSCALL=y
223# CONFIG_PCIEPORTBUS is not set 226# CONFIG_PCIEPORTBUS is not set
227CONFIG_ARCH_SUPPORTS_MSI=y
224# CONFIG_PCI_MSI is not set 228# CONFIG_PCI_MSI is not set
225# CONFIG_PCI_DEBUG is not set 229# CONFIG_PCI_DEBUG is not set
226
227#
228# PCI Hotplug Support
229#
230CONFIG_HOTPLUG_PCI=m 230CONFIG_HOTPLUG_PCI=m
231# CONFIG_HOTPLUG_PCI_FAKE is not set 231# CONFIG_HOTPLUG_PCI_FAKE is not set
232CONFIG_HOTPLUG_PCI_ACPI=m 232CONFIG_HOTPLUG_PCI_ACPI=m
@@ -248,7 +248,6 @@ CONFIG_NET=y
248# 248#
249# Networking options 249# Networking options
250# 250#
251# CONFIG_NETDEBUG is not set
252CONFIG_PACKET=y 251CONFIG_PACKET=y
253# CONFIG_PACKET_MMAP is not set 252# CONFIG_PACKET_MMAP is not set
254CONFIG_UNIX=y 253CONFIG_UNIX=y
@@ -286,20 +285,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
286# CONFIG_INET6_TUNNEL is not set 285# CONFIG_INET6_TUNNEL is not set
287# CONFIG_NETWORK_SECMARK is not set 286# CONFIG_NETWORK_SECMARK is not set
288# CONFIG_NETFILTER is not set 287# CONFIG_NETFILTER is not set
289
290#
291# DCCP Configuration (EXPERIMENTAL)
292#
293# CONFIG_IP_DCCP is not set 288# CONFIG_IP_DCCP is not set
294
295#
296# SCTP Configuration (EXPERIMENTAL)
297#
298# CONFIG_IP_SCTP is not set 289# CONFIG_IP_SCTP is not set
299
300#
301# TIPC Configuration (EXPERIMENTAL)
302#
303# CONFIG_TIPC is not set 290# CONFIG_TIPC is not set
304# CONFIG_ATM is not set 291# CONFIG_ATM is not set
305# CONFIG_BRIDGE is not set 292# CONFIG_BRIDGE is not set
@@ -325,7 +312,17 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
325# CONFIG_HAMRADIO is not set 312# CONFIG_HAMRADIO is not set
326# CONFIG_IRDA is not set 313# CONFIG_IRDA is not set
327# CONFIG_BT is not set 314# CONFIG_BT is not set
315# CONFIG_AF_RXRPC is not set
316
317#
318# Wireless
319#
320# CONFIG_CFG80211 is not set
321# CONFIG_WIRELESS_EXT is not set
322# CONFIG_MAC80211 is not set
328# CONFIG_IEEE80211 is not set 323# CONFIG_IEEE80211 is not set
324# CONFIG_RFKILL is not set
325# CONFIG_NET_9P is not set
329 326
330# 327#
331# Device Drivers 328# Device Drivers
@@ -340,25 +337,9 @@ CONFIG_FW_LOADER=m
340# CONFIG_DEBUG_DRIVER is not set 337# CONFIG_DEBUG_DRIVER is not set
341# CONFIG_DEBUG_DEVRES is not set 338# CONFIG_DEBUG_DEVRES is not set
342# CONFIG_SYS_HYPERVISOR is not set 339# CONFIG_SYS_HYPERVISOR is not set
343
344#
345# Connector - unified userspace <-> kernelspace linker
346#
347# CONFIG_CONNECTOR is not set 340# CONFIG_CONNECTOR is not set
348
349#
350# Memory Technology Devices (MTD)
351#
352# CONFIG_MTD is not set 341# CONFIG_MTD is not set
353
354#
355# Parallel port support
356#
357# CONFIG_PARPORT is not set 342# CONFIG_PARPORT is not set
358
359#
360# Plug and Play support
361#
362CONFIG_PNP=y 343CONFIG_PNP=y
363# CONFIG_PNP_DEBUG is not set 344# CONFIG_PNP_DEBUG is not set
364 345
@@ -366,10 +347,7 @@ CONFIG_PNP=y
366# Protocols 347# Protocols
367# 348#
368CONFIG_PNPACPI=y 349CONFIG_PNPACPI=y
369 350CONFIG_BLK_DEV=y
370#
371# Block devices
372#
373# CONFIG_BLK_CPQ_DA is not set 351# CONFIG_BLK_CPQ_DA is not set
374# CONFIG_BLK_CPQ_CISS_DA is not set 352# CONFIG_BLK_CPQ_CISS_DA is not set
375# CONFIG_BLK_DEV_DAC960 is not set 353# CONFIG_BLK_DEV_DAC960 is not set
@@ -386,16 +364,11 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
386CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 364CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
387# CONFIG_CDROM_PKTCDVD is not set 365# CONFIG_CDROM_PKTCDVD is not set
388# CONFIG_ATA_OVER_ETH is not set 366# CONFIG_ATA_OVER_ETH is not set
389 367CONFIG_MISC_DEVICES=y
390# 368# CONFIG_PHANTOM is not set
391# Misc devices 369# CONFIG_EEPROM_93CX6 is not set
392#
393CONFIG_SGI_IOC4=y 370CONFIG_SGI_IOC4=y
394# CONFIG_TIFM_CORE is not set 371# CONFIG_TIFM_CORE is not set
395
396#
397# ATA/ATAPI/MFM/RLL support
398#
399CONFIG_IDE=y 372CONFIG_IDE=y
400CONFIG_IDE_MAX_HWIFS=4 373CONFIG_IDE_MAX_HWIFS=4
401CONFIG_BLK_DEV_IDE=y 374CONFIG_BLK_DEV_IDE=y
@@ -412,6 +385,7 @@ CONFIG_BLK_DEV_IDEFLOPPY=y
412CONFIG_BLK_DEV_IDESCSI=m 385CONFIG_BLK_DEV_IDESCSI=m
413# CONFIG_BLK_DEV_IDEACPI is not set 386# CONFIG_BLK_DEV_IDEACPI is not set
414# CONFIG_IDE_TASK_IOCTL is not set 387# CONFIG_IDE_TASK_IOCTL is not set
388CONFIG_IDE_PROC_FS=y
415 389
416# 390#
417# IDE chipset support/bugfixes 391# IDE chipset support/bugfixes
@@ -420,12 +394,12 @@ CONFIG_BLK_DEV_IDESCSI=m
420# CONFIG_BLK_DEV_IDEPNP is not set 394# CONFIG_BLK_DEV_IDEPNP is not set
421CONFIG_BLK_DEV_IDEPCI=y 395CONFIG_BLK_DEV_IDEPCI=y
422CONFIG_IDEPCI_SHARE_IRQ=y 396CONFIG_IDEPCI_SHARE_IRQ=y
397CONFIG_IDEPCI_PCIBUS_ORDER=y
423# CONFIG_BLK_DEV_OFFBOARD is not set 398# CONFIG_BLK_DEV_OFFBOARD is not set
424CONFIG_BLK_DEV_GENERIC=y 399CONFIG_BLK_DEV_GENERIC=y
425# CONFIG_BLK_DEV_OPTI621 is not set 400# CONFIG_BLK_DEV_OPTI621 is not set
426CONFIG_BLK_DEV_IDEDMA_PCI=y 401CONFIG_BLK_DEV_IDEDMA_PCI=y
427# CONFIG_BLK_DEV_IDEDMA_FORCED is not set 402# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
428CONFIG_IDEDMA_PCI_AUTO=y
429# CONFIG_IDEDMA_ONLYDISK is not set 403# CONFIG_IDEDMA_ONLYDISK is not set
430# CONFIG_BLK_DEV_AEC62XX is not set 404# CONFIG_BLK_DEV_AEC62XX is not set
431# CONFIG_BLK_DEV_ALI15X3 is not set 405# CONFIG_BLK_DEV_ALI15X3 is not set
@@ -455,7 +429,6 @@ CONFIG_BLK_DEV_SGIIOC4=y
455# CONFIG_IDE_ARM is not set 429# CONFIG_IDE_ARM is not set
456CONFIG_BLK_DEV_IDEDMA=y 430CONFIG_BLK_DEV_IDEDMA=y
457# CONFIG_IDEDMA_IVB is not set 431# CONFIG_IDEDMA_IVB is not set
458CONFIG_IDEDMA_AUTO=y
459# CONFIG_BLK_DEV_HD is not set 432# CONFIG_BLK_DEV_HD is not set
460 433
461# 434#
@@ -463,6 +436,7 @@ CONFIG_IDEDMA_AUTO=y
463# 436#
464# CONFIG_RAID_ATTRS is not set 437# CONFIG_RAID_ATTRS is not set
465CONFIG_SCSI=y 438CONFIG_SCSI=y
439CONFIG_SCSI_DMA=y
466# CONFIG_SCSI_TGT is not set 440# CONFIG_SCSI_TGT is not set
467CONFIG_SCSI_NETLINK=y 441CONFIG_SCSI_NETLINK=y
468CONFIG_SCSI_PROC_FS=y 442CONFIG_SCSI_PROC_FS=y
@@ -485,6 +459,7 @@ CONFIG_CHR_DEV_SG=m
485# CONFIG_SCSI_CONSTANTS is not set 459# CONFIG_SCSI_CONSTANTS is not set
486# CONFIG_SCSI_LOGGING is not set 460# CONFIG_SCSI_LOGGING is not set
487# CONFIG_SCSI_SCAN_ASYNC is not set 461# CONFIG_SCSI_SCAN_ASYNC is not set
462CONFIG_SCSI_WAIT_SCAN=m
488 463
489# 464#
490# SCSI Transports 465# SCSI Transports
@@ -492,7 +467,7 @@ CONFIG_CHR_DEV_SG=m
492CONFIG_SCSI_SPI_ATTRS=y 467CONFIG_SCSI_SPI_ATTRS=y
493CONFIG_SCSI_FC_ATTRS=y 468CONFIG_SCSI_FC_ATTRS=y
494# CONFIG_SCSI_ISCSI_ATTRS is not set 469# CONFIG_SCSI_ISCSI_ATTRS is not set
495# CONFIG_SCSI_SAS_ATTRS is not set 470CONFIG_SCSI_SAS_ATTRS=y
496# CONFIG_SCSI_SAS_LIBSAS is not set 471# CONFIG_SCSI_SAS_LIBSAS is not set
497 472
498# 473#
@@ -531,15 +506,7 @@ CONFIG_SCSI_QLOGIC_1280=y
531# CONFIG_SCSI_DC390T is not set 506# CONFIG_SCSI_DC390T is not set
532# CONFIG_SCSI_DEBUG is not set 507# CONFIG_SCSI_DEBUG is not set
533# CONFIG_SCSI_SRP is not set 508# CONFIG_SCSI_SRP is not set
534
535#
536# Serial ATA (prod) and Parallel ATA (experimental) drivers
537#
538# CONFIG_ATA is not set 509# CONFIG_ATA is not set
539
540#
541# Multi-device support (RAID and LVM)
542#
543CONFIG_MD=y 510CONFIG_MD=y
544CONFIG_BLK_DEV_MD=m 511CONFIG_BLK_DEV_MD=m
545CONFIG_MD_LINEAR=m 512CONFIG_MD_LINEAR=m
@@ -557,6 +524,8 @@ CONFIG_DM_MIRROR=m
557CONFIG_DM_ZERO=m 524CONFIG_DM_ZERO=m
558CONFIG_DM_MULTIPATH=m 525CONFIG_DM_MULTIPATH=m
559# CONFIG_DM_MULTIPATH_EMC is not set 526# CONFIG_DM_MULTIPATH_EMC is not set
527# CONFIG_DM_MULTIPATH_RDAC is not set
528# CONFIG_DM_DELAY is not set
560 529
561# 530#
562# Fusion MPT device support 531# Fusion MPT device support
@@ -564,53 +533,32 @@ CONFIG_DM_MULTIPATH=m
564CONFIG_FUSION=y 533CONFIG_FUSION=y
565CONFIG_FUSION_SPI=y 534CONFIG_FUSION_SPI=y
566CONFIG_FUSION_FC=m 535CONFIG_FUSION_FC=m
567# CONFIG_FUSION_SAS is not set 536CONFIG_FUSION_SAS=y
568CONFIG_FUSION_MAX_SGE=128 537CONFIG_FUSION_MAX_SGE=128
569# CONFIG_FUSION_CTL is not set 538# CONFIG_FUSION_CTL is not set
570 539
571# 540#
572# IEEE 1394 (FireWire) support 541# IEEE 1394 (FireWire) support
573# 542#
543# CONFIG_FIREWIRE is not set
574# CONFIG_IEEE1394 is not set 544# CONFIG_IEEE1394 is not set
575
576#
577# I2O device support
578#
579# CONFIG_I2O is not set 545# CONFIG_I2O is not set
580
581#
582# Network device support
583#
584CONFIG_NETDEVICES=y 546CONFIG_NETDEVICES=y
547# CONFIG_NETDEVICES_MULTIQUEUE is not set
585CONFIG_DUMMY=m 548CONFIG_DUMMY=m
586# CONFIG_BONDING is not set 549# CONFIG_BONDING is not set
550# CONFIG_MACVLAN is not set
587# CONFIG_EQUALIZER is not set 551# CONFIG_EQUALIZER is not set
588# CONFIG_TUN is not set 552# CONFIG_TUN is not set
589# CONFIG_NET_SB1000 is not set 553# CONFIG_NET_SB1000 is not set
590
591#
592# ARCnet devices
593#
594# CONFIG_ARCNET is not set 554# CONFIG_ARCNET is not set
595
596#
597# PHY device support
598#
599# CONFIG_PHYLIB is not set 555# CONFIG_PHYLIB is not set
600
601#
602# Ethernet (10 or 100Mbit)
603#
604CONFIG_NET_ETHERNET=y 556CONFIG_NET_ETHERNET=y
605CONFIG_MII=m 557CONFIG_MII=m
606# CONFIG_HAPPYMEAL is not set 558# CONFIG_HAPPYMEAL is not set
607# CONFIG_SUNGEM is not set 559# CONFIG_SUNGEM is not set
608# CONFIG_CASSINI is not set 560# CONFIG_CASSINI is not set
609# CONFIG_NET_VENDOR_3COM is not set 561# CONFIG_NET_VENDOR_3COM is not set
610
611#
612# Tulip family network device support
613#
614CONFIG_NET_TULIP=y 562CONFIG_NET_TULIP=y
615# CONFIG_DE2104X is not set 563# CONFIG_DE2104X is not set
616CONFIG_TULIP=m 564CONFIG_TULIP=m
@@ -641,10 +589,7 @@ CONFIG_E100=m
641# CONFIG_SUNDANCE is not set 589# CONFIG_SUNDANCE is not set
642# CONFIG_VIA_RHINE is not set 590# CONFIG_VIA_RHINE is not set
643# CONFIG_SC92031 is not set 591# CONFIG_SC92031 is not set
644 592CONFIG_NETDEV_1000=y
645#
646# Ethernet (1000 Mbit)
647#
648# CONFIG_ACENIC is not set 593# CONFIG_ACENIC is not set
649# CONFIG_DL2K is not set 594# CONFIG_DL2K is not set
650CONFIG_E1000=y 595CONFIG_E1000=y
@@ -657,36 +602,36 @@ CONFIG_E1000=y
657# CONFIG_SIS190 is not set 602# CONFIG_SIS190 is not set
658# CONFIG_SKGE is not set 603# CONFIG_SKGE is not set
659# CONFIG_SKY2 is not set 604# CONFIG_SKY2 is not set
660# CONFIG_SK98LIN is not set
661# CONFIG_VIA_VELOCITY is not set 605# CONFIG_VIA_VELOCITY is not set
662CONFIG_TIGON3=y 606CONFIG_TIGON3=y
663# CONFIG_BNX2 is not set 607# CONFIG_BNX2 is not set
664# CONFIG_QLA3XXX is not set 608# CONFIG_QLA3XXX is not set
665# CONFIG_ATL1 is not set 609# CONFIG_ATL1 is not set
666 610CONFIG_NETDEV_10000=y
667#
668# Ethernet (10000 Mbit)
669#
670# CONFIG_CHELSIO_T1 is not set 611# CONFIG_CHELSIO_T1 is not set
671# CONFIG_CHELSIO_T3 is not set 612# CONFIG_CHELSIO_T3 is not set
672# CONFIG_IXGB is not set 613# CONFIG_IXGB is not set
673# CONFIG_S2IO is not set 614# CONFIG_S2IO is not set
674# CONFIG_MYRI10GE is not set 615# CONFIG_MYRI10GE is not set
675# CONFIG_NETXEN_NIC is not set 616# CONFIG_NETXEN_NIC is not set
676 617# CONFIG_MLX4_CORE is not set
677#
678# Token Ring devices
679#
680# CONFIG_TR is not set 618# CONFIG_TR is not set
681 619
682# 620#
683# Wireless LAN (non-hamradio) 621# Wireless LAN
684# 622#
685# CONFIG_NET_RADIO is not set 623# CONFIG_WLAN_PRE80211 is not set
624# CONFIG_WLAN_80211 is not set
686 625
687# 626#
688# Wan interfaces 627# USB Network Adapters
689# 628#
629# CONFIG_USB_CATC is not set
630# CONFIG_USB_KAWETH is not set
631# CONFIG_USB_PEGASUS is not set
632# CONFIG_USB_RTL8150 is not set
633# CONFIG_USB_USBNET_MII is not set
634# CONFIG_USB_USBNET is not set
690# CONFIG_WAN is not set 635# CONFIG_WAN is not set
691# CONFIG_FDDI is not set 636# CONFIG_FDDI is not set
692# CONFIG_HIPPI is not set 637# CONFIG_HIPPI is not set
@@ -696,18 +641,9 @@ CONFIG_TIGON3=y
696# CONFIG_SHAPER is not set 641# CONFIG_SHAPER is not set
697CONFIG_NETCONSOLE=y 642CONFIG_NETCONSOLE=y
698CONFIG_NETPOLL=y 643CONFIG_NETPOLL=y
699# CONFIG_NETPOLL_RX is not set
700# CONFIG_NETPOLL_TRAP is not set 644# CONFIG_NETPOLL_TRAP is not set
701CONFIG_NET_POLL_CONTROLLER=y 645CONFIG_NET_POLL_CONTROLLER=y
702
703#
704# ISDN subsystem
705#
706# CONFIG_ISDN is not set 646# CONFIG_ISDN is not set
707
708#
709# Telephony Support
710#
711# CONFIG_PHONE is not set 647# CONFIG_PHONE is not set
712 648
713# 649#
@@ -715,6 +651,7 @@ CONFIG_NET_POLL_CONTROLLER=y
715# 651#
716CONFIG_INPUT=y 652CONFIG_INPUT=y
717# CONFIG_INPUT_FF_MEMLESS is not set 653# CONFIG_INPUT_FF_MEMLESS is not set
654# CONFIG_INPUT_POLLDEV is not set
718 655
719# 656#
720# Userland interfaces 657# Userland interfaces
@@ -740,9 +677,17 @@ CONFIG_KEYBOARD_ATKBD=y
740# CONFIG_KEYBOARD_STOWAWAY is not set 677# CONFIG_KEYBOARD_STOWAWAY is not set
741CONFIG_INPUT_MOUSE=y 678CONFIG_INPUT_MOUSE=y
742CONFIG_MOUSE_PS2=y 679CONFIG_MOUSE_PS2=y
680CONFIG_MOUSE_PS2_ALPS=y
681CONFIG_MOUSE_PS2_LOGIPS2PP=y
682CONFIG_MOUSE_PS2_SYNAPTICS=y
683CONFIG_MOUSE_PS2_LIFEBOOK=y
684CONFIG_MOUSE_PS2_TRACKPOINT=y
685# CONFIG_MOUSE_PS2_TOUCHKIT is not set
743# CONFIG_MOUSE_SERIAL is not set 686# CONFIG_MOUSE_SERIAL is not set
687# CONFIG_MOUSE_APPLETOUCH is not set
744# CONFIG_MOUSE_VSXXXAA is not set 688# CONFIG_MOUSE_VSXXXAA is not set
745# CONFIG_INPUT_JOYSTICK is not set 689# CONFIG_INPUT_JOYSTICK is not set
690# CONFIG_INPUT_TABLET is not set
746# CONFIG_INPUT_TOUCHSCREEN is not set 691# CONFIG_INPUT_TOUCHSCREEN is not set
747# CONFIG_INPUT_MISC is not set 692# CONFIG_INPUT_MISC is not set
748 693
@@ -814,19 +759,10 @@ CONFIG_SERIAL_SGI_IOC4=y
814CONFIG_UNIX98_PTYS=y 759CONFIG_UNIX98_PTYS=y
815CONFIG_LEGACY_PTYS=y 760CONFIG_LEGACY_PTYS=y
816CONFIG_LEGACY_PTY_COUNT=256 761CONFIG_LEGACY_PTY_COUNT=256
817
818#
819# IPMI
820#
821# CONFIG_IPMI_HANDLER is not set 762# CONFIG_IPMI_HANDLER is not set
822
823#
824# Watchdog Cards
825#
826# CONFIG_WATCHDOG is not set 763# CONFIG_WATCHDOG is not set
827# CONFIG_HW_RANDOM is not set 764# CONFIG_HW_RANDOM is not set
828CONFIG_EFI_RTC=y 765CONFIG_EFI_RTC=y
829# CONFIG_DTLK is not set
830# CONFIG_R3964 is not set 766# CONFIG_R3964 is not set
831# CONFIG_APPLICOM is not set 767# CONFIG_APPLICOM is not set
832CONFIG_AGP=m 768CONFIG_AGP=m
@@ -848,15 +784,8 @@ CONFIG_HPET=y
848CONFIG_HPET_MMAP=y 784CONFIG_HPET_MMAP=y
849# CONFIG_HANGCHECK_TIMER is not set 785# CONFIG_HANGCHECK_TIMER is not set
850CONFIG_MMTIMER=y 786CONFIG_MMTIMER=y
851
852#
853# TPM devices
854#
855# CONFIG_TCG_TPM is not set 787# CONFIG_TCG_TPM is not set
856 788CONFIG_DEVPORT=y
857#
858# I2C support
859#
860# CONFIG_I2C is not set 789# CONFIG_I2C is not set
861 790
862# 791#
@@ -864,21 +793,17 @@ CONFIG_MMTIMER=y
864# 793#
865# CONFIG_SPI is not set 794# CONFIG_SPI is not set
866# CONFIG_SPI_MASTER is not set 795# CONFIG_SPI_MASTER is not set
867
868#
869# Dallas's 1-wire bus
870#
871# CONFIG_W1 is not set 796# CONFIG_W1 is not set
872 797# CONFIG_POWER_SUPPLY is not set
873#
874# Hardware Monitoring support
875#
876CONFIG_HWMON=y 798CONFIG_HWMON=y
877# CONFIG_HWMON_VID is not set 799# CONFIG_HWMON_VID is not set
878# CONFIG_SENSORS_ABITUGURU is not set 800# CONFIG_SENSORS_ABITUGURU is not set
879# CONFIG_SENSORS_F71805F is not set 801# CONFIG_SENSORS_F71805F is not set
880# CONFIG_SENSORS_PC87427 is not set 802# CONFIG_SENSORS_PC87427 is not set
803# CONFIG_SENSORS_SMSC47M1 is not set
804# CONFIG_SENSORS_SMSC47B397 is not set
881# CONFIG_SENSORS_VT1211 is not set 805# CONFIG_SENSORS_VT1211 is not set
806# CONFIG_SENSORS_W83627HF is not set
882# CONFIG_HWMON_DEBUG_CHIP is not set 807# CONFIG_HWMON_DEBUG_CHIP is not set
883 808
884# 809#
@@ -890,17 +815,20 @@ CONFIG_HWMON=y
890# Multimedia devices 815# Multimedia devices
891# 816#
892# CONFIG_VIDEO_DEV is not set 817# CONFIG_VIDEO_DEV is not set
893 818# CONFIG_DVB_CORE is not set
894# 819CONFIG_DAB=y
895# Digital Video Broadcasting Devices
896#
897# CONFIG_DVB is not set
898# CONFIG_USB_DABUSB is not set 820# CONFIG_USB_DABUSB is not set
899 821
900# 822#
901# Graphics support 823# Graphics support
902# 824#
903# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 825# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
826
827#
828# Display device support
829#
830# CONFIG_DISPLAY_SUPPORT is not set
831# CONFIG_VGASTATE is not set
904# CONFIG_FB is not set 832# CONFIG_FB is not set
905 833
906# 834#
@@ -1014,9 +942,10 @@ CONFIG_SND_FM801=m
1014# USB devices 942# USB devices
1015# 943#
1016# CONFIG_SND_USB_AUDIO is not set 944# CONFIG_SND_USB_AUDIO is not set
945# CONFIG_SND_USB_CAIAQ is not set
1017 946
1018# 947#
1019# SoC audio support 948# System on Chip audio support
1020# 949#
1021# CONFIG_SND_SOC is not set 950# CONFIG_SND_SOC is not set
1022 951
@@ -1025,16 +954,24 @@ CONFIG_SND_FM801=m
1025# 954#
1026# CONFIG_SOUND_PRIME is not set 955# CONFIG_SOUND_PRIME is not set
1027CONFIG_AC97_BUS=m 956CONFIG_AC97_BUS=m
957CONFIG_HID_SUPPORT=y
958CONFIG_HID=y
959# CONFIG_HID_DEBUG is not set
1028 960
1029# 961#
1030# HID Devices 962# USB Input Devices
1031# 963#
1032CONFIG_HID=y 964CONFIG_USB_HID=m
1033# CONFIG_HID_DEBUG is not set 965# CONFIG_USB_HIDINPUT_POWERBOOK is not set
966# CONFIG_HID_FF is not set
967# CONFIG_USB_HIDDEV is not set
1034 968
1035# 969#
1036# USB support 970# USB HID Boot Protocol drivers
1037# 971#
972# CONFIG_USB_KBD is not set
973# CONFIG_USB_MOUSE is not set
974CONFIG_USB_SUPPORT=y
1038CONFIG_USB_ARCH_HAS_HCD=y 975CONFIG_USB_ARCH_HAS_HCD=y
1039CONFIG_USB_ARCH_HAS_OHCI=y 976CONFIG_USB_ARCH_HAS_OHCI=y
1040CONFIG_USB_ARCH_HAS_EHCI=y 977CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1045,8 +982,10 @@ CONFIG_USB=m
1045# Miscellaneous USB options 982# Miscellaneous USB options
1046# 983#
1047CONFIG_USB_DEVICEFS=y 984CONFIG_USB_DEVICEFS=y
985CONFIG_USB_DEVICE_CLASS=y
1048# CONFIG_USB_DYNAMIC_MINORS is not set 986# CONFIG_USB_DYNAMIC_MINORS is not set
1049# CONFIG_USB_SUSPEND is not set 987# CONFIG_USB_SUSPEND is not set
988# CONFIG_USB_PERSIST is not set
1050# CONFIG_USB_OTG is not set 989# CONFIG_USB_OTG is not set
1051 990
1052# 991#
@@ -1056,7 +995,6 @@ CONFIG_USB_EHCI_HCD=m
1056# CONFIG_USB_EHCI_SPLIT_ISO is not set 995# CONFIG_USB_EHCI_SPLIT_ISO is not set
1057# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 996# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1058# CONFIG_USB_EHCI_TT_NEWSCHED is not set 997# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1059# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1060# CONFIG_USB_ISP116X_HCD is not set 998# CONFIG_USB_ISP116X_HCD is not set
1061CONFIG_USB_OHCI_HCD=m 999CONFIG_USB_OHCI_HCD=m
1062# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1000# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1064,6 +1002,7 @@ CONFIG_USB_OHCI_HCD=m
1064CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1002CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1065CONFIG_USB_UHCI_HCD=m 1003CONFIG_USB_UHCI_HCD=m
1066# CONFIG_USB_SL811_HCD is not set 1004# CONFIG_USB_SL811_HCD is not set
1005# CONFIG_USB_R8A66597_HCD is not set
1067 1006
1068# 1007#
1069# USB Device Class drivers 1008# USB Device Class drivers
@@ -1093,47 +1032,10 @@ CONFIG_USB_STORAGE=m
1093# CONFIG_USB_LIBUSUAL is not set 1032# CONFIG_USB_LIBUSUAL is not set
1094 1033
1095# 1034#
1096# USB Input Devices
1097#
1098CONFIG_USB_HID=m
1099# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1100# CONFIG_HID_FF is not set
1101# CONFIG_USB_HIDDEV is not set
1102
1103#
1104# USB HID Boot Protocol drivers
1105#
1106# CONFIG_USB_KBD is not set
1107# CONFIG_USB_MOUSE is not set
1108# CONFIG_USB_AIPTEK is not set
1109# CONFIG_USB_WACOM is not set
1110# CONFIG_USB_ACECAD is not set
1111# CONFIG_USB_KBTAB is not set
1112# CONFIG_USB_POWERMATE is not set
1113# CONFIG_USB_TOUCHSCREEN is not set
1114# CONFIG_USB_YEALINK is not set
1115# CONFIG_USB_XPAD is not set
1116# CONFIG_USB_ATI_REMOTE is not set
1117# CONFIG_USB_ATI_REMOTE2 is not set
1118# CONFIG_USB_KEYSPAN_REMOTE is not set
1119# CONFIG_USB_APPLETOUCH is not set
1120# CONFIG_USB_GTCO is not set
1121
1122#
1123# USB Imaging devices 1035# USB Imaging devices
1124# 1036#
1125# CONFIG_USB_MDC800 is not set 1037# CONFIG_USB_MDC800 is not set
1126# CONFIG_USB_MICROTEK is not set 1038# CONFIG_USB_MICROTEK is not set
1127
1128#
1129# USB Network Adapters
1130#
1131# CONFIG_USB_CATC is not set
1132# CONFIG_USB_KAWETH is not set
1133# CONFIG_USB_PEGASUS is not set
1134# CONFIG_USB_RTL8150 is not set
1135# CONFIG_USB_USBNET_MII is not set
1136# CONFIG_USB_USBNET is not set
1137CONFIG_USB_MON=y 1039CONFIG_USB_MON=y
1138 1040
1139# 1041#
@@ -1177,10 +1079,6 @@ CONFIG_USB_MON=y
1177# USB Gadget Support 1079# USB Gadget Support
1178# 1080#
1179# CONFIG_USB_GADGET is not set 1081# CONFIG_USB_GADGET is not set
1180
1181#
1182# MMC/SD Card support
1183#
1184# CONFIG_MMC is not set 1082# CONFIG_MMC is not set
1185 1083
1186# 1084#
@@ -1195,10 +1093,6 @@ CONFIG_USB_MON=y
1195# 1093#
1196# LED Triggers 1094# LED Triggers
1197# 1095#
1198
1199#
1200# InfiniBand support
1201#
1202CONFIG_INFINIBAND=m 1096CONFIG_INFINIBAND=m
1203# CONFIG_INFINIBAND_USER_MAD is not set 1097# CONFIG_INFINIBAND_USER_MAD is not set
1204# CONFIG_INFINIBAND_USER_ACCESS is not set 1098# CONFIG_INFINIBAND_USER_ACCESS is not set
@@ -1206,6 +1100,7 @@ CONFIG_INFINIBAND_ADDR_TRANS=y
1206CONFIG_INFINIBAND_MTHCA=m 1100CONFIG_INFINIBAND_MTHCA=m
1207CONFIG_INFINIBAND_MTHCA_DEBUG=y 1101CONFIG_INFINIBAND_MTHCA_DEBUG=y
1208# CONFIG_INFINIBAND_AMSO1100 is not set 1102# CONFIG_INFINIBAND_AMSO1100 is not set
1103# CONFIG_MLX4_INFINIBAND is not set
1209CONFIG_INFINIBAND_IPOIB=m 1104CONFIG_INFINIBAND_IPOIB=m
1210# CONFIG_INFINIBAND_IPOIB_CM is not set 1105# CONFIG_INFINIBAND_IPOIB_CM is not set
1211CONFIG_INFINIBAND_IPOIB_DEBUG=y 1106CONFIG_INFINIBAND_IPOIB_DEBUG=y
@@ -1214,10 +1109,6 @@ CONFIG_INFINIBAND_IPOIB_DEBUG=y
1214# CONFIG_INFINIBAND_ISER is not set 1109# CONFIG_INFINIBAND_ISER is not set
1215 1110
1216# 1111#
1217# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1218#
1219
1220#
1221# Real Time Clock 1112# Real Time Clock
1222# 1113#
1223# CONFIG_RTC_CLASS is not set 1114# CONFIG_RTC_CLASS is not set
@@ -1236,12 +1127,9 @@ CONFIG_INFINIBAND_IPOIB_DEBUG=y
1236# 1127#
1237 1128
1238# 1129#
1239# Auxiliary Display support 1130# Userspace I/O
1240#
1241
1242#
1243# Virtualization
1244# 1131#
1132# CONFIG_UIO is not set
1245# CONFIG_MSPEC is not set 1133# CONFIG_MSPEC is not set
1246 1134
1247# 1135#
@@ -1357,7 +1245,8 @@ CONFIG_EXPORTFS=m
1357CONFIG_NFS_COMMON=y 1245CONFIG_NFS_COMMON=y
1358CONFIG_SUNRPC=m 1246CONFIG_SUNRPC=m
1359CONFIG_SUNRPC_GSS=m 1247CONFIG_SUNRPC_GSS=m
1360CONFIG_RPCSEC_GSS_KRB5=m 1248# CONFIG_SUNRPC_BIND34 is not set
1249CONFIG_RPCSEC_GSS_KRB5=y
1361# CONFIG_RPCSEC_GSS_SPKM3 is not set 1250# CONFIG_RPCSEC_GSS_SPKM3 is not set
1362CONFIG_SMB_FS=m 1251CONFIG_SMB_FS=m
1363CONFIG_SMB_NLS_DEFAULT=y 1252CONFIG_SMB_NLS_DEFAULT=y
@@ -1371,7 +1260,6 @@ CONFIG_CIFS=m
1371# CONFIG_NCP_FS is not set 1260# CONFIG_NCP_FS is not set
1372# CONFIG_CODA_FS is not set 1261# CONFIG_CODA_FS is not set
1373# CONFIG_AFS_FS is not set 1262# CONFIG_AFS_FS is not set
1374# CONFIG_9P_FS is not set
1375 1263
1376# 1264#
1377# Partition Types 1265# Partition Types
@@ -1393,6 +1281,7 @@ CONFIG_SGI_PARTITION=y
1393# CONFIG_SUN_PARTITION is not set 1281# CONFIG_SUN_PARTITION is not set
1394# CONFIG_KARMA_PARTITION is not set 1282# CONFIG_KARMA_PARTITION is not set
1395CONFIG_EFI_PARTITION=y 1283CONFIG_EFI_PARTITION=y
1284# CONFIG_SYSV68_PARTITION is not set
1396 1285
1397# 1286#
1398# Native Language Support 1287# Native Language Support
@@ -1449,11 +1338,14 @@ CONFIG_NLS_UTF8=m
1449CONFIG_BITREVERSE=y 1338CONFIG_BITREVERSE=y
1450# CONFIG_CRC_CCITT is not set 1339# CONFIG_CRC_CCITT is not set
1451# CONFIG_CRC16 is not set 1340# CONFIG_CRC16 is not set
1341# CONFIG_CRC_ITU_T is not set
1452CONFIG_CRC32=y 1342CONFIG_CRC32=y
1343# CONFIG_CRC7 is not set
1453# CONFIG_LIBCRC32C is not set 1344# CONFIG_LIBCRC32C is not set
1454CONFIG_PLIST=y 1345CONFIG_PLIST=y
1455CONFIG_HAS_IOMEM=y 1346CONFIG_HAS_IOMEM=y
1456CONFIG_HAS_IOPORT=y 1347CONFIG_HAS_IOPORT=y
1348CONFIG_HAS_DMA=y
1457CONFIG_GENERIC_HARDIRQS=y 1349CONFIG_GENERIC_HARDIRQS=y
1458CONFIG_GENERIC_IRQ_PROBE=y 1350CONFIG_GENERIC_IRQ_PROBE=y
1459CONFIG_GENERIC_PENDING_IRQ=y 1351CONFIG_GENERIC_PENDING_IRQ=y
@@ -1483,8 +1375,8 @@ CONFIG_MAGIC_SYSRQ=y
1483# CONFIG_HEADERS_CHECK is not set 1375# CONFIG_HEADERS_CHECK is not set
1484CONFIG_DEBUG_KERNEL=y 1376CONFIG_DEBUG_KERNEL=y
1485# CONFIG_DEBUG_SHIRQ is not set 1377# CONFIG_DEBUG_SHIRQ is not set
1486CONFIG_LOG_BUF_SHIFT=20
1487CONFIG_DETECT_SOFTLOCKUP=y 1378CONFIG_DETECT_SOFTLOCKUP=y
1379CONFIG_SCHED_DEBUG=y
1488# CONFIG_SCHEDSTATS is not set 1380# CONFIG_SCHEDSTATS is not set
1489# CONFIG_TIMER_STATS is not set 1381# CONFIG_TIMER_STATS is not set
1490# CONFIG_DEBUG_SLAB is not set 1382# CONFIG_DEBUG_SLAB is not set
@@ -1514,10 +1406,6 @@ CONFIG_SYSVIPC_COMPAT=y
1514# 1406#
1515# CONFIG_KEYS is not set 1407# CONFIG_KEYS is not set
1516# CONFIG_SECURITY is not set 1408# CONFIG_SECURITY is not set
1517
1518#
1519# Cryptographic options
1520#
1521CONFIG_CRYPTO=y 1409CONFIG_CRYPTO=y
1522CONFIG_CRYPTO_ALGAPI=y 1410CONFIG_CRYPTO_ALGAPI=y
1523CONFIG_CRYPTO_BLKCIPHER=m 1411CONFIG_CRYPTO_BLKCIPHER=m
@@ -1537,6 +1425,7 @@ CONFIG_CRYPTO_ECB=m
1537CONFIG_CRYPTO_CBC=m 1425CONFIG_CRYPTO_CBC=m
1538CONFIG_CRYPTO_PCBC=m 1426CONFIG_CRYPTO_PCBC=m
1539# CONFIG_CRYPTO_LRW is not set 1427# CONFIG_CRYPTO_LRW is not set
1428# CONFIG_CRYPTO_CRYPTD is not set
1540CONFIG_CRYPTO_DES=m 1429CONFIG_CRYPTO_DES=m
1541# CONFIG_CRYPTO_FCRYPT is not set 1430# CONFIG_CRYPTO_FCRYPT is not set
1542# CONFIG_CRYPTO_BLOWFISH is not set 1431# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1554,7 +1443,4 @@ CONFIG_CRYPTO_DES=m
1554# CONFIG_CRYPTO_CRC32C is not set 1443# CONFIG_CRYPTO_CRC32C is not set
1555# CONFIG_CRYPTO_CAMELLIA is not set 1444# CONFIG_CRYPTO_CAMELLIA is not set
1556# CONFIG_CRYPTO_TEST is not set 1445# CONFIG_CRYPTO_TEST is not set
1557 1446CONFIG_CRYPTO_HW=y
1558#
1559# Hardware crypto devices
1560#
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index e1189ba1ca5e..1cfab326fb7e 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -226,7 +226,7 @@ elf32_set_personality (void)
226} 226}
227 227
228static unsigned long 228static unsigned long
229elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused) 229elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
230{ 230{
231 unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK; 231 unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
232 232
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 2236fabbb3c6..0aebc6f79e95 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
7#define ASM_OFFSETS_C 1 7#define ASM_OFFSETS_C 1
8 8
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/clocksource.h>
10 11
11#include <asm-ia64/processor.h> 12#include <asm-ia64/processor.h>
12#include <asm-ia64/ptrace.h> 13#include <asm-ia64/ptrace.h>
@@ -15,6 +16,7 @@
15#include <asm-ia64/mca.h> 16#include <asm-ia64/mca.h>
16 17
17#include "../kernel/sigframe.h" 18#include "../kernel/sigframe.h"
19#include "../kernel/fsyscall_gtod_data.h"
18 20
19#define DEFINE(sym, val) \ 21#define DEFINE(sym, val) \
20 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 22 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -256,17 +258,24 @@ void foo(void)
256 BLANK(); 258 BLANK();
257 259
258 /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ 260 /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
259 DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); 261 DEFINE(IA64_GTOD_LOCK_OFFSET,
260 DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); 262 offsetof (struct fsyscall_gtod_data_t, lock));
261 DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift)); 263 DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
262 DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc)); 264 offsetof (struct fsyscall_gtod_data_t, wall_time));
263 DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset)); 265 DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
264 DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle)); 266 offsetof (struct fsyscall_gtod_data_t, monotonic_time));
265 DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter)); 267 DEFINE(IA64_CLKSRC_MASK_OFFSET,
266 DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter)); 268 offsetof (struct fsyscall_gtod_data_t, clk_mask));
267 DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask)); 269 DEFINE(IA64_CLKSRC_MULT_OFFSET,
268 DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU); 270 offsetof (struct fsyscall_gtod_data_t, clk_mult));
269 DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64); 271 DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
270 DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32); 272 offsetof (struct fsyscall_gtod_data_t, clk_shift));
271 DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec)); 273 DEFINE(IA64_CLKSRC_MMIO_OFFSET,
274 offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
275 DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
276 offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
277 DEFINE(IA64_ITC_JITTER_OFFSET,
278 offsetof (struct itc_jitter_data_t, itc_jitter));
279 DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
280 offsetof (struct itc_jitter_data_t, itc_lastcycle));
272} 281}
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index e00b21514f7c..2fd96d9062a1 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -3,6 +3,7 @@
3#include <linux/time.h> 3#include <linux/time.h>
4#include <linux/errno.h> 4#include <linux/errno.h>
5#include <linux/timex.h> 5#include <linux/timex.h>
6#include <linux/clocksource.h>
6#include <asm/io.h> 7#include <asm/io.h>
7 8
8/* IBM Summit (EXA) Cyclone counter code*/ 9/* IBM Summit (EXA) Cyclone counter code*/
@@ -18,13 +19,21 @@ void __init cyclone_setup(void)
18 use_cyclone = 1; 19 use_cyclone = 1;
19} 20}
20 21
22static void __iomem *cyclone_mc;
21 23
22struct time_interpolator cyclone_interpolator = { 24static cycle_t read_cyclone(void)
23 .source = TIME_SOURCE_MMIO64, 25{
24 .shift = 16, 26 return (cycle_t)readq((void __iomem *)cyclone_mc);
25 .frequency = CYCLONE_TIMER_FREQ, 27}
26 .drift = -100, 28
27 .mask = (1LL << 40) - 1 29static struct clocksource clocksource_cyclone = {
30 .name = "cyclone",
31 .rating = 300,
32 .read = read_cyclone,
33 .mask = (1LL << 40) - 1,
 34 .mult = 0, /*to be calculated*/
35 .shift = 16,
36 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
28}; 37};
29 38
30int __init init_cyclone_clock(void) 39int __init init_cyclone_clock(void)
@@ -44,13 +53,15 @@ int __init init_cyclone_clock(void)
44 offset = (CYCLONE_CBAR_ADDR); 53 offset = (CYCLONE_CBAR_ADDR);
45 reg = (u64*)ioremap_nocache(offset, sizeof(u64)); 54 reg = (u64*)ioremap_nocache(offset, sizeof(u64));
46 if(!reg){ 55 if(!reg){
47 printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n"); 56 printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
57 " register.\n");
48 use_cyclone = 0; 58 use_cyclone = 0;
49 return -ENODEV; 59 return -ENODEV;
50 } 60 }
51 base = readq(reg); 61 base = readq(reg);
52 if(!base){ 62 if(!base){
53 printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n"); 63 printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
64 " value.\n");
54 use_cyclone = 0; 65 use_cyclone = 0;
55 return -ENODEV; 66 return -ENODEV;
56 } 67 }
@@ -60,7 +71,8 @@ int __init init_cyclone_clock(void)
60 offset = (base + CYCLONE_PMCC_OFFSET); 71 offset = (base + CYCLONE_PMCC_OFFSET);
61 reg = (u64*)ioremap_nocache(offset, sizeof(u64)); 72 reg = (u64*)ioremap_nocache(offset, sizeof(u64));
62 if(!reg){ 73 if(!reg){
63 printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n"); 74 printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
75 " register.\n");
64 use_cyclone = 0; 76 use_cyclone = 0;
65 return -ENODEV; 77 return -ENODEV;
66 } 78 }
@@ -71,7 +83,8 @@ int __init init_cyclone_clock(void)
71 offset = (base + CYCLONE_MPCS_OFFSET); 83 offset = (base + CYCLONE_MPCS_OFFSET);
72 reg = (u64*)ioremap_nocache(offset, sizeof(u64)); 84 reg = (u64*)ioremap_nocache(offset, sizeof(u64));
73 if(!reg){ 85 if(!reg){
74 printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n"); 86 printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
87 " register.\n");
75 use_cyclone = 0; 88 use_cyclone = 0;
76 return -ENODEV; 89 return -ENODEV;
77 } 90 }
@@ -82,7 +95,8 @@ int __init init_cyclone_clock(void)
82 offset = (base + CYCLONE_MPMC_OFFSET); 95 offset = (base + CYCLONE_MPMC_OFFSET);
83 cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32)); 96 cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
84 if(!cyclone_timer){ 97 if(!cyclone_timer){
85 printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); 98 printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
99 " register.\n");
86 use_cyclone = 0; 100 use_cyclone = 0;
87 return -ENODEV; 101 return -ENODEV;
88 } 102 }
@@ -93,7 +107,8 @@ int __init init_cyclone_clock(void)
93 int stall = 100; 107 int stall = 100;
94 while(stall--) barrier(); 108 while(stall--) barrier();
95 if(readl(cyclone_timer) == old){ 109 if(readl(cyclone_timer) == old){
96 printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n"); 110 printk(KERN_ERR "Summit chipset: Counter not counting!"
111 " DISABLED\n");
97 iounmap(cyclone_timer); 112 iounmap(cyclone_timer);
98 cyclone_timer = 0; 113 cyclone_timer = 0;
99 use_cyclone = 0; 114 use_cyclone = 0;
@@ -101,8 +116,11 @@ int __init init_cyclone_clock(void)
101 } 116 }
102 } 117 }
103 /* initialize last tick */ 118 /* initialize last tick */
104 cyclone_interpolator.addr = cyclone_timer; 119 cyclone_mc = cyclone_timer;
105 register_time_interpolator(&cyclone_interpolator); 120 clocksource_cyclone.fsys_mmio = cyclone_timer;
121 clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
122 clocksource_cyclone.shift);
123 clocksource_register(&clocksource_cyclone);
106 124
107 return 0; 125 return 0;
108} 126}
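
The cyclone hunk above swaps the old time_interpolator for a clocksource whose mult is only filled in at registration time via clocksource_hz2mult(). As a worked check of that conversion, here is a small user-space sketch of the same arithmetic; it assumes the 100 MHz CYCLONE_TIMER_FREQ from this era's headers and the shift of 16 chosen in the patch.

/* Minimal user-space sketch of what clocksource_hz2mult() computes for the
 * cyclone counter above.  Assumes CYCLONE_TIMER_FREQ is 100 MHz; the
 * in-kernel helper also rounds to nearest, reproduced here. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
        uint64_t tmp = (uint64_t)1000000000 << shift;   /* NSEC_PER_SEC << shift */
        tmp += hz / 2;                                  /* round to nearest */
        return (uint32_t)(tmp / hz);
}

int main(void)
{
        uint32_t mult = hz2mult(100000000, 16);         /* 100 MHz counter, shift 16 */
        /* cycle deltas are converted as: ns = (delta * mult) >> shift */
        printf("cyclone mult = %u (%u/65536 ns per cycle = 10 ns)\n", mult, mult);
        return 0;
}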
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 95f517515235..c36f43c94600 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1581,7 +1581,7 @@ sys_call_table:
1581 data8 sys_sync_file_range // 1300 1581 data8 sys_sync_file_range // 1300
1582 data8 sys_tee 1582 data8 sys_tee
1583 data8 sys_vmsplice 1583 data8 sys_vmsplice
1584 data8 sys_ni_syscall // reserved for move_pages 1584 data8 sys_fallocate
1585 data8 sys_getcpu 1585 data8 sys_getcpu
1586 data8 sys_epoll_pwait // 1305 1586 data8 sys_epoll_pwait // 1305
1587 data8 sys_utimensat 1587 data8 sys_utimensat
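
The entry.S hunk fills the table slot previously reserved for move_pages with sys_fallocate (entry 1303, going by the // 1300 and // 1305 markers around it). A hedged user-space sketch of exercising the new call: posix_fallocate() is assumed to be routed to sys_fallocate by the C library on kernels carrying this change; older C libraries emulate it by writing zeroes instead.

/* Preallocate 1 MiB of disk blocks for a scratch file. */
#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/fallocate-demo", O_CREAT | O_RDWR, 0644);
        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }
        /* returns an error number directly rather than setting errno */
        int err = posix_fallocate(fd, 0, 1 << 20);      /* offset 0, length 1 MiB */
        if (err != 0)
                fprintf(stderr, "posix_fallocate: %d\n", err);
        close(fd);
        return err ? EXIT_FAILURE : EXIT_SUCCESS;
}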
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3f926c2dc708..44841971f077 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -147,12 +147,11 @@ ENTRY(fsys_set_tid_address)
147 FSYS_RETURN 147 FSYS_RETURN
148END(fsys_set_tid_address) 148END(fsys_set_tid_address)
149 149
150/* 150#if IA64_GTOD_LOCK_OFFSET !=0
151 * Ensure that the time interpolator structure is compatible with the asm code 151#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
152 */ 152#endif
153#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \ 153#if IA64_ITC_JITTER_OFFSET !=0
154 || IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4 154#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t
155#error fsys_gettimeofday incompatible with changes to struct time_interpolator
156#endif 155#endif
157#define CLOCK_REALTIME 0 156#define CLOCK_REALTIME 0
158#define CLOCK_MONOTONIC 1 157#define CLOCK_MONOTONIC 1
@@ -179,126 +178,124 @@ ENTRY(fsys_gettimeofday)
179 // r11 = preserved: saved ar.pfs 178 // r11 = preserved: saved ar.pfs
180 // r12 = preserved: memory stack 179 // r12 = preserved: memory stack
181 // r13 = preserved: thread pointer 180 // r13 = preserved: thread pointer
182 // r14 = address of mask / mask 181 // r14 = address of mask / mask value
183 // r15 = preserved: system call number 182 // r15 = preserved: system call number
184 // r16 = preserved: current task pointer 183 // r16 = preserved: current task pointer
185 // r17 = wall to monotonic use 184 // r17 = (not used)
186 // r18 = time_interpolator->offset 185 // r18 = (not used)
187 // r19 = address of wall_to_monotonic 186 // r19 = address of itc_lastcycle
188 // r20 = pointer to struct time_interpolator / pointer to time_interpolator->address 187 // r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence)
189 // r21 = shift factor 188 // r21 = address of mmio_ptr
190 // r22 = address of time interpolator->last_counter 189 // r22 = address of wall_time or monotonic_time
191 // r23 = address of time_interpolator->last_cycle 190 // r23 = address of shift / value
192 // r24 = adress of time_interpolator->offset 191 // r24 = address mult factor / cycle_last value
193 // r25 = last_cycle value 192 // r25 = itc_lastcycle value
194 // r26 = last_counter value 193 // r26 = address clocksource cycle_last
195 // r27 = pointer to xtime 194 // r27 = (not used)
 196 // r28 = sequence number at the beginning of critical section 195 // r28 = sequence number at the beginning of critical section
197 // r29 = address of seqlock 196 // r29 = address of itc_jitter
198 // r30 = time processing flags / memory address 197 // r30 = time processing flags / memory address
199 // r31 = pointer to result 198 // r31 = pointer to result
200 // Predicates 199 // Predicates
201 // p6,p7 short term use 200 // p6,p7 short term use
202 // p8 = timesource ar.itc 201 // p8 = timesource ar.itc
203 // p9 = timesource mmio64 202 // p9 = timesource mmio64
204 // p10 = timesource mmio32 203 // p10 = timesource mmio32 - not used
205 // p11 = timesource not to be handled by asm code 204 // p11 = timesource not to be handled by asm code
206 // p12 = memory time source ( = p9 | p10) 205 // p12 = memory time source ( = p9 | p10) - not used
207 // p13 = do cmpxchg with time_interpolator_last_cycle 206 // p13 = do cmpxchg with itc_lastcycle
208 // p14 = Divide by 1000 207 // p14 = Divide by 1000
209 // p15 = Add monotonic 208 // p15 = Add monotonic
210 // 209 //
211 // Note that instructions are optimized for McKinley. McKinley can process two 210 // Note that instructions are optimized for McKinley. McKinley can
212 // bundles simultaneously and therefore we continuously try to feed the CPU 211 // process two bundles simultaneously and therefore we continuously
213 // two bundles and then a stop. 212 // try to feed the CPU two bundles and then a stop.
214 tnat.nz p6,p0 = r31 // branch deferred since it does not fit into bundle structure 213 //
214 // Additional note that code has changed a lot. Optimization is TBD.
215 // Comments begin with "?" are maybe outdated.
216 tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle
215 mov pr = r30,0xc000 // Set predicates according to function 217 mov pr = r30,0xc000 // Set predicates according to function
216 add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 218 add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
217 movl r20 = time_interpolator 219 movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
218 ;; 220 ;;
219 ld8 r20 = [r20] // get pointer to time_interpolator structure 221 movl r29 = itc_jitter_data // itc_jitter
220 movl r29 = xtime_lock 222 add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time
221 ld4 r2 = [r2] // process work pending flags 223 ld4 r2 = [r2] // process work pending flags
222 movl r27 = xtime 224 ;;
223 ;; // only one bundle here 225(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
224 ld8 r21 = [r20] // first quad with control information 226 add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
227 add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
225 and r2 = TIF_ALLWORK_MASK,r2 228 and r2 = TIF_ALLWORK_MASK,r2
226(p6) br.cond.spnt.few .fail_einval // deferred branch 229(p6) br.cond.spnt.few .fail_einval // ? deferred branch
227 ;; 230 ;;
228 add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20 231 add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
229 extr r3 = r21,32,32 // time_interpolator->nsec_per_cyc
230 extr r8 = r21,0,16 // time_interpolator->source
231 cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled 232 cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
232(p6) br.cond.spnt.many fsys_fallback_syscall 233(p6) br.cond.spnt.many fsys_fallback_syscall
233 ;; 234 ;;
234 cmp.eq p8,p12 = 0,r8 // Check for cpu timer 235 // Begin critical section
235 cmp.eq p9,p0 = 1,r8 // MMIO64 ? 236.time_redo:
236 extr r2 = r21,24,8 // time_interpolator->jitter 237 ld4.acq r28 = [r20] // gtod_lock.sequence, Must take first
237 cmp.eq p10,p0 = 2,r8 // MMIO32 ? 238 ;;
238 cmp.ltu p11,p0 = 2,r8 // function or other clock 239 and r28 = ~1,r28 // And make sequence even to force retry if odd
239(p11) br.cond.spnt.many fsys_fallback_syscall
240 ;; 240 ;;
241 setf.sig f7 = r3 // Setup for scaling of counter 241 ld8 r30 = [r21] // clocksource->mmio_ptr
242(p15) movl r19 = wall_to_monotonic 242 add r24 = IA64_CLKSRC_MULT_OFFSET,r20
243(p12) ld8 r30 = [r10] 243 ld4 r2 = [r29] // itc_jitter value
244 cmp.ne p13,p0 = r2,r0 // need jitter compensation? 244 add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
245 extr r21 = r21,16,8 // shift factor 245 add r14 = IA64_CLKSRC_MASK_OFFSET,r20
246 ;; 246 ;;
247.time_redo: 247 ld4 r3 = [r24] // clocksource mult value
248 .pred.rel.mutex p8,p9,p10 248 ld8 r14 = [r14] // clocksource mask value
249 ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes 249 cmp.eq p8,p9 = 0,r30 // use cpu timer if no mmio_ptr
250 ;; 250 ;;
251 and r28 = ~1,r28 // Make sequence even to force retry if odd 251 setf.sig f7 = r3 // Setup for mult scaling of counter
252(p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
253 ld4 r23 = [r23] // clocksource shift value
254 ld8 r24 = [r26] // get clksrc_cycle_last value
255(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
252 ;; 256 ;;
257 .pred.rel.mutex p8,p9
253(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! 258(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
254 add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20 259(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
255(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues.. 260(p13) ld8 r25 = [r19] // get itc_lastcycle value
256(p10) ld4 r2 = [r30] // readw(ti->address) 261 ;; // ? could be removed by moving the last add upward
257(p13) add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20 262 ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
258 ;; // could be removed by moving the last add upward 263 ;;
259 ld8 r26 = [r22] // time_interpolator->last_counter 264 ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
260(p13) ld8 r25 = [r23] // time interpolator->last_cycle 265(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
261 add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20 266 ;;
262(p15) ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET 267(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
263 ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET 268 sub r10 = r2,r24 // current_cycle - last_cycle
264 add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20 269 ;;
265 ;; 270(p6) sub r10 = r25,r24 // time we got was less than last_cycle
266 ld8 r18 = [r24] // time_interpolator->offset
267 ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET // xtime.tv_nsec
268(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
269 ;;
270 ld8 r14 = [r14] // time_interpolator->mask
271(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
272 sub r10 = r2,r26 // current_counter - last_counter
273 ;;
274(p6) sub r10 = r25,r26 // time we got was less than last_cycle
275(p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg 271(p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg
276 ;; 272 ;;
273(p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv
274 ;;
275(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
276 ;;
277(p7) sub r10 = r3,r24 // then use new last_cycle instead
278 ;;
277 and r10 = r10,r14 // Apply mask 279 and r10 = r10,r14 // Apply mask
278 ;; 280 ;;
279 setf.sig f8 = r10 281 setf.sig f8 = r10
280 nop.i 123 282 nop.i 123
281 ;; 283 ;;
282(p7) cmpxchg8.rel r3 = [r23],r2,ar.ccv 284 // fault check takes 5 cycles and we have spare time
283EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare time 285EX(.fail_efault, probe.w.fault r31, 3)
284 xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) 286 xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
285(p15) add r9 = r9,r17 // Add wall to monotonic.secs to result secs
286 ;; 287 ;;
287(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET 288 // ? simulate tbit.nz.or p7,p0 = r28,0
288(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo
289 // simulate tbit.nz.or p7,p0 = r28,0
290 getf.sig r2 = f8 289 getf.sig r2 = f8
291 mf 290 mf
292 add r8 = r8,r18 // Add time interpolator offset
293 ;; 291 ;;
294 ld4 r10 = [r29] // xtime_lock.sequence 292 ld4 r10 = [r20] // gtod_lock.sequence
295(p15) add r8 = r8, r17 // Add monotonic.nsecs to nsecs 293 shr.u r2 = r2,r23 // shift by factor
296 shr.u r2 = r2,r21 294 ;; // ? overloaded 3 bundles!
297 ;; // overloaded 3 bundles!
298 // End critical section.
299 add r8 = r8,r2 // Add xtime.nsecs 295 add r8 = r8,r2 // Add xtime.nsecs
300 cmp4.ne.or p7,p0 = r28,r10 296 cmp4.ne p7,p0 = r28,r10
301(p7) br.cond.dpnt.few .time_redo // sequence number changed ? 297(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
298 // End critical section.
302 // Now r8=tv->tv_nsec and r9=tv->tv_sec 299 // Now r8=tv->tv_nsec and r9=tv->tv_sec
303 mov r10 = r0 300 mov r10 = r0
304 movl r2 = 1000000000 301 movl r2 = 1000000000
@@ -308,19 +305,19 @@ EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare
308.time_normalize: 305.time_normalize:
309 mov r21 = r8 306 mov r21 = r8
310 cmp.ge p6,p0 = r8,r2 307 cmp.ge p6,p0 = r8,r2
311(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting some time 308(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time
312 ;; 309 ;;
313(p14) setf.sig f8 = r20 310(p14) setf.sig f8 = r20
314(p6) sub r8 = r8,r2 311(p6) sub r8 = r8,r2
315(p6) add r9 = 1,r9 // two nops before the branch. 312(p6) add r9 = 1,r9 // two nops before the branch.
316(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod 313(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod
317(p6) br.cond.dpnt.few .time_normalize 314(p6) br.cond.dpnt.few .time_normalize
318 ;; 315 ;;
 319 // Divided by 8 through shift. Now divide by 125 316 // Divided by 8 through shift. Now divide by 125
320 // The compiler was able to do that with a multiply 317 // The compiler was able to do that with a multiply
321 // and a shift and we do the same 318 // and a shift and we do the same
322EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles 319EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
323(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it... 320(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it
324 ;; 321 ;;
325 mov r8 = r0 322 mov r8 = r0
326(p14) getf.sig r2 = f8 323(p14) getf.sig r2 = f8
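
The rewritten fsys_gettimeofday above is the assembly form of an ordinary lockless seqcount read against the new fsyscall_gtod_data: sample gtod_lock.sequence, copy the time base and read the counter, then retry if the sequence moved. A rough C rendering of that loop follows; it is a sketch only, with the ar.itc path (p8), the itc_jitter cmpxchg on itc_lastcycle (p13/p7), the CLOCK_MONOTONIC variant, and the nanosecond-to-microsecond conversion all elided, as is the ld4.acq/mf ordering the real code depends on.

/* Rough C rendering of the lockless read loop implemented in fsys.S.
 * Field names follow the patch; everything here reads through the
 * clocksource MMIO pointer, where the assembly would read ar.itc when
 * clk_fsys_mmio is NULL. */
struct ts_sketch { long tv_sec; long tv_nsec; };

struct gtod_sketch {
        unsigned int sequence;                  /* gtod_lock.sequence */
        struct ts_sketch wall_time;             /* wall_time snapshot */
        unsigned long clk_mask;
        unsigned int clk_mult, clk_shift;
        volatile unsigned long *clk_fsys_mmio;  /* counter MMIO address */
        unsigned long clk_cycle_last;
};

void gettime_sketch(const struct gtod_sketch *g, struct ts_sketch *ts)
{
        unsigned int seq;
        unsigned long delta;

        do {
                seq = g->sequence & ~1u;        /* odd sequence => writer active, force retry */
                *ts = g->wall_time;
                delta = (*g->clk_fsys_mmio - g->clk_cycle_last) & g->clk_mask;
        } while (seq != g->sequence);           /* sequence moved: .time_redo */

        ts->tv_nsec += (long)((delta * g->clk_mult) >> g->clk_shift);
        while (ts->tv_nsec >= 1000000000L) {    /* .time_normalize */
                ts->tv_nsec -= 1000000000L;
                ts->tv_sec += 1;
        }
}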
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
new file mode 100644
index 000000000000..490dab55fba3
--- /dev/null
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -0,0 +1,23 @@
1/*
2 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
3 * Contributed by Peter Keilty <peter.keilty@hp.com>
4 *
5 * fsyscall gettimeofday data
6 */
7
8struct fsyscall_gtod_data_t {
9 seqlock_t lock;
10 struct timespec wall_time;
11 struct timespec monotonic_time;
12 cycle_t clk_mask;
13 u32 clk_mult;
14 u32 clk_shift;
15 void *clk_fsys_mmio;
16 cycle_t clk_cycle_last;
17} __attribute__ ((aligned (L1_CACHE_BYTES)));
18
19struct itc_jitter_data_t {
20 int itc_jitter;
21 cycle_t itc_lastcycle;
22} __attribute__ ((aligned (L1_CACHE_BYTES)));
23
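
This header defines only the read-side layout; the write side, which the diffstat places in arch/ia64/kernel/time.c but which is not shown in this section, has to refresh these fields under the seqlock so the fsys.S loop can detect a concurrent update. A hedged sketch of what such an update helper could look like follows; the function name and the exact set of fields copied are assumptions, not the patch's code.

/* Hypothetical write-side helper.  Mirrors the fields the assembly reads
 * and updates them all inside the write-side seqlock, so a racing reader
 * sees an odd or changed sequence and retries. */
#include <linux/seqlock.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include "fsyscall_gtod_data.h"

extern struct fsyscall_gtod_data_t fsyscall_gtod_data;

static void example_update_fsyscall_gtod(struct timespec *wall,
                                         struct timespec *mono,
                                         struct clocksource *cs)
{
        unsigned long flags;

        write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

        fsyscall_gtod_data.clk_mask = cs->mask;
        fsyscall_gtod_data.clk_mult = cs->mult;
        fsyscall_gtod_data.clk_shift = cs->shift;
        fsyscall_gtod_data.clk_fsys_mmio = (void *)cs->fsys_mmio; /* __iomem dropped for the sketch */
        fsyscall_gtod_data.clk_cycle_last = cs->cycle_last;
        fsyscall_gtod_data.wall_time = *wall;
        fsyscall_gtod_data.monotonic_time = *mono;

        write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}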
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 37f46527d233..91e6dc1e7baf 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -118,15 +118,25 @@ static DEFINE_SPINLOCK(iosapic_lock);
118 * vector. 118 * vector.
119 */ 119 */
120 120
121struct iosapic_rte_info { 121#define NO_REF_RTE 0
122 struct list_head rte_list; /* node in list of RTEs sharing the 122
123 * same vector */ 123static struct iosapic {
124 char __iomem *addr; /* base address of IOSAPIC */ 124 char __iomem *addr; /* base address of IOSAPIC */
125 unsigned int gsi_base; /* first GSI assigned to this 125 unsigned int gsi_base; /* GSI base */
126 * IOSAPIC */ 126 unsigned short num_rte; /* # of RTEs on this IOSAPIC */
127 int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
128#ifdef CONFIG_NUMA
129 unsigned short node; /* numa node association via pxm */
130#endif
131 spinlock_t lock; /* lock for indirect reg access */
132} iosapic_lists[NR_IOSAPICS];
133
134struct iosapic_rte_info {
135 struct list_head rte_list; /* RTEs sharing the same vector */
127 char rte_index; /* IOSAPIC RTE index */ 136 char rte_index; /* IOSAPIC RTE index */
128 int refcnt; /* reference counter */ 137 int refcnt; /* reference counter */
129 unsigned int flags; /* flags */ 138 unsigned int flags; /* flags */
139 struct iosapic *iosapic;
130} ____cacheline_aligned; 140} ____cacheline_aligned;
131 141
132static struct iosapic_intr_info { 142static struct iosapic_intr_info {
@@ -140,24 +150,23 @@ static struct iosapic_intr_info {
140 unsigned char polarity: 1; /* interrupt polarity 150 unsigned char polarity: 1; /* interrupt polarity
141 * (see iosapic.h) */ 151 * (see iosapic.h) */
142 unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ 152 unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
143} iosapic_intr_info[IA64_NUM_VECTORS]; 153} iosapic_intr_info[NR_IRQS];
144
145static struct iosapic {
146 char __iomem *addr; /* base address of IOSAPIC */
147 unsigned int gsi_base; /* first GSI assigned to this
148 * IOSAPIC */
149 unsigned short num_rte; /* # of RTEs on this IOSAPIC */
150 int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
151#ifdef CONFIG_NUMA
152 unsigned short node; /* numa node association via pxm */
153#endif
154} iosapic_lists[NR_IOSAPICS];
155 154
156static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 155static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
157 156
158static int iosapic_kmalloc_ok; 157static int iosapic_kmalloc_ok;
159static LIST_HEAD(free_rte_list); 158static LIST_HEAD(free_rte_list);
160 159
160static inline void
161iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
162{
163 unsigned long flags;
164
165 spin_lock_irqsave(&iosapic->lock, flags);
166 __iosapic_write(iosapic->addr, reg, val);
167 spin_unlock_irqrestore(&iosapic->lock, flags);
168}
169
161/* 170/*
162 * Find an IOSAPIC associated with a GSI 171 * Find an IOSAPIC associated with a GSI
163 */ 172 */
@@ -175,17 +184,18 @@ find_iosapic (unsigned int gsi)
175 return -1; 184 return -1;
176} 185}
177 186
178static inline int 187static inline int __gsi_to_irq(unsigned int gsi)
179_gsi_to_vector (unsigned int gsi)
180{ 188{
189 int irq;
181 struct iosapic_intr_info *info; 190 struct iosapic_intr_info *info;
182 struct iosapic_rte_info *rte; 191 struct iosapic_rte_info *rte;
183 192
184 for (info = iosapic_intr_info; info < 193 for (irq = 0; irq < NR_IRQS; irq++) {
185 iosapic_intr_info + IA64_NUM_VECTORS; ++info) 194 info = &iosapic_intr_info[irq];
186 list_for_each_entry(rte, &info->rtes, rte_list) 195 list_for_each_entry(rte, &info->rtes, rte_list)
187 if (rte->gsi_base + rte->rte_index == gsi) 196 if (rte->iosapic->gsi_base + rte->rte_index == gsi)
188 return info - iosapic_intr_info; 197 return irq;
198 }
189 return -1; 199 return -1;
190} 200}
191 201
@@ -196,7 +206,10 @@ _gsi_to_vector (unsigned int gsi)
196inline int 206inline int
197gsi_to_vector (unsigned int gsi) 207gsi_to_vector (unsigned int gsi)
198{ 208{
199 return _gsi_to_vector(gsi); 209 int irq = __gsi_to_irq(gsi);
210 if (check_irq_used(irq) < 0)
211 return -1;
212 return irq_to_vector(irq);
200} 213}
201 214
202int 215int
@@ -204,66 +217,48 @@ gsi_to_irq (unsigned int gsi)
204{ 217{
205 unsigned long flags; 218 unsigned long flags;
206 int irq; 219 int irq;
207 /* 220
208 * XXX fix me: this assumes an identity mapping between IA-64 vector
209 * and Linux irq numbers...
210 */
211 spin_lock_irqsave(&iosapic_lock, flags); 221 spin_lock_irqsave(&iosapic_lock, flags);
212 { 222 irq = __gsi_to_irq(gsi);
213 irq = _gsi_to_vector(gsi);
214 }
215 spin_unlock_irqrestore(&iosapic_lock, flags); 223 spin_unlock_irqrestore(&iosapic_lock, flags);
216
217 return irq; 224 return irq;
218} 225}
219 226
220static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, 227static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
221 unsigned int vec)
222{ 228{
223 struct iosapic_rte_info *rte; 229 struct iosapic_rte_info *rte;
224 230
225 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) 231 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
226 if (rte->gsi_base + rte->rte_index == gsi) 232 if (rte->iosapic->gsi_base + rte->rte_index == gsi)
227 return rte; 233 return rte;
228 return NULL; 234 return NULL;
229} 235}
230 236
231static void 237static void
232set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) 238set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
233{ 239{
234 unsigned long pol, trigger, dmode; 240 unsigned long pol, trigger, dmode;
235 u32 low32, high32; 241 u32 low32, high32;
236 char __iomem *addr;
237 int rte_index; 242 int rte_index;
238 char redir; 243 char redir;
239 struct iosapic_rte_info *rte; 244 struct iosapic_rte_info *rte;
245 ia64_vector vector = irq_to_vector(irq);
240 246
241 DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); 247 DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
242 248
243 rte = gsi_vector_to_rte(gsi, vector); 249 rte = find_rte(irq, gsi);
244 if (!rte) 250 if (!rte)
245 return; /* not an IOSAPIC interrupt */ 251 return; /* not an IOSAPIC interrupt */
246 252
247 rte_index = rte->rte_index; 253 rte_index = rte->rte_index;
248 addr = rte->addr; 254 pol = iosapic_intr_info[irq].polarity;
249 pol = iosapic_intr_info[vector].polarity; 255 trigger = iosapic_intr_info[irq].trigger;
250 trigger = iosapic_intr_info[vector].trigger; 256 dmode = iosapic_intr_info[irq].dmode;
251 dmode = iosapic_intr_info[vector].dmode;
252 257
253 redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; 258 redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
254 259
255#ifdef CONFIG_SMP 260#ifdef CONFIG_SMP
256 { 261 set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
257 unsigned int irq;
258
259 for (irq = 0; irq < NR_IRQS; ++irq)
260 if (irq_to_vector(irq) == vector) {
261 set_irq_affinity_info(irq,
262 (int)(dest & 0xffff),
263 redir);
264 break;
265 }
266 }
267#endif 262#endif
268 263
269 low32 = ((pol << IOSAPIC_POLARITY_SHIFT) | 264 low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
@@ -275,10 +270,10 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
275 /* dest contains both id and eid */ 270 /* dest contains both id and eid */
276 high32 = (dest << IOSAPIC_DEST_SHIFT); 271 high32 = (dest << IOSAPIC_DEST_SHIFT);
277 272
278 iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); 273 iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
279 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); 274 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
280 iosapic_intr_info[vector].low32 = low32; 275 iosapic_intr_info[irq].low32 = low32;
281 iosapic_intr_info[vector].dest = dest; 276 iosapic_intr_info[irq].dest = dest;
282} 277}
283 278
284static void 279static void
@@ -294,15 +289,18 @@ kexec_disable_iosapic(void)
294{ 289{
295 struct iosapic_intr_info *info; 290 struct iosapic_intr_info *info;
296 struct iosapic_rte_info *rte; 291 struct iosapic_rte_info *rte;
297 u8 vec = 0; 292 ia64_vector vec;
298 for (info = iosapic_intr_info; info < 293 int irq;
299 iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) { 294
295 for (irq = 0; irq < NR_IRQS; irq++) {
296 info = &iosapic_intr_info[irq];
297 vec = irq_to_vector(irq);
300 list_for_each_entry(rte, &info->rtes, 298 list_for_each_entry(rte, &info->rtes,
301 rte_list) { 299 rte_list) {
302 iosapic_write(rte->addr, 300 iosapic_write(rte->iosapic,
303 IOSAPIC_RTE_LOW(rte->rte_index), 301 IOSAPIC_RTE_LOW(rte->rte_index),
304 IOSAPIC_MASK|vec); 302 IOSAPIC_MASK|vec);
305 iosapic_eoi(rte->addr, vec); 303 iosapic_eoi(rte->iosapic->addr, vec);
306 } 304 }
307 } 305 }
308} 306}
@@ -311,54 +309,36 @@ kexec_disable_iosapic(void)
311static void 309static void
312mask_irq (unsigned int irq) 310mask_irq (unsigned int irq)
313{ 311{
314 unsigned long flags;
315 char __iomem *addr;
316 u32 low32; 312 u32 low32;
317 int rte_index; 313 int rte_index;
318 ia64_vector vec = irq_to_vector(irq);
319 struct iosapic_rte_info *rte; 314 struct iosapic_rte_info *rte;
320 315
321 if (list_empty(&iosapic_intr_info[vec].rtes)) 316 if (list_empty(&iosapic_intr_info[irq].rtes))
322 return; /* not an IOSAPIC interrupt! */ 317 return; /* not an IOSAPIC interrupt! */
323 318
324 spin_lock_irqsave(&iosapic_lock, flags); 319 /* set only the mask bit */
325 { 320 low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
326 /* set only the mask bit */ 321 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
327 low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; 322 rte_index = rte->rte_index;
328 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 323 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
329 rte_list) {
330 addr = rte->addr;
331 rte_index = rte->rte_index;
332 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
333 }
334 } 324 }
335 spin_unlock_irqrestore(&iosapic_lock, flags);
336} 325}
337 326
338static void 327static void
339unmask_irq (unsigned int irq) 328unmask_irq (unsigned int irq)
340{ 329{
341 unsigned long flags;
342 char __iomem *addr;
343 u32 low32; 330 u32 low32;
344 int rte_index; 331 int rte_index;
345 ia64_vector vec = irq_to_vector(irq);
346 struct iosapic_rte_info *rte; 332 struct iosapic_rte_info *rte;
347 333
348 if (list_empty(&iosapic_intr_info[vec].rtes)) 334 if (list_empty(&iosapic_intr_info[irq].rtes))
349 return; /* not an IOSAPIC interrupt! */ 335 return; /* not an IOSAPIC interrupt! */
350 336
351 spin_lock_irqsave(&iosapic_lock, flags); 337 low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
352 { 338 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
353 low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; 339 rte_index = rte->rte_index;
354 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 340 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
355 rte_list) {
356 addr = rte->addr;
357 rte_index = rte->rte_index;
358 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
359 }
360 } 341 }
361 spin_unlock_irqrestore(&iosapic_lock, flags);
362} 342}
363 343
364 344
@@ -366,23 +346,24 @@ static void
366iosapic_set_affinity (unsigned int irq, cpumask_t mask) 346iosapic_set_affinity (unsigned int irq, cpumask_t mask)
367{ 347{
368#ifdef CONFIG_SMP 348#ifdef CONFIG_SMP
369 unsigned long flags;
370 u32 high32, low32; 349 u32 high32, low32;
371 int dest, rte_index; 350 int dest, rte_index;
372 char __iomem *addr;
373 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 351 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
374 ia64_vector vec;
375 struct iosapic_rte_info *rte; 352 struct iosapic_rte_info *rte;
353 struct iosapic *iosapic;
376 354
377 irq &= (~IA64_IRQ_REDIRECTED); 355 irq &= (~IA64_IRQ_REDIRECTED);
378 vec = irq_to_vector(irq);
379 356
357 cpus_and(mask, mask, cpu_online_map);
380 if (cpus_empty(mask)) 358 if (cpus_empty(mask))
381 return; 359 return;
382 360
361 if (reassign_irq_vector(irq, first_cpu(mask)))
362 return;
363
383 dest = cpu_physical_id(first_cpu(mask)); 364 dest = cpu_physical_id(first_cpu(mask));
384 365
385 if (list_empty(&iosapic_intr_info[vec].rtes)) 366 if (list_empty(&iosapic_intr_info[irq].rtes))
386 return; /* not an IOSAPIC interrupt */ 367 return; /* not an IOSAPIC interrupt */
387 368
388 set_irq_affinity_info(irq, dest, redir); 369 set_irq_affinity_info(irq, dest, redir);
@@ -390,31 +371,24 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
390 /* dest contains both id and eid */ 371 /* dest contains both id and eid */
391 high32 = dest << IOSAPIC_DEST_SHIFT; 372 high32 = dest << IOSAPIC_DEST_SHIFT;
392 373
393 spin_lock_irqsave(&iosapic_lock, flags); 374 low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
394 { 375 if (redir)
395 low32 = iosapic_intr_info[vec].low32 & 376 /* change delivery mode to lowest priority */
396 ~(7 << IOSAPIC_DELIVERY_SHIFT); 377 low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
397 378 else
398 if (redir) 379 /* change delivery mode to fixed */
399 /* change delivery mode to lowest priority */ 380 low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
400 low32 |= (IOSAPIC_LOWEST_PRIORITY << 381 low32 &= IOSAPIC_VECTOR_MASK;
401 IOSAPIC_DELIVERY_SHIFT); 382 low32 |= irq_to_vector(irq);
402 else 383
403 /* change delivery mode to fixed */ 384 iosapic_intr_info[irq].low32 = low32;
404 low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); 385 iosapic_intr_info[irq].dest = dest;
405 386 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
406 iosapic_intr_info[vec].low32 = low32; 387 iosapic = rte->iosapic;
407 iosapic_intr_info[vec].dest = dest; 388 rte_index = rte->rte_index;
408 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 389 iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
409 rte_list) { 390 iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
410 addr = rte->addr;
411 rte_index = rte->rte_index;
412 iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
413 high32);
414 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
415 }
416 } 391 }
417 spin_unlock_irqrestore(&iosapic_lock, flags);
418#endif 392#endif
419} 393}
420 394
@@ -434,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
434{ 408{
435 ia64_vector vec = irq_to_vector(irq); 409 ia64_vector vec = irq_to_vector(irq);
436 struct iosapic_rte_info *rte; 410 struct iosapic_rte_info *rte;
411 int do_unmask_irq = 0;
437 412
438 move_native_irq(irq); 413 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
439 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) 414 do_unmask_irq = 1;
440 iosapic_eoi(rte->addr, vec); 415 mask_irq(irq);
416 }
417
418 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
419 iosapic_eoi(rte->iosapic->addr, vec);
420
421 if (unlikely(do_unmask_irq)) {
422 move_masked_irq(irq);
423 unmask_irq(irq);
424 }
441} 425}
442 426
443#define iosapic_shutdown_level_irq mask_irq 427#define iosapic_shutdown_level_irq mask_irq
@@ -519,13 +503,12 @@ iosapic_version (char __iomem *addr)
519 * unsigned int reserved2 : 8; 503 * unsigned int reserved2 : 8;
520 * } 504 * }
521 */ 505 */
522 return iosapic_read(addr, IOSAPIC_VERSION); 506 return __iosapic_read(addr, IOSAPIC_VERSION);
523} 507}
524 508
525static int iosapic_find_sharable_vector (unsigned long trigger, 509static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
526 unsigned long pol)
527{ 510{
528 int i, vector = -1, min_count = -1; 511 int i, irq = -ENOSPC, min_count = -1;
529 struct iosapic_intr_info *info; 512 struct iosapic_intr_info *info;
530 513
531 /* 514 /*
@@ -533,21 +516,21 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
533 * supported yet 516 * supported yet
534 */ 517 */
535 if (trigger == IOSAPIC_EDGE) 518 if (trigger == IOSAPIC_EDGE)
536 return -1; 519 return -EINVAL;
537 520
 538 for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) { 521 for (i = 0; i < NR_IRQS; i++) {
539 info = &iosapic_intr_info[i]; 522 info = &iosapic_intr_info[i];
540 if (info->trigger == trigger && info->polarity == pol && 523 if (info->trigger == trigger && info->polarity == pol &&
541 (info->dmode == IOSAPIC_FIXED || info->dmode == 524 (info->dmode == IOSAPIC_FIXED ||
542 IOSAPIC_LOWEST_PRIORITY)) { 525 info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
526 can_request_irq(i, IRQF_SHARED)) {
543 if (min_count == -1 || info->count < min_count) { 527 if (min_count == -1 || info->count < min_count) {
544 vector = i; 528 irq = i;
545 min_count = info->count; 529 min_count = info->count;
546 } 530 }
547 } 531 }
548 } 532 }
549 533 return irq;
550 return vector;
551} 534}
552 535
553/* 536/*
@@ -555,25 +538,25 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
555 * assign a new vector for the other and make the vector available 538 * assign a new vector for the other and make the vector available
556 */ 539 */
557static void __init 540static void __init
558iosapic_reassign_vector (int vector) 541iosapic_reassign_vector (int irq)
559{ 542{
560 int new_vector; 543 int new_irq;
561 544
562 if (!list_empty(&iosapic_intr_info[vector].rtes)) { 545 if (!list_empty(&iosapic_intr_info[irq].rtes)) {
563 new_vector = assign_irq_vector(AUTO_ASSIGN); 546 new_irq = create_irq();
564 if (new_vector < 0) 547 if (new_irq < 0)
565 panic("%s: out of interrupt vectors!\n", __FUNCTION__); 548 panic("%s: out of interrupt vectors!\n", __FUNCTION__);
566 printk(KERN_INFO "Reassigning vector %d to %d\n", 549 printk(KERN_INFO "Reassigning vector %d to %d\n",
567 vector, new_vector); 550 irq_to_vector(irq), irq_to_vector(new_irq));
568 memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], 551 memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
569 sizeof(struct iosapic_intr_info)); 552 sizeof(struct iosapic_intr_info));
570 INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes); 553 INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
571 list_move(iosapic_intr_info[vector].rtes.next, 554 list_move(iosapic_intr_info[irq].rtes.next,
572 &iosapic_intr_info[new_vector].rtes); 555 &iosapic_intr_info[new_irq].rtes);
573 memset(&iosapic_intr_info[vector], 0, 556 memset(&iosapic_intr_info[irq], 0,
574 sizeof(struct iosapic_intr_info)); 557 sizeof(struct iosapic_intr_info));
575 iosapic_intr_info[vector].low32 = IOSAPIC_MASK; 558 iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
576 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 559 INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
577 } 560 }
578} 561}
579 562
@@ -610,29 +593,18 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
610 return rte; 593 return rte;
611} 594}
612 595
613static void iosapic_free_rte (struct iosapic_rte_info *rte) 596static inline int irq_is_shared (int irq)
614{ 597{
615 if (rte->flags & RTE_PREALLOCATED) 598 return (iosapic_intr_info[irq].count > 1);
616 list_add_tail(&rte->rte_list, &free_rte_list);
617 else
618 kfree(rte);
619}
620
621static inline int vector_is_shared (int vector)
622{
623 return (iosapic_intr_info[vector].count > 1);
624} 599}
625 600
626static int 601static int
627register_intr (unsigned int gsi, int vector, unsigned char delivery, 602register_intr (unsigned int gsi, int irq, unsigned char delivery,
628 unsigned long polarity, unsigned long trigger) 603 unsigned long polarity, unsigned long trigger)
629{ 604{
630 irq_desc_t *idesc; 605 irq_desc_t *idesc;
631 struct hw_interrupt_type *irq_type; 606 struct hw_interrupt_type *irq_type;
632 int rte_index;
633 int index; 607 int index;
634 unsigned long gsi_base;
635 void __iomem *iosapic_address;
636 struct iosapic_rte_info *rte; 608 struct iosapic_rte_info *rte;
637 609
638 index = find_iosapic(gsi); 610 index = find_iosapic(gsi);
@@ -642,10 +614,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
642 return -ENODEV; 614 return -ENODEV;
643 } 615 }
644 616
645 iosapic_address = iosapic_lists[index].addr; 617 rte = find_rte(irq, gsi);
646 gsi_base = iosapic_lists[index].gsi_base;
647
648 rte = gsi_vector_to_rte(gsi, vector);
649 if (!rte) { 618 if (!rte) {
650 rte = iosapic_alloc_rte(); 619 rte = iosapic_alloc_rte();
651 if (!rte) { 620 if (!rte) {
@@ -654,40 +623,42 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
654 return -ENOMEM; 623 return -ENOMEM;
655 } 624 }
656 625
657 rte_index = gsi - gsi_base; 626 rte->iosapic = &iosapic_lists[index];
658 rte->rte_index = rte_index; 627 rte->rte_index = gsi - rte->iosapic->gsi_base;
659 rte->addr = iosapic_address;
660 rte->gsi_base = gsi_base;
661 rte->refcnt++; 628 rte->refcnt++;
662 list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes); 629 list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
663 iosapic_intr_info[vector].count++; 630 iosapic_intr_info[irq].count++;
664 iosapic_lists[index].rtes_inuse++; 631 iosapic_lists[index].rtes_inuse++;
665 } 632 }
666 else if (vector_is_shared(vector)) { 633 else if (rte->refcnt == NO_REF_RTE) {
667 struct iosapic_intr_info *info = &iosapic_intr_info[vector]; 634 struct iosapic_intr_info *info = &iosapic_intr_info[irq];
668 if (info->trigger != trigger || info->polarity != polarity) { 635 if (info->count > 0 &&
636 (info->trigger != trigger || info->polarity != polarity)){
669 printk (KERN_WARNING 637 printk (KERN_WARNING
670 "%s: cannot override the interrupt\n", 638 "%s: cannot override the interrupt\n",
671 __FUNCTION__); 639 __FUNCTION__);
672 return -EINVAL; 640 return -EINVAL;
673 } 641 }
642 rte->refcnt++;
643 iosapic_intr_info[irq].count++;
644 iosapic_lists[index].rtes_inuse++;
674 } 645 }
675 646
676 iosapic_intr_info[vector].polarity = polarity; 647 iosapic_intr_info[irq].polarity = polarity;
677 iosapic_intr_info[vector].dmode = delivery; 648 iosapic_intr_info[irq].dmode = delivery;
678 iosapic_intr_info[vector].trigger = trigger; 649 iosapic_intr_info[irq].trigger = trigger;
679 650
680 if (trigger == IOSAPIC_EDGE) 651 if (trigger == IOSAPIC_EDGE)
681 irq_type = &irq_type_iosapic_edge; 652 irq_type = &irq_type_iosapic_edge;
682 else 653 else
683 irq_type = &irq_type_iosapic_level; 654 irq_type = &irq_type_iosapic_level;
684 655
685 idesc = irq_desc + vector; 656 idesc = irq_desc + irq;
686 if (idesc->chip != irq_type) { 657 if (idesc->chip != irq_type) {
687 if (idesc->chip != &no_irq_type) 658 if (idesc->chip != &no_irq_type)
688 printk(KERN_WARNING 659 printk(KERN_WARNING
689 "%s: changing vector %d from %s to %s\n", 660 "%s: changing vector %d from %s to %s\n",
690 __FUNCTION__, vector, 661 __FUNCTION__, irq_to_vector(irq),
691 idesc->chip->name, irq_type->name); 662 idesc->chip->name, irq_type->name);
692 idesc->chip = irq_type; 663 idesc->chip = irq_type;
693 } 664 }
@@ -695,18 +666,19 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
695} 666}
696 667
697static unsigned int 668static unsigned int
698get_target_cpu (unsigned int gsi, int vector) 669get_target_cpu (unsigned int gsi, int irq)
699{ 670{
700#ifdef CONFIG_SMP 671#ifdef CONFIG_SMP
701 static int cpu = -1; 672 static int cpu = -1;
702 extern int cpe_vector; 673 extern int cpe_vector;
674 cpumask_t domain = irq_to_domain(irq);
703 675
704 /* 676 /*
705 * In case of vector shared by multiple RTEs, all RTEs that 677 * In case of vector shared by multiple RTEs, all RTEs that
706 * share the vector need to use the same destination CPU. 678 * share the vector need to use the same destination CPU.
707 */ 679 */
708 if (!list_empty(&iosapic_intr_info[vector].rtes)) 680 if (!list_empty(&iosapic_intr_info[irq].rtes))
709 return iosapic_intr_info[vector].dest; 681 return iosapic_intr_info[irq].dest;
710 682
711 /* 683 /*
712 * If the platform supports redirection via XTP, let it 684 * If the platform supports redirection via XTP, let it
@@ -723,7 +695,7 @@ get_target_cpu (unsigned int gsi, int vector)
723 return cpu_physical_id(smp_processor_id()); 695 return cpu_physical_id(smp_processor_id());
724 696
725#ifdef CONFIG_ACPI 697#ifdef CONFIG_ACPI
726 if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR) 698 if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
727 return get_cpei_target_cpu(); 699 return get_cpei_target_cpu();
728#endif 700#endif
729 701
@@ -738,7 +710,7 @@ get_target_cpu (unsigned int gsi, int vector)
738 goto skip_numa_setup; 710 goto skip_numa_setup;
739 711
740 cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node); 712 cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
741 713 cpus_and(cpu_mask, cpu_mask, domain);
742 for_each_cpu_mask(numa_cpu, cpu_mask) { 714 for_each_cpu_mask(numa_cpu, cpu_mask) {
743 if (!cpu_online(numa_cpu)) 715 if (!cpu_online(numa_cpu))
744 cpu_clear(numa_cpu, cpu_mask); 716 cpu_clear(numa_cpu, cpu_mask);
@@ -749,8 +721,8 @@ get_target_cpu (unsigned int gsi, int vector)
749 if (!num_cpus) 721 if (!num_cpus)
750 goto skip_numa_setup; 722 goto skip_numa_setup;
751 723
752 /* Use vector assignment to distribute across cpus in node */ 724 /* Use irq assignment to distribute across cpus in node */
753 cpu_index = vector % num_cpus; 725 cpu_index = irq % num_cpus;
754 726
755 for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) 727 for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
756 numa_cpu = next_cpu(numa_cpu, cpu_mask); 728 numa_cpu = next_cpu(numa_cpu, cpu_mask);
@@ -768,7 +740,7 @@ skip_numa_setup:
768 do { 740 do {
769 if (++cpu >= NR_CPUS) 741 if (++cpu >= NR_CPUS)
770 cpu = 0; 742 cpu = 0;
771 } while (!cpu_online(cpu)); 743 } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
772 744
773 return cpu_physical_id(cpu); 745 return cpu_physical_id(cpu);
774#else /* CONFIG_SMP */ 746#else /* CONFIG_SMP */
@@ -785,84 +757,72 @@ int
785iosapic_register_intr (unsigned int gsi, 757iosapic_register_intr (unsigned int gsi,
786 unsigned long polarity, unsigned long trigger) 758 unsigned long polarity, unsigned long trigger)
787{ 759{
788 int vector, mask = 1, err; 760 int irq, mask = 1, err;
789 unsigned int dest; 761 unsigned int dest;
790 unsigned long flags; 762 unsigned long flags;
791 struct iosapic_rte_info *rte; 763 struct iosapic_rte_info *rte;
792 u32 low32; 764 u32 low32;
793again: 765
794 /* 766 /*
795 * If this GSI has already been registered (i.e., it's a 767 * If this GSI has already been registered (i.e., it's a
796 * shared interrupt, or we lost a race to register it), 768 * shared interrupt, or we lost a race to register it),
797 * don't touch the RTE. 769 * don't touch the RTE.
798 */ 770 */
799 spin_lock_irqsave(&iosapic_lock, flags); 771 spin_lock_irqsave(&iosapic_lock, flags);
800 { 772 irq = __gsi_to_irq(gsi);
801 vector = gsi_to_vector(gsi); 773 if (irq > 0) {
802 if (vector > 0) { 774 rte = find_rte(irq, gsi);
803 rte = gsi_vector_to_rte(gsi, vector); 775 if(iosapic_intr_info[irq].count == 0) {
776 assign_irq_vector(irq);
777 dynamic_irq_init(irq);
778 } else if (rte->refcnt != NO_REF_RTE) {
804 rte->refcnt++; 779 rte->refcnt++;
805 spin_unlock_irqrestore(&iosapic_lock, flags); 780 goto unlock_iosapic_lock;
806 return vector;
807 } 781 }
808 } 782 } else
809 spin_unlock_irqrestore(&iosapic_lock, flags); 783 irq = create_irq();
810 784
811 /* If vector is running out, we try to find a sharable vector */ 785 /* If vector is running out, we try to find a sharable vector */
812 vector = assign_irq_vector(AUTO_ASSIGN); 786 if (irq < 0) {
813 if (vector < 0) { 787 irq = iosapic_find_sharable_irq(trigger, polarity);
814 vector = iosapic_find_sharable_vector(trigger, polarity); 788 if (irq < 0)
815 if (vector < 0) 789 goto unlock_iosapic_lock;
816 return -ENOSPC;
817 } 790 }
818 791
819 spin_lock_irqsave(&irq_desc[vector].lock, flags); 792 spin_lock(&irq_desc[irq].lock);
820 spin_lock(&iosapic_lock); 793 dest = get_target_cpu(gsi, irq);
821 { 794 err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
822 if (gsi_to_vector(gsi) > 0) { 795 polarity, trigger);
823 if (list_empty(&iosapic_intr_info[vector].rtes)) 796 if (err < 0) {
824 free_irq_vector(vector); 797 irq = err;
825 spin_unlock(&iosapic_lock); 798 goto unlock_all;
826 spin_unlock_irqrestore(&irq_desc[vector].lock,
827 flags);
828 goto again;
829 }
830
831 dest = get_target_cpu(gsi, vector);
832 err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
833 polarity, trigger);
834 if (err < 0) {
835 spin_unlock(&iosapic_lock);
836 spin_unlock_irqrestore(&irq_desc[vector].lock,
837 flags);
838 return err;
839 }
840
841 /*
842 * If the vector is shared and already unmasked for
843 * other interrupt sources, don't mask it.
844 */
845 low32 = iosapic_intr_info[vector].low32;
846 if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
847 mask = 0;
848 set_rte(gsi, vector, dest, mask);
849 } 799 }
850 spin_unlock(&iosapic_lock); 800
851 spin_unlock_irqrestore(&irq_desc[vector].lock, flags); 801 /*
802 * If the vector is shared and already unmasked for other
803 * interrupt sources, don't mask it.
804 */
805 low32 = iosapic_intr_info[irq].low32;
806 if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
807 mask = 0;
808 set_rte(gsi, irq, dest, mask);
852 809
853 printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", 810 printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
854 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), 811 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
855 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 812 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
856 cpu_logical_id(dest), dest, vector); 813 cpu_logical_id(dest), dest, irq_to_vector(irq));
857 814 unlock_all:
858 return vector; 815 spin_unlock(&irq_desc[irq].lock);
816 unlock_iosapic_lock:
817 spin_unlock_irqrestore(&iosapic_lock, flags);
818 return irq;
859} 819}
860 820
861void 821void
862iosapic_unregister_intr (unsigned int gsi) 822iosapic_unregister_intr (unsigned int gsi)
863{ 823{
864 unsigned long flags; 824 unsigned long flags;
865 int irq, vector, index; 825 int irq, index;
866 irq_desc_t *idesc; 826 irq_desc_t *idesc;
867 u32 low32; 827 u32 low32;
868 unsigned long trigger, polarity; 828 unsigned long trigger, polarity;
@@ -881,78 +841,56 @@ iosapic_unregister_intr (unsigned int gsi)
881 WARN_ON(1); 841 WARN_ON(1);
882 return; 842 return;
883 } 843 }
884 vector = irq_to_vector(irq);
885 844
886 idesc = irq_desc + irq; 845 spin_lock_irqsave(&iosapic_lock, flags);
887 spin_lock_irqsave(&idesc->lock, flags); 846 if ((rte = find_rte(irq, gsi)) == NULL) {
888 spin_lock(&iosapic_lock); 847 printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
889 { 848 gsi);
890 if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) { 849 WARN_ON(1);
891 printk(KERN_ERR 850 goto out;
892 "iosapic_unregister_intr(%u) unbalanced\n", 851 }
893 gsi);
894 WARN_ON(1);
895 goto out;
896 }
897 852
898 if (--rte->refcnt > 0) 853 if (--rte->refcnt > 0)
899 goto out; 854 goto out;
900 855
901 /* Mask the interrupt */ 856 idesc = irq_desc + irq;
902 low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK; 857 rte->refcnt = NO_REF_RTE;
903 iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
904 low32);
905 858
906 /* Remove the rte entry from the list */ 859 /* Mask the interrupt */
907 list_del(&rte->rte_list); 860 low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
908 iosapic_intr_info[vector].count--; 861 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
909 iosapic_free_rte(rte);
910 index = find_iosapic(gsi);
911 iosapic_lists[index].rtes_inuse--;
912 WARN_ON(iosapic_lists[index].rtes_inuse < 0);
913
914 trigger = iosapic_intr_info[vector].trigger;
915 polarity = iosapic_intr_info[vector].polarity;
916 dest = iosapic_intr_info[vector].dest;
917 printk(KERN_INFO
918 "GSI %u (%s, %s) -> CPU %d (0x%04x)"
919 " vector %d unregistered\n",
920 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
921 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
922 cpu_logical_id(dest), dest, vector);
923 862
924 if (list_empty(&iosapic_intr_info[vector].rtes)) { 863 iosapic_intr_info[irq].count--;
925 /* Sanity check */ 864 index = find_iosapic(gsi);
926 BUG_ON(iosapic_intr_info[vector].count); 865 iosapic_lists[index].rtes_inuse--;
866 WARN_ON(iosapic_lists[index].rtes_inuse < 0);
927 867
928 /* Clear the interrupt controller descriptor */ 868 trigger = iosapic_intr_info[irq].trigger;
929 idesc->chip = &no_irq_type; 869 polarity = iosapic_intr_info[irq].polarity;
870 dest = iosapic_intr_info[irq].dest;
871 printk(KERN_INFO
872 "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
873 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
874 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
875 cpu_logical_id(dest), dest, irq_to_vector(irq));
930 876
877 if (iosapic_intr_info[irq].count == 0) {
931#ifdef CONFIG_SMP 878#ifdef CONFIG_SMP
932 /* Clear affinity */ 879 /* Clear affinity */
933 cpus_setall(idesc->affinity); 880 cpus_setall(idesc->affinity);
934#endif 881#endif
935 882 /* Clear the interrupt information */
936 /* Clear the interrupt information */ 883 iosapic_intr_info[irq].dest = 0;
937 memset(&iosapic_intr_info[vector], 0, 884 iosapic_intr_info[irq].dmode = 0;
938 sizeof(struct iosapic_intr_info)); 885 iosapic_intr_info[irq].polarity = 0;
939 iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; 886 iosapic_intr_info[irq].trigger = 0;
940 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 887 iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
941 888
942 if (idesc->action) { 889 /* Destroy and reserve IRQ */
943 printk(KERN_ERR 890 destroy_and_reserve_irq(irq);
944 "interrupt handlers still exist on"
945 "IRQ %u\n", irq);
946 WARN_ON(1);
947 }
948
949 /* Free the interrupt vector */
950 free_irq_vector(vector);
951 }
952 } 891 }
953 out: 892 out:
954 spin_unlock(&iosapic_lock); 893 spin_unlock_irqrestore(&iosapic_lock, flags);
955 spin_unlock_irqrestore(&idesc->lock, flags);
956} 894}
957 895
958/* 896/*
@@ -965,27 +903,30 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
965{ 903{
966 static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"}; 904 static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
967 unsigned char delivery; 905 unsigned char delivery;
968 int vector, mask = 0; 906 int irq, vector, mask = 0;
969 unsigned int dest = ((id << 8) | eid) & 0xffff; 907 unsigned int dest = ((id << 8) | eid) & 0xffff;
970 908
971 switch (int_type) { 909 switch (int_type) {
972 case ACPI_INTERRUPT_PMI: 910 case ACPI_INTERRUPT_PMI:
973 vector = iosapic_vector; 911 irq = vector = iosapic_vector;
912 bind_irq_vector(irq, vector, CPU_MASK_ALL);
974 /* 913 /*
975 * since PMI vector is alloc'd by FW(ACPI) not by kernel, 914 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
976 * we need to make sure the vector is available 915 * we need to make sure the vector is available
977 */ 916 */
978 iosapic_reassign_vector(vector); 917 iosapic_reassign_vector(irq);
979 delivery = IOSAPIC_PMI; 918 delivery = IOSAPIC_PMI;
980 break; 919 break;
981 case ACPI_INTERRUPT_INIT: 920 case ACPI_INTERRUPT_INIT:
982 vector = assign_irq_vector(AUTO_ASSIGN); 921 irq = create_irq();
983 if (vector < 0) 922 if (irq < 0)
984 panic("%s: out of interrupt vectors!\n", __FUNCTION__); 923 panic("%s: out of interrupt vectors!\n", __FUNCTION__);
924 vector = irq_to_vector(irq);
985 delivery = IOSAPIC_INIT; 925 delivery = IOSAPIC_INIT;
986 break; 926 break;
987 case ACPI_INTERRUPT_CPEI: 927 case ACPI_INTERRUPT_CPEI:
988 vector = IA64_CPE_VECTOR; 928 irq = vector = IA64_CPE_VECTOR;
929 BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
989 delivery = IOSAPIC_LOWEST_PRIORITY; 930 delivery = IOSAPIC_LOWEST_PRIORITY;
990 mask = 1; 931 mask = 1;
991 break; 932 break;
@@ -995,7 +936,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
995 return -1; 936 return -1;
996 } 937 }
997 938
998 register_intr(gsi, vector, delivery, polarity, trigger); 939 register_intr(gsi, irq, delivery, polarity, trigger);
999 940
1000 printk(KERN_INFO 941 printk(KERN_INFO
1001 "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" 942 "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
@@ -1005,7 +946,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
1005 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 946 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
1006 cpu_logical_id(dest), dest, vector); 947 cpu_logical_id(dest), dest, vector);
1007 948
1008 set_rte(gsi, vector, dest, mask); 949 set_rte(gsi, irq, dest, mask);
1009 return vector; 950 return vector;
1010} 951}
1011 952
@@ -1017,30 +958,32 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
1017 unsigned long polarity, 958 unsigned long polarity,
1018 unsigned long trigger) 959 unsigned long trigger)
1019{ 960{
1020 int vector; 961 int vector, irq;
1021 unsigned int dest = cpu_physical_id(smp_processor_id()); 962 unsigned int dest = cpu_physical_id(smp_processor_id());
1022 963
1023 vector = isa_irq_to_vector(isa_irq); 964 irq = vector = isa_irq_to_vector(isa_irq);
1024 965 BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
1025 register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger); 966 register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
1026 967
1027 DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n", 968 DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
1028 isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level", 969 isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
1029 polarity == IOSAPIC_POL_HIGH ? "high" : "low", 970 polarity == IOSAPIC_POL_HIGH ? "high" : "low",
1030 cpu_logical_id(dest), dest, vector); 971 cpu_logical_id(dest), dest, vector);
1031 972
1032 set_rte(gsi, vector, dest, 1); 973 set_rte(gsi, irq, dest, 1);
1033} 974}
1034 975
1035void __init 976void __init
1036iosapic_system_init (int system_pcat_compat) 977iosapic_system_init (int system_pcat_compat)
1037{ 978{
1038 int vector; 979 int irq;
1039 980
1040 for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { 981 for (irq = 0; irq < NR_IRQS; ++irq) {
1041 iosapic_intr_info[vector].low32 = IOSAPIC_MASK; 982 iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
1042 /* mark as unused */ 983 /* mark as unused */
1043 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 984 INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
985
986 iosapic_intr_info[irq].count = 0;
1044 } 987 }
1045 988
1046 pcat_compat = system_pcat_compat; 989 pcat_compat = system_pcat_compat;
@@ -1108,31 +1051,35 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
1108 unsigned long flags; 1051 unsigned long flags;
1109 1052
1110 spin_lock_irqsave(&iosapic_lock, flags); 1053 spin_lock_irqsave(&iosapic_lock, flags);
1111 { 1054 index = find_iosapic(gsi_base);
1112 addr = ioremap(phys_addr, 0); 1055 if (index >= 0) {
1113 ver = iosapic_version(addr); 1056 spin_unlock_irqrestore(&iosapic_lock, flags);
1057 return -EBUSY;
1058 }
1114 1059
1115 if ((err = iosapic_check_gsi_range(gsi_base, ver))) { 1060 addr = ioremap(phys_addr, 0);
1116 iounmap(addr); 1061 ver = iosapic_version(addr);
1117 spin_unlock_irqrestore(&iosapic_lock, flags); 1062 if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
1118 return err; 1063 iounmap(addr);
1119 } 1064 spin_unlock_irqrestore(&iosapic_lock, flags);
1065 return err;
1066 }
1120 1067
1121 /* 1068 /*
1122 * The MAX_REDIR register holds the highest input pin 1069 * The MAX_REDIR register holds the highest input pin number
1123 * number (starting from 0). 1070 * (starting from 0). We add 1 so that we can use it for
1124 * We add 1 so that we can use it for number of pins (= RTEs) 1071 * number of pins (= RTEs)
1125 */ 1072 */
1126 num_rte = ((ver >> 16) & 0xff) + 1; 1073 num_rte = ((ver >> 16) & 0xff) + 1;
1127 1074
1128 index = iosapic_alloc(); 1075 index = iosapic_alloc();
1129 iosapic_lists[index].addr = addr; 1076 iosapic_lists[index].addr = addr;
1130 iosapic_lists[index].gsi_base = gsi_base; 1077 iosapic_lists[index].gsi_base = gsi_base;
1131 iosapic_lists[index].num_rte = num_rte; 1078 iosapic_lists[index].num_rte = num_rte;
1132#ifdef CONFIG_NUMA 1079#ifdef CONFIG_NUMA
1133 iosapic_lists[index].node = MAX_NUMNODES; 1080 iosapic_lists[index].node = MAX_NUMNODES;
1134#endif 1081#endif
1135 } 1082 spin_lock_init(&iosapic_lists[index].lock);
1136 spin_unlock_irqrestore(&iosapic_lock, flags); 1083 spin_unlock_irqrestore(&iosapic_lock, flags);
1137 1084
1138 if ((gsi_base == 0) && pcat_compat) { 1085 if ((gsi_base == 0) && pcat_compat) {
@@ -1157,25 +1104,22 @@ iosapic_remove (unsigned int gsi_base)
1157 unsigned long flags; 1104 unsigned long flags;
1158 1105
1159 spin_lock_irqsave(&iosapic_lock, flags); 1106 spin_lock_irqsave(&iosapic_lock, flags);
1160 { 1107 index = find_iosapic(gsi_base);
1161 index = find_iosapic(gsi_base); 1108 if (index < 0) {
1162 if (index < 0) { 1109 printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
1163 printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", 1110 __FUNCTION__, gsi_base);
1164 __FUNCTION__, gsi_base); 1111 goto out;
1165 goto out; 1112 }
1166 }
1167
1168 if (iosapic_lists[index].rtes_inuse) {
1169 err = -EBUSY;
1170 printk(KERN_WARNING
1171 "%s: IOSAPIC for GSI base %u is busy\n",
1172 __FUNCTION__, gsi_base);
1173 goto out;
1174 }
1175 1113
1176 iounmap(iosapic_lists[index].addr); 1114 if (iosapic_lists[index].rtes_inuse) {
1177 iosapic_free(index); 1115 err = -EBUSY;
1116 printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
1117 __FUNCTION__, gsi_base);
1118 goto out;
1178 } 1119 }
1120
1121 iounmap(iosapic_lists[index].addr);
1122 iosapic_free(index);
1179 out: 1123 out:
1180 spin_unlock_irqrestore(&iosapic_lock, flags); 1124 spin_unlock_irqrestore(&iosapic_lock, flags);
1181 return err; 1125 return err;
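The new iosapic_write() wrapper at the top of this file serializes each chip's indirect register access with a per-IOSAPIC spinlock rather than the single global iosapic_lock: the select/data pair of MMIO writes must not interleave between CPUs, but writes to different IOSAPICs can run in parallel. A minimal user-space model of that pattern, with invented names, a pthread mutex standing in for the spinlock, and plain fields standing in for the MMIO registers:

#include <pthread.h>
#include <stdint.h>

struct model_iosapic {
	pthread_mutex_t lock;		/* per-chip lock, as in struct iosapic */
	volatile uint32_t select;	/* stand-in for the index register */
	volatile uint32_t window;	/* stand-in for the data window */
};

static void model_iosapic_write(struct model_iosapic *io,
				unsigned int reg, uint32_t val)
{
	pthread_mutex_lock(&io->lock);
	io->select = reg;	/* step 1: pick the register */
	io->window = val;	/* step 2: write through the window */
	pthread_mutex_unlock(&io->lock);
}

int main(void)
{
	struct model_iosapic io = { .lock = PTHREAD_MUTEX_INITIALIZER };

	model_iosapic_write(&io, 0x10, 0x12345678);
	return io.window == 0x12345678 ? 0 : 1;
}

Moving the lock into struct iosapic is what lets mask_irq()/unmask_irq() above drop the global iosapic_lock around their register writes.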
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 407b45870489..cc3ee4ef37af 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
35#ifdef CONFIG_IA64_GENERIC 35#ifdef CONFIG_IA64_GENERIC
36unsigned int __ia64_local_vector_to_irq (ia64_vector vec) 36unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
37{ 37{
38 return (unsigned int) vec; 38 return __get_cpu_var(vector_irq)[vec];
39} 39}
40#endif 40#endif
41 41
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index bc47049f060f..91797c111162 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -46,6 +46,12 @@
46 46
47#define IRQ_DEBUG 0 47#define IRQ_DEBUG 0
48 48
49#define IRQ_VECTOR_UNASSIGNED (0)
50
51#define IRQ_UNUSED (0)
52#define IRQ_USED (1)
53#define IRQ_RSVD (2)
54
49/* These can be overridden in platform_irq_init */ 55/* These can be overridden in platform_irq_init */
50int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; 56int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
51int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; 57int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
54void __iomem *ipi_base_addr = ((void __iomem *) 60void __iomem *ipi_base_addr = ((void __iomem *)
55 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); 61 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
56 62
63static cpumask_t vector_allocation_domain(int cpu);
64
57/* 65/*
58 * Legacy IRQ to IA-64 vector translation table. 66 * Legacy IRQ to IA-64 vector translation table.
59 */ 67 */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
64}; 72};
65EXPORT_SYMBOL(isa_irq_to_vector_map); 73EXPORT_SYMBOL(isa_irq_to_vector_map);
66 74
67static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)]; 75DEFINE_SPINLOCK(vector_lock);
76
77struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
78 [0 ... NR_IRQS - 1] = {
79 .vector = IRQ_VECTOR_UNASSIGNED,
80 .domain = CPU_MASK_NONE
81 }
82};
83
84DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
85 [0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
86};
87
88static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
89 [0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
90};
91
92static int irq_status[NR_IRQS] = {
93 [0 ... NR_IRQS -1] = IRQ_UNUSED
94};
95
96int check_irq_used(int irq)
97{
98 if (irq_status[irq] == IRQ_USED)
99 return 1;
100
101 return -1;
102}
103
104static void reserve_irq(unsigned int irq)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&vector_lock, flags);
109 irq_status[irq] = IRQ_RSVD;
110 spin_unlock_irqrestore(&vector_lock, flags);
111}
112
113static inline int find_unassigned_irq(void)
114{
115 int irq;
116
117 for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
118 if (irq_status[irq] == IRQ_UNUSED)
119 return irq;
120 return -ENOSPC;
121}
122
123static inline int find_unassigned_vector(cpumask_t domain)
124{
125 cpumask_t mask;
126 int pos;
127
128 cpus_and(mask, domain, cpu_online_map);
129 if (cpus_empty(mask))
130 return -EINVAL;
131
132 for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
133 cpus_and(mask, domain, vector_table[pos]);
134 if (!cpus_empty(mask))
135 continue;
136 return IA64_FIRST_DEVICE_VECTOR + pos;
137 }
138 return -ENOSPC;
139}
140
141static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
142{
143 cpumask_t mask;
144 int cpu, pos;
145 struct irq_cfg *cfg = &irq_cfg[irq];
146
147 cpus_and(mask, domain, cpu_online_map);
148 if (cpus_empty(mask))
149 return -EINVAL;
150 if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
151 return 0;
152 if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
153 return -EBUSY;
154 for_each_cpu_mask(cpu, mask)
155 per_cpu(vector_irq, cpu)[vector] = irq;
156 cfg->vector = vector;
157 cfg->domain = domain;
158 irq_status[irq] = IRQ_USED;
159 pos = vector - IA64_FIRST_DEVICE_VECTOR;
160 cpus_or(vector_table[pos], vector_table[pos], domain);
161 return 0;
162}
163
164int bind_irq_vector(int irq, int vector, cpumask_t domain)
165{
166 unsigned long flags;
167 int ret;
168
169 spin_lock_irqsave(&vector_lock, flags);
170 ret = __bind_irq_vector(irq, vector, domain);
171 spin_unlock_irqrestore(&vector_lock, flags);
172 return ret;
173}
174
175static void __clear_irq_vector(int irq)
176{
177 int vector, cpu, pos;
178 cpumask_t mask;
179 cpumask_t domain;
180 struct irq_cfg *cfg = &irq_cfg[irq];
181
182 BUG_ON((unsigned)irq >= NR_IRQS);
183 BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
184 vector = cfg->vector;
185 domain = cfg->domain;
186 cpus_and(mask, cfg->domain, cpu_online_map);
187 for_each_cpu_mask(cpu, mask)
188 per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
189 cfg->vector = IRQ_VECTOR_UNASSIGNED;
190 cfg->domain = CPU_MASK_NONE;
191 irq_status[irq] = IRQ_UNUSED;
192 pos = vector - IA64_FIRST_DEVICE_VECTOR;
193 cpus_andnot(vector_table[pos], vector_table[pos], domain);
194}
195
196static void clear_irq_vector(int irq)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&vector_lock, flags);
201 __clear_irq_vector(irq);
202 spin_unlock_irqrestore(&vector_lock, flags);
203}
68 204
69int 205int
70assign_irq_vector (int irq) 206assign_irq_vector (int irq)
71{ 207{
72 int pos, vector; 208 unsigned long flags;
73 again: 209 int vector, cpu;
74 pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS); 210 cpumask_t domain;
75 vector = IA64_FIRST_DEVICE_VECTOR + pos; 211
76 if (vector > IA64_LAST_DEVICE_VECTOR) 212 vector = -ENOSPC;
77 return -ENOSPC; 213
78 if (test_and_set_bit(pos, ia64_vector_mask)) 214 spin_lock_irqsave(&vector_lock, flags);
79 goto again; 215 if (irq < 0) {
216 goto out;
217 }
218 for_each_online_cpu(cpu) {
219 domain = vector_allocation_domain(cpu);
220 vector = find_unassigned_vector(domain);
221 if (vector >= 0)
222 break;
223 }
224 if (vector < 0)
225 goto out;
226 BUG_ON(__bind_irq_vector(irq, vector, domain));
227 out:
228 spin_unlock_irqrestore(&vector_lock, flags);
80 return vector; 229 return vector;
81} 230}
82 231
83void 232void
84free_irq_vector (int vector) 233free_irq_vector (int vector)
85{ 234{
86 int pos; 235 if (vector < IA64_FIRST_DEVICE_VECTOR ||
87 236 vector > IA64_LAST_DEVICE_VECTOR)
88 if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
89 return; 237 return;
90 238 clear_irq_vector(vector);
91 pos = vector - IA64_FIRST_DEVICE_VECTOR;
92 if (!test_and_clear_bit(pos, ia64_vector_mask))
93 printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
94} 239}
95 240
96int 241int
97reserve_irq_vector (int vector) 242reserve_irq_vector (int vector)
98{ 243{
99 int pos;
100
101 if (vector < IA64_FIRST_DEVICE_VECTOR || 244 if (vector < IA64_FIRST_DEVICE_VECTOR ||
102 vector > IA64_LAST_DEVICE_VECTOR) 245 vector > IA64_LAST_DEVICE_VECTOR)
103 return -EINVAL; 246 return -EINVAL;
247 return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
248}
104 249
105 pos = vector - IA64_FIRST_DEVICE_VECTOR; 250/*
106 return test_and_set_bit(pos, ia64_vector_mask); 251 * Initialize vector_irq on a new cpu. This function must be called
252 * with vector_lock held.
253 */
254void __setup_vector_irq(int cpu)
255{
256 int irq, vector;
257
258 /* Clear vector_irq */
259 for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
260 per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
261 /* Mark the inuse vectors */
262 for (irq = 0; irq < NR_IRQS; ++irq) {
263 if (!cpu_isset(cpu, irq_cfg[irq].domain))
264 continue;
265 vector = irq_to_vector(irq);
266 per_cpu(vector_irq, cpu)[vector] = irq;
267 }
268}
269
270#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
271static enum vector_domain_type {
272 VECTOR_DOMAIN_NONE,
273 VECTOR_DOMAIN_PERCPU
274} vector_domain_type = VECTOR_DOMAIN_NONE;
275
276static cpumask_t vector_allocation_domain(int cpu)
277{
278 if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
279 return cpumask_of_cpu(cpu);
280 return CPU_MASK_ALL;
281}
282
283static int __init parse_vector_domain(char *arg)
284{
285 if (!arg)
286 return -EINVAL;
287 if (!strcmp(arg, "percpu")) {
288 vector_domain_type = VECTOR_DOMAIN_PERCPU;
289 no_int_routing = 1;
290 }
291 return 1;
292}
293early_param("vector", parse_vector_domain);
294#else
295static cpumask_t vector_allocation_domain(int cpu)
296{
297 return CPU_MASK_ALL;
298}
299#endif
300
301
302void destroy_and_reserve_irq(unsigned int irq)
303{
304 dynamic_irq_cleanup(irq);
305
306 clear_irq_vector(irq);
307 reserve_irq(irq);
308}
309
310static int __reassign_irq_vector(int irq, int cpu)
311{
312 struct irq_cfg *cfg = &irq_cfg[irq];
313 int vector;
314 cpumask_t domain;
315
316 if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
317 return -EINVAL;
318 if (cpu_isset(cpu, cfg->domain))
319 return 0;
320 domain = vector_allocation_domain(cpu);
321 vector = find_unassigned_vector(domain);
322 if (vector < 0)
323 return -ENOSPC;
324 __clear_irq_vector(irq);
325 BUG_ON(__bind_irq_vector(irq, vector, domain));
326 return 0;
327}
328
329int reassign_irq_vector(int irq, int cpu)
330{
331 unsigned long flags;
332 int ret;
333
334 spin_lock_irqsave(&vector_lock, flags);
335 ret = __reassign_irq_vector(irq, cpu);
336 spin_unlock_irqrestore(&vector_lock, flags);
337 return ret;
107} 338}
108 339
109/* 340/*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
111 */ 342 */
112int create_irq(void) 343int create_irq(void)
113{ 344{
114 int vector = assign_irq_vector(AUTO_ASSIGN); 345 unsigned long flags;
115 346 int irq, vector, cpu;
116 if (vector >= 0) 347 cpumask_t domain;
117 dynamic_irq_init(vector); 348
118 349 irq = vector = -ENOSPC;
119 return vector; 350 spin_lock_irqsave(&vector_lock, flags);
351 for_each_online_cpu(cpu) {
352 domain = vector_allocation_domain(cpu);
353 vector = find_unassigned_vector(domain);
354 if (vector >= 0)
355 break;
356 }
357 if (vector < 0)
358 goto out;
359 irq = find_unassigned_irq();
360 if (irq < 0)
361 goto out;
362 BUG_ON(__bind_irq_vector(irq, vector, domain));
363 out:
364 spin_unlock_irqrestore(&vector_lock, flags);
365 if (irq >= 0)
366 dynamic_irq_init(irq);
367 return irq;
120} 368}
121 369
122void destroy_irq(unsigned int irq) 370void destroy_irq(unsigned int irq)
123{ 371{
124 dynamic_irq_cleanup(irq); 372 dynamic_irq_cleanup(irq);
125 free_irq_vector(irq); 373 clear_irq_vector(irq);
126} 374}
127 375
128#ifdef CONFIG_SMP 376#ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
301 irq_desc_t *desc; 549 irq_desc_t *desc;
302 unsigned int irq; 550 unsigned int irq;
303 551
304 for (irq = 0; irq < NR_IRQS; ++irq) 552 irq = vec;
305 if (irq_to_vector(irq) == vec) { 553 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
306 desc = irq_desc + irq; 554 desc = irq_desc + irq;
307 desc->status |= IRQ_PER_CPU; 555 desc->status |= IRQ_PER_CPU;
308 desc->chip = &irq_type_ia64_lsapic; 556 desc->chip = &irq_type_ia64_lsapic;
309 if (action) 557 if (action)
310 setup_irq(irq, action); 558 setup_irq(irq, action);
311 }
312} 559}
313 560
314void __init 561void __init
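Most of the new code in irq_ia64.c revolves around allocation domains: a vector is free for an IRQ only if no CPU in the chosen domain already uses it, which is what find_unassigned_vector() and __bind_irq_vector() check with cpumask operations on vector_table[]. A small stand-alone sketch of that test, using plain 64-bit masks instead of cpumask_t (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define NVEC 16

/* used[v] is a bitmask of CPUs already using vector v; a vector is free
 * for 'domain' when no CPU in the domain has it in use. */
static int find_free_vector(uint64_t domain, const uint64_t used[NVEC])
{
	for (int v = 0; v < NVEC; v++)
		if ((used[v] & domain) == 0)
			return v;
	return -1;	/* -ENOSPC in the kernel version */
}

int main(void)
{
	uint64_t used[NVEC] = { [0] = 0x1, [1] = 0x3 };	/* vectors 0,1 busy on some CPUs */

	printf("free vector for CPU0 domain: %d\n", find_free_vector(0x1, used));
	printf("free vector for CPU1 domain: %d\n", find_free_vector(0x2, used));
	return 0;
}

With "vector=percpu" on the command line the domain returned by vector_allocation_domain() shrinks to a single CPU, so the same vector number can be handed out again on other CPUs.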
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index c81080df70df..2fdbd5c3f213 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -13,6 +13,7 @@
13 13
14#define MSI_DATA_VECTOR_SHIFT 0 14#define MSI_DATA_VECTOR_SHIFT 0
15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) 15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
16#define MSI_DATA_VECTOR_MASK 0xffffff00
16 17
17#define MSI_DATA_DELIVERY_SHIFT 8 18#define MSI_DATA_DELIVERY_SHIFT 8
18#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT) 19#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
50static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 51static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
51{ 52{
52 struct msi_msg msg; 53 struct msi_msg msg;
53 u32 addr; 54 u32 addr, data;
55 int cpu = first_cpu(cpu_mask);
56
57 if (!cpu_online(cpu))
58 return;
59
60 if (reassign_irq_vector(irq, cpu))
61 return;
54 62
55 read_msi_msg(irq, &msg); 63 read_msi_msg(irq, &msg);
56 64
57 addr = msg.address_lo; 65 addr = msg.address_lo;
58 addr &= MSI_ADDR_DESTID_MASK; 66 addr &= MSI_ADDR_DESTID_MASK;
59 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask))); 67 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
60 msg.address_lo = addr; 68 msg.address_lo = addr;
61 69
70 data = msg.data;
71 data &= MSI_DATA_VECTOR_MASK;
72 data |= MSI_DATA_VECTOR(irq_to_vector(irq));
73 msg.data = data;
74
62 write_msi_msg(irq, &msg); 75 write_msi_msg(irq, &msg);
63 irq_desc[irq].affinity = cpu_mask; 76 irq_desc[irq].affinity = cpumask_of_cpu(cpu);
64} 77}
65#endif /* CONFIG_SMP */ 78#endif /* CONFIG_SMP */
66 79
@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
69 struct msi_msg msg; 82 struct msi_msg msg;
70 unsigned long dest_phys_id; 83 unsigned long dest_phys_id;
71 int irq, vector; 84 int irq, vector;
85 cpumask_t mask;
72 86
73 irq = create_irq(); 87 irq = create_irq();
74 if (irq < 0) 88 if (irq < 0)
75 return irq; 89 return irq;
76 90
77 set_irq_msi(irq, desc); 91 set_irq_msi(irq, desc);
78 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map)); 92 cpus_and(mask, irq_to_domain(irq), cpu_online_map);
93 dest_phys_id = cpu_physical_id(first_cpu(mask));
79 vector = irq_to_vector(irq); 94 vector = irq_to_vector(irq);
80 95
81 msg.address_hi = 0; 96 msg.address_hi = 0;
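After reassign_irq_vector(), ia64_set_msi_irq_affinity() above has to patch two fields of the captured MSI message: the destination id in the address word and the (possibly changed) vector in the data word, which is why MSI_DATA_VECTOR_MASK is introduced. The edit is an ordinary mask-and-or read-modify-write; a toy version with made-up field positions, not the real ia64 MSI layout:

#include <assert.h>
#include <stdint.h>

/* Hypothetical field layout, chosen only for the example. */
#define DEST_SHIFT	12
#define DEST_MASK	(0xffu << DEST_SHIFT)
#define VECTOR_MASK	0xffu

/* Replace one field, keep every other bit of the word untouched. */
static uint32_t set_dest(uint32_t addr, uint32_t dest)
{
	return (addr & ~DEST_MASK) | ((dest << DEST_SHIFT) & DEST_MASK);
}

static uint32_t set_vector(uint32_t data, uint32_t vec)
{
	return (data & ~VECTOR_MASK) | (vec & VECTOR_MASK);
}

int main(void)
{
	uint32_t addr = set_dest(0xfee01000u, 0x2a);
	uint32_t data = set_vector(0x00004030u, 0x51);

	assert(((addr & DEST_MASK) >> DEST_SHIFT) == 0x2a);
	assert((data & VECTOR_MASK) == 0x51);
	return 0;
}

Both updates matter: with per-domain vectors, moving an interrupt to another CPU may also change its vector, so rewriting only the address word would leave the device signalling the old one.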
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 3c9d8e6089cf..9f5c90b594b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -395,9 +395,13 @@ smp_callin (void)
395 fix_b0_for_bsp(); 395 fix_b0_for_bsp();
396 396
397 lock_ipi_calllock(); 397 lock_ipi_calllock();
398 spin_lock(&vector_lock);
399 /* Setup the per cpu irq handling data structures */
400 __setup_vector_irq(cpuid);
398 cpu_set(cpuid, cpu_online_map); 401 cpu_set(cpuid, cpu_online_map);
399 unlock_ipi_calllock(); 402 unlock_ipi_calllock();
400 per_cpu(cpu_state, cpuid) = CPU_ONLINE; 403 per_cpu(cpu_state, cpuid) = CPU_ONLINE;
404 spin_unlock(&vector_lock);
401 405
402 smp_setup_percpu_timer(); 406 smp_setup_percpu_timer();
403 407
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3486fe7d6e65..627785c48ea9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,6 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/efi.h> 20#include <linux/efi.h>
21#include <linux/timex.h> 21#include <linux/timex.h>
22#include <linux/clocksource.h>
22 23
23#include <asm/machvec.h> 24#include <asm/machvec.h>
24#include <asm/delay.h> 25#include <asm/delay.h>
@@ -28,6 +29,16 @@
28#include <asm/sections.h> 29#include <asm/sections.h>
29#include <asm/system.h> 30#include <asm/system.h>
30 31
32#include "fsyscall_gtod_data.h"
33
34static cycle_t itc_get_cycles(void);
35
36struct fsyscall_gtod_data_t fsyscall_gtod_data = {
37 .lock = SEQLOCK_UNLOCKED,
38};
39
40struct itc_jitter_data_t itc_jitter_data;
41
31volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ 42volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
32 43
33#ifdef CONFIG_IA64_DEBUG_IRQ 44#ifdef CONFIG_IA64_DEBUG_IRQ
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);
37 48
38#endif 49#endif
39 50
40static struct time_interpolator itc_interpolator = { 51static struct clocksource clocksource_itc = {
41 .shift = 16, 52 .name = "itc",
42 .mask = 0xffffffffffffffffLL, 53 .rating = 350,
43 .source = TIME_SOURCE_CPU 54 .read = itc_get_cycles,
55 .mask = 0xffffffffffffffff,
 56 .mult = 0, /* to be calculated */
57 .shift = 16,
58 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
44}; 59};
60static struct clocksource *itc_clocksource;
45 61
46static irqreturn_t 62static irqreturn_t
47timer_interrupt (int irq, void *dev_id) 63timer_interrupt (int irq, void *dev_id)
@@ -210,8 +226,6 @@ ia64_init_itm (void)
210 + itc_freq/2)/itc_freq; 226 + itc_freq/2)/itc_freq;
211 227
212 if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { 228 if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
213 itc_interpolator.frequency = local_cpu_data->itc_freq;
214 itc_interpolator.drift = itc_drift;
215#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
216 /* On IA64 in an SMP configuration ITCs are never accurately synchronized. 230 /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
217 * Jitter compensation requires a cmpxchg which may limit 231 * Jitter compensation requires a cmpxchg which may limit
@@ -223,15 +237,50 @@ ia64_init_itm (void)
223 * even going backward) if the ITC offsets between the individual CPUs 237 * even going backward) if the ITC offsets between the individual CPUs
224 * are too large. 238 * are too large.
225 */ 239 */
226 if (!nojitter) itc_interpolator.jitter = 1; 240 if (!nojitter)
241 itc_jitter_data.itc_jitter = 1;
227#endif 242#endif
228 register_time_interpolator(&itc_interpolator);
229 } 243 }
230 244
231 /* Setup the CPU local timer tick */ 245 /* Setup the CPU local timer tick */
232 ia64_cpu_local_tick(); 246 ia64_cpu_local_tick();
247
248 if (!itc_clocksource) {
249 /* Sort out mult/shift values: */
250 clocksource_itc.mult =
251 clocksource_hz2mult(local_cpu_data->itc_freq,
252 clocksource_itc.shift);
253 clocksource_register(&clocksource_itc);
254 itc_clocksource = &clocksource_itc;
255 }
233} 256}
234 257
258static cycle_t itc_get_cycles()
259{
260 u64 lcycle, now, ret;
261
262 if (!itc_jitter_data.itc_jitter)
263 return get_cycles();
264
265 lcycle = itc_jitter_data.itc_lastcycle;
266 now = get_cycles();
267 if (lcycle && time_after(lcycle, now))
268 return lcycle;
269
 270	/*
 271	 * Keep track of the last timer value returned.
 272	 * In an SMP environment, you can lose the cmpxchg race; if so,
 273	 * cmpxchg returns the value that the winner of the race stored.
 274	 * Use that new value instead.
 275	 */
276 ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
277 if (unlikely(ret != lcycle))
278 return ret;
279
280 return now;
281}
282
283
235static struct irqaction timer_irqaction = { 284static struct irqaction timer_irqaction = {
236 .handler = timer_interrupt, 285 .handler = timer_interrupt,
237 .flags = IRQF_DISABLED | IRQF_IRQPOLL, 286 .flags = IRQF_DISABLED | IRQF_IRQPOLL,
@@ -307,3 +356,34 @@ ia64_setup_printk_clock(void)
307 if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) 356 if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
308 ia64_printk_clock = ia64_itc_printk_clock; 357 ia64_printk_clock = ia64_itc_printk_clock;
309} 358}
359
360void update_vsyscall(struct timespec *wall, struct clocksource *c)
361{
362 unsigned long flags;
363
364 write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
365
366 /* copy fsyscall clock data */
367 fsyscall_gtod_data.clk_mask = c->mask;
368 fsyscall_gtod_data.clk_mult = c->mult;
369 fsyscall_gtod_data.clk_shift = c->shift;
370 fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
371 fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
372
373 /* copy kernel time structures */
374 fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
375 fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
376 fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
377 + wall->tv_sec;
378 fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
379 + wall->tv_nsec;
380
381 /* normalize */
382 while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
383 fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
384 fsyscall_gtod_data.monotonic_time.tv_sec++;
385 }
386
387 write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
388}
389
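
The mult/shift pair registered above is what the generic timekeeping code uses to turn ITC ticks into nanoseconds via ns = (cycles * mult) >> shift. A stand-alone sketch of that arithmetic follows; the hz2mult() helper only mirrors what clocksource_hz2mult() computes from the counter frequency, and the 1 GHz ITC rate and tick count are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mult is chosen so that (cycles * mult) >> shift yields nanoseconds
 * for a counter running at hz ticks per second */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t shift = 16;				/* same shift as clocksource_itc */
	uint32_t mult = hz2mult(1000000000U, shift);	/* assume a 1 GHz ITC */
	uint64_t cycles = 2500;				/* invented number of elapsed ticks */

	/* 2500 ticks at 1 GHz -> 2500 ns */
	printf("mult=%u, ns=%llu\n", mult,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}
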
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 56a88b6df4b4..19e25d2b64fc 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -11,6 +11,7 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/clocksource.h>
14 15
15#include <asm/hw_irq.h> 16#include <asm/hw_irq.h>
16#include <asm/system.h> 17#include <asm/system.h>
@@ -22,11 +23,21 @@
22 23
23extern unsigned long sn_rtc_cycles_per_second; 24extern unsigned long sn_rtc_cycles_per_second;
24 25
25static struct time_interpolator sn2_interpolator = { 26static void __iomem *sn2_mc;
26 .drift = -1, 27
27 .shift = 10, 28static cycle_t read_sn2(void)
28 .mask = (1LL << 55) - 1, 29{
29 .source = TIME_SOURCE_MMIO64 30 return (cycle_t)readq(sn2_mc);
31}
32
33static struct clocksource clocksource_sn2 = {
34 .name = "sn2_rtc",
35 .rating = 300,
36 .read = read_sn2,
37 .mask = (1LL << 55) - 1,
38 .mult = 0,
39 .shift = 10,
40 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
30}; 41};
31 42
32/* 43/*
@@ -47,9 +58,11 @@ ia64_sn_udelay (unsigned long usecs)
47 58
48void __init sn_timer_init(void) 59void __init sn_timer_init(void)
49{ 60{
50 sn2_interpolator.frequency = sn_rtc_cycles_per_second; 61 sn2_mc = RTC_COUNTER_ADDR;
51 sn2_interpolator.addr = RTC_COUNTER_ADDR; 62 clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
52 register_time_interpolator(&sn2_interpolator); 63 clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
64 clocksource_sn2.shift);
65 clocksource_register(&clocksource_sn2);
53 66
54 ia64_udelay = &ia64_sn_udelay; 67 ia64_udelay = &ia64_sn_udelay;
55} 68}
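
The 55-bit mask given to clocksource_sn2 matters because the generic timekeeping code computes elapsed cycles as (now - cycle_last) & mask, which stays correct across a counter wrap. A small stand-alone illustration, with made-up counter values:

#include <stdio.h>
#include <stdint.h>

#define SN2_MASK (((uint64_t)1 << 55) - 1)	/* same mask as clocksource_sn2 */

int main(void)
{
	uint64_t last = SN2_MASK - 10;	/* read taken just before the RTC wraps */
	uint64_t now  = 5;		/* read taken just after the wrap */

	/* the subtraction underflows, but masking recovers the real delta: 16 */
	printf("delta=%llu\n", (unsigned long long)((now - last) & SN2_MASK));
	return 0;
}
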
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index a86e2e9a639f..20a9c08e59c3 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -37,6 +37,10 @@ config TIME_LOW_RES
37 bool 37 bool
38 default y 38 default y
39 39
40config GENERIC_IOMAP
41 bool
42 default y
43
40config ARCH_MAY_HAVE_PC_FDC 44config ARCH_MAY_HAVE_PC_FDC
41 bool 45 bool
42 depends on Q40 || (BROKEN && SUN3X) 46 depends on Q40 || (BROKEN && SUN3X)
@@ -45,6 +49,9 @@ config ARCH_MAY_HAVE_PC_FDC
45config NO_IOPORT 49config NO_IOPORT
46 def_bool y 50 def_bool y
47 51
52config NO_DMA
53 def_bool SUN3
54
48mainmenu "Linux/68k Kernel Configuration" 55mainmenu "Linux/68k Kernel Configuration"
49 56
50source "init/Kconfig" 57source "init/Kconfig"
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index cb8e7609df4c..78df98f2029a 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -148,8 +148,8 @@ void dn_serial_print (const char *str)
148 } 148 }
149} 149}
150 150
151void config_apollo(void) { 151void __init config_apollo(void)
152 152{
153 int i; 153 int i;
154 154
155 dn_setup_model(); 155 dn_setup_model();
diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
index 13bd41bed28e..5d47f3aa3810 100644
--- a/arch/m68k/apollo/dn_ints.c
+++ b/arch/m68k/apollo/dn_ints.c
@@ -37,7 +37,7 @@ static struct irq_controller apollo_irq_controller = {
37}; 37};
38 38
39 39
40void dn_init_IRQ(void) 40void __init dn_init_IRQ(void)
41{ 41{
42 m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int); 42 m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int);
43 m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16); 43 m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16);
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 1c29603b16b3..2b5f64726a2e 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -13,6 +13,7 @@
13 * enhanced by Bjoern Brauel and Roman Hodek 13 * enhanced by Bjoern Brauel and Roman Hodek
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/sched.h> 17#include <linux/sched.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
@@ -42,6 +43,9 @@ void (*atari_mouse_interrupt_hook) (char *);
42void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); 43void (*atari_input_keyboard_interrupt_hook) (unsigned char, char);
43/* Hook for mouse inputdev driver */ 44/* Hook for mouse inputdev driver */
44void (*atari_input_mouse_interrupt_hook) (char *); 45void (*atari_input_mouse_interrupt_hook) (char *);
46EXPORT_SYMBOL(atari_mouse_interrupt_hook);
47EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook);
48EXPORT_SYMBOL(atari_input_mouse_interrupt_hook);
45 49
46/* variables for IKBD self test: */ 50/* variables for IKBD self test: */
47 51
@@ -429,6 +433,7 @@ void ikbd_mouse_rel_pos(void)
429 433
430 ikbd_write(cmd, 1); 434 ikbd_write(cmd, 1);
431} 435}
436EXPORT_SYMBOL(ikbd_mouse_rel_pos);
432 437
433/* Set absolute mouse position reporting */ 438/* Set absolute mouse position reporting */
434void ikbd_mouse_abs_pos(int xmax, int ymax) 439void ikbd_mouse_abs_pos(int xmax, int ymax)
@@ -453,6 +458,7 @@ void ikbd_mouse_thresh(int x, int y)
453 458
454 ikbd_write(cmd, 3); 459 ikbd_write(cmd, 3);
455} 460}
461EXPORT_SYMBOL(ikbd_mouse_thresh);
456 462
457/* Set mouse scale */ 463/* Set mouse scale */
458void ikbd_mouse_scale(int x, int y) 464void ikbd_mouse_scale(int x, int y)
@@ -495,6 +501,7 @@ void ikbd_mouse_y0_top(void)
495 501
496 ikbd_write(cmd, 1); 502 ikbd_write(cmd, 1);
497} 503}
504EXPORT_SYMBOL(ikbd_mouse_y0_top);
498 505
499/* Resume */ 506/* Resume */
500void ikbd_resume(void) 507void ikbd_resume(void)
@@ -511,6 +518,7 @@ void ikbd_mouse_disable(void)
511 518
512 ikbd_write(cmd, 1); 519 ikbd_write(cmd, 1);
513} 520}
521EXPORT_SYMBOL(ikbd_mouse_disable);
514 522
515/* Pause output */ 523/* Pause output */
516void ikbd_pause(void) 524void ikbd_pause(void)
@@ -696,7 +704,6 @@ int __init atari_keyb_init(void)
696 return 0; 704 return 0;
697} 705}
698 706
699
700int atari_kbdrate(struct kbd_repeat *k) 707int atari_kbdrate(struct kbd_repeat *k)
701{ 708{
702 if (k->delay > 0) { 709 if (k->delay > 0) {
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 896ae3d3d919..9433a88a33c4 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -97,7 +97,7 @@ static int bvme6000_get_hardware_list(char *buffer)
97 * This function is called during kernel startup to initialize 97 * This function is called during kernel startup to initialize
98 * the bvme6000 IRQ handling routines. 98 * the bvme6000 IRQ handling routines.
99 */ 99 */
100static void bvme6000_init_IRQ(void) 100static void __init bvme6000_init_IRQ(void)
101{ 101{
102 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 102 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
103} 103}
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 05741f233567..faa6764f1d13 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -577,7 +577,7 @@ func_define putn,1
577#endif 577#endif
578.endm 578.endm
579 579
580.text 580.section ".text.head","ax"
581ENTRY(_stext) 581ENTRY(_stext)
582/* 582/*
583 * Version numbers of the bootinfo interface 583 * Version numbers of the bootinfo interface
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 215c7bd43924..7e6d5fb75390 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -58,6 +58,7 @@ extern int end;
58extern unsigned long availmem; 58extern unsigned long availmem;
59 59
60int m68k_num_memory; 60int m68k_num_memory;
61EXPORT_SYMBOL(m68k_num_memory);
61int m68k_realnum_memory; 62int m68k_realnum_memory;
62EXPORT_SYMBOL(m68k_realnum_memory); 63EXPORT_SYMBOL(m68k_realnum_memory);
63unsigned long m68k_memoffset; 64unsigned long m68k_memoffset;
diff --git a/arch/m68k/kernel/sun3-head.S b/arch/m68k/kernel/sun3-head.S
index 4b5f050204e8..aad01592dbbc 100644
--- a/arch/m68k/kernel/sun3-head.S
+++ b/arch/m68k/kernel/sun3-head.S
@@ -29,7 +29,7 @@ kernel_pmd_table: .skip 0x2000
29.globl kernel_pg_dir 29.globl kernel_pg_dir
30.equ kernel_pg_dir,kernel_pmd_table 30.equ kernel_pg_dir,kernel_pmd_table
31 31
32 .section .head 32 .section .text.head
33ENTRY(_stext) 33ENTRY(_stext)
34ENTRY(_start) 34ENTRY(_start)
35 35
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 4c065f9ceffc..7db41594d7b6 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -72,7 +72,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
72 return IRQ_HANDLED; 72 return IRQ_HANDLED;
73} 73}
74 74
75void time_init(void) 75void __init time_init(void)
76{ 76{
77 struct rtc_time time; 77 struct rtc_time time;
78 78
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 40f02b128f22..c42245775a4d 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -11,6 +11,7 @@ SECTIONS
11 . = 0x1000; 11 . = 0x1000;
12 _text = .; /* Text and read-only data */ 12 _text = .; /* Text and read-only data */
13 .text : { 13 .text : {
14 *(.text.head)
14 TEXT_TEXT 15 TEXT_TEXT
15 SCHED_TEXT 16 SCHED_TEXT
16 LOCK_TEXT 17 LOCK_TEXT
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index f06425b6d206..4adffefb5c48 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -11,7 +11,7 @@ SECTIONS
11 . = 0xE002000; 11 . = 0xE002000;
12 _text = .; /* Text and read-only data */ 12 _text = .; /* Text and read-only data */
13 .text : { 13 .text : {
14 *(.head) 14 *(.text.head)
15 TEXT_TEXT 15 TEXT_TEXT
16 SCHED_TEXT 16 SCHED_TEXT
17 LOCK_TEXT 17 LOCK_TEXT
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 5fd413246f89..8547dbc5e8d7 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -49,6 +49,7 @@ struct mac_booter_data mac_bi_data;
49int mac_bisize = sizeof mac_bi_data; 49int mac_bisize = sizeof mac_bi_data;
50 50
51struct mac_hw_present mac_hw_present; 51struct mac_hw_present mac_hw_present;
52EXPORT_SYMBOL(mac_hw_present);
52 53
53/* New m68k bootinfo stuff and videobase */ 54/* New m68k bootinfo stuff and videobase */
54 55
@@ -84,7 +85,7 @@ extern void nubus_sweep_video(void);
84 85
85static void mac_get_model(char *str); 86static void mac_get_model(char *str);
86 87
87static void mac_sched_init(irq_handler_t vector) 88static void __init mac_sched_init(irq_handler_t vector)
88{ 89{
89 via_init_clock(vector); 90 via_init_clock(vector);
90} 91}
@@ -769,7 +770,7 @@ static struct mac_model mac_data_table[] = {
769 } 770 }
770}; 771};
771 772
772void mac_identify(void) 773void __init mac_identify(void)
773{ 774{
774 struct mac_model *m; 775 struct mac_model *m;
775 776
@@ -846,7 +847,7 @@ void mac_identify(void)
846 baboon_init(); 847 baboon_init();
847} 848}
848 849
849void mac_report_hardware(void) 850void __init mac_report_hardware(void)
850{ 851{
851 printk(KERN_INFO "Apple Macintosh %s\n", macintosh_config->name); 852 printk(KERN_INFO "Apple Macintosh %s\n", macintosh_config->name);
852} 853}
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 0fc72d8f786e..ecddac4a02b9 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -114,6 +114,7 @@
114 * 114 *
115 */ 115 */
116 116
117#include <linux/module.h>
117#include <linux/types.h> 118#include <linux/types.h>
118#include <linux/kernel.h> 119#include <linux/kernel.h>
119#include <linux/sched.h> 120#include <linux/sched.h>
@@ -224,7 +225,7 @@ static struct irq_controller mac_irq_controller = {
224 .disable = mac_disable_irq, 225 .disable = mac_disable_irq,
225}; 226};
226 227
227void mac_init_IRQ(void) 228void __init mac_init_IRQ(void)
228{ 229{
229#ifdef DEBUG_MACINTS 230#ifdef DEBUG_MACINTS
230 printk("mac_init_IRQ(): Setting things up...\n"); 231 printk("mac_init_IRQ(): Setting things up...\n");
@@ -391,6 +392,7 @@ int mac_irq_pending(unsigned int irq)
391 } 392 }
392 return 0; 393 return 0;
393} 394}
395EXPORT_SYMBOL(mac_irq_pending);
394 396
395static int num_debug[8]; 397static int num_debug[8];
396 398
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index f1de19e1dde6..f42caa79e4e8 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -44,7 +44,7 @@ pg_data_t *pg_data_table[65];
44EXPORT_SYMBOL(pg_data_table); 44EXPORT_SYMBOL(pg_data_table);
45#endif 45#endif
46 46
47void m68k_setup_node(int node) 47void __init m68k_setup_node(int node)
48{ 48{
49#ifndef CONFIG_SINGLE_MEMORY_CHUNK 49#ifndef CONFIG_SINGLE_MEMORY_CHUNK
50 struct mem_info *info = m68k_memory + node; 50 struct mem_info *info = m68k_memory + node;
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
index 1af24cb5bfe1..3dc41158c05e 100644
--- a/arch/m68k/mm/sun3kmap.c
+++ b/arch/m68k/mm/sun3kmap.c
@@ -105,6 +105,7 @@ void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
105 return (void __iomem *)ret; 105 return (void __iomem *)ret;
106 106
107} 107}
108EXPORT_SYMBOL(sun3_ioremap);
108 109
109 110
110void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache) 111void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache)
@@ -157,3 +158,4 @@ int sun3_map_test(unsigned long addr, char *val)
157 158
158 return ret; 159 return ret;
159} 160}
161EXPORT_SYMBOL(sun3_map_test);
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 4a7df9c3f85a..92fe50714112 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -89,7 +89,7 @@ static int mvme147_get_hardware_list(char *buffer)
89 * the mvme147 IRQ handling routines. 89 * the mvme147 IRQ handling routines.
90 */ 90 */
91 91
92void mvme147_init_IRQ(void) 92void __init mvme147_init_IRQ(void)
93{ 93{
94 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 94 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
95} 95}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index c829ebb6b1af..daa785161401 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -119,7 +119,7 @@ static int mvme16x_get_hardware_list(char *buffer)
119 * that the base vectors for the VMEChip2 and PCCChip2 are valid. 119 * that the base vectors for the VMEChip2 and PCCChip2 are valid.
120 */ 120 */
121 121
122static void mvme16x_init_IRQ (void) 122static void __init mvme16x_init_IRQ (void)
123{ 123{
124 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 124 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
125} 125}
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 2fb25ae46a8a..ad3ed1fb8879 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -79,7 +79,7 @@ static struct irq_controller q40_irq_controller = {
79 79
80static int disabled; 80static int disabled;
81 81
82void q40_init_IRQ(void) 82void __init q40_init_IRQ(void)
83{ 83{
84 m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX); 84 m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX);
85 85
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 50df34bf80e3..cf93481adb1d 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -97,7 +97,7 @@ static struct irq_controller sun3_irq_controller = {
97 .disable = sun3_disable_irq, 97 .disable = sun3_disable_irq,
98}; 98};
99 99
100void sun3_init_IRQ(void) 100void __init sun3_init_IRQ(void)
101{ 101{
102 *sun3_intreg = 1; 102 *sun3_intreg = 1;
103 103
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index 48f8eb7b1565..a7b7e818d627 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -92,7 +92,7 @@ static struct console sun3x_debug = {
92 .index = -1, 92 .index = -1,
93}; 93};
94 94
95void sun3x_prom_init(void) 95void __init sun3x_prom_init(void)
96{ 96{
97 /* Read the vector table */ 97 /* Read the vector table */
98 98
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 80f4e9d74ac1..2203f694f26b 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -231,32 +231,33 @@ void setup_arch(char **cmdline_p)
231/* 231/*
232 * Get CPU information for use by the procfs. 232 * Get CPU information for use by the procfs.
233 */ 233 */
234
235static int show_cpuinfo(struct seq_file *m, void *v) 234static int show_cpuinfo(struct seq_file *m, void *v)
236{ 235{
237 char *cpu, *mmu, *fpu; 236 char *cpu, *mmu, *fpu;
238 u_long clockfreq; 237 u_long clockfreq;
239 238
240 cpu = CPU; 239 cpu = CPU;
241 mmu = "none"; 240 mmu = "none";
242 fpu = "none"; 241 fpu = "none";
243 242
244#ifdef CONFIG_COLDFIRE 243#ifdef CONFIG_COLDFIRE
245 clockfreq = (loops_per_jiffy*HZ)*3; 244 clockfreq = (loops_per_jiffy * HZ) * 3;
246#else 245#else
247 clockfreq = (loops_per_jiffy*HZ)*16; 246 clockfreq = (loops_per_jiffy * HZ) * 16;
248#endif 247#endif
249 248
250 seq_printf(m, "CPU:\t\t%s\n" 249 seq_printf(m, "CPU:\t\t%s\n"
251 "MMU:\t\t%s\n" 250 "MMU:\t\t%s\n"
252 "FPU:\t\t%s\n" 251 "FPU:\t\t%s\n"
253 "Clocking:\t%lu.%1luMHz\n" 252 "Clocking:\t%lu.%1luMHz\n"
254 "BogoMips:\t%lu.%02lu\n" 253 "BogoMips:\t%lu.%02lu\n"
255 "Calibration:\t%lu loops\n", 254 "Calibration:\t%lu loops\n",
256 cpu, mmu, fpu, 255 cpu, mmu, fpu,
257 clockfreq/1000000,(clockfreq/100000)%10, 256 clockfreq / 1000000,
258 (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100, 257 (clockfreq / 100000) % 10,
259 (loops_per_jiffy*HZ)); 258 (loops_per_jiffy * HZ) / 500000,
259 ((loops_per_jiffy * HZ) / 5000) % 100,
260 (loops_per_jiffy * HZ));
260 261
261 return 0; 262 return 0;
262} 263}
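
Everything show_cpuinfo() prints is derived from loops_per_jiffy; the stand-alone sketch below reruns the same arithmetic with invented sample values (a tick rate of 100 is assumed, and the factor of 3 is the CONFIG_COLDFIRE branch from the hunk above).

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100;			/* assumed tick rate */
	unsigned long loops_per_jiffy = 500000;	/* invented calibration result */
	unsigned long clockfreq = (loops_per_jiffy * hz) * 3;	/* ColdFire case */

	printf("Clocking:\t%lu.%1luMHz\n",
	       clockfreq / 1000000, (clockfreq / 100000) % 10);	/* 150.0MHz */
	printf("BogoMips:\t%lu.%02lu\n",
	       (loops_per_jiffy * hz) / 500000,
	       ((loops_per_jiffy * hz) / 5000) % 100);		/* 100.00 */
	return 0;
}
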
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5c863bcd5614..1e3aeccd7322 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1190,8 +1190,19 @@ config SYS_HAS_CPU_RM9000
1190config SYS_HAS_CPU_SB1 1190config SYS_HAS_CPU_SB1
1191 bool 1191 bool
1192 1192
1193#
1194# CPU may reorder R->R, R->W, W->R, W->W
1195# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
1196#
1193config WEAK_ORDERING 1197config WEAK_ORDERING
1194 bool 1198 bool
1199
1200#
1201# CPU may reorder reads and writes beyond LL/SC
1202# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
1203#
1204config WEAK_REORDERING_BEYOND_LLSC
1205 bool
1195endmenu 1206endmenu
1196 1207
1197# 1208#
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c6b8b074a81a..06448a9656dc 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -75,6 +75,27 @@ static void r4k_wait_irqoff(void)
75 local_irq_enable(); 75 local_irq_enable();
76} 76}
77 77
78/*
79 * The RM7000 variant has to handle erratum 38. The workaround is to not
80 * have any pending stores when the WAIT instruction is executed.
81 */
82static void rm7k_wait_irqoff(void)
83{
84 local_irq_disable();
85 if (!need_resched())
86 __asm__(
87 " .set push \n"
88 " .set mips3 \n"
89 " .set noat \n"
90 " mfc0 $1, $12 \n"
91 " sync \n"
92 " mtc0 $1, $12 # stalls until W stage \n"
93 " wait \n"
94 " mtc0 $1, $12 # stalls until W stage \n"
95 " .set pop \n");
96 local_irq_enable();
97}
98
78/* The Au1xxx wait is available only if using 32khz counter or 99/* The Au1xxx wait is available only if using 32khz counter or
79 * external timer source, but specifically not CP0 Counter. */ 100 * external timer source, but specifically not CP0 Counter. */
80int allow_au1k_wait; 101int allow_au1k_wait;
@@ -132,7 +153,6 @@ static inline void check_wait(void)
132 case CPU_R4700: 153 case CPU_R4700:
133 case CPU_R5000: 154 case CPU_R5000:
134 case CPU_NEVADA: 155 case CPU_NEVADA:
135 case CPU_RM7000:
136 case CPU_4KC: 156 case CPU_4KC:
137 case CPU_4KEC: 157 case CPU_4KEC:
138 case CPU_4KSC: 158 case CPU_4KSC:
@@ -142,6 +162,10 @@ static inline void check_wait(void)
142 cpu_wait = r4k_wait; 162 cpu_wait = r4k_wait;
143 break; 163 break;
144 164
165 case CPU_RM7000:
166 cpu_wait = rm7k_wait_irqoff;
167 break;
168
145 case CPU_24K: 169 case CPU_24K:
146 case CPU_34K: 170 case CPU_34K:
147 cpu_wait = r4k_wait; 171 cpu_wait = r4k_wait;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8f4cf27c7157..bd05f5a927ea 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,7 +25,9 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/completion.h> 26#include <linux/completion.h>
27#include <linux/kallsyms.h> 27#include <linux/kallsyms.h>
28#include <linux/random.h>
28 29
30#include <asm/asm.h>
29#include <asm/bootinfo.h> 31#include <asm/bootinfo.h>
30#include <asm/cpu.h> 32#include <asm/cpu.h>
31#include <asm/dsp.h> 33#include <asm/dsp.h>
@@ -460,3 +462,15 @@ unsigned long get_wchan(struct task_struct *task)
460out: 462out:
461 return pc; 463 return pc;
462} 464}
465
466/*
 467 * Don't forget that the stack pointer must be aligned on an 8-byte
 468 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
469 */
470unsigned long arch_align_stack(unsigned long sp)
471{
472 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
473 sp -= get_random_int() & ~PAGE_MASK;
474
475 return sp & ALMASK;
476}
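
arch_align_stack() above lowers the stack by a random sub-page amount and then restores ABI alignment with ALMASK. Below is a stand-alone sketch of that arithmetic; the 4 KB page size, the 8-byte alignment mask of the 32-bit ABI and the sample numbers are all assumptions made for the example.

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)	/* assume 4 KB pages */
#define ALMASK    (~0x7UL)	/* assume the 8-byte alignment of the 32-bit ABI */

int main(void)
{
	unsigned long sp  = 0x7fff8000UL;	/* invented initial stack pointer */
	unsigned long rnd = 0x1234abcdUL;	/* stand-in for get_random_int() */

	sp -= rnd & ~PAGE_MASK;	/* move down by 0..PAGE_SIZE-1 bytes (here 0xbcd) */
	sp &= ALMASK;		/* re-align: 0x7fff7433 -> 0x7fff7430 */

	printf("sp=%#lx\n", sp);
	return 0;
}
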
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d860b640a140..853c282da22e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -92,6 +92,9 @@ config ARCH_MAY_HAVE_PC_FDC
92config PPC_OF 92config PPC_OF
93 def_bool y 93 def_bool y
94 94
95config OF
96 def_bool y
97
95config PPC_UDBG_16550 98config PPC_UDBG_16550
96 bool 99 bool
97 default n 100 default n
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
index 1a6d64a68df5..a55c2735f759 100644
--- a/arch/powerpc/boot/ps3-head.S
+++ b/arch/powerpc/boot/ps3-head.S
@@ -20,6 +20,8 @@
20 20
21#include "ppc_asm.h" 21#include "ppc_asm.h"
22 22
23 .machine "ppc64"
24
23 .text 25 .text
24 26
25/* 27/*
diff --git a/arch/powerpc/boot/ps3-hvcall.S b/arch/powerpc/boot/ps3-hvcall.S
index c8b7df3210d1..585965f7e6a8 100644
--- a/arch/powerpc/boot/ps3-hvcall.S
+++ b/arch/powerpc/boot/ps3-hvcall.S
@@ -20,6 +20,8 @@
20 20
21#include "ppc_asm.h" 21#include "ppc_asm.h"
22 22
23 .machine "ppc64"
24
23/* 25/*
24 * The PS3 hypervisor uses a 64 bit "C" language calling convention. 26 * The PS3 hypervisor uses a 64 bit "C" language calling convention.
25 * The routines here marshal arguments between the 32 bit wrapper 27 * The routines here marshal arguments between the 32 bit wrapper
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 74f83f4a4e5e..d9ac24e8de16 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1455,7 +1455,8 @@ CONFIG_HAS_DMA=y
1455# Instrumentation Support 1455# Instrumentation Support
1456# 1456#
1457CONFIG_PROFILING=y 1457CONFIG_PROFILING=y
1458CONFIG_OPROFILE=y 1458CONFIG_OPROFILE=m
1459CONFIG_OPROFILE_CELL=y
1459# CONFIG_KPROBES is not set 1460# CONFIG_KPROBES is not set
1460 1461
1461# 1462#
diff --git a/arch/powerpc/configs/prpmc2800_defconfig b/arch/powerpc/configs/prpmc2800_defconfig
index fb504a714625..858f865f2d59 100644
--- a/arch/powerpc/configs/prpmc2800_defconfig
+++ b/arch/powerpc/configs/prpmc2800_defconfig
@@ -48,7 +48,7 @@ CONFIG_PPC_STD_MMU_32=y
48# CONFIG_PPC_MM_SLICES is not set 48# CONFIG_PPC_MM_SLICES is not set
49# CONFIG_SMP is not set 49# CONFIG_SMP is not set
50CONFIG_NOT_COHERENT_CACHE=y 50CONFIG_NOT_COHERENT_CACHE=y
51CONFIG_CONFIG_CHECK_CACHE_COHERENCY=y 51CONFIG_CHECK_CACHE_COHERENCY=y
52CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 52CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
53 53
54# 54#
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index d3f2080d2eee..37658ea417fa 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -219,6 +219,72 @@ void crash_kexec_secondary(struct pt_regs *regs)
219 cpus_in_sr = CPU_MASK_NONE; 219 cpus_in_sr = CPU_MASK_NONE;
220} 220}
221#endif 221#endif
222#ifdef CONFIG_SPU_BASE
223
224#include <asm/spu.h>
225#include <asm/spu_priv1.h>
226
227struct crash_spu_info {
228 struct spu *spu;
229 u32 saved_spu_runcntl_RW;
230 u32 saved_spu_status_R;
231 u32 saved_spu_npc_RW;
232 u64 saved_mfc_sr1_RW;
233 u64 saved_mfc_dar;
234 u64 saved_mfc_dsisr;
235};
236
237#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
238static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
239
240static void crash_kexec_stop_spus(void)
241{
242 struct spu *spu;
243 int i;
244 u64 tmp;
245
246 for (i = 0; i < CRASH_NUM_SPUS; i++) {
247 if (!crash_spu_info[i].spu)
248 continue;
249
250 spu = crash_spu_info[i].spu;
251
252 crash_spu_info[i].saved_spu_runcntl_RW =
253 in_be32(&spu->problem->spu_runcntl_RW);
254 crash_spu_info[i].saved_spu_status_R =
255 in_be32(&spu->problem->spu_status_R);
256 crash_spu_info[i].saved_spu_npc_RW =
257 in_be32(&spu->problem->spu_npc_RW);
258
259 crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
260 crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
261 tmp = spu_mfc_sr1_get(spu);
262 crash_spu_info[i].saved_mfc_sr1_RW = tmp;
263
264 tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
265 spu_mfc_sr1_set(spu, tmp);
266
267 __delay(200);
268 }
269}
270
271void crash_register_spus(struct list_head *list)
272{
273 struct spu *spu;
274
275 list_for_each_entry(spu, list, full_list) {
276 if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
277 continue;
278
279 crash_spu_info[spu->number].spu = spu;
280 }
281}
282
283#else
284static inline void crash_kexec_stop_spus(void)
285{
286}
287#endif /* CONFIG_SPU_BASE */
222 288
223void default_machine_crash_shutdown(struct pt_regs *regs) 289void default_machine_crash_shutdown(struct pt_regs *regs)
224{ 290{
@@ -254,6 +320,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
254 crash_save_cpu(regs, crashing_cpu); 320 crash_save_cpu(regs, crashing_cpu);
255 crash_kexec_prepare_cpus(crashing_cpu); 321 crash_kexec_prepare_cpus(crashing_cpu);
256 cpu_set(crashing_cpu, cpus_in_crash); 322 cpu_set(crashing_cpu, cpus_in_crash);
323 crash_kexec_stop_spus();
257 if (ppc_md.kexec_cpu_down) 324 if (ppc_md.kexec_cpu_down)
258 ppc_md.kexec_cpu_down(1, 0); 325 ppc_md.kexec_cpu_down(1, 0);
259} 326}
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index a464d67248df..89b911e83c04 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -1,5 +1,6 @@
1#include <linux/string.h> 1#include <linux/string.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/of.h>
3#include <linux/init.h> 4#include <linux/init.h>
4#include <linux/module.h> 5#include <linux/module.h>
5#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
@@ -8,118 +9,6 @@
8#include <asm/errno.h> 9#include <asm/errno.h>
9#include <asm/of_device.h> 10#include <asm/of_device.h>
10 11
11/**
12 * of_match_node - Tell if an device_node has a matching of_match structure
13 * @ids: array of of device match structures to search in
14 * @node: the of device structure to match against
15 *
16 * Low level utility function used by device matching.
17 */
18const struct of_device_id *of_match_node(const struct of_device_id *matches,
19 const struct device_node *node)
20{
21 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
22 int match = 1;
23 if (matches->name[0])
24 match &= node->name
25 && !strcmp(matches->name, node->name);
26 if (matches->type[0])
27 match &= node->type
28 && !strcmp(matches->type, node->type);
29 if (matches->compatible[0])
30 match &= of_device_is_compatible(node,
31 matches->compatible);
32 if (match)
33 return matches;
34 matches++;
35 }
36 return NULL;
37}
38
39/**
40 * of_match_device - Tell if an of_device structure has a matching
41 * of_match structure
42 * @ids: array of of device match structures to search in
43 * @dev: the of device structure to match against
44 *
45 * Used by a driver to check whether an of_device present in the
46 * system is in its list of supported devices.
47 */
48const struct of_device_id *of_match_device(const struct of_device_id *matches,
49 const struct of_device *dev)
50{
51 if (!dev->node)
52 return NULL;
53 return of_match_node(matches, dev->node);
54}
55
56struct of_device *of_dev_get(struct of_device *dev)
57{
58 struct device *tmp;
59
60 if (!dev)
61 return NULL;
62 tmp = get_device(&dev->dev);
63 if (tmp)
64 return to_of_device(tmp);
65 else
66 return NULL;
67}
68
69void of_dev_put(struct of_device *dev)
70{
71 if (dev)
72 put_device(&dev->dev);
73}
74
75static ssize_t dev_show_devspec(struct device *dev,
76 struct device_attribute *attr, char *buf)
77{
78 struct of_device *ofdev;
79
80 ofdev = to_of_device(dev);
81 return sprintf(buf, "%s", ofdev->node->full_name);
82}
83
84static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
85
86/**
87 * of_release_dev - free an of device structure when all users of it are finished.
88 * @dev: device that's been disconnected
89 *
90 * Will be called only by the device core when all users of this of device are
91 * done.
92 */
93void of_release_dev(struct device *dev)
94{
95 struct of_device *ofdev;
96
97 ofdev = to_of_device(dev);
98 of_node_put(ofdev->node);
99 kfree(ofdev);
100}
101
102int of_device_register(struct of_device *ofdev)
103{
104 int rc;
105
106 BUG_ON(ofdev->node == NULL);
107
108 rc = device_register(&ofdev->dev);
109 if (rc)
110 return rc;
111
112 return device_create_file(&ofdev->dev, &dev_attr_devspec);
113}
114
115void of_device_unregister(struct of_device *ofdev)
116{
117 device_remove_file(&ofdev->dev, &dev_attr_devspec);
118
119 device_unregister(&ofdev->dev);
120}
121
122
123ssize_t of_device_get_modalias(struct of_device *ofdev, 12ssize_t of_device_get_modalias(struct of_device *ofdev,
124 char *str, ssize_t len) 13 char *str, ssize_t len)
125{ 14{
@@ -229,14 +118,5 @@ int of_device_uevent(struct device *dev,
229 118
230 return 0; 119 return 0;
231} 120}
232
233
234EXPORT_SYMBOL(of_match_node);
235EXPORT_SYMBOL(of_match_device);
236EXPORT_SYMBOL(of_device_register);
237EXPORT_SYMBOL(of_device_unregister);
238EXPORT_SYMBOL(of_dev_get);
239EXPORT_SYMBOL(of_dev_put);
240EXPORT_SYMBOL(of_release_dev);
241EXPORT_SYMBOL(of_device_uevent); 121EXPORT_SYMBOL(of_device_uevent);
242EXPORT_SYMBOL(of_device_get_modalias); 122EXPORT_SYMBOL(of_device_get_modalias);
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 8ded4e7dc87e..f70e787d556f 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -55,94 +55,14 @@ static struct of_device_id of_default_bus_ids[] = {
55 55
56static atomic_t bus_no_reg_magic; 56static atomic_t bus_no_reg_magic;
57 57
58/*
59 *
60 * OF platform device type definition & base infrastructure
61 *
62 */
63
64static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
65{
66 struct of_device * of_dev = to_of_device(dev);
67 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
68 const struct of_device_id * matches = of_drv->match_table;
69
70 if (!matches)
71 return 0;
72
73 return of_match_device(matches, of_dev) != NULL;
74}
75
76static int of_platform_device_probe(struct device *dev)
77{
78 int error = -ENODEV;
79 struct of_platform_driver *drv;
80 struct of_device *of_dev;
81 const struct of_device_id *match;
82
83 drv = to_of_platform_driver(dev->driver);
84 of_dev = to_of_device(dev);
85
86 if (!drv->probe)
87 return error;
88
89 of_dev_get(of_dev);
90
91 match = of_match_device(drv->match_table, of_dev);
92 if (match)
93 error = drv->probe(of_dev, match);
94 if (error)
95 of_dev_put(of_dev);
96
97 return error;
98}
99
100static int of_platform_device_remove(struct device *dev)
101{
102 struct of_device * of_dev = to_of_device(dev);
103 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
104
105 if (dev->driver && drv->remove)
106 drv->remove(of_dev);
107 return 0;
108}
109
110static int of_platform_device_suspend(struct device *dev, pm_message_t state)
111{
112 struct of_device * of_dev = to_of_device(dev);
113 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
114 int error = 0;
115
116 if (dev->driver && drv->suspend)
117 error = drv->suspend(of_dev, state);
118 return error;
119}
120
121static int of_platform_device_resume(struct device * dev)
122{
123 struct of_device * of_dev = to_of_device(dev);
124 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
125 int error = 0;
126
127 if (dev->driver && drv->resume)
128 error = drv->resume(of_dev);
129 return error;
130}
131
132struct bus_type of_platform_bus_type = { 58struct bus_type of_platform_bus_type = {
133 .name = "of_platform",
134 .match = of_platform_bus_match,
135 .uevent = of_device_uevent, 59 .uevent = of_device_uevent,
136 .probe = of_platform_device_probe,
137 .remove = of_platform_device_remove,
138 .suspend = of_platform_device_suspend,
139 .resume = of_platform_device_resume,
140}; 60};
141EXPORT_SYMBOL(of_platform_bus_type); 61EXPORT_SYMBOL(of_platform_bus_type);
142 62
143static int __init of_bus_driver_init(void) 63static int __init of_bus_driver_init(void)
144{ 64{
145 return bus_register(&of_platform_bus_type); 65 return of_bus_type_init(&of_platform_bus_type, "of_platform");
146} 66}
147 67
148postcore_initcall(of_bus_driver_init); 68postcore_initcall(of_bus_driver_init);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 94b4a028232a..fe7d1255e11e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -166,7 +166,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
166 166
167} 167}
168 168
169char __init *pcibios_setup(char *str) 169char __devinit *pcibios_setup(char *str)
170{ 170{
171 return str; 171 return str;
172} 172}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 37ff99bd98b4..a38197b12d3e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -78,12 +78,9 @@ static struct boot_param_header *initial_boot_params __initdata;
78struct boot_param_header *initial_boot_params; 78struct boot_param_header *initial_boot_params;
79#endif 79#endif
80 80
81static struct device_node *allnodes = NULL; 81extern struct device_node *allnodes; /* temporary while merging */
82 82
83/* use when traversing tree through the allnext, child, sibling, 83extern rwlock_t devtree_lock; /* temporary while merging */
84 * or parent members of struct device_node.
85 */
86static DEFINE_RWLOCK(devtree_lock);
87 84
88/* export that to outside world */ 85/* export that to outside world */
89struct device_node *of_chosen; 86struct device_node *of_chosen;
@@ -1056,60 +1053,6 @@ void __init early_init_devtree(void *params)
1056 DBG(" <- early_init_devtree()\n"); 1053 DBG(" <- early_init_devtree()\n");
1057} 1054}
1058 1055
1059int of_n_addr_cells(struct device_node* np)
1060{
1061 const int *ip;
1062 do {
1063 if (np->parent)
1064 np = np->parent;
1065 ip = of_get_property(np, "#address-cells", NULL);
1066 if (ip != NULL)
1067 return *ip;
1068 } while (np->parent);
1069 /* No #address-cells property for the root node, default to 1 */
1070 return 1;
1071}
1072EXPORT_SYMBOL(of_n_addr_cells);
1073
1074int of_n_size_cells(struct device_node* np)
1075{
1076 const int* ip;
1077 do {
1078 if (np->parent)
1079 np = np->parent;
1080 ip = of_get_property(np, "#size-cells", NULL);
1081 if (ip != NULL)
1082 return *ip;
1083 } while (np->parent);
1084 /* No #size-cells property for the root node, default to 1 */
1085 return 1;
1086}
1087EXPORT_SYMBOL(of_n_size_cells);
1088
1089/** Checks if the given "compat" string matches one of the strings in
1090 * the device's "compatible" property
1091 */
1092int of_device_is_compatible(const struct device_node *device,
1093 const char *compat)
1094{
1095 const char* cp;
1096 int cplen, l;
1097
1098 cp = of_get_property(device, "compatible", &cplen);
1099 if (cp == NULL)
1100 return 0;
1101 while (cplen > 0) {
1102 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1103 return 1;
1104 l = strlen(cp) + 1;
1105 cp += l;
1106 cplen -= l;
1107 }
1108
1109 return 0;
1110}
1111EXPORT_SYMBOL(of_device_is_compatible);
1112
1113 1056
1114/** 1057/**
1115 * Indicates whether the root node has a given value in its 1058 * Indicates whether the root node has a given value in its
@@ -1141,119 +1084,6 @@ EXPORT_SYMBOL(machine_is_compatible);
1141 *******/ 1084 *******/
1142 1085
1143/** 1086/**
1144 * of_find_node_by_name - Find a node by its "name" property
1145 * @from: The node to start searching from or NULL, the node
1146 * you pass will not be searched, only the next one
1147 * will; typically, you pass what the previous call
1148 * returned. of_node_put() will be called on it
1149 * @name: The name string to match against
1150 *
1151 * Returns a node pointer with refcount incremented, use
1152 * of_node_put() on it when done.
1153 */
1154struct device_node *of_find_node_by_name(struct device_node *from,
1155 const char *name)
1156{
1157 struct device_node *np;
1158
1159 read_lock(&devtree_lock);
1160 np = from ? from->allnext : allnodes;
1161 for (; np != NULL; np = np->allnext)
1162 if (np->name != NULL && strcasecmp(np->name, name) == 0
1163 && of_node_get(np))
1164 break;
1165 of_node_put(from);
1166 read_unlock(&devtree_lock);
1167 return np;
1168}
1169EXPORT_SYMBOL(of_find_node_by_name);
1170
1171/**
1172 * of_find_node_by_type - Find a node by its "device_type" property
1173 * @from: The node to start searching from, or NULL to start searching
1174 * the entire device tree. The node you pass will not be
1175 * searched, only the next one will; typically, you pass
1176 * what the previous call returned. of_node_put() will be
1177 * called on from for you.
1178 * @type: The type string to match against
1179 *
1180 * Returns a node pointer with refcount incremented, use
1181 * of_node_put() on it when done.
1182 */
1183struct device_node *of_find_node_by_type(struct device_node *from,
1184 const char *type)
1185{
1186 struct device_node *np;
1187
1188 read_lock(&devtree_lock);
1189 np = from ? from->allnext : allnodes;
1190 for (; np != 0; np = np->allnext)
1191 if (np->type != 0 && strcasecmp(np->type, type) == 0
1192 && of_node_get(np))
1193 break;
1194 of_node_put(from);
1195 read_unlock(&devtree_lock);
1196 return np;
1197}
1198EXPORT_SYMBOL(of_find_node_by_type);
1199
1200/**
1201 * of_find_compatible_node - Find a node based on type and one of the
1202 * tokens in its "compatible" property
1203 * @from: The node to start searching from or NULL, the node
1204 * you pass will not be searched, only the next one
1205 * will; typically, you pass what the previous call
1206 * returned. of_node_put() will be called on it
1207 * @type: The type string to match "device_type" or NULL to ignore
1208 * @compatible: The string to match to one of the tokens in the device
1209 * "compatible" list.
1210 *
1211 * Returns a node pointer with refcount incremented, use
1212 * of_node_put() on it when done.
1213 */
1214struct device_node *of_find_compatible_node(struct device_node *from,
1215 const char *type, const char *compatible)
1216{
1217 struct device_node *np;
1218
1219 read_lock(&devtree_lock);
1220 np = from ? from->allnext : allnodes;
1221 for (; np != 0; np = np->allnext) {
1222 if (type != NULL
1223 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1224 continue;
1225 if (of_device_is_compatible(np, compatible) && of_node_get(np))
1226 break;
1227 }
1228 of_node_put(from);
1229 read_unlock(&devtree_lock);
1230 return np;
1231}
1232EXPORT_SYMBOL(of_find_compatible_node);
1233
1234/**
1235 * of_find_node_by_path - Find a node matching a full OF path
1236 * @path: The full path to match
1237 *
1238 * Returns a node pointer with refcount incremented, use
1239 * of_node_put() on it when done.
1240 */
1241struct device_node *of_find_node_by_path(const char *path)
1242{
1243 struct device_node *np = allnodes;
1244
1245 read_lock(&devtree_lock);
1246 for (; np != 0; np = np->allnext) {
1247 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1248 && of_node_get(np))
1249 break;
1250 }
1251 read_unlock(&devtree_lock);
1252 return np;
1253}
1254EXPORT_SYMBOL(of_find_node_by_path);
1255
1256/**
1257 * of_find_node_by_phandle - Find a node given a phandle 1087 * of_find_node_by_phandle - Find a node given a phandle
1258 * @handle: phandle of the node to find 1088 * @handle: phandle of the node to find
1259 * 1089 *
@@ -1298,51 +1128,6 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
1298EXPORT_SYMBOL(of_find_all_nodes); 1128EXPORT_SYMBOL(of_find_all_nodes);
1299 1129
1300/** 1130/**
1301 * of_get_parent - Get a node's parent if any
1302 * @node: Node to get parent
1303 *
1304 * Returns a node pointer with refcount incremented, use
1305 * of_node_put() on it when done.
1306 */
1307struct device_node *of_get_parent(const struct device_node *node)
1308{
1309 struct device_node *np;
1310
1311 if (!node)
1312 return NULL;
1313
1314 read_lock(&devtree_lock);
1315 np = of_node_get(node->parent);
1316 read_unlock(&devtree_lock);
1317 return np;
1318}
1319EXPORT_SYMBOL(of_get_parent);
1320
1321/**
1322 * of_get_next_child - Iterate a node childs
1323 * @node: parent node
1324 * @prev: previous child of the parent node, or NULL to get first
1325 *
1326 * Returns a node pointer with refcount incremented, use
1327 * of_node_put() on it when done.
1328 */
1329struct device_node *of_get_next_child(const struct device_node *node,
1330 struct device_node *prev)
1331{
1332 struct device_node *next;
1333
1334 read_lock(&devtree_lock);
1335 next = prev ? prev->sibling : node->child;
1336 for (; next != 0; next = next->sibling)
1337 if (of_node_get(next))
1338 break;
1339 of_node_put(prev);
1340 read_unlock(&devtree_lock);
1341 return next;
1342}
1343EXPORT_SYMBOL(of_get_next_child);
1344
1345/**
1346 * of_node_get - Increment refcount of a node 1131 * of_node_get - Increment refcount of a node
1347 * @node: Node to inc refcount, NULL is supported to 1132 * @node: Node to inc refcount, NULL is supported to
1348 * simplify writing of callers 1133 * simplify writing of callers
@@ -1433,7 +1218,7 @@ void of_attach_node(struct device_node *np)
1433 * a reference to the node. The memory associated with the node 1218 * a reference to the node. The memory associated with the node
1434 * is not freed until its refcount goes to zero. 1219 * is not freed until its refcount goes to zero.
1435 */ 1220 */
1436void of_detach_node(const struct device_node *np) 1221void of_detach_node(struct device_node *np)
1437{ 1222{
1438 struct device_node *parent; 1223 struct device_node *parent;
1439 1224
@@ -1543,37 +1328,6 @@ static int __init prom_reconfig_setup(void)
1543__initcall(prom_reconfig_setup); 1328__initcall(prom_reconfig_setup);
1544#endif 1329#endif
1545 1330
1546struct property *of_find_property(const struct device_node *np,
1547 const char *name,
1548 int *lenp)
1549{
1550 struct property *pp;
1551
1552 read_lock(&devtree_lock);
1553 for (pp = np->properties; pp != 0; pp = pp->next)
1554 if (strcmp(pp->name, name) == 0) {
1555 if (lenp != 0)
1556 *lenp = pp->length;
1557 break;
1558 }
1559 read_unlock(&devtree_lock);
1560
1561 return pp;
1562}
1563EXPORT_SYMBOL(of_find_property);
1564
1565/*
1566 * Find a property with a given name for a given node
1567 * and return the value.
1568 */
1569const void *of_get_property(const struct device_node *np, const char *name,
1570 int *lenp)
1571{
1572 struct property *pp = of_find_property(np,name,lenp);
1573 return pp ? pp->value : NULL;
1574}
1575EXPORT_SYMBOL(of_get_property);
1576
1577/* 1331/*
1578 * Add a property to a node 1332 * Add a property to a node
1579 */ 1333 */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d577b71db375..087c92f2a3eb 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -284,7 +284,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
284 int wait) 284 int wait)
285{ 285{
286 cpumask_t map = CPU_MASK_NONE; 286 cpumask_t map = CPU_MASK_NONE;
287 int ret = -EBUSY; 287 int ret = 0;
288 288
289 if (!cpu_online(cpu)) 289 if (!cpu_online(cpu))
290 return -EINVAL; 290 return -EINVAL;
@@ -292,6 +292,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int
292 cpu_set(cpu, map); 292 cpu_set(cpu, map);
293 if (cpu != get_cpu()) 293 if (cpu != get_cpu())
294 ret = smp_call_function_map(func,info,nonatomic,wait,map); 294 ret = smp_call_function_map(func,info,nonatomic,wait,map);
295 else {
296 local_irq_disable();
297 func(info);
298 local_irq_enable();
299 }
295 put_cpu(); 300 put_cpu();
296 return ret; 301 return ret;
297} 302}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e5df167f7824..727a6699f2f4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -122,6 +122,7 @@ extern struct timezone sys_tz;
122static long timezone_offset; 122static long timezone_offset;
123 123
124unsigned long ppc_proc_freq; 124unsigned long ppc_proc_freq;
125EXPORT_SYMBOL(ppc_proc_freq);
125unsigned long ppc_tb_freq; 126unsigned long ppc_tb_freq;
126 127
127static u64 tb_last_jiffy __cacheline_aligned_in_smp; 128static u64 tb_last_jiffy __cacheline_aligned_in_smp;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 3767211b3d0f..ab3546c5ac3a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -283,7 +283,13 @@ good_area:
283 /* protection fault */ 283 /* protection fault */
284 if (error_code & DSISR_PROTFAULT) 284 if (error_code & DSISR_PROTFAULT)
285 goto bad_area; 285 goto bad_area;
286 if (!(vma->vm_flags & VM_EXEC)) 286 /*
287 * Allow execution from readable areas if the MMU does not
288 * provide separate controls over reading and executing.
289 */
290 if (!(vma->vm_flags & VM_EXEC) &&
291 (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
292 !(vma->vm_flags & (VM_READ | VM_WRITE))))
287 goto bad_area; 293 goto bad_area;
288#else 294#else
289 pte_t *ptep; 295 pte_t *ptep;
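
The new condition above allows execution from readable mappings when the MMU cannot separate read and execute permission. The small program below just re-evaluates that condition for the interesting case; the VM_* values and the exec_fault_is_bad()/has_noexec names are local stand-ins for illustration, not kernel definitions.

#include <stdio.h>

#define VM_READ  0x1
#define VM_WRITE 0x2
#define VM_EXEC  0x4

/* restatement of the test added above; has_noexec stands in for
 * cpu_has_feature(CPU_FTR_NOEXECUTE) */
static int exec_fault_is_bad(unsigned long vm_flags, int has_noexec)
{
	return !(vm_flags & VM_EXEC) &&
	       (has_noexec || !(vm_flags & (VM_READ | VM_WRITE)));
}

int main(void)
{
	/* executing from a readable, non-executable mapping */
	printf("CPU with no-execute:    %s\n",
	       exec_fault_is_bad(VM_READ, 1) ? "bad_area" : "allowed");
	printf("CPU without no-execute: %s\n",
	       exec_fault_is_bad(VM_READ, 0) ? "bad_area" : "allowed");
	return 0;
}
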
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2ce9491b48d4..bc7b0cedae5e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -609,7 +609,7 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
609 mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp; 609 mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
610#endif /* CONFIG_PPC_MM_SLICES */ 610#endif /* CONFIG_PPC_MM_SLICES */
611 611
612#ifdef CONFIG_SPE_BASE 612#ifdef CONFIG_SPU_BASE
613 spu_flush_all_slbs(mm); 613 spu_flush_all_slbs(mm);
614#endif 614#endif
615} 615}
@@ -744,7 +744,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
744 "to 4kB pages because of " 744 "to 4kB pages because of "
745 "non-cacheable mapping\n"); 745 "non-cacheable mapping\n");
746 psize = mmu_vmalloc_psize = MMU_PAGE_4K; 746 psize = mmu_vmalloc_psize = MMU_PAGE_4K;
747#ifdef CONFIG_SPE_BASE 747#ifdef CONFIG_SPU_BASE
748 spu_flush_all_slbs(mm); 748 spu_flush_all_slbs(mm);
749#endif 749#endif
750 } 750 }
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 06c7e77e097a..eb4b512d65fa 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -26,6 +26,8 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/pagemap.h>
30
29#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
30#include <asm/tlb.h> 32#include <asm/tlb.h>
31 33
diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index eb2dece76a54..7089e79689b9 100644
--- a/arch/powerpc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
@@ -15,3 +15,10 @@ config OPROFILE
15 15
16 If unsure, say N. 16 If unsure, say N.
17 17
18config OPROFILE_CELL
19 bool "OProfile for Cell Broadband Engine"
20 depends on (SPU_FS = y && OPROFILE = m) || (SPU_FS = y && OPROFILE = y) || (SPU_FS = m && OPROFILE = m)
21 default y
22 help
23 Profiling of Cell BE SPUs requires special support enabled
24 by this option.
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 4b5f9528218c..c5f64c3bd668 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -11,7 +11,9 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
11 timer_int.o ) 11 timer_int.o )
12 12
13oprofile-y := $(DRIVER_OBJS) common.o backtrace.o 13oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
14oprofile-$(CONFIG_PPC_CELL_NATIVE) += op_model_cell.o 14oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
15 cell/spu_profiler.o cell/vma_map.o \
16 cell/spu_task_sync.o
15oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o op_model_pa6t.o 17oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o op_model_pa6t.o
16oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o 18oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
17oprofile-$(CONFIG_6xx) += op_model_7450.o 19oprofile-$(CONFIG_6xx) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
new file mode 100644
index 000000000000..e5704f00c8b4
--- /dev/null
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -0,0 +1,97 @@
1 /*
2 * Cell Broadband Engine OProfile Support
3 *
4 * (C) Copyright IBM Corporation 2006
5 *
6 * Author: Maynard Johnson <maynardj@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef PR_UTIL_H
15#define PR_UTIL_H
16
17#include <linux/cpumask.h>
18#include <linux/oprofile.h>
19#include <asm/cell-pmu.h>
20#include <asm/spu.h>
21
22#include "../../platforms/cell/cbe_regs.h"
23
24/* Defines used for sync_start */
25#define SKIP_GENERIC_SYNC 0
26#define SYNC_START_ERROR -1
27#define DO_GENERIC_SYNC 1
28
29struct spu_overlay_info { /* map of sections within an SPU overlay */
30 unsigned int vma; /* SPU virtual memory address from elf */
31 unsigned int size; /* size of section from elf */
32 unsigned int offset; /* offset of section into elf file */
33 unsigned int buf;
34};
35
36struct vma_to_fileoffset_map { /* map of sections within an SPU program */
37 struct vma_to_fileoffset_map *next; /* list pointer */
38 unsigned int vma; /* SPU virtual memory address from elf */
39 unsigned int size; /* size of section from elf */
40 unsigned int offset; /* offset of section into elf file */
41 unsigned int guard_ptr;
42 unsigned int guard_val;
43 /*
44 * The guard pointer is an entry in the _ovly_buf_table,
45 * computed using ovly.buf as the index into the table. Since
46 * ovly.buf values begin at '1' to reference the first (or 0th)
47 * entry in the _ovly_buf_table, the computation subtracts 1
48 * from ovly.buf.
49 * The guard value is stored in the _ovly_buf_table entry and
50 * is an index (starting at 1) back to the _ovly_table entry
51 * that is pointing at this _ovly_buf_table entry. So, for
52 * example, for an overlay scenario with one overlay segment
53 * and two overlay sections:
54 * - Section 1 points to the first entry of the
55 * _ovly_buf_table, which contains a guard value
56 * of '1', referencing the first (index=0) entry of
57 * _ovly_table.
58 * - Section 2 points to the second entry of the
59 * _ovly_buf_table, which contains a guard value
60 * of '2', referencing the second (index=1) entry of
61 * _ovly_table.
62 */
63
64};
65
66/* The three functions below are for maintaining and accessing
67 * the vma-to-fileoffset map.
68 */
69struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
70 u64 objectid);
71unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
72 unsigned int vma, const struct spu *aSpu,
73 int *grd_val);
74void vma_map_free(struct vma_to_fileoffset_map *map);
75
76/*
77 * Entry point for SPU profiling.
78 * cycles_reset is the SPU_CYCLES count value specified by the user.
79 */
80int start_spu_profiling(unsigned int cycles_reset);
81
82void stop_spu_profiling(void);
83
84
85/* add the necessary profiling hooks */
86int spu_sync_start(void);
87
88/* remove the hooks */
89int spu_sync_stop(void);
90
91/* Record SPU program counter samples to the oprofile event buffer. */
92void spu_sync_buffer(int spu_num, unsigned int *samples,
93 int num_samples);
94
95void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
96
97#endif /* PR_UTIL_H */
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
new file mode 100644
index 000000000000..380d7e217531
--- /dev/null
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -0,0 +1,221 @@
1/*
2 * Cell Broadband Engine OProfile Support
3 *
4 * (C) Copyright IBM Corporation 2006
5 *
6 * Authors: Maynard Johnson <maynardj@us.ibm.com>
7 * Carl Love <carll@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/hrtimer.h>
16#include <linux/smp.h>
17#include <linux/slab.h>
18#include <asm/cell-pmu.h>
19#include "pr_util.h"
20
21#define TRACE_ARRAY_SIZE 1024
22#define SCALE_SHIFT 14
23
24static u32 *samples;
25
26static int spu_prof_running;
27static unsigned int profiling_interval;
28
29#define NUM_SPU_BITS_TRBUF 16
30#define SPUS_PER_TB_ENTRY 4
31#define SPUS_PER_NODE 8
32
33#define SPU_PC_MASK 0xFFFF
34
35static DEFINE_SPINLOCK(sample_array_lock);
36unsigned long sample_array_lock_flags;
37
38void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
39{
40 unsigned long ns_per_cyc;
41
42 if (!freq_khz)
43 freq_khz = ppc_proc_freq/1000;
44
45 /* To calculate a timeout in nanoseconds, the basic
46 * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
47 * To avoid floating point math, we use the scale math
48 * technique as described in linux/jiffies.h. We use
49 * a scale factor of SCALE_SHIFT, which provides 4 decimal places
50 * of precision. This is close enough for the purpose at hand.
51 *
52 * The value of the timeout should be small enough that the hw
 53	 * trace buffer will not get more than about 1/3 full for the
54 * maximum user specified (the LFSR value) hw sampling frequency.
55 * This is to ensure the trace buffer will never fill even if the
56 * kernel thread scheduling varies under a heavy system load.
57 */
58
59 ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
60 profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
61
62}
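/*
 * Worked example of the scale math above (the 3.2 GHz clock and the
 * cycles_reset value are invented for illustration):
 *
 *   freq_khz     = 3200000                    (3.2 GHz)
 *   ns_per_cyc   = (1000000 << 14) / 3200000  = 5120
 *                  i.e. 0.3125 ns per cycle, scaled by 2^14
 *   cycles_reset = 100000
 *   interval     = (5120 * 100000) >> 14      = 31250 ns
 *
 * which matches the exact answer: 100000 cycles at 3.2 GHz = 31.25 us.
 */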
63
64/*
65 * Extract SPU PC from trace buffer entry
66 */
67static void spu_pc_extract(int cpu, int entry)
68{
69 /* the trace buffer is 128 bits */
70 u64 trace_buffer[2];
71 u64 spu_mask;
72 int spu;
73
74 spu_mask = SPU_PC_MASK;
75
76 /* Each SPU PC is 16 bits; hence, four spus in each of
77 * the two 64-bit buffer entries that make up the
78 * 128-bit trace_buffer entry. Process two 64-bit values
79 * simultaneously.
80 * trace[0] SPU PC contents are: 0 1 2 3
81 * trace[1] SPU PC contents are: 4 5 6 7
82 */
83
84 cbe_read_trace_buffer(cpu, trace_buffer);
85
86 for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
87 /* spu PC trace entry is upper 16 bits of the
88 * 18 bit SPU program counter
89 */
90 samples[spu * TRACE_ARRAY_SIZE + entry]
91 = (spu_mask & trace_buffer[0]) << 2;
92 samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
93 = (spu_mask & trace_buffer[1]) << 2;
94
95 trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
96 trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
97 }
98}
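For reference, a self-contained user-space sketch of the unpacking above (not part of the patch); the 64-bit trace word is a fabricated value and only shows how four 16-bit PC fields come out shifted left by two bits:

#include <stdio.h>
#include <stdint.h>

#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY  4
#define SPU_PC_MASK        0xFFFF

int main(void)
{
	uint64_t trace_word = 0x0123456789abcdefULL;	/* made-up trace buffer word */
	uint32_t pc[SPUS_PER_TB_ENTRY];
	int spu;

	/* The lowest 16 bits belong to the last SPU of the group, as in spu_pc_extract(). */
	for (spu = SPUS_PER_TB_ENTRY - 1; spu >= 0; spu--) {
		pc[spu] = (uint32_t)(trace_word & SPU_PC_MASK) << 2;
		trace_word >>= NUM_SPU_BITS_TRBUF;
	}

	for (spu = 0; spu < SPUS_PER_TB_ENTRY; spu++)
		printf("SPU %d PC field: 0x%05x\n", spu, pc[spu]);
	return 0;
}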
99
100static int cell_spu_pc_collection(int cpu)
101{
102 u32 trace_addr;
103 int entry;
104
105 /* process the collected SPU PC for the node */
106
107 entry = 0;
108
109 trace_addr = cbe_read_pm(cpu, trace_address);
110 while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
111 /* there is data in the trace buffer to process */
112 spu_pc_extract(cpu, entry);
113
114 entry++;
115
116 if (entry >= TRACE_ARRAY_SIZE)
117 /* spu_samples is full */
118 break;
119
120 trace_addr = cbe_read_pm(cpu, trace_address);
121 }
122
123 return entry;
124}
125
126
127static enum hrtimer_restart profile_spus(struct hrtimer *timer)
128{
129 ktime_t kt;
130 int cpu, node, k, num_samples, spu_num;
131
132 if (!spu_prof_running)
133 goto stop;
134
135 for_each_online_cpu(cpu) {
136 if (cbe_get_hw_thread_id(cpu))
137 continue;
138
139 node = cbe_cpu_to_node(cpu);
140
141 /* There should only be one kernel thread at a time processing
142 * the samples. In the very unlikely case that the processing
143 * takes a very long time and multiple kernel threads are
144 * started to process the samples, make sure only one kernel
145 * thread is working on the samples array at a time. The
146 * sample array must be loaded and then processed for a given
147 * cpu. The sample array is not per cpu.
148 */
149 spin_lock_irqsave(&sample_array_lock,
150 sample_array_lock_flags);
151 num_samples = cell_spu_pc_collection(cpu);
152
153 if (num_samples == 0) {
154 spin_unlock_irqrestore(&sample_array_lock,
155 sample_array_lock_flags);
156 continue;
157 }
158
159 for (k = 0; k < SPUS_PER_NODE; k++) {
160 spu_num = k + (node * SPUS_PER_NODE);
161 spu_sync_buffer(spu_num,
162 samples + (k * TRACE_ARRAY_SIZE),
163 num_samples);
164 }
165
166 spin_unlock_irqrestore(&sample_array_lock,
167 sample_array_lock_flags);
168
169 }
170 smp_wmb(); /* ensure spu event buffer updates are written */
171 /* don't want events intermingled... */
172
173 kt = ktime_set(0, profiling_interval);
174 if (!spu_prof_running)
175 goto stop;
176 hrtimer_forward(timer, timer->base->get_time(), kt);
177 return HRTIMER_RESTART;
178
179 stop:
180 printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
181 return HRTIMER_NORESTART;
182}
183
184static struct hrtimer timer;
185/*
186 * Entry point for SPU profiling.
187 * NOTE: SPU profiling is done system-wide, not per-CPU.
188 *
189 * cycles_reset is the count value specified by the user when
190 * setting up OProfile to count SPU_CYCLES.
191 */
192int start_spu_profiling(unsigned int cycles_reset)
193{
194 ktime_t kt;
195
196 pr_debug("timer resolution: %lu\n", TICK_NSEC);
197 kt = ktime_set(0, profiling_interval);
198 hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
199 timer.expires = kt;
200 timer.function = profile_spus;
201
202 /* Allocate arrays for collecting SPU PC samples */
203 samples = kzalloc(SPUS_PER_NODE *
204 TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);
205
206 if (!samples)
207 return -ENOMEM;
208
209 spu_prof_running = 1;
210 hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
211
212 return 0;
213}
214
215void stop_spu_profiling(void)
216{
217 spu_prof_running = 0;
218 hrtimer_cancel(&timer);
219 kfree(samples);
220 pr_debug("SPU_PROF: stop_spu_profiling issued\n");
221}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
new file mode 100644
index 000000000000..133665754a75
--- /dev/null
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -0,0 +1,484 @@
1/*
2 * Cell Broadband Engine OProfile Support
3 *
4 * (C) Copyright IBM Corporation 2006
5 *
6 * Author: Maynard Johnson <maynardj@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/* The purpose of this file is to handle SPU event task switching
15 * and to record SPU context information into the OProfile
16 * event buffer.
17 *
18 * Additionally, the spu_sync_buffer function is provided as a helper
19 * for recording actual SPU program counter samples to the event buffer.
20 */
21#include <linux/dcookies.h>
22#include <linux/kref.h>
23#include <linux/mm.h>
24#include <linux/module.h>
25#include <linux/notifier.h>
26#include <linux/numa.h>
27#include <linux/oprofile.h>
28#include <linux/spinlock.h>
29#include "pr_util.h"
30
31#define RELEASE_ALL 9999
32
33static DEFINE_SPINLOCK(buffer_lock);
34static DEFINE_SPINLOCK(cache_lock);
35static int num_spu_nodes;
36int spu_prof_num_nodes;
37int last_guard_val[MAX_NUMNODES * 8];
38
39/* Container for caching information about an active SPU task. */
40struct cached_info {
41 struct vma_to_fileoffset_map *map;
42 struct spu *the_spu; /* needed to access pointer to local_store */
43 struct kref cache_ref;
44};
45
46static struct cached_info *spu_info[MAX_NUMNODES * 8];
47
48static void destroy_cached_info(struct kref *kref)
49{
50 struct cached_info *info;
51
52 info = container_of(kref, struct cached_info, cache_ref);
53 vma_map_free(info->map);
54 kfree(info);
55 module_put(THIS_MODULE);
56}
57
58/* Return the cached_info for the passed SPU number.
59 * ATTENTION: Callers are responsible for obtaining the
60 * cache_lock if needed prior to invoking this function.
61 */
62static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
63{
64 struct kref *ref;
65 struct cached_info *ret_info;
66
67 if (spu_num >= num_spu_nodes) {
68 printk(KERN_ERR "SPU_PROF: "
69 "%s, line %d: Invalid index %d into spu info cache\n",
70 __FUNCTION__, __LINE__, spu_num);
71 ret_info = NULL;
72 goto out;
73 }
74 if (!spu_info[spu_num] && the_spu) {
75 ref = spu_get_profile_private_kref(the_spu->ctx);
76 if (ref) {
77 spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
78 kref_get(&spu_info[spu_num]->cache_ref);
79 }
80 }
81
82 ret_info = spu_info[spu_num];
83 out:
84 return ret_info;
85}
86
87
88/* Looks for cached info for the passed spu. If not found, the
89 * cached info is created for the passed spu.
90 * Returns 0 for success; otherwise, -1 for error.
91 */
92static int
93prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
94{
95 unsigned long flags;
96 struct vma_to_fileoffset_map *new_map;
97 int retval = 0;
98 struct cached_info *info;
99
100 /* We won't bother getting cache_lock here since
101 * we don't do anything with the cached_info that's returned.
102 */
103 info = get_cached_info(spu, spu->number);
104
105 if (info) {
106 pr_debug("Found cached SPU info.\n");
107 goto out;
108 }
109
110 /* Create cached_info and set spu_info[spu->number] to point to it.
111 * spu->number is a system-wide value, not a per-node value.
112 */
113 info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
114 if (!info) {
115 printk(KERN_ERR "SPU_PROF: "
116 "%s, line %d: kzalloc of cached_info failed\n",
117 __FUNCTION__, __LINE__);
118 retval = -ENOMEM;
119 goto err_alloc;
120 }
121 new_map = create_vma_map(spu, objectId);
122 if (!new_map) {
123 printk(KERN_ERR "SPU_PROF: "
124 "%s, line %d: create vma_map failed\n",
125 __FUNCTION__, __LINE__);
126 retval = -ENOMEM;
127 goto err_alloc;
128 }
129
130 pr_debug("Created vma_map\n");
131 info->map = new_map;
132 info->the_spu = spu;
133 kref_init(&info->cache_ref);
134 spin_lock_irqsave(&cache_lock, flags);
135 spu_info[spu->number] = info;
136 /* Increment count before passing off ref to SPUFS. */
137 kref_get(&info->cache_ref);
138
139 /* We increment the module refcount here since SPUFS is
140 * responsible for the final destruction of the cached_info,
141 * and it must be able to access the destroy_cached_info()
142 * function defined in the OProfile module. We decrement
143 * the module refcount in destroy_cached_info.
144 */
145 try_module_get(THIS_MODULE);
146 spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
147 destroy_cached_info);
148 spin_unlock_irqrestore(&cache_lock, flags);
149 goto out;
150
151err_alloc:
152 kfree(info);
153out:
154 return retval;
155}
156
157/*
158 * NOTE: The caller is responsible for locking the
159 * cache_lock prior to calling this function.
160 */
161static int release_cached_info(int spu_index)
162{
163 int index, end;
164
165 if (spu_index == RELEASE_ALL) {
166 end = num_spu_nodes;
167 index = 0;
168 } else {
169 if (spu_index >= num_spu_nodes) {
170 printk(KERN_ERR "SPU_PROF: "
171 "%s, line %d: "
172 "Invalid index %d into spu info cache\n",
173 __FUNCTION__, __LINE__, spu_index);
174 goto out;
175 }
176 end = spu_index + 1;
177 index = spu_index;
178 }
179 for (; index < end; index++) {
180 if (spu_info[index]) {
181 kref_put(&spu_info[index]->cache_ref,
182 destroy_cached_info);
183 spu_info[index] = NULL;
184 }
185 }
186
187out:
188 return 0;
189}
190
191/* The source code for fast_get_dcookie was "borrowed"
192 * from drivers/oprofile/buffer_sync.c.
193 */
194
195/* Optimisation. We can manage without taking the dcookie sem
196 * because we cannot reach this code without at least one
197 * dcookie user still being registered (namely, the reader
198 * of the event buffer).
199 */
200static inline unsigned long fast_get_dcookie(struct dentry *dentry,
201 struct vfsmount *vfsmnt)
202{
203 unsigned long cookie;
204
205 if (dentry->d_cookie)
206 return (unsigned long)dentry;
207 get_dcookie(dentry, vfsmnt, &cookie);
208 return cookie;
209}
210
211/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
212 * which corresponds loosely to "application name". Also, determine
213 * the offset for the SPU ELF object. If computed offset is
214 * non-zero, it implies an embedded SPU object; otherwise, it's a
215 * separate SPU binary, in which case we retrieve its dcookie.
216 * For the embedded case, we must determine if SPU ELF is embedded
217 * in the executable application or another file (i.e., shared lib).
218 * If embedded in a shared lib, we must get the dcookie and return
219 * that to the caller.
220 */
221static unsigned long
222get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
223 unsigned long *spu_bin_dcookie,
224 unsigned long spu_ref)
225{
226 unsigned long app_cookie = 0;
227 unsigned int my_offset = 0;
228 struct file *app = NULL;
229 struct vm_area_struct *vma;
230 struct mm_struct *mm = spu->mm;
231
232 if (!mm)
233 goto out;
234
235 down_read(&mm->mmap_sem);
236
237 for (vma = mm->mmap; vma; vma = vma->vm_next) {
238 if (!vma->vm_file)
239 continue;
240 if (!(vma->vm_flags & VM_EXECUTABLE))
241 continue;
242 app_cookie = fast_get_dcookie(vma->vm_file->f_dentry,
243 vma->vm_file->f_vfsmnt);
244 pr_debug("got dcookie for %s\n",
245 vma->vm_file->f_dentry->d_name.name);
246 app = vma->vm_file;
247 break;
248 }
249
250 for (vma = mm->mmap; vma; vma = vma->vm_next) {
251 if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
252 continue;
253 my_offset = spu_ref - vma->vm_start;
254 if (!vma->vm_file)
255 goto fail_no_image_cookie;
256
257 pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n",
258 my_offset, spu_ref,
259 vma->vm_file->f_dentry->d_name.name);
260 *offsetp = my_offset;
261 break;
262 }
263
264 *spu_bin_dcookie = fast_get_dcookie(vma->vm_file->f_dentry,
265 vma->vm_file->f_vfsmnt);
266 pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);
267
268 up_read(&mm->mmap_sem);
269
270out:
271 return app_cookie;
272
273fail_no_image_cookie:
274 up_read(&mm->mmap_sem);
275
276 printk(KERN_ERR "SPU_PROF: "
277 "%s, line %d: Cannot find dcookie for SPU binary\n",
278 __FUNCTION__, __LINE__);
279 goto out;
280}
281
282
283
284/* This function finds or creates cached context information for the
285 * passed SPU and records SPU context information into the OProfile
286 * event buffer.
287 */
288static int process_context_switch(struct spu *spu, unsigned long objectId)
289{
290 unsigned long flags;
291 int retval;
292 unsigned int offset = 0;
293 unsigned long spu_cookie = 0, app_dcookie;
294
295 retval = prepare_cached_spu_info(spu, objectId);
296 if (retval)
297 goto out;
298
299 /* Get dcookie first because a mutex_lock is taken in that
300 * code path, so interrupts must not be disabled.
301 */
302 app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
303 if (!app_dcookie || !spu_cookie) {
304 retval = -ENOENT;
305 goto out;
306 }
307
308 /* Record context info in event buffer */
309 spin_lock_irqsave(&buffer_lock, flags);
310 add_event_entry(ESCAPE_CODE);
311 add_event_entry(SPU_CTX_SWITCH_CODE);
312 add_event_entry(spu->number);
313 add_event_entry(spu->pid);
314 add_event_entry(spu->tgid);
315 add_event_entry(app_dcookie);
316 add_event_entry(spu_cookie);
317 add_event_entry(offset);
318 spin_unlock_irqrestore(&buffer_lock, flags);
319 smp_wmb(); /* ensure spu event buffer updates are written */
320 /* don't want entries intermingled... */
321out:
322 return retval;
323}
324
325/*
326 * This function is invoked on either a bind_context or unbind_context.
327 * If called for an unbind_context, the val arg is 0; otherwise,
328 * it is the object-id value for the spu context.
329 * The data arg is of type 'struct spu *'.
330 */
331static int spu_active_notify(struct notifier_block *self, unsigned long val,
332 void *data)
333{
334 int retval;
335 unsigned long flags;
336 struct spu *the_spu = data;
337
338 pr_debug("SPU event notification arrived\n");
339 if (!val) {
340 spin_lock_irqsave(&cache_lock, flags);
341 retval = release_cached_info(the_spu->number);
342 spin_unlock_irqrestore(&cache_lock, flags);
343 } else {
344 retval = process_context_switch(the_spu, val);
345 }
346 return retval;
347}
348
349static struct notifier_block spu_active = {
350 .notifier_call = spu_active_notify,
351};
352
353static int number_of_online_nodes(void)
354{
355 u32 cpu; u32 tmp;
356 int nodes = 0;
357 for_each_online_cpu(cpu) {
358 tmp = cbe_cpu_to_node(cpu) + 1;
359 if (tmp > nodes)
360 nodes++;
361 }
362 return nodes;
363}
364
365/* The main purpose of this function is to synchronize
366 * OProfile with SPUFS by registering to be notified of
367 * SPU task switches.
368 *
369 * NOTE: When profiling SPUs, we must ensure that only
370 * spu_sync_start is invoked and not the generic sync_start
371 * in drivers/oprofile/oprof.c. A return value of
372 * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
373 * accomplish this.
374 */
375int spu_sync_start(void)
376{
377 int k;
378 int ret = SKIP_GENERIC_SYNC;
379 int register_ret;
380 unsigned long flags = 0;
381
382 spu_prof_num_nodes = number_of_online_nodes();
383 num_spu_nodes = spu_prof_num_nodes * 8;
384
385 spin_lock_irqsave(&buffer_lock, flags);
386 add_event_entry(ESCAPE_CODE);
387 add_event_entry(SPU_PROFILING_CODE);
388 add_event_entry(num_spu_nodes);
389 spin_unlock_irqrestore(&buffer_lock, flags);
390
391 /* Register for SPU events */
392 register_ret = spu_switch_event_register(&spu_active);
393 if (register_ret) {
394 ret = SYNC_START_ERROR;
395 goto out;
396 }
397
398 for (k = 0; k < (MAX_NUMNODES * 8); k++)
399 last_guard_val[k] = 0;
400 pr_debug("spu_sync_start -- running.\n");
401out:
402 return ret;
403}
404
405/* Record SPU program counter samples to the oprofile event buffer. */
406void spu_sync_buffer(int spu_num, unsigned int *samples,
407 int num_samples)
408{
409 unsigned long long file_offset;
410 unsigned long flags;
411 int i;
412 struct vma_to_fileoffset_map *map;
413 struct spu *the_spu;
414 unsigned long long spu_num_ll = spu_num;
415 unsigned long long spu_num_shifted = spu_num_ll << 32;
416 struct cached_info *c_info;
417
418 /* We need to obtain the cache_lock here because it's
419 * possible that after getting the cached_info, the SPU job
420 * corresponding to this cached_info may end, thus resulting
421 * in the destruction of the cached_info.
422 */
423 spin_lock_irqsave(&cache_lock, flags);
424 c_info = get_cached_info(NULL, spu_num);
425 if (!c_info) {
426 /* This legitimately happens when the SPU task ends before all
427 * samples are recorded.
428 * No big deal -- so we just drop a few samples.
429 */
430 pr_debug("SPU_PROF: No cached SPU context "
431 "for SPU #%d. Dropping samples.\n", spu_num);
432 goto out;
433 }
434
435 map = c_info->map;
436 the_spu = c_info->the_spu;
437 spin_lock(&buffer_lock);
438 for (i = 0; i < num_samples; i++) {
439 unsigned int sample = *(samples+i);
440 int grd_val = 0;
441 file_offset = 0;
442 if (sample == 0)
443 continue;
444 file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);
445
446 /* If overlays are used by this SPU application, the guard
447 * value is non-zero, indicating which overlay section is in
448 * use. We need to discard samples taken during the time
449 * period in which an overlay switch occurs (i.e., the guard value changes).
450 */
451 if (grd_val && grd_val != last_guard_val[spu_num]) {
452 last_guard_val[spu_num] = grd_val;
453 /* Drop the rest of the samples. */
454 break;
455 }
456
457 add_event_entry(file_offset | spu_num_shifted);
458 }
459 spin_unlock(&buffer_lock);
460out:
461 spin_unlock_irqrestore(&cache_lock, flags);
462}
463
464
465int spu_sync_stop(void)
466{
467 unsigned long flags = 0;
468 int ret = spu_switch_event_unregister(&spu_active);
469 if (ret) {
470 printk(KERN_ERR "SPU_PROF: "
471 "%s, line %d: spu_switch_event_unregister returned %d\n",
472 __FUNCTION__, __LINE__, ret);
473 goto out;
474 }
475
476 spin_lock_irqsave(&cache_lock, flags);
477 ret = release_cached_info(RELEASE_ALL);
478 spin_unlock_irqrestore(&cache_lock, flags);
479out:
480 pr_debug("spu_sync_stop -- done.\n");
481 return ret;
482}
483
484
diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c
new file mode 100644
index 000000000000..76ec1d16aef7
--- /dev/null
+++ b/arch/powerpc/oprofile/cell/vma_map.c
@@ -0,0 +1,287 @@
1/*
2 * Cell Broadband Engine OProfile Support
3 *
4 * (C) Copyright IBM Corporation 2006
5 *
6 * Author: Maynard Johnson <maynardj@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/* The code in this source file is responsible for generating
15 * vma-to-fileOffset maps for both overlay and non-overlay SPU
16 * applications.
17 */
18
19#include <linux/mm.h>
20#include <linux/string.h>
21#include <linux/uaccess.h>
22#include <linux/elf.h>
23#include "pr_util.h"
24
25
26void vma_map_free(struct vma_to_fileoffset_map *map)
27{
28 while (map) {
29 struct vma_to_fileoffset_map *next = map->next;
30 kfree(map);
31 map = next;
32 }
33}
34
35unsigned int
36vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
37 const struct spu *aSpu, int *grd_val)
38{
39 /*
40 * Default the offset to the physical address + a flag value.
41 * Addresses of dynamically generated code can't be found in the vma
42 * map. For those addresses the flagged value will be sent on to
43 * the user space tools so they can be reported rather than just
44 * thrown away.
45 */
46 u32 offset = 0x10000000 + vma;
47 u32 ovly_grd;
48
49 for (; map; map = map->next) {
50 if (vma < map->vma || vma >= map->vma + map->size)
51 continue;
52
53 if (map->guard_ptr) {
54 ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
55 if (ovly_grd != map->guard_val)
56 continue;
57 *grd_val = ovly_grd;
58 }
59 offset = vma - map->vma + map->offset;
60 break;
61 }
62
63 return offset;
64}
65
66static struct vma_to_fileoffset_map *
67vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
68 unsigned int size, unsigned int offset, unsigned int guard_ptr,
69 unsigned int guard_val)
70{
71 struct vma_to_fileoffset_map *new =
72 kzalloc(sizeof(struct vma_to_fileoffset_map), GFP_KERNEL);
73 if (!new) {
74 printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
75 __FUNCTION__, __LINE__);
76 vma_map_free(map);
77 return NULL;
78 }
79
80 new->next = map;
81 new->vma = vma;
82 new->size = size;
83 new->offset = offset;
84 new->guard_ptr = guard_ptr;
85 new->guard_val = guard_val;
86
87 return new;
88}
89
90
91/* Parse SPE ELF header and generate a list of vma_maps.
92 * A pointer to the first vma_map in the generated list
93 * of vma_maps is returned. */
94struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
95 unsigned long spu_elf_start)
96{
97 static const unsigned char expected[EI_PAD] = {
98 [EI_MAG0] = ELFMAG0,
99 [EI_MAG1] = ELFMAG1,
100 [EI_MAG2] = ELFMAG2,
101 [EI_MAG3] = ELFMAG3,
102 [EI_CLASS] = ELFCLASS32,
103 [EI_DATA] = ELFDATA2MSB,
104 [EI_VERSION] = EV_CURRENT,
105 [EI_OSABI] = ELFOSABI_NONE
106 };
107
108 int grd_val;
109 struct vma_to_fileoffset_map *map = NULL;
110 struct spu_overlay_info ovly;
111 unsigned int overlay_tbl_offset = -1;
112 unsigned long phdr_start, shdr_start;
113 Elf32_Ehdr ehdr;
114 Elf32_Phdr phdr;
115 Elf32_Shdr shdr, shdr_str;
116 Elf32_Sym sym;
117 int i, j;
118 char name[32];
119
120 unsigned int ovly_table_sym = 0;
121 unsigned int ovly_buf_table_sym = 0;
122 unsigned int ovly_table_end_sym = 0;
123 unsigned int ovly_buf_table_end_sym = 0;
124 unsigned long ovly_table;
125 unsigned int n_ovlys;
126
127 /* Get and validate ELF header. */
128
129 if (copy_from_user(&ehdr, (void *) spu_elf_start, sizeof (ehdr)))
130 goto fail;
131
132 if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
133 printk(KERN_ERR "SPU_PROF: "
134 "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
135 __FUNCTION__, __LINE__);
136 goto fail;
137 }
138 if (ehdr.e_machine != EM_SPU) {
139 printk(KERN_ERR "SPU_PROF: "
140 "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
141 __FUNCTION__, __LINE__);
142 goto fail;
143 }
144 if (ehdr.e_type != ET_EXEC) {
145 printk(KERN_ERR "SPU_PROF: "
146 "%s, line %d: Unexpected e_type parsing SPU ELF\n",
147 __FUNCTION__, __LINE__);
148 goto fail;
149 }
150 phdr_start = spu_elf_start + ehdr.e_phoff;
151 shdr_start = spu_elf_start + ehdr.e_shoff;
152
153 /* Traverse program headers. */
154 for (i = 0; i < ehdr.e_phnum; i++) {
155 if (copy_from_user(&phdr,
156 (void *) (phdr_start + i * sizeof(phdr)),
157 sizeof(phdr)))
158 goto fail;
159
160 if (phdr.p_type != PT_LOAD)
161 continue;
162 if (phdr.p_flags & (1 << 27))
163 continue;
164
165 map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
166 phdr.p_offset, 0, 0);
167 if (!map)
168 goto fail;
169 }
170
171 pr_debug("SPU_PROF: Created non-overlay maps\n");
172 /* Traverse section table and search for overlay-related symbols. */
173 for (i = 0; i < ehdr.e_shnum; i++) {
174 if (copy_from_user(&shdr,
175 (void *) (shdr_start + i * sizeof(shdr)),
176 sizeof(shdr)))
177 goto fail;
178
179 if (shdr.sh_type != SHT_SYMTAB)
180 continue;
181 if (shdr.sh_entsize != sizeof (sym))
182 continue;
183
184 if (copy_from_user(&shdr_str,
185 (void *) (shdr_start + shdr.sh_link *
186 sizeof(shdr)),
187 sizeof(shdr)))
188 goto fail;
189
190 if (shdr_str.sh_type != SHT_STRTAB)
191 goto fail;
192
193 for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
194 if (copy_from_user(&sym, (void *) (spu_elf_start +
195 shdr.sh_offset + j *
196 sizeof (sym)),
197 sizeof (sym)))
198 goto fail;
199
200 if (copy_from_user(name, (void *)
201 (spu_elf_start + shdr_str.sh_offset +
202 sym.st_name),
203 20))
204 goto fail;
205
206 if (memcmp(name, "_ovly_table", 12) == 0)
207 ovly_table_sym = sym.st_value;
208 if (memcmp(name, "_ovly_buf_table", 16) == 0)
209 ovly_buf_table_sym = sym.st_value;
210 if (memcmp(name, "_ovly_table_end", 16) == 0)
211 ovly_table_end_sym = sym.st_value;
212 if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
213 ovly_buf_table_end_sym = sym.st_value;
214 }
215 }
216
217 /* If we don't have overlays, we're done. */
218 if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
219 || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
220 pr_debug("SPU_PROF: No overlay table found\n");
221 goto out;
222 } else {
223 pr_debug("SPU_PROF: Overlay table found\n");
224 }
225
226 /* The _ovly_table symbol represents a table with one entry
227 * per overlay section. The _ovly_buf_table symbol represents
228 * a table with one entry per overlay region.
229 * The struct spu_overlay_info gives the structure of the _ovly_table
230 * entries. The structure of _ovly_table_buf is simply one
231 * u32 word per entry.
232 */
233 overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
234 aSpu, &grd_val);
235 if (overlay_tbl_offset < 0) {
236 printk(KERN_ERR "SPU_PROF: "
237 "%s, line %d: Error finding SPU overlay table\n",
238 __FUNCTION__, __LINE__);
239 goto fail;
240 }
241 ovly_table = spu_elf_start + overlay_tbl_offset;
242
243 n_ovlys = (ovly_table_end_sym -
244 ovly_table_sym) / sizeof (ovly);
245
246 /* Traverse overlay table. */
247 for (i = 0; i < n_ovlys; i++) {
248 if (copy_from_user(&ovly, (void *)
249 (ovly_table + i * sizeof (ovly)),
250 sizeof (ovly)))
251 goto fail;
252
253 /* The ovly.vma/size/offset arguments are analogous to the same
254 * arguments used above for non-overlay maps. The final two
255 * args are referred to as the guard pointer and the guard
256 * value.
257 * The guard pointer is an entry in the _ovly_buf_table,
258 * computed using ovly.buf as the index into the table. Since
259 * ovly.buf values begin at '1' to reference the first (or 0th)
260 * entry in the _ovly_buf_table, the computation subtracts 1
261 * from ovly.buf.
262 * The guard value is stored in the _ovly_buf_table entry and
263 * is an index (starting at 1) back to the _ovly_table entry
264 * that is pointing at this _ovly_buf_table entry. So, for
265 * example, for an overlay scenario with one overlay segment
266 * and two overlay sections:
267 * - Section 1 points to the first entry of the
268 * _ovly_buf_table, which contains a guard value
269 * of '1', referencing the first (index=0) entry of
270 * _ovly_table.
271 * - Section 2 points to the second entry of the
272 * _ovly_buf_table, which contains a guard value
273 * of '2', referencing the second (index=1) entry of
274 * _ovly_table.
275 */
276 map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
277 ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
278 if (!map)
279 goto fail;
280 }
281 goto out;
282
283 fail:
284 map = NULL;
285 out:
286 return map;
287}
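To make the guard handling above concrete, here is a small user-space sketch (not part of the patch) of the check vma_map_lookup() performs for two hypothetical overlay sections that share one region; every address, size and guard value below is invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ovly_map {
	uint32_t vma, size, offset;
	uint32_t guard_ptr;	/* LS offset of the _ovly_buf_table entry */
	uint32_t guard_val;	/* value that entry holds while the section is loaded */
};

int main(void)
{
	unsigned char local_store[0x400];
	uint32_t current_guard = 2;	/* pretend overlay section 2 is currently loaded */
	struct ovly_map maps[2] = {
		{ 0x2000, 0x800, 0x3000, 0x100, 1 },	/* section 1 */
		{ 0x2000, 0x800, 0x3800, 0x100, 2 },	/* section 2 */
	};
	uint32_t sample = 0x2010;	/* SPU PC sample inside the shared vma range */
	uint32_t guard;
	int i;

	memset(local_store, 0, sizeof(local_store));
	memcpy(local_store + 0x100, &current_guard, sizeof(current_guard));

	for (i = 0; i < 2; i++) {
		if (sample < maps[i].vma || sample >= maps[i].vma + maps[i].size)
			continue;
		memcpy(&guard, local_store + maps[i].guard_ptr, sizeof(guard));
		if (guard != maps[i].guard_val)
			continue;	/* this section is not the one currently loaded */
		printf("sample 0x%x -> file offset 0x%x (overlay section %d)\n",
		       sample, sample - maps[i].vma + maps[i].offset, i + 1);
		break;
	}
	return 0;
}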
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 1a7ef7e246d2..a28cce1d6c24 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -29,6 +29,8 @@ static struct op_powerpc_model *model;
29static struct op_counter_config ctr[OP_MAX_COUNTER]; 29static struct op_counter_config ctr[OP_MAX_COUNTER];
30static struct op_system_config sys; 30static struct op_system_config sys;
31 31
32static int op_per_cpu_rc;
33
32static void op_handle_interrupt(struct pt_regs *regs) 34static void op_handle_interrupt(struct pt_regs *regs)
33{ 35{
34 model->handle_interrupt(regs, ctr); 36 model->handle_interrupt(regs, ctr);
@@ -36,25 +38,41 @@ static void op_handle_interrupt(struct pt_regs *regs)
36 38
37static void op_powerpc_cpu_setup(void *dummy) 39static void op_powerpc_cpu_setup(void *dummy)
38{ 40{
39 model->cpu_setup(ctr); 41 int ret;
42
43 ret = model->cpu_setup(ctr);
44
45 if (ret != 0)
46 op_per_cpu_rc = ret;
40} 47}
41 48
42static int op_powerpc_setup(void) 49static int op_powerpc_setup(void)
43{ 50{
44 int err; 51 int err;
45 52
53 op_per_cpu_rc = 0;
54
46 /* Grab the hardware */ 55 /* Grab the hardware */
47 err = reserve_pmc_hardware(op_handle_interrupt); 56 err = reserve_pmc_hardware(op_handle_interrupt);
48 if (err) 57 if (err)
49 return err; 58 return err;
50 59
51 /* Pre-compute the values to stuff in the hardware registers. */ 60 /* Pre-compute the values to stuff in the hardware registers. */
52 model->reg_setup(ctr, &sys, model->num_counters); 61 op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
53 62
54 /* Configure the registers on all cpus. */ 63 if (op_per_cpu_rc)
64 goto out;
65
66 /* Configure the registers on all cpus. If an error occurs on one
67 * of the cpus, op_per_cpu_rc will be set to the error */
55 on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1); 68 on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
56 69
57 return 0; 70out: if (op_per_cpu_rc) {
71 /* error on setup release the performance counter hardware */
72 release_pmc_hardware();
73 }
74
75 return op_per_cpu_rc;
58} 76}
59 77
60static void op_powerpc_shutdown(void) 78static void op_powerpc_shutdown(void)
@@ -64,16 +82,29 @@ static void op_powerpc_shutdown(void)
64 82
65static void op_powerpc_cpu_start(void *dummy) 83static void op_powerpc_cpu_start(void *dummy)
66{ 84{
67 model->start(ctr); 85 /* If any of the cpus have returned an error, set the
86 * global flag to the error so it can be returned
87 * to the generic OProfile caller.
88 */
89 int ret;
90
91 ret = model->start(ctr);
92 if (ret != 0)
93 op_per_cpu_rc = ret;
68} 94}
69 95
70static int op_powerpc_start(void) 96static int op_powerpc_start(void)
71{ 97{
98 op_per_cpu_rc = 0;
99
72 if (model->global_start) 100 if (model->global_start)
73 model->global_start(ctr); 101 return model->global_start(ctr);
74 if (model->start) 102 if (model->start) {
75 on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1); 103 on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
76 return 0; 104 return op_per_cpu_rc;
105 }
106 return -EIO; /* No start function is defined for this
107 power architecture */
77} 108}
78 109
79static inline void op_powerpc_cpu_stop(void *dummy) 110static inline void op_powerpc_cpu_stop(void *dummy)
@@ -147,11 +178,13 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
147 178
148 switch (cur_cpu_spec->oprofile_type) { 179 switch (cur_cpu_spec->oprofile_type) {
149#ifdef CONFIG_PPC64 180#ifdef CONFIG_PPC64
150#ifdef CONFIG_PPC_CELL_NATIVE 181#ifdef CONFIG_OPROFILE_CELL
151 case PPC_OPROFILE_CELL: 182 case PPC_OPROFILE_CELL:
152 if (firmware_has_feature(FW_FEATURE_LPAR)) 183 if (firmware_has_feature(FW_FEATURE_LPAR))
153 return -ENODEV; 184 return -ENODEV;
154 model = &op_model_cell; 185 model = &op_model_cell;
186 ops->sync_start = model->sync_start;
187 ops->sync_stop = model->sync_stop;
155 break; 188 break;
156#endif 189#endif
157 case PPC_OPROFILE_RS64: 190 case PPC_OPROFILE_RS64:
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index 5d1bbaf35ccb..cc599eb8768b 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -81,7 +81,7 @@ static void pmc_stop_ctrs(void)
81 81
82/* Configures the counters on this CPU based on the global 82/* Configures the counters on this CPU based on the global
83 * settings */ 83 * settings */
84static void fsl7450_cpu_setup(struct op_counter_config *ctr) 84static int fsl7450_cpu_setup(struct op_counter_config *ctr)
85{ 85{
86 /* freeze all counters */ 86 /* freeze all counters */
87 pmc_stop_ctrs(); 87 pmc_stop_ctrs();
@@ -89,12 +89,14 @@ static void fsl7450_cpu_setup(struct op_counter_config *ctr)
89 mtspr(SPRN_MMCR0, mmcr0_val); 89 mtspr(SPRN_MMCR0, mmcr0_val);
90 mtspr(SPRN_MMCR1, mmcr1_val); 90 mtspr(SPRN_MMCR1, mmcr1_val);
91 mtspr(SPRN_MMCR2, mmcr2_val); 91 mtspr(SPRN_MMCR2, mmcr2_val);
92
93 return 0;
92} 94}
93 95
94#define NUM_CTRS 6 96#define NUM_CTRS 6
95 97
96/* Configures the global settings for the countes on all CPUs. */ 98/* Configures the global settings for the countes on all CPUs. */
97static void fsl7450_reg_setup(struct op_counter_config *ctr, 99static int fsl7450_reg_setup(struct op_counter_config *ctr,
98 struct op_system_config *sys, 100 struct op_system_config *sys,
99 int num_ctrs) 101 int num_ctrs)
100{ 102{
@@ -126,10 +128,12 @@ static void fsl7450_reg_setup(struct op_counter_config *ctr,
126 | mmcr1_event6(ctr[5].event); 128 | mmcr1_event6(ctr[5].event);
127 129
128 mmcr2_val = 0; 130 mmcr2_val = 0;
131
132 return 0;
129} 133}
130 134
131/* Sets the counters on this CPU to the chosen values, and starts them */ 135/* Sets the counters on this CPU to the chosen values, and starts them */
132static void fsl7450_start(struct op_counter_config *ctr) 136static int fsl7450_start(struct op_counter_config *ctr)
133{ 137{
134 int i; 138 int i;
135 139
@@ -148,6 +152,8 @@ static void fsl7450_start(struct op_counter_config *ctr)
148 pmc_start_ctrs(); 152 pmc_start_ctrs();
149 153
150 oprofile_running = 1; 154 oprofile_running = 1;
155
156 return 0;
151} 157}
152 158
153/* Stop the counters on this CPU */ 159/* Stop the counters on this CPU */
@@ -193,7 +199,7 @@ static void fsl7450_handle_interrupt(struct pt_regs *regs,
193 /* The freeze bit was set by the interrupt. */ 199 /* The freeze bit was set by the interrupt. */
194 /* Clear the freeze bit, and reenable the interrupt. 200 /* Clear the freeze bit, and reenable the interrupt.
195 * The counters won't actually start until the rfi clears 201 * The counters won't actually start until the rfi clears
196 * the PMM bit */ 202 * the PM/M bit */
197 pmc_start_ctrs(); 203 pmc_start_ctrs();
198} 204}
199 205
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index c29293befba9..d928b54f3a0f 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * Author: David Erb (djerb@us.ibm.com) 6 * Author: David Erb (djerb@us.ibm.com)
7 * Modifications: 7 * Modifications:
8 * Carl Love <carll@us.ibm.com> 8 * Carl Love <carll@us.ibm.com>
9 * Maynard Johnson <maynardj@us.ibm.com> 9 * Maynard Johnson <maynardj@us.ibm.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
@@ -38,12 +38,25 @@
38 38
39#include "../platforms/cell/interrupt.h" 39#include "../platforms/cell/interrupt.h"
40#include "../platforms/cell/cbe_regs.h" 40#include "../platforms/cell/cbe_regs.h"
41#include "cell/pr_util.h"
42
43static void cell_global_stop_spu(void);
44
45/*
46 * spu_cycle_reset is the number of cycles between samples.
47 * This variable is used for SPU profiling and should ONLY be set
48 * at the beginning of cell_reg_setup; otherwise, it's read-only.
49 */
50static unsigned int spu_cycle_reset;
51
52#define NUM_SPUS_PER_NODE 8
53#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
41 54
42#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */ 55#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
43#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying 56#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
44 * PPU_CYCLES event 57 * PPU_CYCLES event
45 */ 58 */
46#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */ 59#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
47 60
48#define NUM_THREADS 2 /* number of physical threads in 61#define NUM_THREADS 2 /* number of physical threads in
49 * physical processor 62 * physical processor
@@ -51,6 +64,7 @@
51#define NUM_TRACE_BUS_WORDS 4 64#define NUM_TRACE_BUS_WORDS 4
52#define NUM_INPUT_BUS_WORDS 2 65#define NUM_INPUT_BUS_WORDS 2
53 66
67#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
54 68
55struct pmc_cntrl_data { 69struct pmc_cntrl_data {
56 unsigned long vcntr; 70 unsigned long vcntr;
@@ -62,11 +76,10 @@ struct pmc_cntrl_data {
62/* 76/*
63 * ibm,cbe-perftools rtas parameters 77 * ibm,cbe-perftools rtas parameters
64 */ 78 */
65
66struct pm_signal { 79struct pm_signal {
67 u16 cpu; /* Processor to modify */ 80 u16 cpu; /* Processor to modify */
68 u16 sub_unit; /* hw subunit this applies to (if applicable) */ 81 u16 sub_unit; /* hw subunit this applies to (if applicable)*/
69 short int signal_group; /* Signal Group to Enable/Disable */ 82 short int signal_group; /* Signal Group to Enable/Disable */
70 u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event 83 u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
71 * Bus Word(s) (bitmask) 84 * Bus Word(s) (bitmask)
72 */ 85 */
@@ -112,21 +125,42 @@ static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
112 125
113static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; 126static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
114 127
115/* Interpetation of hdw_thread: 128/*
129 * The CELL profiling code makes rtas calls to setup the debug bus to
130 * route the performance signals. Additionally, SPU profiling requires
131 * a second rtas call to setup the hardware to capture the SPU PCs.
132 * The EIO error value is returned if the token lookups or the rtas
133 * call fail. The EIO error number is the best choice of the existing
134 * error numbers. The probability of rtas related error is very low. But
135 * by returning EIO and printing additional information to dmesg the user
136 * will know that OProfile did not start and dmesg will tell them why.
137 * OProfile does not support returning errors on Stop. Not a huge issue
138 * since failure to reset the debug bus or stop the SPU PC collection is
139 * not a fatal issue. Chances are if the Stop failed, Start doesn't work
140 * either.
141 */
142
143/*
144 * Interpretation of hdw_thread:
116 * 0 - even virtual cpus 0, 2, 4,... 145 * 0 - even virtual cpus 0, 2, 4,...
117 * 1 - odd virtual cpus 1, 3, 5, ... 146 * 1 - odd virtual cpus 1, 3, 5, ...
147 *
148 * FIXME: this is strictly wrong, we need to clean this up in a number
149 * of places. It works for now. -arnd
118 */ 150 */
119static u32 hdw_thread; 151static u32 hdw_thread;
120 152
121static u32 virt_cntr_inter_mask; 153static u32 virt_cntr_inter_mask;
122static struct timer_list timer_virt_cntr; 154static struct timer_list timer_virt_cntr;
123 155
124/* pm_signal needs to be global since it is initialized in 156/*
157 * pm_signal needs to be global since it is initialized in
125 * cell_reg_setup at the time when the necessary information 158 * cell_reg_setup at the time when the necessary information
126 * is available. 159 * is available.
127 */ 160 */
128static struct pm_signal pm_signal[NR_PHYS_CTRS]; 161static struct pm_signal pm_signal[NR_PHYS_CTRS];
129static int pm_rtas_token; 162static int pm_rtas_token; /* token for debug bus setup call */
163static int spu_rtas_token; /* token for SPU cycle profiling */
130 164
131static u32 reset_value[NR_PHYS_CTRS]; 165static u32 reset_value[NR_PHYS_CTRS];
132static int num_counters; 166static int num_counters;
@@ -147,8 +181,8 @@ rtas_ibm_cbe_perftools(int subfunc, int passthru,
147{ 181{
148 u64 paddr = __pa(address); 182 u64 paddr = __pa(address);
149 183
150 return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru, 184 return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
151 paddr >> 32, paddr & 0xffffffff, length); 185 passthru, paddr >> 32, paddr & 0xffffffff, length);
152} 186}
153 187
154static void pm_rtas_reset_signals(u32 node) 188static void pm_rtas_reset_signals(u32 node)
@@ -156,12 +190,13 @@ static void pm_rtas_reset_signals(u32 node)
156 int ret; 190 int ret;
157 struct pm_signal pm_signal_local; 191 struct pm_signal pm_signal_local;
158 192
159 /* The debug bus is being set to the passthru disable state. 193 /*
160 * However, the FW still expects atleast one legal signal routing 194 * The debug bus is being set to the passthru disable state.
161 * entry or it will return an error on the arguments. If we don't 195 * However, the FW still expects atleast one legal signal routing
162 * supply a valid entry, we must ignore all return values. Ignoring 196 * entry or it will return an error on the arguments. If we don't
163 * all return values means we might miss an error we should be 197 * supply a valid entry, we must ignore all return values. Ignoring
164 * concerned about. 198 * all return values means we might miss an error we should be
199 * concerned about.
165 */ 200 */
166 201
167 /* fw expects physical cpu #. */ 202 /* fw expects physical cpu #. */
@@ -175,18 +210,24 @@ static void pm_rtas_reset_signals(u32 node)
175 &pm_signal_local, 210 &pm_signal_local,
176 sizeof(struct pm_signal)); 211 sizeof(struct pm_signal));
177 212
178 if (ret) 213 if (unlikely(ret))
214 /*
215 * Not a fatal error. For Oprofile stop, the oprofile
216 * functions do not support returning an error for
217 * failure to stop OProfile.
218 */
179 printk(KERN_WARNING "%s: rtas returned: %d\n", 219 printk(KERN_WARNING "%s: rtas returned: %d\n",
180 __FUNCTION__, ret); 220 __FUNCTION__, ret);
181} 221}
182 222
183static void pm_rtas_activate_signals(u32 node, u32 count) 223static int pm_rtas_activate_signals(u32 node, u32 count)
184{ 224{
185 int ret; 225 int ret;
186 int i, j; 226 int i, j;
187 struct pm_signal pm_signal_local[NR_PHYS_CTRS]; 227 struct pm_signal pm_signal_local[NR_PHYS_CTRS];
188 228
189 /* There is no debug setup required for the cycles event. 229 /*
230 * There is no debug setup required for the cycles event.
190 * Note that only events in the same group can be used. 231 * Note that only events in the same group can be used.
191 * Otherwise, there will be conflicts in correctly routing 232 * Otherwise, there will be conflicts in correctly routing
192 * the signals on the debug bus. It is the responsiblity 233 * the signals on the debug bus. It is the responsiblity
@@ -213,10 +254,14 @@ static void pm_rtas_activate_signals(u32 node, u32 count)
213 pm_signal_local, 254 pm_signal_local,
214 i * sizeof(struct pm_signal)); 255 i * sizeof(struct pm_signal));
215 256
216 if (ret) 257 if (unlikely(ret)) {
217 printk(KERN_WARNING "%s: rtas returned: %d\n", 258 printk(KERN_WARNING "%s: rtas returned: %d\n",
218 __FUNCTION__, ret); 259 __FUNCTION__, ret);
260 return -EIO;
261 }
219 } 262 }
263
264 return 0;
220} 265}
221 266
222/* 267/*
@@ -260,11 +305,12 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
260 pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity); 305 pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
261 pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control); 306 pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
262 307
263 /* Some of the islands signal selection is based on 64 bit words. 308 /*
309 * Some of the islands signal selection is based on 64 bit words.
264 * The debug bus words are 32 bits, the input words to the performance 310 * The debug bus words are 32 bits, the input words to the performance
265 * counters are defined as 32 bits. Need to convert the 64 bit island 311 * counters are defined as 32 bits. Need to convert the 64 bit island
266 * specification to the appropriate 32 input bit and bus word for the 312 * specification to the appropriate 32 input bit and bus word for the
267 * performance counter event selection. See the CELL Performance 313 * performance counter event selection. See the CELL Performance
268 * monitoring signals manual and the Perf cntr hardware descriptions 314 * monitoring signals manual and the Perf cntr hardware descriptions
269 * for the details. 315 * for the details.
270 */ 316 */
@@ -298,6 +344,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
298 input_bus[j] = i; 344 input_bus[j] = i;
299 pm_regs.group_control |= 345 pm_regs.group_control |=
300 (i << (31 - i)); 346 (i << (31 - i));
347
301 break; 348 break;
302 } 349 }
303 } 350 }
@@ -309,7 +356,8 @@ out:
309 356
310static void write_pm_cntrl(int cpu) 357static void write_pm_cntrl(int cpu)
311{ 358{
312 /* Oprofile will use 32 bit counters, set bits 7:10 to 0 359 /*
360 * Oprofile will use 32 bit counters, set bits 7:10 to 0
313 * pmregs.pm_cntrl is a global 361 * pmregs.pm_cntrl is a global
314 */ 362 */
315 363
@@ -326,7 +374,8 @@ static void write_pm_cntrl(int cpu)
326 if (pm_regs.pm_cntrl.freeze == 1) 374 if (pm_regs.pm_cntrl.freeze == 1)
327 val |= CBE_PM_FREEZE_ALL_CTRS; 375 val |= CBE_PM_FREEZE_ALL_CTRS;
328 376
329 /* Routine set_count_mode must be called previously to set 377 /*
378 * Routine set_count_mode must be called previously to set
330 * the count mode based on the user selection of user and kernel. 379 * the count mode based on the user selection of user and kernel.
331 */ 380 */
332 val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode); 381 val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
@@ -336,7 +385,8 @@ static void write_pm_cntrl(int cpu)
336static inline void 385static inline void
337set_count_mode(u32 kernel, u32 user) 386set_count_mode(u32 kernel, u32 user)
338{ 387{
339 /* The user must specify user and kernel if they want them. If 388 /*
389 * The user must specify user and kernel if they want them. If
340 * neither is specified, OProfile will count in hypervisor mode. 390 * neither is specified, OProfile will count in hypervisor mode.
341 * pm_regs.pm_cntrl is a global 391 * pm_regs.pm_cntrl is a global
342 */ 392 */
@@ -364,7 +414,7 @@ static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
364 414
365/* 415/*
366 * Oprofile is expected to collect data on all CPUs simultaneously. 416 * Oprofile is expected to collect data on all CPUs simultaneously.
367 * However, there is one set of performance counters per node. There are 417 * However, there is one set of performance counters per node. There are
368 * two hardware threads or virtual CPUs on each node. Hence, OProfile must 418 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
369 * multiplex in time the performance counter collection on the two virtual 419 * multiplex in time the performance counter collection on the two virtual
370 * CPUs. The multiplexing of the performance counters is done by this 420 * CPUs. The multiplexing of the performance counters is done by this
@@ -377,19 +427,19 @@ static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
377 * pair of per-cpu arrays is used for storing the previous and next 427 * pair of per-cpu arrays is used for storing the previous and next
378 * pmc values for a given node. 428 * pmc values for a given node.
379 * NOTE: We use the per-cpu variable to improve cache performance. 429 * NOTE: We use the per-cpu variable to improve cache performance.
430 *
431 * This routine will alternate loading the virtual counters for
432 * virtual CPUs
380 */ 433 */
381static void cell_virtual_cntr(unsigned long data) 434static void cell_virtual_cntr(unsigned long data)
382{ 435{
383 /* This routine will alternate loading the virtual counters for
384 * virtual CPUs
385 */
386 int i, prev_hdw_thread, next_hdw_thread; 436 int i, prev_hdw_thread, next_hdw_thread;
387 u32 cpu; 437 u32 cpu;
388 unsigned long flags; 438 unsigned long flags;
389 439
390 /* Make sure that the interrupt_hander and 440 /*
391 * the virt counter are not both playing with 441 * Make sure that the interrupt_hander and the virt counter are
392 * the counters on the same node. 442 * not both playing with the counters on the same node.
393 */ 443 */
394 444
395 spin_lock_irqsave(&virt_cntr_lock, flags); 445 spin_lock_irqsave(&virt_cntr_lock, flags);
@@ -400,22 +450,25 @@ static void cell_virtual_cntr(unsigned long data)
400 hdw_thread = 1 ^ hdw_thread; 450 hdw_thread = 1 ^ hdw_thread;
401 next_hdw_thread = hdw_thread; 451 next_hdw_thread = hdw_thread;
402 452
403 for (i = 0; i < num_counters; i++) 453 /*
404 /* There are some per thread events. Must do the 454 * There are some per thread events. Must do the
405 * set event, for the thread that is being started 455 * set event, for the thread that is being started
406 */ 456 */
457 for (i = 0; i < num_counters; i++)
407 set_pm_event(i, 458 set_pm_event(i,
408 pmc_cntrl[next_hdw_thread][i].evnts, 459 pmc_cntrl[next_hdw_thread][i].evnts,
409 pmc_cntrl[next_hdw_thread][i].masks); 460 pmc_cntrl[next_hdw_thread][i].masks);
410 461
411 /* The following is done only once per each node, but 462 /*
463 * The following is done only once per each node, but
412 * we need cpu #, not node #, to pass to the cbe_xxx functions. 464 * we need cpu #, not node #, to pass to the cbe_xxx functions.
413 */ 465 */
414 for_each_online_cpu(cpu) { 466 for_each_online_cpu(cpu) {
415 if (cbe_get_hw_thread_id(cpu)) 467 if (cbe_get_hw_thread_id(cpu))
416 continue; 468 continue;
417 469
418 /* stop counters, save counter values, restore counts 470 /*
471 * stop counters, save counter values, restore counts
419 * for previous thread 472 * for previous thread
420 */ 473 */
421 cbe_disable_pm(cpu); 474 cbe_disable_pm(cpu);
@@ -428,7 +481,7 @@ static void cell_virtual_cntr(unsigned long data)
428 == 0xFFFFFFFF) 481 == 0xFFFFFFFF)
429 /* If the cntr value is 0xffffffff, we must 482 /* If the cntr value is 0xffffffff, we must
430 * reset that to 0xfffffff0 when the current 483 * reset that to 0xfffffff0 when the current
431 * thread is restarted. This will generate a 484 * thread is restarted. This will generate a
432 * new interrupt and make sure that we never 485 * new interrupt and make sure that we never
433 * restore the counters to the max value. If 486 * restore the counters to the max value. If
434 * the counters were restored to the max value, 487 * the counters were restored to the max value,
@@ -444,13 +497,15 @@ static void cell_virtual_cntr(unsigned long data)
444 next_hdw_thread)[i]); 497 next_hdw_thread)[i]);
445 } 498 }
446 499
447 /* Switch to the other thread. Change the interrupt 500 /*
501 * Switch to the other thread. Change the interrupt
448 * and control regs to be scheduled on the CPU 502 * and control regs to be scheduled on the CPU
449 * corresponding to the thread to execute. 503 * corresponding to the thread to execute.
450 */ 504 */
451 for (i = 0; i < num_counters; i++) { 505 for (i = 0; i < num_counters; i++) {
452 if (pmc_cntrl[next_hdw_thread][i].enabled) { 506 if (pmc_cntrl[next_hdw_thread][i].enabled) {
453 /* There are some per thread events. 507 /*
508 * There are some per thread events.
454 * Must do the set event, enable_cntr 509 * Must do the set event, enable_cntr
455 * for each cpu. 510 * for each cpu.
456 */ 511 */
@@ -482,17 +537,42 @@ static void start_virt_cntrs(void)
482} 537}
483 538
484/* This function is called once for all cpus combined */ 539/* This function is called once for all cpus combined */
485static void 540static int cell_reg_setup(struct op_counter_config *ctr,
486cell_reg_setup(struct op_counter_config *ctr, 541 struct op_system_config *sys, int num_ctrs)
487 struct op_system_config *sys, int num_ctrs)
488{ 542{
489 int i, j, cpu; 543 int i, j, cpu;
544 spu_cycle_reset = 0;
545
546 if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
547 spu_cycle_reset = ctr[0].count;
548
549 /*
550 * Each node will need to make the rtas call to start
551 * and stop SPU profiling. Get the token once and store it.
552 */
553 spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
554
555 if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
556 printk(KERN_ERR
557 "%s: rtas token ibm,cbe-spu-perftools unknown\n",
558 __FUNCTION__);
559 return -EIO;
560 }
561 }
490 562
491 pm_rtas_token = rtas_token("ibm,cbe-perftools"); 563 pm_rtas_token = rtas_token("ibm,cbe-perftools");
492 if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) { 564
493 printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n", 565 /*
566 * For all events except PPU CYCLES, each node will need to make
567 * the rtas cbe-perftools call to setup and reset the debug bus.
568 * Make the token lookup call once and store it in the global
569 * variable pm_rtas_token.
570 */
571 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
572 printk(KERN_ERR
573 "%s: rtas token ibm,cbe-perftools unknown\n",
494 __FUNCTION__); 574 __FUNCTION__);
495 goto out; 575 return -EIO;
496 } 576 }
497 577
498 num_counters = num_ctrs; 578 num_counters = num_ctrs;
@@ -520,7 +600,8 @@ cell_reg_setup(struct op_counter_config *ctr,
520 per_cpu(pmc_values, j)[i] = 0; 600 per_cpu(pmc_values, j)[i] = 0;
521 } 601 }
522 602
523 /* Setup the thread 1 events, map the thread 0 event to the 603 /*
604 * Setup the thread 1 events, map the thread 0 event to the
524 * equivalent thread 1 event. 605 * equivalent thread 1 event.
525 */ 606 */
526 for (i = 0; i < num_ctrs; ++i) { 607 for (i = 0; i < num_ctrs; ++i) {
@@ -544,9 +625,10 @@ cell_reg_setup(struct op_counter_config *ctr,
544 for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) 625 for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
545 input_bus[i] = 0xff; 626 input_bus[i] = 0xff;
546 627
547 /* Our counters count up, and "count" refers to 628 /*
629 * Our counters count up, and "count" refers to
548 * how much before the next interrupt, and we interrupt 630 * how much before the next interrupt, and we interrupt
549 * on overflow. So we calculate the starting value 631 * on overflow. So we calculate the starting value
550 * which will give us "count" until overflow. 632 * which will give us "count" until overflow.
551 * Then we set the events on the enabled counters. 633 * Then we set the events on the enabled counters.
552 */ 634 */
@@ -569,28 +651,27 @@ cell_reg_setup(struct op_counter_config *ctr,
569 for (i = 0; i < num_counters; ++i) { 651 for (i = 0; i < num_counters; ++i) {
570 per_cpu(pmc_values, cpu)[i] = reset_value[i]; 652 per_cpu(pmc_values, cpu)[i] = reset_value[i];
571 } 653 }
572out: 654
573 ; 655 return 0;
574} 656}
575 657
658
659
576/* This function is called once for each cpu */ 660/* This function is called once for each cpu */
577static void cell_cpu_setup(struct op_counter_config *cntr) 661static int cell_cpu_setup(struct op_counter_config *cntr)
578{ 662{
579 u32 cpu = smp_processor_id(); 663 u32 cpu = smp_processor_id();
580 u32 num_enabled = 0; 664 u32 num_enabled = 0;
581 int i; 665 int i;
582 666
667 if (spu_cycle_reset)
668 return 0;
669
583 /* There is one performance monitor per processor chip (i.e. node), 670 /* There is one performance monitor per processor chip (i.e. node),
584 * so we only need to perform this function once per node. 671 * so we only need to perform this function once per node.
585 */ 672 */
586 if (cbe_get_hw_thread_id(cpu)) 673 if (cbe_get_hw_thread_id(cpu))
587 goto out; 674 return 0;
588
589 if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
590 printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
591 __FUNCTION__);
592 goto out;
593 }
594 675
595 /* Stop all counters */ 676 /* Stop all counters */
596 cbe_disable_pm(cpu); 677 cbe_disable_pm(cpu);
@@ -609,16 +690,286 @@ static void cell_cpu_setup(struct op_counter_config *cntr)
609 } 690 }
610 } 691 }
611 692
612 pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); 693 /*
694 * The pm_rtas_activate_signals will return -EIO if the FW
695 * call failed.
696 */
697 return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
698}
699
700#define ENTRIES 303
701#define MAXLFSR 0xFFFFFF
702
703/* precomputed table of 24 bit LFSR values */
704static int initial_lfsr[] = {
705 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
706 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
707 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
708 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
709 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
710 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
711 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
712 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
713 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
714 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
715 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
716 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
717 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
718 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
719 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
720 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
721 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
722 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
723 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
724 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
725 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
726 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
727 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
728 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
729 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
730 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
731 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
732 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
733 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
734 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
735 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
736 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
737 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
738 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
739 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
740 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
741 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
742 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
743};
744
745/*
746 * The hardware uses an LFSR counting sequence to determine when to capture
747 * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
748 * where each number occurs once in the sequence but the sequence is not in
749 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
750 * the last value in the sequence. Hence the user specified value N
751 * corresponds to the LFSR number that is N from the end of the sequence.
752 *
753 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
754 * LFSR sequence is broken into four ranges. The spacing of the precomputed
755 * values is adjusted in each range so the error between the user specified
756 * number (N) of events between samples and the actual number of events based
757 * on the precomputed value will be less than about 6.2%. Note, if the user
758 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
759 * This is to prevent the loss of samples because the trace buffer is full.
760 *
761 * User specified N Step between Index in
762 * precomputed values precomputed
763 * table
764 * 0 to 2^16-1 ---- 0
765 * 2^16 to 2^16+2^19-1 2^12 1 to 128
766 * 2^16+2^19 to 2^16+2^19+2^22-1 2^15 129 to 256
767 * 2^16+2^19+2^22 to 2^24-1 2^18 257 to 302
768 *
769 *
770 * For example, the LFSR values in the second range are computed for 2^16,
771 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
772 * 1, 2,..., 127, 128.
773 *
774 * The 24 bit LFSR value for the nth number in the sequence can be
775 * calculated using the following code:
776 *
777 * #define size 24
778 * int calculate_lfsr(int n)
779 * {
780 * int i;
781 * unsigned int newlfsr0;
782 * unsigned int lfsr = 0xFFFFFF;
783 * unsigned int howmany = n;
784 *
785 * for (i = 2; i < howmany + 2; i++) {
786 * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
787 * ((lfsr >> (size - 1 - 1)) & 1) ^
788 * (((lfsr >> (size - 1 - 6)) & 1) ^
789 * ((lfsr >> (size - 1 - 23)) & 1)));
790 *
791 * lfsr >>= 1;
792 * lfsr = lfsr | (newlfsr0 << (size - 1));
793 * }
794 * return lfsr;
795 * }
796 */
797
798#define V2_16 (0x1 << 16)
799#define V2_19 (0x1 << 19)
800#define V2_22 (0x1 << 22)
801
802static int calculate_lfsr(int n)
803{
804 /*
805 * The ranges and steps are in powers of 2 so the calculations
806 * can be done using shifts rather than divides.
807 */
808 int index;
809
810 if ((n >> 16) == 0)
811 index = 0;
812 else if (((n - V2_16) >> 19) == 0)
813 index = ((n - V2_16) >> 12) + 1;
814 else if (((n - V2_16 - V2_19) >> 22) == 0)
815 index = ((n - V2_16 - V2_19) >> 15 ) + 1 + 128;
816 else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
817 index = ((n - V2_16 - V2_19 - V2_22) >> 18 ) + 1 + 256;
818 else
819 index = ENTRIES-1;
820
821 /* make sure index is valid */
822 if ((index > ENTRIES) || (index < 0))
823 index = ENTRIES-1;
824
825 return initial_lfsr[index];
826}
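For illustration only (this worked example is not part of the patch; the value 70000 is arbitrary), a user-specified count in the second range resolves like this:

	/* sketch: second-range index selection in calculate_lfsr() for n = 70000 */
	int n = 70000;                          /* (n >> 16) != 0 and ((n - V2_16) >> 19) == 0 */
	int index = ((n - V2_16) >> 12) + 1;    /* (4464 >> 12) + 1 = 2 */
	/*
	 * initial_lfsr[2] was precomputed for 2^16 + 2^12 = 69632 events,
	 * so the sampling error is (70000 - 69632) / 70000, about 0.5%,
	 * comfortably inside the ~6.2% bound described above.
	 */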
827
828static int pm_rtas_activate_spu_profiling(u32 node)
829{
830 int ret, i;
831 struct pm_signal pm_signal_local[NR_PHYS_CTRS];
832
833 /*
834 * Set up the rtas call to configure the debug bus to
835 * route the SPU PCs. Setup the pm_signal for each SPU
836 */
837 for (i = 0; i < NUM_SPUS_PER_NODE; i++) {
838 pm_signal_local[i].cpu = node;
839 pm_signal_local[i].signal_group = 41;
840 /* spu i on word (i/2) */
841 pm_signal_local[i].bus_word = 1 << i / 2;
842 /* spu i */
843 pm_signal_local[i].sub_unit = i;
844 pm_signal_local[i].bit = 63;
845 }
846
847 ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
848 PASSTHRU_ENABLE, pm_signal_local,
849 (NUM_SPUS_PER_NODE
850 * sizeof(struct pm_signal)));
851
852 if (unlikely(ret)) {
853 printk(KERN_WARNING "%s: rtas returned: %d\n",
854 __FUNCTION__, ret);
855 return -EIO;
856 }
857
858 return 0;
859}
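A note on the bus_word computation above (illustrative only, not part of the patch): division binds tighter than the shift in C, so the expression 1 << i / 2 parses as 1 << (i / 2) and pairs of SPUs share one debug-bus word:

	/* sketch: bus_word = 1 << (i / 2) for the eight SPUs on a node */
	/* i = 0, 1 -> 0x1,  i = 2, 3 -> 0x2,  i = 4, 5 -> 0x4,  i = 6, 7 -> 0x8 */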
860
861#ifdef CONFIG_CPU_FREQ
862static int
863oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
864{
865 int ret = 0;
866 struct cpufreq_freqs *frq = data;
867 if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
868 (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
869 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
870 set_spu_profiling_frequency(frq->new, spu_cycle_reset);
871 return ret;
872}
873
874static struct notifier_block cpu_freq_notifier_block = {
875 .notifier_call = oprof_cpufreq_notify
876};
877#endif
878
879static int cell_global_start_spu(struct op_counter_config *ctr)
880{
881 int subfunc;
882 unsigned int lfsr_value;
883 int cpu;
884 int ret;
885 int rtas_error;
886 unsigned int cpu_khzfreq = 0;
887
888 /* The SPU profiling uses time-based profiling based on
889 * cpu frequency, so if configured with the CPU_FREQ
890 * option, we should detect frequency changes and react
891 * accordingly.
892 */
893#ifdef CONFIG_CPU_FREQ
894 ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
895 CPUFREQ_TRANSITION_NOTIFIER);
896 if (ret < 0)
897 /* this is not a fatal error */
898 printk(KERN_ERR "CPU freq change registration failed: %d\n",
899 ret);
900
901 else
902 cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
903#endif
904
905 set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);
906
907 for_each_online_cpu(cpu) {
908 if (cbe_get_hw_thread_id(cpu))
909 continue;
910
911 /*
912 * Setup SPU cycle-based profiling.
913 * Set perf_mon_control bit 0 to a zero before
914 * enabling spu collection hardware.
915 */
916 cbe_write_pm(cpu, pm_control, 0);
917
918 if (spu_cycle_reset > MAX_SPU_COUNT)
919 /* use largest possible value */
920 lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
921 else
922 lfsr_value = calculate_lfsr(spu_cycle_reset);
923
924 /* must use a non zero value. Zero disables data collection. */
925 if (lfsr_value == 0)
926 lfsr_value = calculate_lfsr(1);
927
928 lfsr_value = lfsr_value << 8; /* shift lfsr to correct
929 * register location
930 */
931
932 /* debug bus setup */
933 ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
934
935 if (unlikely(ret)) {
936 rtas_error = ret;
937 goto out;
938 }
939
940
941 subfunc = 2; /* 2 - activate SPU tracing, 3 - deactivate */
942
943 /* start profiling */
944 ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
945 cbe_cpu_to_node(cpu), lfsr_value);
946
947 if (unlikely(ret != 0)) {
948 printk(KERN_ERR
949 "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
950 __FUNCTION__, ret);
951 rtas_error = -EIO;
952 goto out;
953 }
954 }
955
956 rtas_error = start_spu_profiling(spu_cycle_reset);
957 if (rtas_error)
958 goto out_stop;
959
960 oprofile_running = 1;
961 return 0;
962
963out_stop:
964 cell_global_stop_spu(); /* clean up the PMU/debug bus */
613out: 965out:
614 ; 966 return rtas_error;
615} 967}
616 968
617static void cell_global_start(struct op_counter_config *ctr) 969static int cell_global_start_ppu(struct op_counter_config *ctr)
618{ 970{
619 u32 cpu; 971 u32 cpu, i;
620 u32 interrupt_mask = 0; 972 u32 interrupt_mask = 0;
621 u32 i;
622 973
623 /* This routine gets called once for the system. 974 /* This routine gets called once for the system.
624 * There is one performance monitor per node, so we 975 * There is one performance monitor per node, so we
@@ -651,19 +1002,79 @@ static void cell_global_start(struct op_counter_config *ctr)
651 oprofile_running = 1; 1002 oprofile_running = 1;
652 smp_wmb(); 1003 smp_wmb();
653 1004
654 /* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being 1005 /*
655 * executed which manipulates the PMU. We start the "virtual counter" 1006 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
1007 * executed which manipulates the PMU. We start the "virtual counter"
656 * here so that we do not need to synchronize access to the PMU in 1008 * here so that we do not need to synchronize access to the PMU in
657 * the above for-loop. 1009 * the above for-loop.
658 */ 1010 */
659 start_virt_cntrs(); 1011 start_virt_cntrs();
1012
1013 return 0;
660} 1014}
661 1015
662static void cell_global_stop(void) 1016static int cell_global_start(struct op_counter_config *ctr)
1017{
1018 if (spu_cycle_reset)
1019 return cell_global_start_spu(ctr);
1020 else
1021 return cell_global_start_ppu(ctr);
1022}
1023
1024/*
1025 * Note the generic OProfile stop calls do not support returning
1026 * an error on stop. Hence, we will not return an error if the FW
1027 * calls fail on stop. Failure to reset the debug bus is not an issue.
1028 * Failure to disable the SPU profiling is not an issue. The FW calls
1029 * to enable the performance counters and debug bus will work even if
1030 * the hardware was not cleanly reset.
1031 */
1032static void cell_global_stop_spu(void)
1033{
1034 int subfunc, rtn_value;
1035 unsigned int lfsr_value;
1036 int cpu;
1037
1038 oprofile_running = 0;
1039
1040#ifdef CONFIG_CPU_FREQ
1041 cpufreq_unregister_notifier(&cpu_freq_notifier_block,
1042 CPUFREQ_TRANSITION_NOTIFIER);
1043#endif
1044
1045 for_each_online_cpu(cpu) {
1046 if (cbe_get_hw_thread_id(cpu))
1047 continue;
1048
1049 subfunc = 3; /*
1050 * 2 - activate SPU tracing,
1051 * 3 - deactivate
1052 */
1053 lfsr_value = 0x8f100000;
1054
1055 rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
1056 subfunc, cbe_cpu_to_node(cpu),
1057 lfsr_value);
1058
1059 if (unlikely(rtn_value != 0)) {
1060 printk(KERN_ERR
1061 "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
1062 __FUNCTION__, rtn_value);
1063 }
1064
1065 /* Deactivate the signals */
1066 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1067 }
1068
1069 stop_spu_profiling();
1070}
1071
1072static void cell_global_stop_ppu(void)
663{ 1073{
664 int cpu; 1074 int cpu;
665 1075
666 /* This routine will be called once for the system. 1076 /*
1077 * This routine will be called once for the system.
667 * There is one performance monitor per node, so we 1078 * There is one performance monitor per node, so we
668 * only need to perform this function once per node. 1079 * only need to perform this function once per node.
669 */ 1080 */
@@ -687,8 +1098,16 @@ static void cell_global_stop(void)
687 } 1098 }
688} 1099}
689 1100
690static void 1101static void cell_global_stop(void)
691cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) 1102{
1103 if (spu_cycle_reset)
1104 cell_global_stop_spu();
1105 else
1106 cell_global_stop_ppu();
1107}
1108
1109static void cell_handle_interrupt(struct pt_regs *regs,
1110 struct op_counter_config *ctr)
692{ 1111{
693 u32 cpu; 1112 u32 cpu;
694 u64 pc; 1113 u64 pc;
@@ -699,13 +1118,15 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
699 1118
700 cpu = smp_processor_id(); 1119 cpu = smp_processor_id();
701 1120
702 /* Need to make sure the interrupt handler and the virt counter 1121 /*
1122 * Need to make sure the interrupt handler and the virt counter
703 * routine are not running at the same time. See the 1123 * routine are not running at the same time. See the
704 * cell_virtual_cntr() routine for additional comments. 1124 * cell_virtual_cntr() routine for additional comments.
705 */ 1125 */
706 spin_lock_irqsave(&virt_cntr_lock, flags); 1126 spin_lock_irqsave(&virt_cntr_lock, flags);
707 1127
708 /* Need to disable and reenable the performance counters 1128 /*
1129 * Need to disable and reenable the performance counters
709 * to get the desired behavior from the hardware. This 1130 * to get the desired behavior from the hardware. This
710 * is hardware specific. 1131 * is hardware specific.
711 */ 1132 */
@@ -714,7 +1135,8 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
714 1135
715 interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); 1136 interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
716 1137
717 /* If the interrupt mask has been cleared, then the virt cntr 1138 /*
1139 * If the interrupt mask has been cleared, then the virt cntr
718 * has cleared the interrupt. When the thread that generated 1140 * has cleared the interrupt. When the thread that generated
719 * the interrupt is restored, the data count will be restored to 1141 * the interrupt is restored, the data count will be restored to
720 * 0xffffff0 to cause the interrupt to be regenerated. 1142 * 0xffffff0 to cause the interrupt to be regenerated.
@@ -732,18 +1154,20 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
732 } 1154 }
733 } 1155 }
734 1156
735 /* The counters were frozen by the interrupt. 1157 /*
1158 * The counters were frozen by the interrupt.
736 * Reenable the interrupt and restart the counters. 1159 * Reenable the interrupt and restart the counters.
737 * If there was a race between the interrupt handler and 1160 * If there was a race between the interrupt handler and
738 * the virtual counter routine, the virtual counter 1161 * the virtual counter routine, the virtual counter
739 * routine may have cleared the interrupts. Hence must 1162 * routine may have cleared the interrupts. Hence must
740 * use the virt_cntr_inter_mask to re-enable the interrupts. 1163 * use the virt_cntr_inter_mask to re-enable the interrupts.
741 */ 1164 */
742 cbe_enable_pm_interrupts(cpu, hdw_thread, 1165 cbe_enable_pm_interrupts(cpu, hdw_thread,
743 virt_cntr_inter_mask); 1166 virt_cntr_inter_mask);
744 1167
745 /* The writes to the various performance counters only write 1168 /*
746 * to a latch. The new values (interrupt setting bits, reset 1169 * The writes to the various performance counters only write
1170 * to a latch. The new values (interrupt setting bits, reset
747 * counter value etc.) are not copied to the actual registers 1171 * counter value etc.) are not copied to the actual registers
748 * until the performance monitor is enabled. In order to get 1172 * until the performance monitor is enabled. In order to get
749 * this to work as desired, the performance monitor needs to 1173 * this to work as desired, the performance monitor needs to
@@ -755,10 +1179,33 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
755 spin_unlock_irqrestore(&virt_cntr_lock, flags); 1179 spin_unlock_irqrestore(&virt_cntr_lock, flags);
756} 1180}
757 1181
1182/*
1183 * This function is called from the generic OProfile
1184 * driver. When profiling PPUs, we need to do the
1185 * generic sync start; otherwise, do spu_sync_start.
1186 */
1187static int cell_sync_start(void)
1188{
1189 if (spu_cycle_reset)
1190 return spu_sync_start();
1191 else
1192 return DO_GENERIC_SYNC;
1193}
1194
1195static int cell_sync_stop(void)
1196{
1197 if (spu_cycle_reset)
1198 return spu_sync_stop();
1199 else
1200 return 1;
1201}
1202
758struct op_powerpc_model op_model_cell = { 1203struct op_powerpc_model op_model_cell = {
759 .reg_setup = cell_reg_setup, 1204 .reg_setup = cell_reg_setup,
760 .cpu_setup = cell_cpu_setup, 1205 .cpu_setup = cell_cpu_setup,
761 .global_start = cell_global_start, 1206 .global_start = cell_global_start,
762 .global_stop = cell_global_stop, 1207 .global_stop = cell_global_stop,
1208 .sync_start = cell_sync_start,
1209 .sync_stop = cell_sync_stop,
763 .handle_interrupt = cell_handle_interrupt, 1210 .handle_interrupt = cell_handle_interrupt,
764}; 1211};
diff --git a/arch/powerpc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c
index 2267eb8c661b..183a28bb1812 100644
--- a/arch/powerpc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_booke.c
@@ -244,7 +244,7 @@ static void dump_pmcs(void)
244 mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3)); 244 mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
245} 245}
246 246
247static void fsl_booke_cpu_setup(struct op_counter_config *ctr) 247static int fsl_booke_cpu_setup(struct op_counter_config *ctr)
248{ 248{
249 int i; 249 int i;
250 250
@@ -258,9 +258,11 @@ static void fsl_booke_cpu_setup(struct op_counter_config *ctr)
258 258
259 set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel); 259 set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
260 } 260 }
261
262 return 0;
261} 263}
262 264
263static void fsl_booke_reg_setup(struct op_counter_config *ctr, 265static int fsl_booke_reg_setup(struct op_counter_config *ctr,
264 struct op_system_config *sys, 266 struct op_system_config *sys,
265 int num_ctrs) 267 int num_ctrs)
266{ 268{
@@ -276,9 +278,10 @@ static void fsl_booke_reg_setup(struct op_counter_config *ctr,
276 for (i = 0; i < num_counters; ++i) 278 for (i = 0; i < num_counters; ++i)
277 reset_value[i] = 0x80000000UL - ctr[i].count; 279 reset_value[i] = 0x80000000UL - ctr[i].count;
278 280
281 return 0;
279} 282}
280 283
281static void fsl_booke_start(struct op_counter_config *ctr) 284static int fsl_booke_start(struct op_counter_config *ctr)
282{ 285{
283 int i; 286 int i;
284 287
@@ -308,6 +311,8 @@ static void fsl_booke_start(struct op_counter_config *ctr)
308 311
309 pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(), 312 pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
310 mfpmr(PMRN_PMGC0)); 313 mfpmr(PMRN_PMGC0));
314
315 return 0;
311} 316}
312 317
313static void fsl_booke_stop(void) 318static void fsl_booke_stop(void)
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index e8a56b0adadc..c40de461fd4e 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -89,7 +89,7 @@ static inline void ctr_write(unsigned int i, u64 val)
89 89
90 90
91/* precompute the values to stuff in the hardware registers */ 91/* precompute the values to stuff in the hardware registers */
92static void pa6t_reg_setup(struct op_counter_config *ctr, 92static int pa6t_reg_setup(struct op_counter_config *ctr,
93 struct op_system_config *sys, 93 struct op_system_config *sys,
94 int num_ctrs) 94 int num_ctrs)
95{ 95{
@@ -135,10 +135,12 @@ static void pa6t_reg_setup(struct op_counter_config *ctr,
135 pr_debug("reset_value for pmc%u inited to 0x%lx\n", 135 pr_debug("reset_value for pmc%u inited to 0x%lx\n",
136 pmc, reset_value[pmc]); 136 pmc, reset_value[pmc]);
137 } 137 }
138
139 return 0;
138} 140}
139 141
140/* configure registers on this cpu */ 142/* configure registers on this cpu */
141static void pa6t_cpu_setup(struct op_counter_config *ctr) 143static int pa6t_cpu_setup(struct op_counter_config *ctr)
142{ 144{
143 u64 mmcr0 = mmcr0_val; 145 u64 mmcr0 = mmcr0_val;
144 u64 mmcr1 = mmcr1_val; 146 u64 mmcr1 = mmcr1_val;
@@ -154,9 +156,11 @@ static void pa6t_cpu_setup(struct op_counter_config *ctr)
154 mfspr(SPRN_PA6T_MMCR0)); 156 mfspr(SPRN_PA6T_MMCR0));
155 pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(), 157 pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
156 mfspr(SPRN_PA6T_MMCR1)); 158 mfspr(SPRN_PA6T_MMCR1));
159
160 return 0;
157} 161}
158 162
159static void pa6t_start(struct op_counter_config *ctr) 163static int pa6t_start(struct op_counter_config *ctr)
160{ 164{
161 int i; 165 int i;
162 166
@@ -174,6 +178,8 @@ static void pa6t_start(struct op_counter_config *ctr)
174 oprofile_running = 1; 178 oprofile_running = 1;
175 179
176 pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0); 180 pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
181
182 return 0;
177} 183}
178 184
179static void pa6t_stop(void) 185static void pa6t_stop(void)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index a7c206b665af..cddc250a6a5c 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -32,7 +32,7 @@ static u32 mmcr0_val;
32static u64 mmcr1_val; 32static u64 mmcr1_val;
33static u64 mmcra_val; 33static u64 mmcra_val;
34 34
35static void power4_reg_setup(struct op_counter_config *ctr, 35static int power4_reg_setup(struct op_counter_config *ctr,
36 struct op_system_config *sys, 36 struct op_system_config *sys,
37 int num_ctrs) 37 int num_ctrs)
38{ 38{
@@ -60,6 +60,8 @@ static void power4_reg_setup(struct op_counter_config *ctr,
60 mmcr0_val &= ~MMCR0_PROBLEM_DISABLE; 60 mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
61 else 61 else
62 mmcr0_val |= MMCR0_PROBLEM_DISABLE; 62 mmcr0_val |= MMCR0_PROBLEM_DISABLE;
63
64 return 0;
63} 65}
64 66
65extern void ppc64_enable_pmcs(void); 67extern void ppc64_enable_pmcs(void);
@@ -84,7 +86,7 @@ static inline int mmcra_must_set_sample(void)
84 return 0; 86 return 0;
85} 87}
86 88
87static void power4_cpu_setup(struct op_counter_config *ctr) 89static int power4_cpu_setup(struct op_counter_config *ctr)
88{ 90{
89 unsigned int mmcr0 = mmcr0_val; 91 unsigned int mmcr0 = mmcr0_val;
90 unsigned long mmcra = mmcra_val; 92 unsigned long mmcra = mmcra_val;
@@ -111,9 +113,11 @@ static void power4_cpu_setup(struct op_counter_config *ctr)
111 mfspr(SPRN_MMCR1)); 113 mfspr(SPRN_MMCR1));
112 dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(), 114 dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
113 mfspr(SPRN_MMCRA)); 115 mfspr(SPRN_MMCRA));
116
117 return 0;
114} 118}
115 119
116static void power4_start(struct op_counter_config *ctr) 120static int power4_start(struct op_counter_config *ctr)
117{ 121{
118 int i; 122 int i;
119 unsigned int mmcr0; 123 unsigned int mmcr0;
@@ -148,6 +152,7 @@ static void power4_start(struct op_counter_config *ctr)
148 oprofile_running = 1; 152 oprofile_running = 1;
149 153
150 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0); 154 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
155 return 0;
151} 156}
152 157
153static void power4_stop(void) 158static void power4_stop(void)
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index c731acbfb2a5..a20afe45d936 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -88,7 +88,7 @@ static unsigned long reset_value[OP_MAX_COUNTER];
88 88
89static int num_counters; 89static int num_counters;
90 90
91static void rs64_reg_setup(struct op_counter_config *ctr, 91static int rs64_reg_setup(struct op_counter_config *ctr,
92 struct op_system_config *sys, 92 struct op_system_config *sys,
93 int num_ctrs) 93 int num_ctrs)
94{ 94{
@@ -100,9 +100,10 @@ static void rs64_reg_setup(struct op_counter_config *ctr,
100 reset_value[i] = 0x80000000UL - ctr[i].count; 100 reset_value[i] = 0x80000000UL - ctr[i].count;
101 101
102 /* XXX setup user and kernel profiling */ 102 /* XXX setup user and kernel profiling */
103 return 0;
103} 104}
104 105
105static void rs64_cpu_setup(struct op_counter_config *ctr) 106static int rs64_cpu_setup(struct op_counter_config *ctr)
106{ 107{
107 unsigned int mmcr0; 108 unsigned int mmcr0;
108 109
@@ -125,9 +126,11 @@ static void rs64_cpu_setup(struct op_counter_config *ctr)
125 mfspr(SPRN_MMCR0)); 126 mfspr(SPRN_MMCR0));
126 dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(), 127 dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
127 mfspr(SPRN_MMCR1)); 128 mfspr(SPRN_MMCR1));
129
130 return 0;
128} 131}
129 132
130static void rs64_start(struct op_counter_config *ctr) 133static int rs64_start(struct op_counter_config *ctr)
131{ 134{
132 int i; 135 int i;
133 unsigned int mmcr0; 136 unsigned int mmcr0;
@@ -155,6 +158,7 @@ static void rs64_start(struct op_counter_config *ctr)
155 mtspr(SPRN_MMCR0, mmcr0); 158 mtspr(SPRN_MMCR0, mmcr0);
156 159
157 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0); 160 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
161 return 0;
158} 162}
159 163
160static void rs64_stop(void) 164static void rs64_stop(void)
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33545d352e92..932538a93c2b 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -272,4 +272,14 @@ config CPM2
272 you wish to build a kernel for a machine with a CPM2 coprocessor 272 you wish to build a kernel for a machine with a CPM2 coprocessor
273 on it (826x, 827x, 8560). 273 on it (826x, 827x, 8560).
274 274
275config AXON_RAM
276 tristate "Axon DDR2 memory device driver"
277 depends on PPC_IBM_CELL_BLADE
278 default m
279 help
280 It registers one block device per Axon's DDR2 memory bank found
281 on a system. Block devices are called axonram?, their major and
282 minor numbers are available in /proc/devices, /proc/partitions or
283 in /sys/block/axonram?/dev.
284
275endmenu 285endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index b8b5fde94668..e4b2aee53a73 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -215,7 +215,7 @@ config NOT_COHERENT_CACHE
215 depends on 4xx || 8xx || E200 215 depends on 4xx || 8xx || E200
216 default y 216 default y
217 217
218config CONFIG_CHECK_CACHE_COHERENCY 218config CHECK_CACHE_COHERENCY
219 bool 219 bool
220 220
221endmenu 221endmenu
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 9b2b386ccf48..ac8032034fb8 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -73,4 +73,14 @@ config CBE_CPUFREQ
73 For details, take a look at <file:Documentation/cpu-freq/>. 73 For details, take a look at <file:Documentation/cpu-freq/>.
74 If you don't have such processor, say N 74 If you don't have such processor, say N
75 75
76config CBE_CPUFREQ_PMI
77 tristate "CBE frequency scaling using PMI interface"
78 depends on CBE_CPUFREQ && PPC_PMI && EXPERIMENTAL
79 default n
80 help
81 Select this if you want to use the PMI interface
82 to switch frequencies. Using PMI, the
83 processor will not only be able to run at lower speed,
84 but also at lower core voltage.
85
76endmenu 86endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 869af89df6ff..f88a7c76f296 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
4obj-$(CONFIG_CBE_RAS) += ras.o 4obj-$(CONFIG_CBE_RAS) += ras.o
5 5
6obj-$(CONFIG_CBE_THERM) += cbe_thermal.o 6obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
7obj-$(CONFIG_CBE_CPUFREQ) += cbe_cpufreq.o 7obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o
8obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o
9cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o
8 10
9ifeq ($(CONFIG_SMP),y) 11ifeq ($(CONFIG_SMP),y)
10obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o 12obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
@@ -23,3 +25,5 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
23 $(spu-priv1-y) \ 25 $(spu-priv1-y) \
24 $(spu-manage-y) \ 26 $(spu-manage-y) \
25 spufs/ 27 spufs/
28
29obj-$(CONFIG_PCI_MSI) += axon_msi.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
new file mode 100644
index 000000000000..4c9ab5b70bae
--- /dev/null
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2007, Michael Ellerman, IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10
11#include <linux/interrupt.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/msi.h>
16#include <linux/reboot.h>
17
18#include <asm/dcr.h>
19#include <asm/machdep.h>
20#include <asm/prom.h>
21
22
23/*
24 * MSIC registers, specified as offsets from dcr_base
25 */
26#define MSIC_CTRL_REG 0x0
27
28/* Base Address registers specify FIFO location in BE memory */
29#define MSIC_BASE_ADDR_HI_REG 0x3
30#define MSIC_BASE_ADDR_LO_REG 0x4
31
32/* Hold the read/write offsets into the FIFO */
33#define MSIC_READ_OFFSET_REG 0x5
34#define MSIC_WRITE_OFFSET_REG 0x6
35
36
37/* MSIC control register flags */
38#define MSIC_CTRL_ENABLE 0x0001
39#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002
40#define MSIC_CTRL_IRQ_ENABLE 0x0008
41#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010
42
43/*
44 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
45 * Currently we're using a 64KB FIFO size.
46 */
47#define MSIC_FIFO_SIZE_SHIFT 16
48#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT)
49
50/*
51 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
52 * 8-9 of the MSIC control reg.
53 */
54#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
55
56/*
57 * We need to mask the read/write offsets to make sure they stay within
58 * the bounds of the FIFO. Also they should always be 16-byte aligned.
59 */
60#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
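To make the two derived constants above concrete (a worked example, not part of the patch), with the 64KB FIFO selected by MSIC_FIFO_SIZE_SHIFT = 16:

	/* sketch: values implied by the 64KB FIFO configuration */
	/* MSIC_CTRL_FIFO_SIZE = ((16 - 15) << 8) & 0x300 = 0x100, i.e. bits 8-9 of the control reg */
	/* MSIC_FIFO_SIZE_MASK = (0x10000 - 1) & ~0xF = 0xFFF0, keeping offsets in range and 16-byte aligned */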
61
62/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
63#define MSIC_FIFO_ENTRY_SIZE 0x10
64
65
66struct axon_msic {
67 struct device_node *dn;
68 struct irq_host *irq_host;
69 __le32 *fifo;
70 dcr_host_t dcr_host;
71 struct list_head list;
72 u32 read_offset;
73 u32 dcr_base;
74};
75
76static LIST_HEAD(axon_msic_list);
77
78static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
79{
80 pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
81
82 dcr_write(msic->dcr_host, msic->dcr_base + dcr_n, val);
83}
84
85static u32 msic_dcr_read(struct axon_msic *msic, unsigned int dcr_n)
86{
87 return dcr_read(msic->dcr_host, msic->dcr_base + dcr_n);
88}
89
90static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
91{
92 struct axon_msic *msic = get_irq_data(irq);
93 u32 write_offset, msi;
94 int idx;
95
96 write_offset = msic_dcr_read(msic, MSIC_WRITE_OFFSET_REG);
97 pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
98
99 /* write_offset doesn't wrap properly, so we have to mask it */
100 write_offset &= MSIC_FIFO_SIZE_MASK;
101
102 while (msic->read_offset != write_offset) {
103 idx = msic->read_offset / sizeof(__le32);
104 msi = le32_to_cpu(msic->fifo[idx]);
105 msi &= 0xFFFF;
106
107 pr_debug("axon_msi: woff %x roff %x msi %x\n",
108 write_offset, msic->read_offset, msi);
109
110 msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
111 msic->read_offset &= MSIC_FIFO_SIZE_MASK;
112
113 if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
114 generic_handle_irq(msi);
115 else
116 pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
117 }
118
119 desc->chip->eoi(irq);
120}
121
122static struct axon_msic *find_msi_translator(struct pci_dev *dev)
123{
124 struct irq_host *irq_host;
125 struct device_node *dn, *tmp;
126 const phandle *ph;
127 struct axon_msic *msic = NULL;
128
129 dn = pci_device_to_OF_node(dev);
130 if (!dn) {
131 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
132 return NULL;
133 }
134
135 for (; dn; tmp = of_get_parent(dn), of_node_put(dn), dn = tmp) {
136 ph = of_get_property(dn, "msi-translator", NULL);
137 if (ph)
138 break;
139 }
140
141 if (!ph) {
142 dev_dbg(&dev->dev,
143 "axon_msi: no msi-translator property found\n");
144 goto out_error;
145 }
146
147 tmp = dn;
148 dn = of_find_node_by_phandle(*ph);
149 if (!dn) {
150 dev_dbg(&dev->dev,
151 "axon_msi: msi-translator doesn't point to a node\n");
152 goto out_error;
153 }
154
155 irq_host = irq_find_host(dn);
156 if (!irq_host) {
157 dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
158 dn->full_name);
159 goto out_error;
160 }
161
162 msic = irq_host->host_data;
163
164out_error:
165 of_node_put(dn);
166 of_node_put(tmp);
167
168 return msic;
169}
170
171static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
172{
173 if (!find_msi_translator(dev))
174 return -ENODEV;
175
176 return 0;
177}
178
179static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
180{
181 struct device_node *dn, *tmp;
182 struct msi_desc *entry;
183 int len;
184 const u32 *prop;
185
186 dn = pci_device_to_OF_node(dev);
187 if (!dn) {
188 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
189 return -ENODEV;
190 }
191
192 entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
193
194 for (; dn; tmp = of_get_parent(dn), of_node_put(dn), dn = tmp) {
195 if (entry->msi_attrib.is_64) {
196 prop = of_get_property(dn, "msi-address-64", &len);
197 if (prop)
198 break;
199 }
200
201 prop = of_get_property(dn, "msi-address-32", &len);
202 if (prop)
203 break;
204 }
205
206 if (!prop) {
207 dev_dbg(&dev->dev,
208 "axon_msi: no msi-address-(32|64) properties found\n");
209 return -ENOENT;
210 }
211
212 switch (len) {
213 case 8:
214 msg->address_hi = prop[0];
215 msg->address_lo = prop[1];
216 break;
217 case 4:
218 msg->address_hi = 0;
219 msg->address_lo = prop[0];
220 break;
221 default:
222 dev_dbg(&dev->dev,
223 "axon_msi: malformed msi-address-(32|64) property\n");
224 of_node_put(dn);
225 return -EINVAL;
226 }
227
228 of_node_put(dn);
229
230 return 0;
231}
232
233static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
234{
235 unsigned int virq, rc;
236 struct msi_desc *entry;
237 struct msi_msg msg;
238 struct axon_msic *msic;
239
240 msic = find_msi_translator(dev);
241 if (!msic)
242 return -ENODEV;
243
244 rc = setup_msi_msg_address(dev, &msg);
245 if (rc)
246 return rc;
247
248 /* We rely on being able to stash a virq in a u16 */
249 BUILD_BUG_ON(NR_IRQS > 65536);
250
251 list_for_each_entry(entry, &dev->msi_list, list) {
252 virq = irq_create_direct_mapping(msic->irq_host);
253 if (virq == NO_IRQ) {
254 dev_warn(&dev->dev,
255 "axon_msi: virq allocation failed!\n");
256 return -1;
257 }
258 dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
259
260 set_irq_msi(virq, entry);
261 msg.data = virq;
262 write_msi_msg(virq, &msg);
263 }
264
265 return 0;
266}
267
268static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
269{
270 struct msi_desc *entry;
271
272 dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
273
274 list_for_each_entry(entry, &dev->msi_list, list) {
275 if (entry->irq == NO_IRQ)
276 continue;
277
278 set_irq_msi(entry->irq, NULL);
279 irq_dispose_mapping(entry->irq);
280 }
281}
282
283static struct irq_chip msic_irq_chip = {
284 .mask = mask_msi_irq,
285 .unmask = unmask_msi_irq,
286 .shutdown = unmask_msi_irq,
287 .typename = "AXON-MSI",
288};
289
290static int msic_host_map(struct irq_host *h, unsigned int virq,
291 irq_hw_number_t hw)
292{
293 set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
294
295 return 0;
296}
297
298static int msic_host_match(struct irq_host *host, struct device_node *dn)
299{
300 struct axon_msic *msic = host->host_data;
301
302 return msic->dn == dn;
303}
304
305static struct irq_host_ops msic_host_ops = {
306 .match = msic_host_match,
307 .map = msic_host_map,
308};
309
310static int axon_msi_notify_reboot(struct notifier_block *nb,
311 unsigned long code, void *data)
312{
313 struct axon_msic *msic;
314 u32 tmp;
315
316 list_for_each_entry(msic, &axon_msic_list, list) {
317 pr_debug("axon_msi: disabling %s\n", msic->dn->full_name);
318 tmp = msic_dcr_read(msic, MSIC_CTRL_REG);
319 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
320 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
321 }
322
323 return 0;
324}
325
326static struct notifier_block axon_msi_reboot_notifier = {
327 .notifier_call = axon_msi_notify_reboot
328};
329
330static int axon_msi_setup_one(struct device_node *dn)
331{
332 struct page *page;
333 struct axon_msic *msic;
334 unsigned int virq;
335 int dcr_len;
336
337 pr_debug("axon_msi: setting up dn %s\n", dn->full_name);
338
339 msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
340 if (!msic) {
341 printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
342 dn->full_name);
343 goto out;
344 }
345
346 msic->dcr_base = dcr_resource_start(dn, 0);
347 dcr_len = dcr_resource_len(dn, 0);
348
349 if (msic->dcr_base == 0 || dcr_len == 0) {
350 printk(KERN_ERR
351 "axon_msi: couldn't parse dcr properties on %s\n",
352 dn->full_name);
353 goto out;
354 }
355
356 msic->dcr_host = dcr_map(dn, msic->dcr_base, dcr_len);
357 if (!DCR_MAP_OK(msic->dcr_host)) {
358 printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
359 dn->full_name);
360 goto out_free_msic;
361 }
362
363 page = alloc_pages_node(of_node_to_nid(dn), GFP_KERNEL,
364 get_order(MSIC_FIFO_SIZE_BYTES));
365 if (!page) {
366 printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
367 dn->full_name);
368 goto out_free_msic;
369 }
370
371 msic->fifo = page_address(page);
372
373 msic->irq_host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, NR_IRQS,
374 &msic_host_ops, 0);
375 if (!msic->irq_host) {
376 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
377 dn->full_name);
378 goto out_free_fifo;
379 }
380
381 msic->irq_host->host_data = msic;
382
383 virq = irq_of_parse_and_map(dn, 0);
384 if (virq == NO_IRQ) {
385 printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
386 dn->full_name);
387 goto out_free_host;
388 }
389
390 msic->dn = of_node_get(dn);
391
392 set_irq_data(virq, msic);
393 set_irq_chained_handler(virq, axon_msi_cascade);
394 pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
395
396 /* Enable the MSIC hardware */
397 msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, (u64)msic->fifo >> 32);
398 msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
399 (u64)msic->fifo & 0xFFFFFFFF);
400 msic_dcr_write(msic, MSIC_CTRL_REG,
401 MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
402 MSIC_CTRL_FIFO_SIZE);
403
404 list_add(&msic->list, &axon_msic_list);
405
406 printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
407
408 return 0;
409
410out_free_host:
411 kfree(msic->irq_host);
412out_free_fifo:
413 __free_pages(virt_to_page(msic->fifo), get_order(MSIC_FIFO_SIZE_BYTES));
414out_free_msic:
415 kfree(msic);
416out:
417
418 return -1;
419}
420
421static int axon_msi_init(void)
422{
423 struct device_node *dn;
424 int found = 0;
425
426 pr_debug("axon_msi: initialising ...\n");
427
428 for_each_compatible_node(dn, NULL, "ibm,axon-msic") {
429 if (axon_msi_setup_one(dn) == 0)
430 found++;
431 }
432
433 if (found) {
434 ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
435 ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
436 ppc_md.msi_check_device = axon_msi_check_device;
437
438 register_reboot_notifier(&axon_msi_reboot_notifier);
439
440 pr_debug("axon_msi: registered callbacks!\n");
441 }
442
443 return 0;
444}
445arch_initcall(axon_msi_init);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index ab511d5b65a4..0b6e8ee85ab1 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cpufreq driver for the cell processor 2 * cpufreq driver for the cell processor
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 * 5 *
6 * Author: Christian Krafft <krafft@de.ibm.com> 6 * Author: Christian Krafft <krafft@de.ibm.com>
7 * 7 *
@@ -21,18 +21,11 @@
21 */ 21 */
22 22
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/timer.h>
25
26#include <asm/hw_irq.h>
27#include <asm/io.h>
28#include <asm/machdep.h> 24#include <asm/machdep.h>
29#include <asm/processor.h>
30#include <asm/prom.h>
31#include <asm/time.h>
32#include <asm/pmi.h>
33#include <asm/of_platform.h> 25#include <asm/of_platform.h>
34 26#include <asm/prom.h>
35#include "cbe_regs.h" 27#include "cbe_regs.h"
28#include "cbe_cpufreq.h"
36 29
37static DEFINE_MUTEX(cbe_switch_mutex); 30static DEFINE_MUTEX(cbe_switch_mutex);
38 31
@@ -50,159 +43,24 @@ static struct cpufreq_frequency_table cbe_freqs[] = {
50 {0, CPUFREQ_TABLE_END}, 43 {0, CPUFREQ_TABLE_END},
51}; 44};
52 45
53/* to write to MIC register */
54static u64 MIC_Slow_Fast_Timer_table[] = {
55 [0 ... 7] = 0x007fc00000000000ull,
56};
57
58/* more values for the MIC */
59static u64 MIC_Slow_Next_Timer_table[] = {
60 0x0000240000000000ull,
61 0x0000268000000000ull,
62 0x000029C000000000ull,
63 0x00002D0000000000ull,
64 0x0000300000000000ull,
65 0x0000334000000000ull,
66 0x000039C000000000ull,
67 0x00003FC000000000ull,
68};
69
70static unsigned int pmi_frequency_limit = 0;
71/* 46/*
72 * hardware specific functions 47 * hardware specific functions
73 */ 48 */
74 49
75static struct of_device *pmi_dev; 50static int set_pmode(unsigned int cpu, unsigned int slow_mode)
76
77#ifdef CONFIG_PPC_PMI
78static int set_pmode_pmi(int cpu, unsigned int pmode)
79{
80 int ret;
81 pmi_message_t pmi_msg;
82#ifdef DEBUG
83 u64 time;
84#endif
85
86 pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
87 pmi_msg.data1 = cbe_cpu_to_node(cpu);
88 pmi_msg.data2 = pmode;
89
90#ifdef DEBUG
91 time = (u64) get_cycles();
92#endif
93
94 pmi_send_message(pmi_dev, pmi_msg);
95 ret = pmi_msg.data2;
96
97 pr_debug("PMI returned slow mode %d\n", ret);
98
99#ifdef DEBUG
100 time = (u64) get_cycles() - time; /* actual cycles (not cpu cycles!) */
101 time = 1000000000 * time / CLOCK_TICK_RATE; /* time in ns (10^-9) */
102 pr_debug("had to wait %lu ns for a transition\n", time);
103#endif
104 return ret;
105}
106#endif
107
108static int get_pmode(int cpu)
109{ 51{
110 int ret; 52 int rc;
111 struct cbe_pmd_regs __iomem *pmd_regs;
112
113 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
114 ret = in_be64(&pmd_regs->pmsr) & 0x07;
115
116 return ret;
117}
118
119static int set_pmode_reg(int cpu, unsigned int pmode)
120{
121 struct cbe_pmd_regs __iomem *pmd_regs;
122 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
123 u64 flags;
124 u64 value;
125
126 local_irq_save(flags);
127
128 mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
129 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
130
131 pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
132 pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
133
134 out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
135 out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
136
137 out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
138 out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
139
140 value = in_be64(&pmd_regs->pmcr);
141 /* set bits to zero */
142 value &= 0xFFFFFFFFFFFFFFF8ull;
143 /* set bits to next pmode */
144 value |= pmode;
145
146 out_be64(&pmd_regs->pmcr, value);
147
148 /* wait until new pmode appears in status register */
149 value = in_be64(&pmd_regs->pmsr) & 0x07;
150 while(value != pmode) {
151 cpu_relax();
152 value = in_be64(&pmd_regs->pmsr) & 0x07;
153 }
154
155 local_irq_restore(flags);
156
157 return 0;
158}
159 53
160static int set_pmode(int cpu, unsigned int slow_mode) { 54 if (cbe_cpufreq_has_pmi)
161#ifdef CONFIG_PPC_PMI 55 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
162 if (pmi_dev)
163 return set_pmode_pmi(cpu, slow_mode);
164 else 56 else
165#endif 57 rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
166 return set_pmode_reg(cpu, slow_mode);
167}
168
169static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
170{
171 u8 cpu;
172 u8 cbe_pmode_new;
173
174 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
175 58
176 cpu = cbe_node_to_cpu(pmi_msg.data1); 59 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
177 cbe_pmode_new = pmi_msg.data2;
178 60
179 pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency; 61 return rc;
180
181 pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
182}
183
184static int pmi_notifier(struct notifier_block *nb,
185 unsigned long event, void *data)
186{
187 struct cpufreq_policy *policy = data;
188
189 if (event != CPUFREQ_INCOMPATIBLE)
190 return 0;
191
192 cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
193 return 0;
194} 62}
195 63
196static struct notifier_block pmi_notifier_block = {
197 .notifier_call = pmi_notifier,
198};
199
200static struct pmi_handler cbe_pmi_handler = {
201 .type = PMI_TYPE_FREQ_CHANGE,
202 .handle_pmi_message = cbe_cpufreq_handle_pmi,
203};
204
205
206/* 64/*
207 * cpufreq functions 65 * cpufreq functions
208 */ 66 */
@@ -221,8 +79,19 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
221 79
222 pr_debug("init cpufreq on CPU %d\n", policy->cpu); 80 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
223 81
82 /*
83 * Let's check we can actually get to the CELL regs
84 */
85 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
86 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
87 pr_info("invalid CBE regs pointers for cpufreq\n");
88 return -EINVAL;
89 }
90
224 max_freqp = of_get_property(cpu, "clock-frequency", NULL); 91 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
225 92
93 of_node_put(cpu);
94
226 if (!max_freqp) 95 if (!max_freqp)
227 return -EINVAL; 96 return -EINVAL;
228 97
@@ -239,10 +108,12 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
239 } 108 }
240 109
241 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 110 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
242 /* if DEBUG is enabled set_pmode() measures the correct latency of a transition */ 111
112 /* if DEBUG is enabled set_pmode() measures the latency
113 * of a transition */
243 policy->cpuinfo.transition_latency = 25000; 114 policy->cpuinfo.transition_latency = 25000;
244 115
245 cur_pmode = get_pmode(policy->cpu); 116 cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
246 pr_debug("current pmode is at %d\n",cur_pmode); 117 pr_debug("current pmode is at %d\n",cur_pmode);
247 118
248 policy->cur = cbe_freqs[cur_pmode].frequency; 119 policy->cur = cbe_freqs[cur_pmode].frequency;
@@ -253,21 +124,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
253 124
254 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); 125 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
255 126
256 if (pmi_dev) { 127 /* this ensures that policy->cpuinfo_min
257 /* frequency might get limited later, initialize limit with max_freq */ 128 * and policy->cpuinfo_max are set correctly */
258 pmi_frequency_limit = max_freq;
259 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
260 }
261
262 /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
263 return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); 129 return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
264} 130}
265 131
266static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) 132static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
267{ 133{
268 if (pmi_dev)
269 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
270
271 cpufreq_frequency_table_put_attr(policy->cpu); 134 cpufreq_frequency_table_put_attr(policy->cpu);
272 return 0; 135 return 0;
273} 136}
@@ -277,13 +140,13 @@ static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
277 return cpufreq_frequency_table_verify(policy, cbe_freqs); 140 return cpufreq_frequency_table_verify(policy, cbe_freqs);
278} 141}
279 142
280 143static int cbe_cpufreq_target(struct cpufreq_policy *policy,
281static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, 144 unsigned int target_freq,
282 unsigned int relation) 145 unsigned int relation)
283{ 146{
284 int rc; 147 int rc;
285 struct cpufreq_freqs freqs; 148 struct cpufreq_freqs freqs;
286 int cbe_pmode_new; 149 unsigned int cbe_pmode_new;
287 150
288 cpufreq_frequency_table_target(policy, 151 cpufreq_frequency_table_target(policy,
289 cbe_freqs, 152 cbe_freqs,
@@ -298,12 +161,14 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target
298 mutex_lock(&cbe_switch_mutex); 161 mutex_lock(&cbe_switch_mutex);
299 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 162 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
300 163
301 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", 164 pr_debug("setting frequency for cpu %d to %d kHz, " \
165 "1/%d of max frequency\n",
302 policy->cpu, 166 policy->cpu,
303 cbe_freqs[cbe_pmode_new].frequency, 167 cbe_freqs[cbe_pmode_new].frequency,
304 cbe_freqs[cbe_pmode_new].index); 168 cbe_freqs[cbe_pmode_new].index);
305 169
306 rc = set_pmode(policy->cpu, cbe_pmode_new); 170 rc = set_pmode(policy->cpu, cbe_pmode_new);
171
307 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 172 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
308 mutex_unlock(&cbe_switch_mutex); 173 mutex_unlock(&cbe_switch_mutex);
309 174
@@ -326,28 +191,14 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
326 191
327static int __init cbe_cpufreq_init(void) 192static int __init cbe_cpufreq_init(void)
328{ 193{
329#ifdef CONFIG_PPC_PMI
330 struct device_node *np;
331#endif
332 if (!machine_is(cell)) 194 if (!machine_is(cell))
333 return -ENODEV; 195 return -ENODEV;
334#ifdef CONFIG_PPC_PMI
335 np = of_find_node_by_type(NULL, "ibm,pmi");
336
337 pmi_dev = of_find_device_by_node(np);
338 196
339 if (pmi_dev)
340 pmi_register_handler(pmi_dev, &cbe_pmi_handler);
341#endif
342 return cpufreq_register_driver(&cbe_cpufreq_driver); 197 return cpufreq_register_driver(&cbe_cpufreq_driver);
343} 198}
344 199
345static void __exit cbe_cpufreq_exit(void) 200static void __exit cbe_cpufreq_exit(void)
346{ 201{
347#ifdef CONFIG_PPC_PMI
348 if (pmi_dev)
349 pmi_unregister_handler(pmi_dev, &cbe_pmi_handler);
350#endif
351 cpufreq_unregister_driver(&cbe_cpufreq_driver); 202 cpufreq_unregister_driver(&cbe_cpufreq_driver);
352} 203}
353 204
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h
new file mode 100644
index 000000000000..c1d86bfa92ff
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.h
@@ -0,0 +1,24 @@
1/*
2 * cbe_cpufreq.h
3 *
4 * This file contains the definitions used by the cbe_cpufreq driver.
5 *
6 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
7 *
8 * Author: Christian Krafft <krafft@de.ibm.com>
9 *
10 */
11
12#include <linux/cpufreq.h>
13#include <linux/types.h>
14
15int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
16int cbe_cpufreq_get_pmode(int cpu);
17
18int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
19
20#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
21extern bool cbe_cpufreq_has_pmi;
22#else
23#define cbe_cpufreq_has_pmi (0)
24#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000000..163263b3e1cd
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
1/*
2 * pervasive backend for the cbe_cpufreq driver
3 *
4 * This driver makes use of the pervasive unit to
5 * engage the desired frequency.
6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
8 *
9 * Author: Christian Krafft <krafft@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/time.h>
29#include <asm/machdep.h>
30#include <asm/hw_irq.h>
31
32#include "cbe_regs.h"
33#include "cbe_cpufreq.h"
34
35/* to write to MIC register */
36static u64 MIC_Slow_Fast_Timer_table[] = {
37 [0 ... 7] = 0x007fc00000000000ull,
38};
39
40/* more values for the MIC */
41static u64 MIC_Slow_Next_Timer_table[] = {
42 0x0000240000000000ull,
43 0x0000268000000000ull,
44 0x000029C000000000ull,
45 0x00002D0000000000ull,
46 0x0000300000000000ull,
47 0x0000334000000000ull,
48 0x000039C000000000ull,
49 0x00003FC000000000ull,
50};
51
52
53int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
54{
55 struct cbe_pmd_regs __iomem *pmd_regs;
56 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
57 u64 flags;
58 u64 value;
59#ifdef DEBUG
60 long time;
61#endif
62
63 local_irq_save(flags);
64
65 mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
66 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
67
68#ifdef DEBUG
69 time = jiffies;
70#endif
71
72 out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
73 out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
74
75 out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
76 out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
77
78 value = in_be64(&pmd_regs->pmcr);
79 /* set bits to zero */
80 value &= 0xFFFFFFFFFFFFFFF8ull;
81 /* set bits to next pmode */
82 value |= pmode;
83
84 out_be64(&pmd_regs->pmcr, value);
85
86#ifdef DEBUG
87 /* wait until new pmode appears in status register */
88 value = in_be64(&pmd_regs->pmsr) & 0x07;
89 while (value != pmode) {
90 cpu_relax();
91 value = in_be64(&pmd_regs->pmsr) & 0x07;
92 }
93
94 time = jiffies - time;
95 time = jiffies_to_msecs(time);
96 pr_debug("had to wait %lu ms for a transition using " \
97 "pervasive unit\n", time);
98#endif
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104
105int cbe_cpufreq_get_pmode(int cpu)
106{
107 int ret;
108 struct cbe_pmd_regs __iomem *pmd_regs;
109
110 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
111 ret = in_be64(&pmd_regs->pmsr) & 0x07;
112
113 return ret;
114}
115
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
new file mode 100644
index 000000000000..fc6f38982ff4
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
@@ -0,0 +1,148 @@
1/*
2 * pmi backend for the cbe_cpufreq driver
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/timer.h>
26#include <asm/of_platform.h>
27#include <asm/processor.h>
28#include <asm/prom.h>
29#include <asm/pmi.h>
30
31#ifdef DEBUG
32#include <asm/time.h>
33#endif
34
35#include "cbe_regs.h"
36#include "cbe_cpufreq.h"
37
38static u8 pmi_slow_mode_limit[MAX_CBE];
39
40bool cbe_cpufreq_has_pmi = false;
41EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
42
43/*
44 * hardware specific functions
45 */
46
47int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
48{
49 int ret;
50 pmi_message_t pmi_msg;
51#ifdef DEBUG
52 long time;
53#endif
54 pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
55 pmi_msg.data1 = cbe_cpu_to_node(cpu);
56 pmi_msg.data2 = pmode;
57
58#ifdef DEBUG
59 time = jiffies;
60#endif
61 pmi_send_message(pmi_msg);
62
63#ifdef DEBUG
64 time = jiffies - time;
65 time = jiffies_to_msecs(time);
66 pr_debug("had to wait %lu ms for a transition using " \
67 "PMI\n", time);
68#endif
69 ret = pmi_msg.data2;
70 pr_debug("PMI returned slow mode %d\n", ret);
71
72 return ret;
73}
74EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
75
76
77static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
78{
79 u8 node, slow_mode;
80
81 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
82
83 node = pmi_msg.data1;
84 slow_mode = pmi_msg.data2;
85
86 pmi_slow_mode_limit[node] = slow_mode;
87
88 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
89}
90
91static int pmi_notifier(struct notifier_block *nb,
92 unsigned long event, void *data)
93{
94 struct cpufreq_policy *policy = data;
95 struct cpufreq_frequency_table *cbe_freqs;
96 u8 node;
97
98 cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
99 node = cbe_cpu_to_node(policy->cpu);
100
101 pr_debug("got notified, event=%lu, node=%u\n", event, node);
102
103 if (pmi_slow_mode_limit[node] != 0) {
104 pr_debug("limiting node %d to slow mode %d\n",
105 node, pmi_slow_mode_limit[node]);
106
107 cpufreq_verify_within_limits(policy, 0,
108
109 cbe_freqs[pmi_slow_mode_limit[node]].frequency);
110 }
111
112 return 0;
113}
114
115static struct notifier_block pmi_notifier_block = {
116 .notifier_call = pmi_notifier,
117};
118
119static struct pmi_handler cbe_pmi_handler = {
120 .type = PMI_TYPE_FREQ_CHANGE,
121 .handle_pmi_message = cbe_cpufreq_handle_pmi,
122};
123
124
125
126static int __init cbe_cpufreq_pmi_init(void)
127{
128 cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
129
130 if (!cbe_cpufreq_has_pmi)
131 return -ENODEV;
132
133 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
134
135 return 0;
136}
137
138static void __exit cbe_cpufreq_pmi_exit(void)
139{
140 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
141 pmi_unregister_handler(&cbe_pmi_handler);
142}
143
144module_init(cbe_cpufreq_pmi_init);
145module_exit(cbe_cpufreq_pmi_exit);
146
147MODULE_LICENSE("GPL");
148MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
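
The PMI backend added above routes the same request through firmware: cbe_cpufreq_set_pmode_pmi() packs the node and target pmode into a PMI_TYPE_FREQ_CHANGE message, and incoming messages of that type record a per-node slow-mode limit which the cpufreq policy notifier then enforces by capping the policy maximum. A minimal sketch of such a policy notifier is shown below; it reuses only calls that appear in this file (cbe_cpu_to_node, cpufreq_frequency_get_table, cpufreq_verify_within_limits), and the extra NULL check on the frequency table is an added assumption rather than part of the patch.

/* Sketch of a policy notifier that clamps the maximum frequency to a
 * firmware-reported table index, modelled on pmi_notifier() above.
 * slow_limit[] is a hypothetical per-node limit array. */
static u8 slow_limit[MAX_CBE];

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	u8 node = cbe_cpu_to_node(policy->cpu);

	table = cpufreq_frequency_get_table(policy->cpu);
	if (!table || slow_limit[node] == 0)
		return 0;

	/* index slow_limit[node] is the lowest permitted operating point */
	cpufreq_verify_within_limits(policy, 0,
				     table[slow_limit[node]].frequency);
	return 0;
}
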
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 12c9674b4b1f..c8f7f0007422 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -174,6 +174,13 @@ static struct device_node *cbe_get_be_node(int cpu_id)
174 174
175 cpu_handle = of_get_property(np, "cpus", &len); 175 cpu_handle = of_get_property(np, "cpus", &len);
176 176
177 /*
178 * the CAB SLOF tree is non compliant, so we just assume
179 * there is only one node
180 */
181 if (WARN_ON_ONCE(!cpu_handle))
182 return np;
183
177 for (i=0; i<len; i++) 184 for (i=0; i<len; i++)
178 if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL)) 185 if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
179 return np; 186 return np;
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index f370f0fa6f4c..e4132f8f51b3 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -292,7 +292,7 @@ static struct attribute_group ppe_attribute_group = {
292/* 292/*
293 * initialize throttling with default values 293 * initialize throttling with default values
294 */ 294 */
295static void __init init_default_values(void) 295static int __init init_default_values(void)
296{ 296{
297 int cpu; 297 int cpu;
298 struct cbe_pmd_regs __iomem *pmd_regs; 298 struct cbe_pmd_regs __iomem *pmd_regs;
@@ -339,25 +339,40 @@ static void __init init_default_values(void)
339 for_each_possible_cpu (cpu) { 339 for_each_possible_cpu (cpu) {
340 pr_debug("processing cpu %d\n", cpu); 340 pr_debug("processing cpu %d\n", cpu);
341 sysdev = get_cpu_sysdev(cpu); 341 sysdev = get_cpu_sysdev(cpu);
342
343 if (!sysdev) {
344 pr_info("invalid sysdev pointer for cbe_thermal\n");
345 return -EINVAL;
346 }
347
342 pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id); 348 pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
343 349
350 if (!pmd_regs) {
351 pr_info("invalid CBE regs pointer for cbe_thermal\n");
352 return -EINVAL;
353 }
354
344 out_be64(&pmd_regs->tm_str2, str2); 355 out_be64(&pmd_regs->tm_str2, str2);
345 out_be64(&pmd_regs->tm_str1.val, str1.val); 356 out_be64(&pmd_regs->tm_str1.val, str1.val);
346 out_be64(&pmd_regs->tm_tpr.val, tpr.val); 357 out_be64(&pmd_regs->tm_tpr.val, tpr.val);
347 out_be64(&pmd_regs->tm_cr1.val, cr1.val); 358 out_be64(&pmd_regs->tm_cr1.val, cr1.val);
348 out_be64(&pmd_regs->tm_cr2, cr2); 359 out_be64(&pmd_regs->tm_cr2, cr2);
349 } 360 }
361
362 return 0;
350} 363}
351 364
352 365
353static int __init thermal_init(void) 366static int __init thermal_init(void)
354{ 367{
355 init_default_values(); 368 int rc = init_default_values();
356 369
357 spu_add_sysdev_attr_group(&spu_attribute_group); 370 if (rc == 0) {
358 cpu_add_sysdev_attr_group(&ppe_attribute_group); 371 spu_add_sysdev_attr_group(&spu_attribute_group);
372 cpu_add_sysdev_attr_group(&ppe_attribute_group);
373 }
359 374
360 return 0; 375 return rc;
361} 376}
362module_init(thermal_init); 377module_init(thermal_init);
363 378
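
The thermal hunks above make init_default_values() fail gracefully instead of dereferencing a NULL sysdev or register pointer, and thermal_init() now registers the SPU/PPE sysfs attribute groups only when that initialization succeeded, returning the error otherwise. A standalone model of that validate-then-register pattern, with illustrative helpers in place of the kernel API:

/* Standalone model of the error propagation above: probe per-CPU
 * resources, return -EINVAL on the first missing one, and register
 * attributes only when everything succeeded. */
#include <stdio.h>

#define EINVAL 22
#define NR_CPUS 4

static int have_regs[NR_CPUS] = { 1, 1, 1, 1 };	/* fake probe results */

static int init_default_values(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!have_regs[cpu]) {
			fprintf(stderr, "invalid regs pointer for cpu %d\n", cpu);
			return -EINVAL;
		}
		/* program the throttling registers here */
	}
	return 0;
}

int main(void)
{
	int rc = init_default_values();

	if (rc == 0)
		printf("registering sysfs attribute groups\n");

	return rc ? 1 : 0;
}
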
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96a8f609690c..90124228b8f4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -35,18 +35,37 @@
35#include <asm/spu.h> 35#include <asm/spu.h>
36#include <asm/spu_priv1.h> 36#include <asm/spu_priv1.h>
37#include <asm/xmon.h> 37#include <asm/xmon.h>
38#include <asm/prom.h>
39#include "spu_priv1_mmio.h"
38 40
39const struct spu_management_ops *spu_management_ops; 41const struct spu_management_ops *spu_management_ops;
40EXPORT_SYMBOL_GPL(spu_management_ops); 42EXPORT_SYMBOL_GPL(spu_management_ops);
41 43
42const struct spu_priv1_ops *spu_priv1_ops; 44const struct spu_priv1_ops *spu_priv1_ops;
45EXPORT_SYMBOL_GPL(spu_priv1_ops);
43 46
44static struct list_head spu_list[MAX_NUMNODES]; 47struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
45static LIST_HEAD(spu_full_list); 48EXPORT_SYMBOL_GPL(cbe_spu_info);
46static DEFINE_MUTEX(spu_mutex);
47static DEFINE_SPINLOCK(spu_list_lock);
48 49
49EXPORT_SYMBOL_GPL(spu_priv1_ops); 50/*
51 * Protects cbe_spu_info and spu->number.
52 */
53static DEFINE_SPINLOCK(spu_lock);
54
55/*
56 * List of all spus in the system.
57 *
58 * This list is iterated by callers from irq context and callers that
59 * want to sleep. Thus modifications need to be done with both
60 * spu_full_list_lock and spu_full_list_mutex held, while iterating
61 * through it requires either of these locks.
62 *
63 * In addition spu_full_list_lock protects all assignments to
64 * spu->mm.
65 */
66static LIST_HEAD(spu_full_list);
67static DEFINE_SPINLOCK(spu_full_list_lock);
68static DEFINE_MUTEX(spu_full_list_mutex);
50 69
51void spu_invalidate_slbs(struct spu *spu) 70void spu_invalidate_slbs(struct spu *spu)
52{ 71{
@@ -65,12 +84,12 @@ void spu_flush_all_slbs(struct mm_struct *mm)
65 struct spu *spu; 84 struct spu *spu;
66 unsigned long flags; 85 unsigned long flags;
67 86
68 spin_lock_irqsave(&spu_list_lock, flags); 87 spin_lock_irqsave(&spu_full_list_lock, flags);
69 list_for_each_entry(spu, &spu_full_list, full_list) { 88 list_for_each_entry(spu, &spu_full_list, full_list) {
70 if (spu->mm == mm) 89 if (spu->mm == mm)
71 spu_invalidate_slbs(spu); 90 spu_invalidate_slbs(spu);
72 } 91 }
73 spin_unlock_irqrestore(&spu_list_lock, flags); 92 spin_unlock_irqrestore(&spu_full_list_lock, flags);
74} 93}
75 94
76/* The hack below stinks... try to do something better one of 95/* The hack below stinks... try to do something better one of
@@ -88,9 +107,9 @@ void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
88{ 107{
89 unsigned long flags; 108 unsigned long flags;
90 109
91 spin_lock_irqsave(&spu_list_lock, flags); 110 spin_lock_irqsave(&spu_full_list_lock, flags);
92 spu->mm = mm; 111 spu->mm = mm;
93 spin_unlock_irqrestore(&spu_list_lock, flags); 112 spin_unlock_irqrestore(&spu_full_list_lock, flags);
94 if (mm) 113 if (mm)
95 mm_needs_global_tlbie(mm); 114 mm_needs_global_tlbie(mm);
96} 115}
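
The spu_base.c rework replaces the old spu_list/spu_mutex/spu_list_lock trio with a single global spu_full_list guarded by two primitives: spu_full_list_lock, a spinlock usable from interrupt context (as in spu_flush_all_slbs() above), and spu_full_list_mutex for walkers that may sleep, such as the sysdev attribute helpers further down. Per the new comment, modifications must hold both while readers may hold either. The sketch below mirrors the insertion later done in create_spu(), just isolated to show the ordering of the two locks.

/* Sketch of the "writers take both, readers take either" rule: add an
 * spu to the global list while excluding both kinds of readers. */
static void example_add_to_full_list(struct spu *spu)
{
	unsigned long flags;

	mutex_lock(&spu_full_list_mutex);		/* blocks sleepable walkers */
	spin_lock_irqsave(&spu_full_list_lock, flags);	/* blocks irq-context walkers */
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);
}
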
@@ -390,7 +409,7 @@ static void spu_free_irqs(struct spu *spu)
390 free_irq(spu->irqs[2], spu); 409 free_irq(spu->irqs[2], spu);
391} 410}
392 411
393static void spu_init_channels(struct spu *spu) 412void spu_init_channels(struct spu *spu)
394{ 413{
395 static const struct { 414 static const struct {
396 unsigned channel; 415 unsigned channel;
@@ -423,46 +442,7 @@ static void spu_init_channels(struct spu *spu)
423 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count); 442 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
424 } 443 }
425} 444}
426 445EXPORT_SYMBOL_GPL(spu_init_channels);
427struct spu *spu_alloc_node(int node)
428{
429 struct spu *spu = NULL;
430
431 mutex_lock(&spu_mutex);
432 if (!list_empty(&spu_list[node])) {
433 spu = list_entry(spu_list[node].next, struct spu, list);
434 list_del_init(&spu->list);
435 pr_debug("Got SPU %d %d\n", spu->number, spu->node);
436 }
437 mutex_unlock(&spu_mutex);
438
439 if (spu)
440 spu_init_channels(spu);
441 return spu;
442}
443EXPORT_SYMBOL_GPL(spu_alloc_node);
444
445struct spu *spu_alloc(void)
446{
447 struct spu *spu = NULL;
448 int node;
449
450 for (node = 0; node < MAX_NUMNODES; node++) {
451 spu = spu_alloc_node(node);
452 if (spu)
453 break;
454 }
455
456 return spu;
457}
458
459void spu_free(struct spu *spu)
460{
461 mutex_lock(&spu_mutex);
462 list_add_tail(&spu->list, &spu_list[spu->node]);
463 mutex_unlock(&spu_mutex);
464}
465EXPORT_SYMBOL_GPL(spu_free);
466 446
467static int spu_shutdown(struct sys_device *sysdev) 447static int spu_shutdown(struct sys_device *sysdev)
468{ 448{
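
With this hunk the spu_alloc_node()/spu_alloc()/spu_free() allocator and the static per-node spu_list disappear from spu_base.c: physical SPUs are instead tracked in the exported cbe_spu_info[] array, one entry per node with its own list_mutex, spus list and n_spus count, so the spufs scheduler can place contexts directly, and spu_init_channels() is exported for those new callers. A short sketch of walking one node's SPUs under the new structure, using only fields this patch introduces (alloc_state is set to SPU_FREE in create_spu() below); the function itself is illustrative.

/* Sketch: count the free SPUs on one node via cbe_spu_info[]. */
static int count_free_spus(int node)
{
	struct spu *spu;
	int free = 0;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
		if (spu->alloc_state == SPU_FREE)
			free++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	return free;
}
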
@@ -481,12 +461,12 @@ struct sysdev_class spu_sysdev_class = {
481int spu_add_sysdev_attr(struct sysdev_attribute *attr) 461int spu_add_sysdev_attr(struct sysdev_attribute *attr)
482{ 462{
483 struct spu *spu; 463 struct spu *spu;
484 mutex_lock(&spu_mutex);
485 464
465 mutex_lock(&spu_full_list_mutex);
486 list_for_each_entry(spu, &spu_full_list, full_list) 466 list_for_each_entry(spu, &spu_full_list, full_list)
487 sysdev_create_file(&spu->sysdev, attr); 467 sysdev_create_file(&spu->sysdev, attr);
468 mutex_unlock(&spu_full_list_mutex);
488 469
489 mutex_unlock(&spu_mutex);
490 return 0; 470 return 0;
491} 471}
492EXPORT_SYMBOL_GPL(spu_add_sysdev_attr); 472EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
@@ -494,12 +474,12 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
494int spu_add_sysdev_attr_group(struct attribute_group *attrs) 474int spu_add_sysdev_attr_group(struct attribute_group *attrs)
495{ 475{
496 struct spu *spu; 476 struct spu *spu;
497 mutex_lock(&spu_mutex);
498 477
478 mutex_lock(&spu_full_list_mutex);
499 list_for_each_entry(spu, &spu_full_list, full_list) 479 list_for_each_entry(spu, &spu_full_list, full_list)
500 sysfs_create_group(&spu->sysdev.kobj, attrs); 480 sysfs_create_group(&spu->sysdev.kobj, attrs);
481 mutex_unlock(&spu_full_list_mutex);
501 482
502 mutex_unlock(&spu_mutex);
503 return 0; 483 return 0;
504} 484}
505EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group); 485EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
@@ -508,24 +488,22 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
508void spu_remove_sysdev_attr(struct sysdev_attribute *attr) 488void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
509{ 489{
510 struct spu *spu; 490 struct spu *spu;
511 mutex_lock(&spu_mutex);
512 491
492 mutex_lock(&spu_full_list_mutex);
513 list_for_each_entry(spu, &spu_full_list, full_list) 493 list_for_each_entry(spu, &spu_full_list, full_list)
514 sysdev_remove_file(&spu->sysdev, attr); 494 sysdev_remove_file(&spu->sysdev, attr);
515 495 mutex_unlock(&spu_full_list_mutex);
516 mutex_unlock(&spu_mutex);
517} 496}
518EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr); 497EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
519 498
520void spu_remove_sysdev_attr_group(struct attribute_group *attrs) 499void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
521{ 500{
522 struct spu *spu; 501 struct spu *spu;
523 mutex_lock(&spu_mutex);
524 502
503 mutex_lock(&spu_full_list_mutex);
525 list_for_each_entry(spu, &spu_full_list, full_list) 504 list_for_each_entry(spu, &spu_full_list, full_list)
526 sysfs_remove_group(&spu->sysdev.kobj, attrs); 505 sysfs_remove_group(&spu->sysdev.kobj, attrs);
527 506 mutex_unlock(&spu_full_list_mutex);
528 mutex_unlock(&spu_mutex);
529} 507}
530EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group); 508EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
531 509
@@ -553,16 +531,19 @@ static int __init create_spu(void *data)
553 int ret; 531 int ret;
554 static int number; 532 static int number;
555 unsigned long flags; 533 unsigned long flags;
534 struct timespec ts;
556 535
557 ret = -ENOMEM; 536 ret = -ENOMEM;
558 spu = kzalloc(sizeof (*spu), GFP_KERNEL); 537 spu = kzalloc(sizeof (*spu), GFP_KERNEL);
559 if (!spu) 538 if (!spu)
560 goto out; 539 goto out;
561 540
541 spu->alloc_state = SPU_FREE;
542
562 spin_lock_init(&spu->register_lock); 543 spin_lock_init(&spu->register_lock);
563 mutex_lock(&spu_mutex); 544 spin_lock(&spu_lock);
564 spu->number = number++; 545 spu->number = number++;
565 mutex_unlock(&spu_mutex); 546 spin_unlock(&spu_lock);
566 547
567 ret = spu_create_spu(spu, data); 548 ret = spu_create_spu(spu, data);
568 549
@@ -579,15 +560,22 @@ static int __init create_spu(void *data)
579 if (ret) 560 if (ret)
580 goto out_free_irqs; 561 goto out_free_irqs;
581 562
582 mutex_lock(&spu_mutex); 563 mutex_lock(&cbe_spu_info[spu->node].list_mutex);
583 spin_lock_irqsave(&spu_list_lock, flags); 564 list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
584 list_add(&spu->list, &spu_list[spu->node]); 565 cbe_spu_info[spu->node].n_spus++;
566 mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
567
568 mutex_lock(&spu_full_list_mutex);
569 spin_lock_irqsave(&spu_full_list_lock, flags);
585 list_add(&spu->full_list, &spu_full_list); 570 list_add(&spu->full_list, &spu_full_list);
586 spin_unlock_irqrestore(&spu_list_lock, flags); 571 spin_unlock_irqrestore(&spu_full_list_lock, flags);
587 mutex_unlock(&spu_mutex); 572 mutex_unlock(&spu_full_list_mutex);
573
574 spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
575 ktime_get_ts(&ts);
576 spu->stats.tstamp = timespec_to_ns(&ts);
588 577
589 spu->stats.utilization_state = SPU_UTIL_IDLE; 578 INIT_LIST_HEAD(&spu->aff_list);
590 spu->stats.tstamp = jiffies;
591 579
592 goto out; 580 goto out;
593 581
@@ -608,12 +596,20 @@ static const char *spu_state_names[] = {
608static unsigned long long spu_acct_time(struct spu *spu, 596static unsigned long long spu_acct_time(struct spu *spu,
609 enum spu_utilization_state state) 597 enum spu_utilization_state state)
610{ 598{
599 struct timespec ts;
611 unsigned long long time = spu->stats.times[state]; 600 unsigned long long time = spu->stats.times[state];
612 601
613 if (spu->stats.utilization_state == state) 602 /*
614 time += jiffies - spu->stats.tstamp; 603 * If the spu is idle or the context is stopped, utilization
604 * statistics are not updated. Apply the time delta from the
605 * last recorded state of the spu.
606 */
607 if (spu->stats.util_state == state) {
608 ktime_get_ts(&ts);
609 time += timespec_to_ns(&ts) - spu->stats.tstamp;
610 }
615 611
616 return jiffies_to_msecs(time); 612 return time / NSEC_PER_MSEC;
617} 613}
618 614
619 615
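
Utilization accounting in spu_base.c moves from jiffies to nanoseconds: create_spu() now stamps spu->stats.tstamp with timespec_to_ns() of a ktime_get_ts() reading, and spu_acct_time() adds the elapsed delta for the currently active state before converting the total with NSEC_PER_MSEC. A standalone model of just that arithmetic (the timestamps are fabricated; only the delta-plus-total-to-milliseconds logic mirrors the patch):

/* Standalone model of spu_acct_time(): per-state totals are kept in
 * nanoseconds, the active state also accrues the time since its last
 * recorded timestamp, and the result is reported in milliseconds. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

static uint64_t acct_time_ms(uint64_t state_total_ns, int state_is_current,
			     uint64_t now_ns, uint64_t tstamp_ns)
{
	uint64_t time = state_total_ns;

	if (state_is_current)
		time += now_ns - tstamp_ns;

	return time / NSEC_PER_MSEC;
}

int main(void)
{
	uint64_t total = 1500 * NSEC_PER_MSEC;		/* 1.5 s already accumulated */
	uint64_t tstamp = 10000 * NSEC_PER_MSEC;	/* state entered here */
	uint64_t now = 10250 * NSEC_PER_MSEC;		/* 250 ms later */

	printf("%llu ms\n",
	       (unsigned long long)acct_time_ms(total, 1, now, tstamp));
	return 0;
}
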
@@ -623,11 +619,11 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
623 619
624 return sprintf(buf, "%s %llu %llu %llu %llu " 620 return sprintf(buf, "%s %llu %llu %llu %llu "
625 "%llu %llu %llu %llu %llu %llu %llu %llu\n", 621 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
626 spu_state_names[spu->stats.utilization_state], 622 spu_state_names[spu->stats.util_state],
627 spu_acct_time(spu, SPU_UTIL_USER), 623 spu_acct_time(spu, SPU_UTIL_USER),
628 spu_acct_time(spu, SPU_UTIL_SYSTEM), 624 spu_acct_time(spu, SPU_UTIL_SYSTEM),
629 spu_acct_time(spu, SPU_UTIL_IOWAIT), 625 spu_acct_time(spu, SPU_UTIL_IOWAIT),
630 spu_acct_time(spu, SPU_UTIL_IDLE), 626 spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
631 spu->stats.vol_ctx_switch, 627 spu->stats.vol_ctx_switch,
632 spu->stats.invol_ctx_switch, 628 spu->stats.invol_ctx_switch,
633 spu->stats.slb_flt, 629 spu->stats.slb_flt,
@@ -640,12 +636,146 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
640 636
641static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); 637static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
642 638
639/* Hardcoded affinity idxs for QS20 */
640#define SPES_PER_BE 8
641static int QS20_reg_idxs[SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
642static int QS20_reg_memory[SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
643
644static struct spu *spu_lookup_reg(int node, u32 reg)
645{
646 struct spu *spu;
647
648 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
649 if (*(u32 *)get_property(spu_devnode(spu), "reg", NULL) == reg)
650 return spu;
651 }
652 return NULL;
653}
654
655static void init_aff_QS20_harcoded(void)
656{
657 int node, i;
658 struct spu *last_spu, *spu;
659 u32 reg;
660
661 for (node = 0; node < MAX_NUMNODES; node++) {
662 last_spu = NULL;
663 for (i = 0; i < SPES_PER_BE; i++) {
664 reg = QS20_reg_idxs[i];
665 spu = spu_lookup_reg(node, reg);
666 if (!spu)
667 continue;
668 spu->has_mem_affinity = QS20_reg_memory[reg];
669 if (last_spu)
670 list_add_tail(&spu->aff_list,
671 &last_spu->aff_list);
672 last_spu = spu;
673 }
674 }
675}
676
677static int of_has_vicinity(void)
678{
679 struct spu* spu;
680
681 spu = list_entry(cbe_spu_info[0].spus.next, struct spu, cbe_list);
682 return of_find_property(spu_devnode(spu), "vicinity", NULL) != NULL;
683}
684
685static struct spu *aff_devnode_spu(int cbe, struct device_node *dn)
686{
687 struct spu *spu;
688
689 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
690 if (spu_devnode(spu) == dn)
691 return spu;
692 return NULL;
693}
694
695static struct spu *
696aff_node_next_to(int cbe, struct device_node *target, struct device_node *avoid)
697{
698 struct spu *spu;
699 const phandle *vic_handles;
700 int lenp, i;
701
702 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
703 if (spu_devnode(spu) == avoid)
704 continue;
705 vic_handles = get_property(spu_devnode(spu), "vicinity", &lenp);
706 for (i=0; i < (lenp / sizeof(phandle)); i++) {
707 if (vic_handles[i] == target->linux_phandle)
708 return spu;
709 }
710 }
711 return NULL;
712}
713
714static void init_aff_fw_vicinity_node(int cbe)
715{
716 struct spu *spu, *last_spu;
717 struct device_node *vic_dn, *last_spu_dn;
718 phandle avoid_ph;
719 const phandle *vic_handles;
720 const char *name;
721 int lenp, i, added, mem_aff;
722
723 last_spu = list_entry(cbe_spu_info[cbe].spus.next, struct spu, cbe_list);
724 avoid_ph = 0;
725 for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
726 last_spu_dn = spu_devnode(last_spu);
727 vic_handles = get_property(last_spu_dn, "vicinity", &lenp);
728
729 for (i = 0; i < (lenp / sizeof(phandle)); i++) {
730 if (vic_handles[i] == avoid_ph)
731 continue;
732
733 vic_dn = of_find_node_by_phandle(vic_handles[i]);
734 if (!vic_dn)
735 continue;
736
737 name = get_property(vic_dn, "name", NULL);
738 if (strcmp(name, "spe") == 0) {
739 spu = aff_devnode_spu(cbe, vic_dn);
740 avoid_ph = last_spu_dn->linux_phandle;
741 }
742 else {
743 mem_aff = strcmp(name, "mic-tm") == 0;
744 spu = aff_node_next_to(cbe, vic_dn, last_spu_dn);
745 if (!spu)
746 continue;
747 if (mem_aff) {
748 last_spu->has_mem_affinity = 1;
749 spu->has_mem_affinity = 1;
750 }
751 avoid_ph = vic_dn->linux_phandle;
752 }
753 list_add_tail(&spu->aff_list, &last_spu->aff_list);
754 last_spu = spu;
755 break;
756 }
757 }
758}
759
760static void init_aff_fw_vicinity(void)
761{
762 int cbe;
763
764 /* sets has_mem_affinity for each spu, as well as the
765 * spu->aff_list list, linking each spu to its neighbors
766 */
767 for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
768 init_aff_fw_vicinity_node(cbe);
769}
770
643static int __init init_spu_base(void) 771static int __init init_spu_base(void)
644{ 772{
645 int i, ret = 0; 773 int i, ret = 0;
646 774
647 for (i = 0; i < MAX_NUMNODES; i++) 775 for (i = 0; i < MAX_NUMNODES; i++) {
648 INIT_LIST_HEAD(&spu_list[i]); 776 mutex_init(&cbe_spu_info[i].list_mutex);
777 INIT_LIST_HEAD(&cbe_spu_info[i].spus);
778 }
649 779
650 if (!spu_management_ops) 780 if (!spu_management_ops)
651 goto out; 781 goto out;
@@ -675,16 +805,25 @@ static int __init init_spu_base(void)
675 fb_append_extra_logo(&logo_spe_clut224, ret); 805 fb_append_extra_logo(&logo_spe_clut224, ret);
676 } 806 }
677 807
808 mutex_lock(&spu_full_list_mutex);
678 xmon_register_spus(&spu_full_list); 809 xmon_register_spus(&spu_full_list);
679 810 crash_register_spus(&spu_full_list);
811 mutex_unlock(&spu_full_list_mutex);
680 spu_add_sysdev_attr(&attr_stat); 812 spu_add_sysdev_attr(&attr_stat);
681 813
814 if (of_has_vicinity()) {
815 init_aff_fw_vicinity();
816 } else {
817 long root = of_get_flat_dt_root();
818 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
819 init_aff_QS20_harcoded();
820 }
821
682 return 0; 822 return 0;
683 823
684 out_unregister_sysdev_class: 824 out_unregister_sysdev_class:
685 sysdev_class_unregister(&spu_sysdev_class); 825 sysdev_class_unregister(&spu_sysdev_class);
686 out: 826 out:
687
688 return ret; 827 return ret;
689} 828}
690module_init(init_spu_base); 829module_init(init_spu_base);
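
The large addition to spu_base.c builds the SPU affinity topology at boot: when the firmware exposes "vicinity" properties, init_aff_fw_vicinity() chains neighbouring SPEs into each spu's aff_list and sets has_mem_affinity when the neighbour is the "mic-tm" node; otherwise, on QS20 boards (flat device tree root compatible with "IBM,CPBW-1.0"), the hardcoded QS20_reg_idxs/QS20_reg_memory tables provide the same chain. The standalone model below just walks those two tables the way init_aff_QS20_harcoded() does, printing the resulting neighbour order; the output format is illustrative.

/* Standalone model of the QS20 hardcoded ordering: the index table
 * gives the physical neighbour chain, the memory table marks SPEs
 * that sit next to the memory interface controller. */
#include <stdio.h>

#define SPES_PER_BE 8

static const int qs20_reg_idxs[SPES_PER_BE]   = { 0, 2, 4, 6, 7, 5, 3, 1 };
static const int qs20_reg_memory[SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };

int main(void)
{
	int i, reg;

	for (i = 0; i < SPES_PER_BE; i++) {
		reg = qs20_reg_idxs[i];
		printf("chain position %d: spe reg %d%s\n", i, reg,
		       qs20_reg_memory[reg] ? " (memory affinity)" : "");
	}
	return 0;
}
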
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 261b507a901a..dd2c6688c8aa 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -34,14 +34,27 @@ struct spufs_calls spufs_calls = {
34 * this file is not used and the syscalls directly enter the fs code */ 34 * this file is not used and the syscalls directly enter the fs code */
35 35
36asmlinkage long sys_spu_create(const char __user *name, 36asmlinkage long sys_spu_create(const char __user *name,
37 unsigned int flags, mode_t mode) 37 unsigned int flags, mode_t mode, int neighbor_fd)
38{ 38{
39 long ret; 39 long ret;
40 struct module *owner = spufs_calls.owner; 40 struct module *owner = spufs_calls.owner;
41 struct file *neighbor;
42 int fput_needed;
41 43
42 ret = -ENOSYS; 44 ret = -ENOSYS;
43 if (owner && try_module_get(owner)) { 45 if (owner && try_module_get(owner)) {
44 ret = spufs_calls.create_thread(name, flags, mode); 46 if (flags & SPU_CREATE_AFFINITY_SPU) {
47 neighbor = fget_light(neighbor_fd, &fput_needed);
48 if (neighbor) {
49 ret = spufs_calls.create_thread(name, flags,
50 mode, neighbor);
51 fput_light(neighbor, fput_needed);
52 }
53 }
54 else {
55 ret = spufs_calls.create_thread(name, flags,
56 mode, NULL);
57 }
45 module_put(owner); 58 module_put(owner);
46 } 59 }
47 return ret; 60 return ret;
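
sys_spu_create() grows a fourth argument, neighbor_fd: when SPU_CREATE_AFFINITY_SPU is set, the syscall resolves the descriptor with fget_light(), passes the referenced file down to spufs_calls.create_thread() so the new context can be placed next to an existing one, and drops the reference with fput_light(); without the flag it passes NULL as before. The sketch below isolates that descriptor-reference pattern; with_neighbor_file() and its -EBADF fallback are illustrative, not part of the patch.

/* Sketch of the fget_light()/fput_light() pattern used above: hold a
 * light reference on the fd's struct file for the duration of one
 * call, then drop it. */
static long with_neighbor_file(int neighbor_fd,
			       long (*use_file)(struct file *neighbor))
{
	struct file *neighbor;
	int fput_needed;
	long ret = -EBADF;

	neighbor = fget_light(neighbor_fd, &fput_needed);
	if (neighbor) {
		ret = use_file(neighbor);	/* e.g. create_thread(...) */
		fput_light(neighbor, fput_needed);
	}
	return ret;
}
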
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 6d7bd60f5380..6694f86d7000 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/module.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <asm/atomic.h> 27#include <asm/atomic.h>
27#include <asm/spu.h> 28#include <asm/spu.h>
@@ -55,12 +56,12 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
55 ctx->ops = &spu_backing_ops; 56 ctx->ops = &spu_backing_ops;
56 ctx->owner = get_task_mm(current); 57 ctx->owner = get_task_mm(current);
57 INIT_LIST_HEAD(&ctx->rq); 58 INIT_LIST_HEAD(&ctx->rq);
59 INIT_LIST_HEAD(&ctx->aff_list);
58 if (gang) 60 if (gang)
59 spu_gang_add_ctx(gang, ctx); 61 spu_gang_add_ctx(gang, ctx);
60 ctx->cpus_allowed = current->cpus_allowed; 62 ctx->cpus_allowed = current->cpus_allowed;
61 spu_set_timeslice(ctx); 63 spu_set_timeslice(ctx);
62 ctx->stats.execution_state = SPUCTX_UTIL_USER; 64 ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
63 ctx->stats.tstamp = jiffies;
64 65
65 atomic_inc(&nr_spu_contexts); 66 atomic_inc(&nr_spu_contexts);
66 goto out; 67 goto out;
@@ -81,6 +82,8 @@ void destroy_spu_context(struct kref *kref)
81 spu_fini_csa(&ctx->csa); 82 spu_fini_csa(&ctx->csa);
82 if (ctx->gang) 83 if (ctx->gang)
83 spu_gang_remove_ctx(ctx->gang, ctx); 84 spu_gang_remove_ctx(ctx->gang, ctx);
85 if (ctx->prof_priv_kref)
86 kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
84 BUG_ON(!list_empty(&ctx->rq)); 87 BUG_ON(!list_empty(&ctx->rq));
85 atomic_dec(&nr_spu_contexts); 88 atomic_dec(&nr_spu_contexts);
86 kfree(ctx); 89 kfree(ctx);
@@ -166,6 +169,39 @@ int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
166void spu_acquire_saved(struct spu_context *ctx) 169void spu_acquire_saved(struct spu_context *ctx)
167{ 170{
168 spu_acquire(ctx); 171 spu_acquire(ctx);
169 if (ctx->state != SPU_STATE_SAVED) 172 if (ctx->state != SPU_STATE_SAVED) {
173 set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
170 spu_deactivate(ctx); 174 spu_deactivate(ctx);
175 }
176}
177
178/**
179 * spu_release_saved - unlock spu context and return it to the runqueue
180 * @ctx: context to unlock
181 */
182void spu_release_saved(struct spu_context *ctx)
183{
184 BUG_ON(ctx->state != SPU_STATE_SAVED);
185
186 if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
187 spu_activate(ctx, 0);
188
189 spu_release(ctx);
171} 190}
191
192void spu_set_profile_private_kref(struct spu_context *ctx,
193 struct kref *prof_info_kref,
194 void ( * prof_info_release) (struct kref *kref))
195{
196 ctx->prof_priv_kref = prof_info_kref;
197 ctx->prof_priv_release = prof_info_release;
198}
199EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
200
201void *spu_get_profile_private_kref(struct spu_context *ctx)
202{
203 return ctx->prof_priv_kref;
204}
205EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
206
207
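
In context.c, spu_acquire_saved() now remembers whether the context was active (SPU_SCHED_WAS_ACTIVE) before deactivating it, and the new spu_release_saved() reactivates and unlocks it; every caller that forces a context into the saved state for register access must therefore pair the two, which is exactly what the coredump.c and file.c hunks below do. The patch also lets a profiler attach private data to a context through a kref plus release callback. A short sketch of the acquire/release pairing, modelled directly on spufs_srr0_get() further down:

/* Sketch: read a value from the saved context image, then let
 * spu_release_saved() reactivate the context if it had been running. */
static u64 example_read_saved_srr0(struct spu_context *ctx)
{
	u64 ret;

	spu_acquire_saved(ctx);
	ret = ctx->csa.lscsa->srr0.slot[0];	/* saved local-store image */
	spu_release_saved(ctx);

	return ret;
}
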
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 5d9ad5a0307b..5e31799b1e3f 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -226,7 +226,7 @@ static void spufs_arch_write_notes(struct file *file)
226 spu_acquire_saved(ctx_info->ctx); 226 spu_acquire_saved(ctx_info->ctx);
227 for (j = 0; j < spufs_coredump_num_notes; j++) 227 for (j = 0; j < spufs_coredump_num_notes; j++)
228 spufs_arch_write_note(ctx_info, j, file); 228 spufs_arch_write_note(ctx_info, j, file);
229 spu_release(ctx_info->ctx); 229 spu_release_saved(ctx_info->ctx);
230 list_del(&ctx_info->list); 230 list_del(&ctx_info->list);
231 kfree(ctx_info); 231 kfree(ctx_info);
232 } 232 }
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index f53a07437472..917eab4be486 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -179,16 +179,14 @@ int spufs_handle_class1(struct spu_context *ctx)
179 if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) 179 if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
180 return 0; 180 return 0;
181 181
182 spuctx_switch_state(ctx, SPUCTX_UTIL_IOWAIT); 182 spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
183 183
184 pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea, 184 pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
185 dsisr, ctx->state); 185 dsisr, ctx->state);
186 186
187 ctx->stats.hash_flt++; 187 ctx->stats.hash_flt++;
188 if (ctx->state == SPU_STATE_RUNNABLE) { 188 if (ctx->state == SPU_STATE_RUNNABLE)
189 ctx->spu->stats.hash_flt++; 189 ctx->spu->stats.hash_flt++;
190 spu_switch_state(ctx->spu, SPU_UTIL_IOWAIT);
191 }
192 190
193 /* we must not hold the lock when entering spu_handle_mm_fault */ 191 /* we must not hold the lock when entering spu_handle_mm_fault */
194 spu_release(ctx); 192 spu_release(ctx);
@@ -226,7 +224,7 @@ int spufs_handle_class1(struct spu_context *ctx)
226 } else 224 } else
227 spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); 225 spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
228 226
229 spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM); 227 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
230 return ret; 228 return ret;
231} 229}
232EXPORT_SYMBOL_GPL(spufs_handle_class1); 230EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c2814ea96af2..4100ddc52f02 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -370,7 +370,7 @@ spufs_regs_read(struct file *file, char __user *buffer,
370 370
371 spu_acquire_saved(ctx); 371 spu_acquire_saved(ctx);
372 ret = __spufs_regs_read(ctx, buffer, size, pos); 372 ret = __spufs_regs_read(ctx, buffer, size, pos);
373 spu_release(ctx); 373 spu_release_saved(ctx);
374 return ret; 374 return ret;
375} 375}
376 376
@@ -392,7 +392,7 @@ spufs_regs_write(struct file *file, const char __user *buffer,
392 ret = copy_from_user(lscsa->gprs + *pos - size, 392 ret = copy_from_user(lscsa->gprs + *pos - size,
393 buffer, size) ? -EFAULT : size; 393 buffer, size) ? -EFAULT : size;
394 394
395 spu_release(ctx); 395 spu_release_saved(ctx);
396 return ret; 396 return ret;
397} 397}
398 398
@@ -421,7 +421,7 @@ spufs_fpcr_read(struct file *file, char __user * buffer,
421 421
422 spu_acquire_saved(ctx); 422 spu_acquire_saved(ctx);
423 ret = __spufs_fpcr_read(ctx, buffer, size, pos); 423 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
424 spu_release(ctx); 424 spu_release_saved(ctx);
425 return ret; 425 return ret;
426} 426}
427 427
@@ -443,7 +443,7 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
443 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size, 443 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
444 buffer, size) ? -EFAULT : size; 444 buffer, size) ? -EFAULT : size;
445 445
446 spu_release(ctx); 446 spu_release_saved(ctx);
447 return ret; 447 return ret;
448} 448}
449 449
@@ -868,7 +868,7 @@ static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
868 868
869 spu_acquire_saved(ctx); 869 spu_acquire_saved(ctx);
870 ret = __spufs_signal1_read(ctx, buf, len, pos); 870 ret = __spufs_signal1_read(ctx, buf, len, pos);
871 spu_release(ctx); 871 spu_release_saved(ctx);
872 872
873 return ret; 873 return ret;
874} 874}
@@ -934,6 +934,13 @@ static const struct file_operations spufs_signal1_fops = {
934 .mmap = spufs_signal1_mmap, 934 .mmap = spufs_signal1_mmap,
935}; 935};
936 936
937static const struct file_operations spufs_signal1_nosched_fops = {
938 .open = spufs_signal1_open,
939 .release = spufs_signal1_release,
940 .write = spufs_signal1_write,
941 .mmap = spufs_signal1_mmap,
942};
943
937static int spufs_signal2_open(struct inode *inode, struct file *file) 944static int spufs_signal2_open(struct inode *inode, struct file *file)
938{ 945{
939 struct spufs_inode_info *i = SPUFS_I(inode); 946 struct spufs_inode_info *i = SPUFS_I(inode);
@@ -992,7 +999,7 @@ static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
992 999
993 spu_acquire_saved(ctx); 1000 spu_acquire_saved(ctx);
994 ret = __spufs_signal2_read(ctx, buf, len, pos); 1001 ret = __spufs_signal2_read(ctx, buf, len, pos);
995 spu_release(ctx); 1002 spu_release_saved(ctx);
996 1003
997 return ret; 1004 return ret;
998} 1005}
@@ -1062,6 +1069,13 @@ static const struct file_operations spufs_signal2_fops = {
1062 .mmap = spufs_signal2_mmap, 1069 .mmap = spufs_signal2_mmap,
1063}; 1070};
1064 1071
1072static const struct file_operations spufs_signal2_nosched_fops = {
1073 .open = spufs_signal2_open,
1074 .release = spufs_signal2_release,
1075 .write = spufs_signal2_write,
1076 .mmap = spufs_signal2_mmap,
1077};
1078
1065static void spufs_signal1_type_set(void *data, u64 val) 1079static void spufs_signal1_type_set(void *data, u64 val)
1066{ 1080{
1067 struct spu_context *ctx = data; 1081 struct spu_context *ctx = data;
@@ -1612,7 +1626,7 @@ static void spufs_decr_set(void *data, u64 val)
1612 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1626 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1613 spu_acquire_saved(ctx); 1627 spu_acquire_saved(ctx);
1614 lscsa->decr.slot[0] = (u32) val; 1628 lscsa->decr.slot[0] = (u32) val;
1615 spu_release(ctx); 1629 spu_release_saved(ctx);
1616} 1630}
1617 1631
1618static u64 __spufs_decr_get(void *data) 1632static u64 __spufs_decr_get(void *data)
@@ -1628,7 +1642,7 @@ static u64 spufs_decr_get(void *data)
1628 u64 ret; 1642 u64 ret;
1629 spu_acquire_saved(ctx); 1643 spu_acquire_saved(ctx);
1630 ret = __spufs_decr_get(data); 1644 ret = __spufs_decr_get(data);
1631 spu_release(ctx); 1645 spu_release_saved(ctx);
1632 return ret; 1646 return ret;
1633} 1647}
1634DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, 1648DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
@@ -1637,17 +1651,21 @@ DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1637static void spufs_decr_status_set(void *data, u64 val) 1651static void spufs_decr_status_set(void *data, u64 val)
1638{ 1652{
1639 struct spu_context *ctx = data; 1653 struct spu_context *ctx = data;
1640 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1641 spu_acquire_saved(ctx); 1654 spu_acquire_saved(ctx);
1642 lscsa->decr_status.slot[0] = (u32) val; 1655 if (val)
1643 spu_release(ctx); 1656 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1657 else
1658 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1659 spu_release_saved(ctx);
1644} 1660}
1645 1661
1646static u64 __spufs_decr_status_get(void *data) 1662static u64 __spufs_decr_status_get(void *data)
1647{ 1663{
1648 struct spu_context *ctx = data; 1664 struct spu_context *ctx = data;
1649 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1665 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1650 return lscsa->decr_status.slot[0]; 1666 return SPU_DECR_STATUS_RUNNING;
1667 else
1668 return 0;
1651} 1669}
1652 1670
1653static u64 spufs_decr_status_get(void *data) 1671static u64 spufs_decr_status_get(void *data)
@@ -1656,7 +1674,7 @@ static u64 spufs_decr_status_get(void *data)
1656 u64 ret; 1674 u64 ret;
1657 spu_acquire_saved(ctx); 1675 spu_acquire_saved(ctx);
1658 ret = __spufs_decr_status_get(data); 1676 ret = __spufs_decr_status_get(data);
1659 spu_release(ctx); 1677 spu_release_saved(ctx);
1660 return ret; 1678 return ret;
1661} 1679}
1662DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, 1680DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
@@ -1668,7 +1686,7 @@ static void spufs_event_mask_set(void *data, u64 val)
1668 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1686 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1669 spu_acquire_saved(ctx); 1687 spu_acquire_saved(ctx);
1670 lscsa->event_mask.slot[0] = (u32) val; 1688 lscsa->event_mask.slot[0] = (u32) val;
1671 spu_release(ctx); 1689 spu_release_saved(ctx);
1672} 1690}
1673 1691
1674static u64 __spufs_event_mask_get(void *data) 1692static u64 __spufs_event_mask_get(void *data)
@@ -1684,7 +1702,7 @@ static u64 spufs_event_mask_get(void *data)
1684 u64 ret; 1702 u64 ret;
1685 spu_acquire_saved(ctx); 1703 spu_acquire_saved(ctx);
1686 ret = __spufs_event_mask_get(data); 1704 ret = __spufs_event_mask_get(data);
1687 spu_release(ctx); 1705 spu_release_saved(ctx);
1688 return ret; 1706 return ret;
1689} 1707}
1690DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, 1708DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
@@ -1708,7 +1726,7 @@ static u64 spufs_event_status_get(void *data)
1708 1726
1709 spu_acquire_saved(ctx); 1727 spu_acquire_saved(ctx);
1710 ret = __spufs_event_status_get(data); 1728 ret = __spufs_event_status_get(data);
1711 spu_release(ctx); 1729 spu_release_saved(ctx);
1712 return ret; 1730 return ret;
1713} 1731}
1714DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, 1732DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
@@ -1720,7 +1738,7 @@ static void spufs_srr0_set(void *data, u64 val)
1720 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1738 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1721 spu_acquire_saved(ctx); 1739 spu_acquire_saved(ctx);
1722 lscsa->srr0.slot[0] = (u32) val; 1740 lscsa->srr0.slot[0] = (u32) val;
1723 spu_release(ctx); 1741 spu_release_saved(ctx);
1724} 1742}
1725 1743
1726static u64 spufs_srr0_get(void *data) 1744static u64 spufs_srr0_get(void *data)
@@ -1730,7 +1748,7 @@ static u64 spufs_srr0_get(void *data)
1730 u64 ret; 1748 u64 ret;
1731 spu_acquire_saved(ctx); 1749 spu_acquire_saved(ctx);
1732 ret = lscsa->srr0.slot[0]; 1750 ret = lscsa->srr0.slot[0];
1733 spu_release(ctx); 1751 spu_release_saved(ctx);
1734 return ret; 1752 return ret;
1735} 1753}
1736DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 1754DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
@@ -1786,7 +1804,7 @@ static u64 spufs_lslr_get(void *data)
1786 1804
1787 spu_acquire_saved(ctx); 1805 spu_acquire_saved(ctx);
1788 ret = __spufs_lslr_get(data); 1806 ret = __spufs_lslr_get(data);
1789 spu_release(ctx); 1807 spu_release_saved(ctx);
1790 1808
1791 return ret; 1809 return ret;
1792} 1810}
@@ -1850,7 +1868,7 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1850 spin_lock(&ctx->csa.register_lock); 1868 spin_lock(&ctx->csa.register_lock);
1851 ret = __spufs_mbox_info_read(ctx, buf, len, pos); 1869 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1852 spin_unlock(&ctx->csa.register_lock); 1870 spin_unlock(&ctx->csa.register_lock);
1853 spu_release(ctx); 1871 spu_release_saved(ctx);
1854 1872
1855 return ret; 1873 return ret;
1856} 1874}
@@ -1888,7 +1906,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1888 spin_lock(&ctx->csa.register_lock); 1906 spin_lock(&ctx->csa.register_lock);
1889 ret = __spufs_ibox_info_read(ctx, buf, len, pos); 1907 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1890 spin_unlock(&ctx->csa.register_lock); 1908 spin_unlock(&ctx->csa.register_lock);
1891 spu_release(ctx); 1909 spu_release_saved(ctx);
1892 1910
1893 return ret; 1911 return ret;
1894} 1912}
@@ -1929,7 +1947,7 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1929 spin_lock(&ctx->csa.register_lock); 1947 spin_lock(&ctx->csa.register_lock);
1930 ret = __spufs_wbox_info_read(ctx, buf, len, pos); 1948 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1931 spin_unlock(&ctx->csa.register_lock); 1949 spin_unlock(&ctx->csa.register_lock);
1932 spu_release(ctx); 1950 spu_release_saved(ctx);
1933 1951
1934 return ret; 1952 return ret;
1935} 1953}
@@ -1979,7 +1997,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1979 spin_lock(&ctx->csa.register_lock); 1997 spin_lock(&ctx->csa.register_lock);
1980 ret = __spufs_dma_info_read(ctx, buf, len, pos); 1998 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1981 spin_unlock(&ctx->csa.register_lock); 1999 spin_unlock(&ctx->csa.register_lock);
1982 spu_release(ctx); 2000 spu_release_saved(ctx);
1983 2001
1984 return ret; 2002 return ret;
1985} 2003}
@@ -2030,7 +2048,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2030 spin_lock(&ctx->csa.register_lock); 2048 spin_lock(&ctx->csa.register_lock);
2031 ret = __spufs_proxydma_info_read(ctx, buf, len, pos); 2049 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2032 spin_unlock(&ctx->csa.register_lock); 2050 spin_unlock(&ctx->csa.register_lock);
2033 spu_release(ctx); 2051 spu_release_saved(ctx);
2034 2052
2035 return ret; 2053 return ret;
2036} 2054}
@@ -2065,14 +2083,26 @@ static const char *ctx_state_names[] = {
2065}; 2083};
2066 2084
2067static unsigned long long spufs_acct_time(struct spu_context *ctx, 2085static unsigned long long spufs_acct_time(struct spu_context *ctx,
2068 enum spuctx_execution_state state) 2086 enum spu_utilization_state state)
2069{ 2087{
2070 unsigned long time = ctx->stats.times[state]; 2088 struct timespec ts;
2089 unsigned long long time = ctx->stats.times[state];
2071 2090
2072 if (ctx->stats.execution_state == state) 2091 /*
2073 time += jiffies - ctx->stats.tstamp; 2092 * In general, utilization statistics are updated by the controlling
2093 * thread as the spu context moves through various well defined
2094 * state transitions, but if the context is lazily loaded its
2095 * utilization statistics are not updated as the controlling thread
2096 * is not tightly coupled with the execution of the spu context. We
2097 * calculate and apply the time delta from the last recorded state
2098 * of the spu context.
2099 */
2100 if (ctx->spu && ctx->stats.util_state == state) {
2101 ktime_get_ts(&ts);
2102 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2103 }
2074 2104
2075 return jiffies_to_msecs(time); 2105 return time / NSEC_PER_MSEC;
2076} 2106}
2077 2107
2078static unsigned long long spufs_slb_flts(struct spu_context *ctx) 2108static unsigned long long spufs_slb_flts(struct spu_context *ctx)
@@ -2107,11 +2137,11 @@ static int spufs_show_stat(struct seq_file *s, void *private)
2107 spu_acquire(ctx); 2137 spu_acquire(ctx);
2108 seq_printf(s, "%s %llu %llu %llu %llu " 2138 seq_printf(s, "%s %llu %llu %llu %llu "
2109 "%llu %llu %llu %llu %llu %llu %llu %llu\n", 2139 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2110 ctx_state_names[ctx->stats.execution_state], 2140 ctx_state_names[ctx->stats.util_state],
2111 spufs_acct_time(ctx, SPUCTX_UTIL_USER), 2141 spufs_acct_time(ctx, SPU_UTIL_USER),
2112 spufs_acct_time(ctx, SPUCTX_UTIL_SYSTEM), 2142 spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2113 spufs_acct_time(ctx, SPUCTX_UTIL_IOWAIT), 2143 spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2114 spufs_acct_time(ctx, SPUCTX_UTIL_LOADED), 2144 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2115 ctx->stats.vol_ctx_switch, 2145 ctx->stats.vol_ctx_switch,
2116 ctx->stats.invol_ctx_switch, 2146 ctx->stats.invol_ctx_switch,
2117 spufs_slb_flts(ctx), 2147 spufs_slb_flts(ctx),
@@ -2147,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = {
2147 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2177 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2148 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2178 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2149 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2179 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2150 { "signal1", &spufs_signal1_fops, 0666, }, 2180 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2151 { "signal2", &spufs_signal2_fops, 0666, }, 2181 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2152 { "signal1_type", &spufs_signal1_type, 0666, }, 2182 { "signal1_type", &spufs_signal1_type, 0666, },
2153 { "signal2_type", &spufs_signal2_type, 0666, }, 2183 { "signal2_type", &spufs_signal2_type, 0666, },
2154 { "cntl", &spufs_cntl_fops, 0666, }, 2184 { "cntl", &spufs_cntl_fops, 0666, },
@@ -2184,8 +2214,8 @@ struct tree_descr spufs_dir_nosched_contents[] = {
2184 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2214 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2185 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2215 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2186 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2216 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2187 { "signal1", &spufs_signal1_fops, 0666, }, 2217 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2188 { "signal2", &spufs_signal2_fops, 0666, }, 2218 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2189 { "signal1_type", &spufs_signal1_type, 0666, }, 2219 { "signal1_type", &spufs_signal1_type, 0666, },
2190 { "signal2_type", &spufs_signal2_type, 0666, }, 2220 { "signal2_type", &spufs_signal2_type, 0666, },
2191 { "mss", &spufs_mss_fops, 0666, }, 2221 { "mss", &spufs_mss_fops, 0666, },
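
Two notable changes in file.c: the signal1/signal2 entries become write-only (0222) and are backed by _nosched fops variants that omit .read, so reading a signal register no longer forces the context to be scheduled in; and decr_status is no longer a word in the local-store save area but the MFC_CNTL_DECREMENTER_RUNNING bit of the saved priv2 MFC control register, reported as SPU_DECR_STATUS_RUNNING. A standalone model of that bit-backed boolean attribute (both constant values below are illustrative, not the kernel's definitions):

/* Standalone model of the decr_status accessors: the "status" is a
 * single control-register bit, set or cleared on write and mapped to
 * a status constant on read. */
#include <stdio.h>
#include <stdint.h>

#define MFC_CNTL_DECREMENTER_RUNNING	0x8ULL	/* illustrative value */
#define SPU_DECR_STATUS_RUNNING		0x1	/* illustrative value */

static uint64_t mfc_control;

static void decr_status_set(uint64_t val)
{
	if (val)
		mfc_control |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		mfc_control &= ~MFC_CNTL_DECREMENTER_RUNNING;
}

static uint64_t decr_status_get(void)
{
	return (mfc_control & MFC_CNTL_DECREMENTER_RUNNING) ?
		SPU_DECR_STATUS_RUNNING : 0;
}

int main(void)
{
	decr_status_set(1);
	printf("running: %llu\n", (unsigned long long)decr_status_get());
	decr_status_set(0);
	printf("running: %llu\n", (unsigned long long)decr_status_get());
	return 0;
}
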
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 212ea78f9051..71a443253021 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -35,7 +35,9 @@ struct spu_gang *alloc_spu_gang(void)
35 35
36 kref_init(&gang->kref); 36 kref_init(&gang->kref);
37 mutex_init(&gang->mutex); 37 mutex_init(&gang->mutex);
38 mutex_init(&gang->aff_mutex);
38 INIT_LIST_HEAD(&gang->list); 39 INIT_LIST_HEAD(&gang->list);
40 INIT_LIST_HEAD(&gang->aff_list_head);
39 41
40out: 42out:
41 return gang; 43 return gang;
@@ -73,6 +75,10 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
73{ 75{
74 mutex_lock(&gang->mutex); 76 mutex_lock(&gang->mutex);
75 WARN_ON(ctx->gang != gang); 77 WARN_ON(ctx->gang != gang);
78 if (!list_empty(&ctx->aff_list)) {
79 list_del_init(&ctx->aff_list);
80 gang->aff_flags &= ~AFF_OFFSETS_SET;
81 }
76 list_del_init(&ctx->gang_list); 82 list_del_init(&ctx->gang_list);
77 gang->contexts--; 83 gang->contexts--;
78 mutex_unlock(&gang->mutex); 84 mutex_unlock(&gang->mutex);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 7eb4d6cbcb74..b3d0dd118dd0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -316,11 +316,107 @@ out:
316 return ret; 316 return ret;
317} 317}
318 318
319static int spufs_create_context(struct inode *inode, 319static struct spu_context *
320 struct dentry *dentry, 320spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
321 struct vfsmount *mnt, int flags, int mode) 321 struct file *filp)
322{
323 struct spu_context *tmp, *neighbor;
324 int count, node;
325 int aff_supp;
326
327 aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
328 struct spu, cbe_list))->aff_list);
329
330 if (!aff_supp)
331 return ERR_PTR(-EINVAL);
332
333 if (flags & SPU_CREATE_GANG)
334 return ERR_PTR(-EINVAL);
335
336 if (flags & SPU_CREATE_AFFINITY_MEM &&
337 gang->aff_ref_ctx &&
338 gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
339 return ERR_PTR(-EEXIST);
340
341 if (gang->aff_flags & AFF_MERGED)
342 return ERR_PTR(-EBUSY);
343
344 neighbor = NULL;
345 if (flags & SPU_CREATE_AFFINITY_SPU) {
346 if (!filp || filp->f_op != &spufs_context_fops)
347 return ERR_PTR(-EINVAL);
348
349 neighbor = get_spu_context(
350 SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
351
352 if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
353 !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
354 !list_entry(neighbor->aff_list.next, struct spu_context,
355 aff_list)->aff_head)
356 return ERR_PTR(-EEXIST);
357
358 if (gang != neighbor->gang)
359 return ERR_PTR(-EINVAL);
360
361 count = 1;
362 list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
363 count++;
364 if (list_empty(&neighbor->aff_list))
365 count++;
366
367 for (node = 0; node < MAX_NUMNODES; node++) {
368 if ((cbe_spu_info[node].n_spus - atomic_read(
369 &cbe_spu_info[node].reserved_spus)) >= count)
370 break;
371 }
372
373 if (node == MAX_NUMNODES)
374 return ERR_PTR(-EEXIST);
375 }
376
377 return neighbor;
378}
379
380static void
381spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
382 struct spu_context *neighbor)
383{
384 if (flags & SPU_CREATE_AFFINITY_MEM)
385 ctx->gang->aff_ref_ctx = ctx;
386
387 if (flags & SPU_CREATE_AFFINITY_SPU) {
388 if (list_empty(&neighbor->aff_list)) {
389 list_add_tail(&neighbor->aff_list,
390 &ctx->gang->aff_list_head);
391 neighbor->aff_head = 1;
392 }
393
394 if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
395 || list_entry(neighbor->aff_list.next, struct spu_context,
396 aff_list)->aff_head) {
397 list_add(&ctx->aff_list, &neighbor->aff_list);
398 } else {
399 list_add_tail(&ctx->aff_list, &neighbor->aff_list);
400 if (neighbor->aff_head) {
401 neighbor->aff_head = 0;
402 ctx->aff_head = 1;
403 }
404 }
405
406 if (!ctx->gang->aff_ref_ctx)
407 ctx->gang->aff_ref_ctx = ctx;
408 }
409}
410
411static int
412spufs_create_context(struct inode *inode, struct dentry *dentry,
413 struct vfsmount *mnt, int flags, int mode,
414 struct file *aff_filp)
322{ 415{
323 int ret; 416 int ret;
417 int affinity;
418 struct spu_gang *gang;
419 struct spu_context *neighbor;
324 420
325 ret = -EPERM; 421 ret = -EPERM;
326 if ((flags & SPU_CREATE_NOSCHED) && 422 if ((flags & SPU_CREATE_NOSCHED) &&
@@ -336,9 +432,29 @@ static int spufs_create_context(struct inode *inode,
336 if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) 432 if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
337 goto out_unlock; 433 goto out_unlock;
338 434
435 gang = NULL;
436 neighbor = NULL;
437 affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
438 if (affinity) {
439 gang = SPUFS_I(inode)->i_gang;
440 ret = -EINVAL;
441 if (!gang)
442 goto out_unlock;
443 mutex_lock(&gang->aff_mutex);
444 neighbor = spufs_assert_affinity(flags, gang, aff_filp);
445 if (IS_ERR(neighbor)) {
446 ret = PTR_ERR(neighbor);
447 goto out_aff_unlock;
448 }
449 }
450
339 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO); 451 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
340 if (ret) 452 if (ret)
341 goto out_unlock; 453 goto out_aff_unlock;
454
455 if (affinity)
456 spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
457 neighbor);
342 458
343 /* 459 /*
344 * get references for dget and mntget, will be released 460 * get references for dget and mntget, will be released
@@ -352,6 +468,9 @@ static int spufs_create_context(struct inode *inode,
352 goto out; 468 goto out;
353 } 469 }
354 470
471out_aff_unlock:
472 if (affinity)
473 mutex_unlock(&gang->aff_mutex);
355out_unlock: 474out_unlock:
356 mutex_unlock(&inode->i_mutex); 475 mutex_unlock(&inode->i_mutex);
357out: 476out:
@@ -450,7 +569,8 @@ out:
450 569
451static struct file_system_type spufs_type; 570static struct file_system_type spufs_type;
452 571
453long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode) 572long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
573 struct file *filp)
454{ 574{
455 struct dentry *dentry; 575 struct dentry *dentry;
456 int ret; 576 int ret;
@@ -487,7 +607,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
487 dentry, nd->mnt, mode); 607 dentry, nd->mnt, mode);
488 else 608 else
489 return spufs_create_context(nd->dentry->d_inode, 609 return spufs_create_context(nd->dentry->d_inode,
490 dentry, nd->mnt, flags, mode); 610 dentry, nd->mnt, flags, mode, filp);
491 611
492out_dput: 612out_dput:
493 dput(dentry); 613 dput(dentry);
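
spufs_create_context() now understands the two affinity flags. Under the gang's new aff_mutex (added in the gang.c hunk above), spufs_assert_affinity() validates the request: affinity must be supported by the hardware description, only one context per gang may claim memory affinity, the gang must not already be merged, an SPU neighbour must belong to the same gang, and some node must still have enough unreserved SPUs for the whole chain. After spufs_mkdir() succeeds, spufs_set_affinity() links the new context into the gang's aff_list_head next to its neighbour. The standalone model below reproduces only the capacity check; the node data and the exact count are illustrative (the real code also adds one for an unchained neighbour).

/* Standalone model of the capacity check in spufs_assert_affinity():
 * find a node whose unreserved SPUs can hold the whole affinity chain
 * plus the context being created. */
#include <stdio.h>

#define MAX_NUMNODES 2

struct node_info {
	int n_spus;
	int reserved_spus;
};

static int find_node_with_room(const struct node_info *nodes, int count)
{
	int node;

	for (node = 0; node < MAX_NUMNODES; node++)
		if (nodes[node].n_spus - nodes[node].reserved_spus >= count)
			return node;

	return -1;	/* the caller maps this to -EEXIST */
}

int main(void)
{
	struct node_info nodes[MAX_NUMNODES] = { { 8, 6 }, { 8, 2 } };
	int chain_len = 3;		/* contexts already linked by affinity */
	int count = chain_len + 1;	/* plus the one being created */

	printf("node with room: %d\n", find_node_with_room(nodes, count));
	return 0;
}
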
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 58ae13b7de84..0b50fa5cb39d 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,15 +18,17 @@ void spufs_stop_callback(struct spu *spu)
18 wake_up_all(&ctx->stop_wq); 18 wake_up_all(&ctx->stop_wq);
19} 19}
20 20
21static inline int spu_stopped(struct spu_context *ctx, u32 * stat) 21static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
22{ 22{
23 struct spu *spu; 23 struct spu *spu;
24 u64 pte_fault; 24 u64 pte_fault;
25 25
26 *stat = ctx->ops->status_read(ctx); 26 *stat = ctx->ops->status_read(ctx);
27 if (ctx->state != SPU_STATE_RUNNABLE) 27
28 return 1;
29 spu = ctx->spu; 28 spu = ctx->spu;
29 if (ctx->state != SPU_STATE_RUNNABLE ||
30 test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
31 return 1;
30 pte_fault = spu->dsisr & 32 pte_fault = spu->dsisr &
31 (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED); 33 (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
32 return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ? 34 return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
@@ -124,8 +126,10 @@ out:
124 return ret; 126 return ret;
125} 127}
126 128
127static int spu_run_init(struct spu_context *ctx, u32 * npc) 129static int spu_run_init(struct spu_context *ctx, u32 *npc)
128{ 130{
131 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
132
129 if (ctx->flags & SPU_CREATE_ISOLATE) { 133 if (ctx->flags & SPU_CREATE_ISOLATE) {
130 unsigned long runcntl; 134 unsigned long runcntl;
131 135
@@ -151,16 +155,20 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
151 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); 155 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
152 } 156 }
153 157
158 spuctx_switch_state(ctx, SPU_UTIL_USER);
159
154 return 0; 160 return 0;
155} 161}
156 162
157static int spu_run_fini(struct spu_context *ctx, u32 * npc, 163static int spu_run_fini(struct spu_context *ctx, u32 *npc,
158 u32 * status) 164 u32 *status)
159{ 165{
160 int ret = 0; 166 int ret = 0;
161 167
162 *status = ctx->ops->status_read(ctx); 168 *status = ctx->ops->status_read(ctx);
163 *npc = ctx->ops->npc_read(ctx); 169 *npc = ctx->ops->npc_read(ctx);
170
171 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
164 spu_release(ctx); 172 spu_release(ctx);
165 173
166 if (signal_pending(current)) 174 if (signal_pending(current))
@@ -289,10 +297,10 @@ static inline int spu_process_events(struct spu_context *ctx)
289 return ret; 297 return ret;
290} 298}
291 299
292long spufs_run_spu(struct file *file, struct spu_context *ctx, 300long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
293 u32 *npc, u32 *event)
294{ 301{
295 int ret; 302 int ret;
303 struct spu *spu;
296 u32 status; 304 u32 status;
297 305
298 if (mutex_lock_interruptible(&ctx->run_mutex)) 306 if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -328,6 +336,17 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
328 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); 336 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
329 if (unlikely(ret)) 337 if (unlikely(ret))
330 break; 338 break;
339 spu = ctx->spu;
340 if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
341 &ctx->sched_flags))) {
342 if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
343 spu_switch_notify(spu, ctx);
344 continue;
345 }
346 }
347
348 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
349
331 if ((status & SPU_STATUS_STOPPED_BY_STOP) && 350 if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
332 (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { 351 (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
333 ret = spu_process_callback(ctx); 352 ret = spu_process_callback(ctx);
@@ -356,6 +375,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
356 (ctx->state == SPU_STATE_RUNNABLE)) 375 (ctx->state == SPU_STATE_RUNNABLE))
357 ctx->stats.libassist++; 376 ctx->stats.libassist++;
358 377
378
359 ctx->ops->master_stop(ctx); 379 ctx->ops->master_stop(ctx);
360 ret = spu_run_fini(ctx, npc, &status); 380 ret = spu_run_fini(ctx, npc, &status);
361 spu_yield(ctx); 381 spu_yield(ctx);
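
spufs_run_spu() drops its unused struct file argument and now drives the utilization state machine directly: SPU_UTIL_SYSTEM while the kernel sets the context up or handles a stop, SPU_UTIL_USER while the SPU program runs (set at the end of spu_run_init()), and SPU_UTIL_IDLE_LOADED again in spu_run_fini(). In the wait loop, a set SPU_SCHED_NOTIFY_ACTIVE bit, raised by the scheduler's notify_spus_active(), triggers spu_switch_notify() so profilers that register late still see already-running contexts. The tiny standalone model below only traces those state transitions for one run; the transition points in the comments correspond to the functions above.

/* Standalone trace of the accounting states driven by spufs_run_spu(). */
#include <stdio.h>

enum util_state { UTIL_IDLE_LOADED, UTIL_USER, UTIL_SYSTEM };

static const char *names[] = { "idle-loaded", "user", "system" };
static enum util_state state = UTIL_IDLE_LOADED;

static void switch_state(enum util_state next)
{
	printf("%s -> %s\n", names[state], names[next]);
	state = next;	/* the real code also timestamps the change */
}

int main(void)
{
	switch_state(UTIL_SYSTEM);	/* spu_run_init(): load and start */
	switch_state(UTIL_USER);	/* SPU program executing */
	switch_state(UTIL_SYSTEM);	/* stop-and-signal handled by the kernel */
	switch_state(UTIL_USER);	/* resumed */
	switch_state(UTIL_IDLE_LOADED);	/* spu_run_fini(): back to the caller */
	return 0;
}
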
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index e5b4dd1db286..227968b4779d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -51,9 +51,6 @@ struct spu_prio_array {
51 DECLARE_BITMAP(bitmap, MAX_PRIO); 51 DECLARE_BITMAP(bitmap, MAX_PRIO);
52 struct list_head runq[MAX_PRIO]; 52 struct list_head runq[MAX_PRIO];
53 spinlock_t runq_lock; 53 spinlock_t runq_lock;
54 struct list_head active_list[MAX_NUMNODES];
55 struct mutex active_mutex[MAX_NUMNODES];
56 int nr_active[MAX_NUMNODES];
57 int nr_waiting; 54 int nr_waiting;
58}; 55};
59 56
@@ -127,7 +124,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
127 ctx->policy = current->policy; 124 ctx->policy = current->policy;
128 125
129 /* 126 /*
130 * A lot of places that don't hold active_mutex poke into 127 * A lot of places that don't hold list_mutex poke into
131 * cpus_allowed, including grab_runnable_context which 128 * cpus_allowed, including grab_runnable_context which
132 * already holds the runq_lock. So abuse runq_lock 129 * already holds the runq_lock. So abuse runq_lock
133 * to protect this field aswell. 130 * to protect this field aswell.
@@ -141,9 +138,9 @@ void spu_update_sched_info(struct spu_context *ctx)
141{ 138{
142 int node = ctx->spu->node; 139 int node = ctx->spu->node;
143 140
144 mutex_lock(&spu_prio->active_mutex[node]); 141 mutex_lock(&cbe_spu_info[node].list_mutex);
145 __spu_update_sched_info(ctx); 142 __spu_update_sched_info(ctx);
146 mutex_unlock(&spu_prio->active_mutex[node]); 143 mutex_unlock(&cbe_spu_info[node].list_mutex);
147} 144}
148 145
149static int __node_allowed(struct spu_context *ctx, int node) 146static int __node_allowed(struct spu_context *ctx, int node)
@@ -169,56 +166,56 @@ static int node_allowed(struct spu_context *ctx, int node)
169 return rval; 166 return rval;
170} 167}
171 168
172/** 169static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
173 * spu_add_to_active_list - add spu to active list
174 * @spu: spu to add to the active list
175 */
176static void spu_add_to_active_list(struct spu *spu)
177{
178 int node = spu->node;
179
180 mutex_lock(&spu_prio->active_mutex[node]);
181 spu_prio->nr_active[node]++;
182 list_add_tail(&spu->list, &spu_prio->active_list[node]);
183 mutex_unlock(&spu_prio->active_mutex[node]);
184}
185 170
186static void __spu_remove_from_active_list(struct spu *spu) 171void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
187{ 172{
188 list_del_init(&spu->list); 173 blocking_notifier_call_chain(&spu_switch_notifier,
189 spu_prio->nr_active[spu->node]--; 174 ctx ? ctx->object_id : 0, spu);
190} 175}
191 176
192/** 177static void notify_spus_active(void)
193 * spu_remove_from_active_list - remove spu from active list
194 * @spu: spu to remove from the active list
195 */
196static void spu_remove_from_active_list(struct spu *spu)
197{ 178{
198 int node = spu->node; 179 int node;
199
200 mutex_lock(&spu_prio->active_mutex[node]);
201 __spu_remove_from_active_list(spu);
202 mutex_unlock(&spu_prio->active_mutex[node]);
203}
204 180
205static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier); 181 /*
182 * Wake up the active spu_contexts.
183 *
184 * When the awakened processes see their "notify_active" flag is set,
185 * they will call spu_switch_notify();
186 */
187 for_each_online_node(node) {
188 struct spu *spu;
206 189
207static void spu_switch_notify(struct spu *spu, struct spu_context *ctx) 190 mutex_lock(&cbe_spu_info[node].list_mutex);
208{ 191 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
209 blocking_notifier_call_chain(&spu_switch_notifier, 192 if (spu->alloc_state != SPU_FREE) {
210 ctx ? ctx->object_id : 0, spu); 193 struct spu_context *ctx = spu->ctx;
194 set_bit(SPU_SCHED_NOTIFY_ACTIVE,
195 &ctx->sched_flags);
196 mb();
197 wake_up_all(&ctx->stop_wq);
198 }
199 }
200 mutex_unlock(&cbe_spu_info[node].list_mutex);
201 }
211} 202}
212 203
213int spu_switch_event_register(struct notifier_block * n) 204int spu_switch_event_register(struct notifier_block * n)
214{ 205{
215 return blocking_notifier_chain_register(&spu_switch_notifier, n); 206 int ret;
207 ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
208 if (!ret)
209 notify_spus_active();
210 return ret;
216} 211}
212EXPORT_SYMBOL_GPL(spu_switch_event_register);
217 213
218int spu_switch_event_unregister(struct notifier_block * n) 214int spu_switch_event_unregister(struct notifier_block * n)
219{ 215{
220 return blocking_notifier_chain_unregister(&spu_switch_notifier, n); 216 return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
221} 217}
218EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
222 219
223/** 220/**
224 * spu_bind_context - bind spu context to physical spu 221 * spu_bind_context - bind spu context to physical spu
@@ -229,6 +226,12 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
229{ 226{
230 pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid, 227 pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
231 spu->number, spu->node); 228 spu->number, spu->node);
229 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
230
231 if (ctx->flags & SPU_CREATE_NOSCHED)
232 atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
233 if (!list_empty(&ctx->aff_list))
234 atomic_inc(&ctx->gang->aff_sched_count);
232 235
233 ctx->stats.slb_flt_base = spu->stats.slb_flt; 236 ctx->stats.slb_flt_base = spu->stats.slb_flt;
234 ctx->stats.class2_intr_base = spu->stats.class2_intr; 237 ctx->stats.class2_intr_base = spu->stats.class2_intr;
@@ -238,6 +241,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
238 ctx->spu = spu; 241 ctx->spu = spu;
239 ctx->ops = &spu_hw_ops; 242 ctx->ops = &spu_hw_ops;
240 spu->pid = current->pid; 243 spu->pid = current->pid;
244 spu->tgid = current->tgid;
241 spu_associate_mm(spu, ctx->owner); 245 spu_associate_mm(spu, ctx->owner);
242 spu->ibox_callback = spufs_ibox_callback; 246 spu->ibox_callback = spufs_ibox_callback;
243 spu->wbox_callback = spufs_wbox_callback; 247 spu->wbox_callback = spufs_wbox_callback;
@@ -251,7 +255,153 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
251 spu_cpu_affinity_set(spu, raw_smp_processor_id()); 255 spu_cpu_affinity_set(spu, raw_smp_processor_id());
252 spu_switch_notify(spu, ctx); 256 spu_switch_notify(spu, ctx);
253 ctx->state = SPU_STATE_RUNNABLE; 257 ctx->state = SPU_STATE_RUNNABLE;
254 spu_switch_state(spu, SPU_UTIL_SYSTEM); 258
259 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
260}
261
262/*
263 * Must be used with the list_mutex held.
264 */
265static inline int sched_spu(struct spu *spu)
266{
267 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
268
269 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
270}
271
272static void aff_merge_remaining_ctxs(struct spu_gang *gang)
273{
274 struct spu_context *ctx;
275
276 list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
277 if (list_empty(&ctx->aff_list))
278 list_add(&ctx->aff_list, &gang->aff_list_head);
279 }
280 gang->aff_flags |= AFF_MERGED;
281}
282
283static void aff_set_offsets(struct spu_gang *gang)
284{
285 struct spu_context *ctx;
286 int offset;
287
288 offset = -1;
289 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
290 aff_list) {
291 if (&ctx->aff_list == &gang->aff_list_head)
292 break;
293 ctx->aff_offset = offset--;
294 }
295
296 offset = 0;
297 list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
298 if (&ctx->aff_list == &gang->aff_list_head)
299 break;
300 ctx->aff_offset = offset++;
301 }
302
303 gang->aff_flags |= AFF_OFFSETS_SET;
304}
305
306static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
307 int group_size, int lowest_offset)
308{
309 struct spu *spu;
310 int node, n;
311
312 /*
313 * TODO: A better algorithm could be used to find a good spu to be
314 * used as reference location for the ctxs chain.
315 */
316 node = cpu_to_node(raw_smp_processor_id());
317 for (n = 0; n < MAX_NUMNODES; n++, node++) {
318 node = (node < MAX_NUMNODES) ? node : 0;
319 if (!node_allowed(ctx, node))
320 continue;
321 mutex_lock(&cbe_spu_info[node].list_mutex);
322 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
323 if ((!mem_aff || spu->has_mem_affinity) &&
324 sched_spu(spu)) {
325 mutex_unlock(&cbe_spu_info[node].list_mutex);
326 return spu;
327 }
328 }
329 mutex_unlock(&cbe_spu_info[node].list_mutex);
330 }
331 return NULL;
332}
333
334static void aff_set_ref_point_location(struct spu_gang *gang)
335{
336 int mem_aff, gs, lowest_offset;
337 struct spu_context *ctx;
338 struct spu *tmp;
339
340 mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
341 lowest_offset = 0;
342 gs = 0;
343
344 list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
345 gs++;
346
347 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
348 aff_list) {
349 if (&ctx->aff_list == &gang->aff_list_head)
350 break;
351 lowest_offset = ctx->aff_offset;
352 }
353
354 gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
355}
356
357static struct spu *ctx_location(struct spu *ref, int offset, int node)
358{
359 struct spu *spu;
360
361 spu = NULL;
362 if (offset >= 0) {
363 list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
364 BUG_ON(spu->node != node);
365 if (offset == 0)
366 break;
367 if (sched_spu(spu))
368 offset--;
369 }
370 } else {
371 list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
372 BUG_ON(spu->node != node);
373 if (offset == 0)
374 break;
375 if (sched_spu(spu))
376 offset++;
377 }
378 }
379
380 return spu;
381}
382
383/*
384 * has_affinity is called each time a context is going to be scheduled.
385 * It returns whether the context's gang has an affinity reference spu,
386 * computing it on first use.
387static int has_affinity(struct spu_context *ctx)
388{
389 struct spu_gang *gang = ctx->gang;
390
391 if (list_empty(&ctx->aff_list))
392 return 0;
393
394 mutex_lock(&gang->aff_mutex);
395 if (!gang->aff_ref_spu) {
396 if (!(gang->aff_flags & AFF_MERGED))
397 aff_merge_remaining_ctxs(gang);
398 if (!(gang->aff_flags & AFF_OFFSETS_SET))
399 aff_set_offsets(gang);
400 aff_set_ref_point_location(gang);
401 }
402 mutex_unlock(&gang->aff_mutex);
403
404 return gang->aff_ref_spu != NULL;
255} 405}
256 406
257/** 407/**
@@ -263,9 +413,13 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
263{ 413{
264 pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__, 414 pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
265 spu->pid, spu->number, spu->node); 415 spu->pid, spu->number, spu->node);
416 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
266 417
267 spu_switch_state(spu, SPU_UTIL_IDLE); 418 if (spu->ctx->flags & SPU_CREATE_NOSCHED)
268 419 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
420 if (!list_empty(&ctx->aff_list))
421 if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
422 ctx->gang->aff_ref_spu = NULL;
269 spu_switch_notify(spu, NULL); 423 spu_switch_notify(spu, NULL);
270 spu_unmap_mappings(ctx); 424 spu_unmap_mappings(ctx);
271 spu_save(&ctx->csa, spu); 425 spu_save(&ctx->csa, spu);
@@ -278,8 +432,8 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
278 spu->dma_callback = NULL; 432 spu->dma_callback = NULL;
279 spu_associate_mm(spu, NULL); 433 spu_associate_mm(spu, NULL);
280 spu->pid = 0; 434 spu->pid = 0;
435 spu->tgid = 0;
281 ctx->ops = &spu_backing_ops; 436 ctx->ops = &spu_backing_ops;
282 ctx->spu = NULL;
283 spu->flags = 0; 437 spu->flags = 0;
284 spu->ctx = NULL; 438 spu->ctx = NULL;
285 439
@@ -287,6 +441,10 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
287 (spu->stats.slb_flt - ctx->stats.slb_flt_base); 441 (spu->stats.slb_flt - ctx->stats.slb_flt_base);
288 ctx->stats.class2_intr += 442 ctx->stats.class2_intr +=
289 (spu->stats.class2_intr - ctx->stats.class2_intr_base); 443 (spu->stats.class2_intr - ctx->stats.class2_intr_base);
444
445 /* This maps the underlying spu state to idle */
446 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
447 ctx->spu = NULL;
290} 448}
291 449
292/** 450/**
@@ -352,18 +510,41 @@ static void spu_prio_wait(struct spu_context *ctx)
352 510
353static struct spu *spu_get_idle(struct spu_context *ctx) 511static struct spu *spu_get_idle(struct spu_context *ctx)
354{ 512{
355 struct spu *spu = NULL; 513 struct spu *spu;
356 int node = cpu_to_node(raw_smp_processor_id()); 514 int node, n;
357 int n; 515
516 if (has_affinity(ctx)) {
517 node = ctx->gang->aff_ref_spu->node;
358 518
519 mutex_lock(&cbe_spu_info[node].list_mutex);
520 spu = ctx_location(ctx->gang->aff_ref_spu, ctx->aff_offset, node);
521 if (spu && spu->alloc_state == SPU_FREE)
522 goto found;
523 mutex_unlock(&cbe_spu_info[node].list_mutex);
524 return NULL;
525 }
526
527 node = cpu_to_node(raw_smp_processor_id());
359 for (n = 0; n < MAX_NUMNODES; n++, node++) { 528 for (n = 0; n < MAX_NUMNODES; n++, node++) {
360 node = (node < MAX_NUMNODES) ? node : 0; 529 node = (node < MAX_NUMNODES) ? node : 0;
361 if (!node_allowed(ctx, node)) 530 if (!node_allowed(ctx, node))
362 continue; 531 continue;
363 spu = spu_alloc_node(node); 532
364 if (spu) 533 mutex_lock(&cbe_spu_info[node].list_mutex);
365 break; 534 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
535 if (spu->alloc_state == SPU_FREE)
536 goto found;
537 }
538 mutex_unlock(&cbe_spu_info[node].list_mutex);
366 } 539 }
540
541 return NULL;
542
543 found:
544 spu->alloc_state = SPU_USED;
545 mutex_unlock(&cbe_spu_info[node].list_mutex);
546 pr_debug("Got SPU %d %d\n", spu->number, spu->node);
547 spu_init_channels(spu);
367 return spu; 548 return spu;
368} 549}
369 550
@@ -393,15 +574,15 @@ static struct spu *find_victim(struct spu_context *ctx)
393 if (!node_allowed(ctx, node)) 574 if (!node_allowed(ctx, node))
394 continue; 575 continue;
395 576
396 mutex_lock(&spu_prio->active_mutex[node]); 577 mutex_lock(&cbe_spu_info[node].list_mutex);
397 list_for_each_entry(spu, &spu_prio->active_list[node], list) { 578 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
398 struct spu_context *tmp = spu->ctx; 579 struct spu_context *tmp = spu->ctx;
399 580
400 if (tmp->prio > ctx->prio && 581 if (tmp->prio > ctx->prio &&
401 (!victim || tmp->prio > victim->prio)) 582 (!victim || tmp->prio > victim->prio))
402 victim = spu->ctx; 583 victim = spu->ctx;
403 } 584 }
404 mutex_unlock(&spu_prio->active_mutex[node]); 585 mutex_unlock(&cbe_spu_info[node].list_mutex);
405 586
406 if (victim) { 587 if (victim) {
407 /* 588 /*
@@ -426,7 +607,11 @@ static struct spu *find_victim(struct spu_context *ctx)
426 victim = NULL; 607 victim = NULL;
427 goto restart; 608 goto restart;
428 } 609 }
429 spu_remove_from_active_list(spu); 610
611 mutex_lock(&cbe_spu_info[node].list_mutex);
612 cbe_spu_info[node].nr_active--;
613 mutex_unlock(&cbe_spu_info[node].list_mutex);
614
430 spu_unbind_context(spu, victim); 615 spu_unbind_context(spu, victim);
431 victim->stats.invol_ctx_switch++; 616 victim->stats.invol_ctx_switch++;
432 spu->stats.invol_ctx_switch++; 617 spu->stats.invol_ctx_switch++;
@@ -455,8 +640,6 @@ static struct spu *find_victim(struct spu_context *ctx)
455 */ 640 */
456int spu_activate(struct spu_context *ctx, unsigned long flags) 641int spu_activate(struct spu_context *ctx, unsigned long flags)
457{ 642{
458 spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
459
460 do { 643 do {
461 struct spu *spu; 644 struct spu *spu;
462 645
@@ -477,8 +660,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
477 if (!spu && rt_prio(ctx->prio)) 660 if (!spu && rt_prio(ctx->prio))
478 spu = find_victim(ctx); 661 spu = find_victim(ctx);
479 if (spu) { 662 if (spu) {
663 int node = spu->node;
664
665 mutex_lock(&cbe_spu_info[node].list_mutex);
480 spu_bind_context(spu, ctx); 666 spu_bind_context(spu, ctx);
481 spu_add_to_active_list(spu); 667 cbe_spu_info[node].nr_active++;
668 mutex_unlock(&cbe_spu_info[node].list_mutex);
482 return 0; 669 return 0;
483 } 670 }
484 671
@@ -500,7 +687,7 @@ static struct spu_context *grab_runnable_context(int prio, int node)
500 int best; 687 int best;
501 688
502 spin_lock(&spu_prio->runq_lock); 689 spin_lock(&spu_prio->runq_lock);
503 best = sched_find_first_bit(spu_prio->bitmap); 690 best = find_first_bit(spu_prio->bitmap, prio);
504 while (best < prio) { 691 while (best < prio) {
505 struct list_head *rq = &spu_prio->runq[best]; 692 struct list_head *rq = &spu_prio->runq[best];
506 693
@@ -527,11 +714,17 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
527 if (spu) { 714 if (spu) {
528 new = grab_runnable_context(max_prio, spu->node); 715 new = grab_runnable_context(max_prio, spu->node);
529 if (new || force) { 716 if (new || force) {
530 spu_remove_from_active_list(spu); 717 int node = spu->node;
718
719 mutex_lock(&cbe_spu_info[node].list_mutex);
531 spu_unbind_context(spu, ctx); 720 spu_unbind_context(spu, ctx);
721 spu->alloc_state = SPU_FREE;
722 cbe_spu_info[node].nr_active--;
723 mutex_unlock(&cbe_spu_info[node].list_mutex);
724
532 ctx->stats.vol_ctx_switch++; 725 ctx->stats.vol_ctx_switch++;
533 spu->stats.vol_ctx_switch++; 726 spu->stats.vol_ctx_switch++;
534 spu_free(spu); 727
535 if (new) 728 if (new)
536 wake_up(&new->stop_wq); 729 wake_up(&new->stop_wq);
537 } 730 }
@@ -550,21 +743,11 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
550 */ 743 */
551void spu_deactivate(struct spu_context *ctx) 744void spu_deactivate(struct spu_context *ctx)
552{ 745{
553 /*
554 * We must never reach this for a nosched context,
555 * but handle the case gracefull instead of panicing.
556 */
557 if (ctx->flags & SPU_CREATE_NOSCHED) {
558 WARN_ON(1);
559 return;
560 }
561
562 __spu_deactivate(ctx, 1, MAX_PRIO); 746 __spu_deactivate(ctx, 1, MAX_PRIO);
563 spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
564} 747}
565 748
566/** 749/**
567 * spu_yield - yield a physical spu if others are waiting 750 * spu_yield - yield a physical spu if others are waiting
568 * @ctx: spu context to yield 751 * @ctx: spu context to yield
569 * 752 *
570 * Check if there is a higher priority context waiting and if yes 753 * Check if there is a higher priority context waiting and if yes
@@ -575,17 +758,12 @@ void spu_yield(struct spu_context *ctx)
575{ 758{
576 if (!(ctx->flags & SPU_CREATE_NOSCHED)) { 759 if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
577 mutex_lock(&ctx->state_mutex); 760 mutex_lock(&ctx->state_mutex);
578 if (__spu_deactivate(ctx, 0, MAX_PRIO)) 761 __spu_deactivate(ctx, 0, MAX_PRIO);
579 spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
580 else {
581 spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
582 spu_switch_state(ctx->spu, SPU_UTIL_USER);
583 }
584 mutex_unlock(&ctx->state_mutex); 762 mutex_unlock(&ctx->state_mutex);
585 } 763 }
586} 764}
587 765
588static void spusched_tick(struct spu_context *ctx) 766static noinline void spusched_tick(struct spu_context *ctx)
589{ 767{
590 if (ctx->flags & SPU_CREATE_NOSCHED) 768 if (ctx->flags & SPU_CREATE_NOSCHED)
591 return; 769 return;
@@ -596,7 +774,7 @@ static void spusched_tick(struct spu_context *ctx)
596 return; 774 return;
597 775
598 /* 776 /*
599 * Unfortunately active_mutex ranks outside of state_mutex, so 777 * Unfortunately list_mutex ranks outside of state_mutex, so
600 * we have to trylock here. If we fail give the context another 778 * we have to trylock here. If we fail give the context another
601 * tick and try again. 779 * tick and try again.
602 */ 780 */
@@ -606,12 +784,11 @@ static void spusched_tick(struct spu_context *ctx)
606 784
607 new = grab_runnable_context(ctx->prio + 1, spu->node); 785 new = grab_runnable_context(ctx->prio + 1, spu->node);
608 if (new) { 786 if (new) {
609
610 __spu_remove_from_active_list(spu);
611 spu_unbind_context(spu, ctx); 787 spu_unbind_context(spu, ctx);
612 ctx->stats.invol_ctx_switch++; 788 ctx->stats.invol_ctx_switch++;
613 spu->stats.invol_ctx_switch++; 789 spu->stats.invol_ctx_switch++;
614 spu_free(spu); 790 spu->alloc_state = SPU_FREE;
791 cbe_spu_info[spu->node].nr_active--;
615 wake_up(&new->stop_wq); 792 wake_up(&new->stop_wq);
616 /* 793 /*
617 * We need to break out of the wait loop in 794 * We need to break out of the wait loop in
@@ -632,7 +809,7 @@ static void spusched_tick(struct spu_context *ctx)
632 * 809 *
633 * Return the number of tasks currently running or waiting to run. 810 * Return the number of tasks currently running or waiting to run.
634 * 811 *
635 * Note that we don't take runq_lock / active_mutex here. Reading 812 * Note that we don't take runq_lock / list_mutex here. Reading
636 * a single 32bit value is atomic on powerpc, and we don't care 813 * a single 32bit value is atomic on powerpc, and we don't care
637 * about memory ordering issues here. 814 * about memory ordering issues here.
638 */ 815 */
@@ -641,7 +818,7 @@ static unsigned long count_active_contexts(void)
641 int nr_active = 0, node; 818 int nr_active = 0, node;
642 819
643 for (node = 0; node < MAX_NUMNODES; node++) 820 for (node = 0; node < MAX_NUMNODES; node++)
644 nr_active += spu_prio->nr_active[node]; 821 nr_active += cbe_spu_info[node].nr_active;
645 nr_active += spu_prio->nr_waiting; 822 nr_active += spu_prio->nr_waiting;
646 823
647 return nr_active; 824 return nr_active;
@@ -681,19 +858,18 @@ static void spusched_wake(unsigned long data)
681 858
682static int spusched_thread(void *unused) 859static int spusched_thread(void *unused)
683{ 860{
684 struct spu *spu, *next; 861 struct spu *spu;
685 int node; 862 int node;
686 863
687 while (!kthread_should_stop()) { 864 while (!kthread_should_stop()) {
688 set_current_state(TASK_INTERRUPTIBLE); 865 set_current_state(TASK_INTERRUPTIBLE);
689 schedule(); 866 schedule();
690 for (node = 0; node < MAX_NUMNODES; node++) { 867 for (node = 0; node < MAX_NUMNODES; node++) {
691 mutex_lock(&spu_prio->active_mutex[node]); 868 mutex_lock(&cbe_spu_info[node].list_mutex);
692 list_for_each_entry_safe(spu, next, 869 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
693 &spu_prio->active_list[node], 870 if (spu->ctx)
694 list) 871 spusched_tick(spu->ctx);
695 spusched_tick(spu->ctx); 872 mutex_unlock(&cbe_spu_info[node].list_mutex);
696 mutex_unlock(&spu_prio->active_mutex[node]);
697 } 873 }
698 } 874 }
699 875
@@ -751,10 +927,9 @@ int __init spu_sched_init(void)
751 INIT_LIST_HEAD(&spu_prio->runq[i]); 927 INIT_LIST_HEAD(&spu_prio->runq[i]);
752 __clear_bit(i, spu_prio->bitmap); 928 __clear_bit(i, spu_prio->bitmap);
753 } 929 }
754 __set_bit(MAX_PRIO, spu_prio->bitmap);
755 for (i = 0; i < MAX_NUMNODES; i++) { 930 for (i = 0; i < MAX_NUMNODES; i++) {
756 mutex_init(&spu_prio->active_mutex[i]); 931 mutex_init(&cbe_spu_info[i].list_mutex);
757 INIT_LIST_HEAD(&spu_prio->active_list[i]); 932 INIT_LIST_HEAD(&cbe_spu_info[i].spus);
758 } 933 }
759 spin_lock_init(&spu_prio->runq_lock); 934 spin_lock_init(&spu_prio->runq_lock);
760 935
@@ -783,9 +958,9 @@ int __init spu_sched_init(void)
783 return err; 958 return err;
784} 959}
785 960
786void __exit spu_sched_exit(void) 961void spu_sched_exit(void)
787{ 962{
788 struct spu *spu, *tmp; 963 struct spu *spu;
789 int node; 964 int node;
790 965
791 remove_proc_entry("spu_loadavg", NULL); 966 remove_proc_entry("spu_loadavg", NULL);
@@ -794,13 +969,11 @@ void __exit spu_sched_exit(void)
794 kthread_stop(spusched_task); 969 kthread_stop(spusched_task);
795 970
796 for (node = 0; node < MAX_NUMNODES; node++) { 971 for (node = 0; node < MAX_NUMNODES; node++) {
797 mutex_lock(&spu_prio->active_mutex[node]); 972 mutex_lock(&cbe_spu_info[node].list_mutex);
798 list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node], 973 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
799 list) { 974 if (spu->alloc_state != SPU_FREE)
800 list_del_init(&spu->list); 975 spu->alloc_state = SPU_FREE;
801 spu_free(spu); 976 mutex_unlock(&cbe_spu_info[node].list_mutex);
802 }
803 mutex_unlock(&spu_prio->active_mutex[node]);
804 } 977 }
805 kfree(spu_prio); 978 kfree(spu_prio);
806} 979}
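Editor's note: the sched.c changes above drop the private active_list/active_mutex bookkeeping and instead walk cbe_spu_info[node].spus under list_mutex, tracking availability with a per-SPU alloc_state (SPU_FREE/SPU_USED). grab_runnable_context() also bounds its bitmap search with find_first_bit(bitmap, prio) rather than sched_find_first_bit(), which is why spu_sched_init() no longer sets a guard bit at MAX_PRIO. A standalone sketch of the new idle-SPU search shape, with the data structures simplified and no real locking:

/* Sketch of the new spu_get_idle() shape: walk each node's spu list and
 * claim the first entry whose alloc_state is SPU_FREE. The real code
 * holds cbe_spu_info[node].list_mutex around each node's walk. */
#include <stddef.h>
#include <stdio.h>

enum { SPU_FREE, SPU_USED };
#define MAX_NUMNODES  2
#define SPUS_PER_NODE 4

struct spu { int number; int node; int alloc_state; };

static struct spu spus[MAX_NUMNODES][SPUS_PER_NODE];

static struct spu *spu_get_idle(int start_node)
{
	int node = start_node, n, i;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		/* mutex_lock(&cbe_spu_info[node].list_mutex) in the real code */
		for (i = 0; i < SPUS_PER_NODE; i++) {
			struct spu *spu = &spus[node][i];
			if (spu->alloc_state == SPU_FREE) {
				spu->alloc_state = SPU_USED;
				return spu;	/* unlock before returning */
			}
		}
		/* mutex_unlock(&cbe_spu_info[node].list_mutex) */
	}
	return NULL;
}

int main(void)
{
	int node, i;

	for (node = 0; node < MAX_NUMNODES; node++)
		for (i = 0; i < SPUS_PER_NODE; i++)
			spus[node][i] = (struct spu){ .number = i, .node = node,
						      .alloc_state = SPU_USED };
	spus[1][2].alloc_state = SPU_FREE;

	struct spu *spu = spu_get_idle(0);
	printf("got SPU %d on node %d\n", spu ? spu->number : -1,
	       spu ? spu->node : -1);
	return 0;
}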
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 4e19ed7a0756..21a9c952d88b 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -84,13 +84,13 @@ static inline void restore_decr(void)
84 unsigned int decr_running; 84 unsigned int decr_running;
85 unsigned int decr; 85 unsigned int decr;
86 86
87 /* Restore, Step 6: 87 /* Restore, Step 6(moved):
88 * If the LSCSA "decrementer running" flag is set 88 * If the LSCSA "decrementer running" flag is set
89 * then write the SPU_WrDec channel with the 89 * then write the SPU_WrDec channel with the
90 * decrementer value from LSCSA. 90 * decrementer value from LSCSA.
91 */ 91 */
92 offset = LSCSA_QW_OFFSET(decr_status); 92 offset = LSCSA_QW_OFFSET(decr_status);
93 decr_running = regs_spill[offset].slot[0]; 93 decr_running = regs_spill[offset].slot[0] & SPU_DECR_STATUS_RUNNING;
94 if (decr_running) { 94 if (decr_running) {
95 offset = LSCSA_QW_OFFSET(decr); 95 offset = LSCSA_QW_OFFSET(decr);
96 decr = regs_spill[offset].slot[0]; 96 decr = regs_spill[offset].slot[0];
@@ -318,10 +318,10 @@ int main()
318 build_dma_list(lscsa_ea); /* Step 3. */ 318 build_dma_list(lscsa_ea); /* Step 3. */
319 restore_upper_240kb(lscsa_ea); /* Step 4. */ 319 restore_upper_240kb(lscsa_ea); /* Step 4. */
320 /* Step 5: done by 'exit'. */ 320 /* Step 5: done by 'exit'. */
321 restore_decr(); /* Step 6. */
322 enqueue_putllc(lscsa_ea); /* Step 7. */ 321 enqueue_putllc(lscsa_ea); /* Step 7. */
323 set_tag_update(); /* Step 8. */ 322 set_tag_update(); /* Step 8. */
324 read_tag_status(); /* Step 9. */ 323 read_tag_status(); /* Step 9. */
324 restore_decr(); /* moved Step 6. */
325 read_llar_status(); /* Step 10. */ 325 read_llar_status(); /* Step 10. */
326 write_ppu_mb(); /* Step 11. */ 326 write_ppu_mb(); /* Step 11. */
327 write_ppuint_mb(); /* Step 12. */ 327 write_ppuint_mb(); /* Step 12. */
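Editor's note: the spu_restore.c hunk above now masks the saved decr_status word with SPU_DECR_STATUS_RUNNING instead of treating any non-zero value as "running", and the matching switch.c hunk later in this patch encodes both a RUNNING and a WRAPPED bit into that slot, setting WRAPPED when the time spent switched out exceeds the remaining decrementer count. A minimal sketch of that encoding, assuming for illustration that the two flags occupy bits 0 and 1 (the real values live in asm/spu.h and are not shown in this patch):

/* Sketch: decr_status as a flag word shared between the PPE-side
 * save/restore code and the SPU-side restore program. Bit values are
 * assumptions for illustration only. */
#include <stdio.h>

#define SPU_DECR_STATUS_RUNNING 0x1	/* assumption */
#define SPU_DECR_STATUS_WRAPPED 0x2	/* assumption */

/* PPE side (setup_decr): re-arm the decrementer, flag a wrap if the
 * context was switched out longer than the remaining count. */
static unsigned setup_decr(unsigned *decr, unsigned delta, int was_running)
{
	unsigned status = 0;

	if (was_running) {
		status = SPU_DECR_STATUS_RUNNING;
		if (*decr < delta)
			status |= SPU_DECR_STATUS_WRAPPED;
		*decr -= delta;	/* wraps, matching the decrementer hardware */
	}
	return status;
}

/* SPU side (restore_decr): only restart the decrementer when the
 * RUNNING bit is set; the WRAPPED bit is consumed elsewhere. */
static int restore_decr(unsigned status)
{
	return (status & SPU_DECR_STATUS_RUNNING) != 0;
}

int main(void)
{
	unsigned decr = 100;
	unsigned status = setup_decr(&decr, 250, 1);

	printf("status=%#x restart=%d decr=%u\n",
	       status, restore_decr(status), decr);
	return 0;
}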
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
index 15183d209b58..f383b027e8bf 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
@@ -10,7 +10,7 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
100x24fd8081, 100x24fd8081,
110x1cd80081, 110x1cd80081,
120x33001180, 120x33001180,
130x42030003, 130x42034003,
140x33800284, 140x33800284,
150x1c010204, 150x1c010204,
160x40200000, 160x40200000,
@@ -24,22 +24,22 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
240x23fffd84, 240x23fffd84,
250x1c100183, 250x1c100183,
260x217ffa85, 260x217ffa85,
270x3080a000, 270x3080b000,
280x3080a201, 280x3080b201,
290x3080a402, 290x3080b402,
300x3080a603, 300x3080b603,
310x3080a804, 310x3080b804,
320x3080aa05, 320x3080ba05,
330x3080ac06, 330x3080bc06,
340x3080ae07, 340x3080be07,
350x3080b008, 350x3080c008,
360x3080b209, 360x3080c209,
370x3080b40a, 370x3080c40a,
380x3080b60b, 380x3080c60b,
390x3080b80c, 390x3080c80c,
400x3080ba0d, 400x3080ca0d,
410x3080bc0e, 410x3080cc0e,
420x3080be0f, 420x3080ce0f,
430x00003ffc, 430x00003ffc,
440x00000000, 440x00000000,
450x00000000, 450x00000000,
@@ -48,19 +48,18 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
480x3ec00083, 480x3ec00083,
490xb0a14103, 490xb0a14103,
500x01a00204, 500x01a00204,
510x3ec10082, 510x3ec10083,
520x4202800e, 520x4202c002,
530x04000703, 530xb0a14203,
540xb0a14202, 540x21a00802,
550x21a00803, 550x3fbf028a,
560x3fbf028d, 560x3f20050a,
570x3f20068d, 570x3fbe0502,
580x3fbe0682,
590x3fe30102, 580x3fe30102,
600x21a00882, 590x21a00882,
610x3f82028f, 600x3f82028b,
620x3fe3078f, 610x3fe3058b,
630x3fbf0784, 620x3fbf0584,
640x3f200204, 630x3f200204,
650x3fbe0204, 640x3fbe0204,
660x3fe30204, 650x3fe30204,
@@ -75,252 +74,285 @@ static unsigned int spu_restore_code[] __attribute__((__aligned__(128))) = {
750x21a00083, 740x21a00083,
760x40800082, 750x40800082,
770x21a00b02, 760x21a00b02,
780x10002818, 770x10002612,
790x42a00002, 780x42a00003,
800x32800007, 790x42074006,
810x4207000c, 800x1800c204,
820x18008208, 810x40a00008,
830x40a0000b, 820x40800789,
840x4080020a, 830x1c010305,
850x40800709, 840x34000302,
860x00200000,
870x42070002,
880x3ac30384,
890x1cffc489, 850x1cffc489,
900x00200000, 860x3ec00303,
910x18008383, 870x3ec00287,
920x38830382, 880xb0408403,
930x4cffc486, 890x24000302,
940x3ac28185, 900x34000282,
950xb0408584, 910x1c020306,
960x28830382, 920xb0408207,
970x1c020387, 930x18020204,
980x38828182, 940x24000282,
990xb0408405, 950x217ffa09,
1000x1802c408, 960x04000402,
1010x28828182, 970x21a00802,
1020x217ff886, 980x3fbe0504,
1030x04000583, 990x3fe30204,
1040x21a00803, 1000x21a00884,
1050x3fbe0682, 1010x42074002,
1060x3fe30102, 1020x21a00902,
1070x04000106, 1030x40803c03,
1080x21a00886, 1040x21a00983,
1090x04000603, 1050x04000485,
1100x21a00903, 1060x21a00a05,
1110x40803c02,
1120x21a00982,
1130x40800003,
1140x04000184,
1150x21a00a04,
1160x40802202, 1070x40802202,
1170x21a00a82, 1080x21a00a82,
1180x42028005, 1090x21a00805,
1190x34208702, 1100x21a00884,
1200x21002282, 1110x3fbf0582,
1210x21a00804,
1220x21a00886,
1230x3fbf0782,
1240x3f200102, 1120x3f200102,
1250x3fbe0102, 1130x3fbe0102,
1260x3fe30102, 1140x3fe30102,
1270x21a00902, 1150x21a00902,
1280x40804003, 1160x40804003,
1290x21a00983, 1170x21a00983,
1300x21a00a04, 1180x21a00a05,
1310x40805a02, 1190x40805a02,
1320x21a00a82, 1200x21a00a82,
1330x40800083, 1210x40800083,
1340x21a00b83, 1220x21a00b83,
1350x01a00c02, 1230x01a00c02,
1360x01a00d83, 1240x30809c03,
1370x3420c282, 1250x34000182,
1260x14004102,
1270x21002082,
1280x01a00d82,
1290x3080a003,
1300x34000182,
1380x21a00e02, 1310x21a00e02,
1390x34210283, 1320x3080a203,
1400x21a00f03, 1330x34000182,
1410x34200284, 1340x21a00f02,
1420x77400200, 1350x3080a403,
1430x3421c282, 1360x34000182,
1370x77400100,
1380x3080a603,
1390x34000182,
1440x21a00702, 1400x21a00702,
1450x34218283, 1410x3080a803,
1460x21a00083, 1420x34000182,
1470x34214282, 1430x21a00082,
1440x3080aa03,
1450x34000182,
1480x21a00b02, 1460x21a00b02,
1490x4200480c, 1470x4020007f,
1500x00200000, 1480x3080ae02,
1510x1c010286, 1490x42004805,
1520x34220284, 1500x3080ac04,
1530x34220302, 1510x34000103,
1540x0f608203, 1520x34000202,
1550x5c024204, 1530x1cffc183,
1560x3b81810b, 1540x3b810106,
1570x42013c02, 1550x0f608184,
1580x00200000, 1560x42013802,
1590x18008185, 1570x5c020183,
1600x38808183, 1580x38810102,
1610x3b814182, 1590x3b810102,
1620x21004e84, 1600x21000e83,
1630x4020007f, 1610x4020007f,
1640x35000100, 1620x35000100,
1650x000004e0, 1630x00000470,
1660x000002a0, 1640x000002f8,
1670x000002e8, 1650x00000430,
1680x00000428,
1690x00000360, 1660x00000360,
1700x000002e8, 1670x000002f8,
1710x000004a0,
1720x00000468,
1730x000003c8, 1680x000003c8,
1690x000004a8,
1700x00000298,
1740x00000360, 1710x00000360,
1720x00200000,
1750x409ffe02, 1730x409ffe02,
1760x30801203, 1740x30801203,
1770x40800204, 1750x40800208,
1780x3ec40085, 1760x3ec40084,
1790x10009c09, 1770x40800407,
1800x3ac10606, 1780x3ac20289,
1810xb060c105, 1790xb060c104,
1820x4020007f, 1800x3ac1c284,
1830x4020007f,
1840x20801203, 1810x20801203,
1850x38810602, 1820x38820282,
1860xb0408586, 1830x41004003,
1870x28810602, 1840xb0408189,
1880x32004180, 1850x28820282,
1890x34204702, 1860x3881c282,
1870xb0408304,
1880x2881c282,
1890x00400000,
1900x40800003,
1910x35000000,
1920x30809e03,
1930x34000182,
1900x21a00382, 1940x21a00382,
1910x4020007f, 1950x4020007f,
1920x327fdc80, 1960x327fde00,
1930x409ffe02, 1970x409ffe02,
1940x30801203, 1980x30801203,
1950x40800204, 1990x40800206,
1960x3ec40087, 2000x3ec40084,
1970x40800405, 2010x40800407,
1980x00200000, 2020x40800608,
1990x40800606, 2030x3ac1828a,
2000x3ac10608, 2040x3ac20289,
2010x3ac14609, 2050xb060c104,
2020x3ac1860a, 2060x3ac1c284,
2030xb060c107,
2040x20801203, 2070x20801203,
2080x38818282,
2050x41004003, 2090x41004003,
2060x38810602, 2100xb040818a,
2070x4020007f, 2110x10005b0b,
2080xb0408188, 2120x41201003,
2090x4020007f, 2130x28818282,
2100x28810602, 2140x3881c282,
2110x41201002, 2150xb0408184,
2120x38814603,
2130x10009c09,
2140xb060c109,
2150x4020007f,
2160x28814603,
2170x41193f83, 2160x41193f83,
2180x38818602,
2190x60ffc003, 2170x60ffc003,
2200xb040818a, 2180x2881c282,
2210x28818602, 2190x38820282,
2220x32003080, 2200xb0408189,
2210x28820282,
2220x327fef80,
2230x409ffe02, 2230x409ffe02,
2240x30801203, 2240x30801203,
2250x40800204, 2250x40800207,
2260x3ec40087, 2260x3ec40086,
2270x41201008, 2270x4120100b,
2280x10009c14, 2280x10005b14,
2290x40800405, 2290x40800404,
2300x3ac10609, 2300x3ac1c289,
2310x40800606, 2310x40800608,
2320x3ac1460a, 2320xb060c106,
2330xb060c107, 2330x3ac10286,
2340x3ac1860b, 2340x3ac2028a,
2350x20801203, 2350x20801203,
2360x38810602, 2360x3881c282,
2370xb0408409,
2380x28810602,
2390x38814603,
2400xb060c40a,
2410x4020007f,
2420x28814603,
2430x41193f83, 2370x41193f83,
2440x38818602,
2450x60ffc003, 2380x60ffc003,
2460xb040818b, 2390xb0408589,
2470x28818602, 2400x2881c282,
2480x32002380, 2410x38810282,
2490x409ffe02, 2420xb0408586,
2500x30801204, 2430x28810282,
2510x40800205, 2440x38820282,
2520x3ec40083, 2450xb040818a,
2530x40800406, 2460x28820282,
2540x3ac14607,
2550x3ac18608,
2560xb0810103,
2570x41004002,
2580x20801204,
2590x4020007f,
2600x38814603,
2610x10009c0b,
2620xb060c107,
2630x4020007f,
2640x4020007f,
2650x28814603,
2660x38818602,
2670x4020007f,
2680x4020007f, 2470x4020007f,
2690xb0408588, 2480x327fe280,
2700x28818602, 2490x409ffe02,
2500x30801203,
2510x40800207,
2520x3ec40084,
2530x40800408,
2540x10005b14,
2550x40800609,
2560x3ac1c28a,
2570x3ac2028b,
2580xb060c104,
2590x3ac24284,
2600x20801203,
2610x41201003,
2620x3881c282,
2630xb040830a,
2640x2881c282,
2650x38820282,
2660xb040818b,
2670x41193f83,
2680x60ffc003,
2690x28820282,
2700x38824282,
2710xb0408184,
2720x28824282,
2710x4020007f, 2730x4020007f,
2720x32001780, 2740x327fd580,
2730x409ffe02, 2750x409ffe02,
2740x1000640e, 2760x1000658e,
2750x40800204, 2770x40800206,
2760x30801203, 2780x30801203,
2770x40800405, 2790x40800407,
2780x3ec40087, 2800x3ec40084,
2790x40800606, 2810x40800608,
2800x3ac10608, 2820x3ac1828a,
2810x3ac14609, 2830x3ac20289,
2820x3ac1860a, 2840xb060c104,
2830xb060c107, 2850x3ac1c284,
2840x20801203, 2860x20801203,
2850x413d8003, 2870x413d8003,
2860x38810602, 2880x38818282,
2870x4020007f, 2890x4020007f,
2880x327fd780, 2900x327fd800,
2890x409ffe02, 2910x409ffe03,
2900x10007f0c, 2920x30801202,
2910x40800205, 2930x40800207,
2920x30801204, 2940x3ec40084,
2930x40800406, 2950x10005b09,
2940x3ec40083, 2960x3ac1c288,
2950x3ac14607, 2970xb0408184,
2960x3ac18608,
2970xb0810103,
2980x413d8002,
2990x20801204,
3000x38814603,
3010x4020007f, 2980x4020007f,
3020x327feb80, 2990x4020007f,
3000x20801202,
3010x3881c282,
3020xb0408308,
3030x2881c282,
3040x327fc680,
3030x409ffe02, 3050x409ffe02,
3060x1000588b,
3070x40800208,
3040x30801203, 3080x30801203,
3050x40800204, 3090x40800407,
3060x3ec40087, 3100x3ec40084,
3070x40800405, 3110x3ac20289,
3080x1000650a, 3120xb060c104,
3090x40800606, 3130x3ac1c284,
3100x3ac10608,
3110x3ac14609,
3120x3ac1860a,
3130xb060c107,
3140x20801203, 3140x20801203,
3150x38810602, 3150x413d8003,
3160xb0408588, 3160x38820282,
3170x4020007f, 3170x327fbd80,
3180x327fc980, 3180x00200000,
3190x00400000, 3190x00000da0,
3200x40800003, 3200x00000000,
3210x4020007f, 3210x00000000,
3220x35000000, 3220x00000000,
3230x00000d90,
3240x00000000,
3250x00000000,
3260x00000000,
3270x00000db0,
3280x00000000,
3290x00000000,
3300x00000000,
3310x00000dc0,
3320x00000000,
3330x00000000,
3340x00000000,
3350x00000d80,
3360x00000000,
3370x00000000,
3380x00000000,
3390x00000df0,
3400x00000000,
3410x00000000,
3420x00000000,
3430x00000de0,
3440x00000000,
3450x00000000,
3460x00000000,
3470x00000dd0,
3480x00000000,
3490x00000000,
3500x00000000,
3510x00000e04,
3520x00000000,
3530x00000000,
3230x00000000, 3540x00000000,
3550x00000e00,
3240x00000000, 3560x00000000,
3250x00000000, 3570x00000000,
3260x00000000, 3580x00000000,
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 08b3530288ac..8b20c0c1556f 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -40,17 +40,13 @@ enum {
40struct spu_context_ops; 40struct spu_context_ops;
41struct spu_gang; 41struct spu_gang;
42 42
43/* 43enum {
44 * This is the state for spu utilization reporting to userspace. 44 SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */
45 * Because this state is visible to userspace it must never change and needs 45};
46 * to be kept strictly separate from any internal state kept by the kernel. 46
47 */ 47/* ctx->sched_flags */
48enum spuctx_execution_state { 48enum {
49 SPUCTX_UTIL_USER = 0, 49 SPU_SCHED_NOTIFY_ACTIVE,
50 SPUCTX_UTIL_SYSTEM,
51 SPUCTX_UTIL_IOWAIT,
52 SPUCTX_UTIL_LOADED,
53 SPUCTX_UTIL_MAX
54}; 50};
55 51
56struct spu_context { 52struct spu_context {
@@ -89,6 +85,8 @@ struct spu_context {
89 85
90 struct list_head gang_list; 86 struct list_head gang_list;
91 struct spu_gang *gang; 87 struct spu_gang *gang;
88 struct kref *prof_priv_kref;
89 void ( * prof_priv_release) (struct kref *kref);
92 90
93 /* owner thread */ 91 /* owner thread */
94 pid_t tid; 92 pid_t tid;
@@ -104,9 +102,9 @@ struct spu_context {
104 /* statistics */ 102 /* statistics */
105 struct { 103 struct {
106 /* updates protected by ctx->state_mutex */ 104 /* updates protected by ctx->state_mutex */
107 enum spuctx_execution_state execution_state; 105 enum spu_utilization_state util_state;
108 unsigned long tstamp; /* time of last ctx switch */ 106 unsigned long long tstamp; /* time of last state switch */
109 unsigned long times[SPUCTX_UTIL_MAX]; 107 unsigned long long times[SPU_UTIL_MAX];
110 unsigned long long vol_ctx_switch; 108 unsigned long long vol_ctx_switch;
111 unsigned long long invol_ctx_switch; 109 unsigned long long invol_ctx_switch;
112 unsigned long long min_flt; 110 unsigned long long min_flt;
@@ -118,6 +116,10 @@ struct spu_context {
118 unsigned long long class2_intr_base; /* # at last ctx switch */ 116 unsigned long long class2_intr_base; /* # at last ctx switch */
119 unsigned long long libassist; 117 unsigned long long libassist;
120 } stats; 118 } stats;
119
120 struct list_head aff_list;
121 int aff_head;
122 int aff_offset;
121}; 123};
122 124
123struct spu_gang { 125struct spu_gang {
@@ -125,8 +127,19 @@ struct spu_gang {
125 struct mutex mutex; 127 struct mutex mutex;
126 struct kref kref; 128 struct kref kref;
127 int contexts; 129 int contexts;
130
131 struct spu_context *aff_ref_ctx;
132 struct list_head aff_list_head;
133 struct mutex aff_mutex;
134 int aff_flags;
135 struct spu *aff_ref_spu;
136 atomic_t aff_sched_count;
128}; 137};
129 138
139/* Flag bits for spu_gang aff_flags */
140#define AFF_OFFSETS_SET 1
141#define AFF_MERGED 2
142
130struct mfc_dma_command { 143struct mfc_dma_command {
131 int32_t pad; /* reserved */ 144 int32_t pad; /* reserved */
132 uint32_t lsa; /* local storage address */ 145 uint32_t lsa; /* local storage address */
@@ -190,10 +203,9 @@ extern struct tree_descr spufs_dir_contents[];
190extern struct tree_descr spufs_dir_nosched_contents[]; 203extern struct tree_descr spufs_dir_nosched_contents[];
191 204
192/* system call implementation */ 205/* system call implementation */
193long spufs_run_spu(struct file *file, 206long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
194 struct spu_context *ctx, u32 *npc, u32 *status); 207long spufs_create(struct nameidata *nd, unsigned int flags,
195long spufs_create(struct nameidata *nd, 208 mode_t mode, struct file *filp);
196 unsigned int flags, mode_t mode);
197extern const struct file_operations spufs_context_fops; 209extern const struct file_operations spufs_context_fops;
198 210
199/* gang management */ 211/* gang management */
@@ -206,6 +218,9 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
206/* fault handling */ 218/* fault handling */
207int spufs_handle_class1(struct spu_context *ctx); 219int spufs_handle_class1(struct spu_context *ctx);
208 220
221/* affinity */
222struct spu *affinity_check(struct spu_context *ctx);
223
209/* context management */ 224/* context management */
210extern atomic_t nr_spu_contexts; 225extern atomic_t nr_spu_contexts;
211static inline void spu_acquire(struct spu_context *ctx) 226static inline void spu_acquire(struct spu_context *ctx)
@@ -227,15 +242,17 @@ void spu_unmap_mappings(struct spu_context *ctx);
227void spu_forget(struct spu_context *ctx); 242void spu_forget(struct spu_context *ctx);
228int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); 243int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
229void spu_acquire_saved(struct spu_context *ctx); 244void spu_acquire_saved(struct spu_context *ctx);
245void spu_release_saved(struct spu_context *ctx);
230 246
231int spu_activate(struct spu_context *ctx, unsigned long flags); 247int spu_activate(struct spu_context *ctx, unsigned long flags);
232void spu_deactivate(struct spu_context *ctx); 248void spu_deactivate(struct spu_context *ctx);
233void spu_yield(struct spu_context *ctx); 249void spu_yield(struct spu_context *ctx);
250void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
234void spu_set_timeslice(struct spu_context *ctx); 251void spu_set_timeslice(struct spu_context *ctx);
235void spu_update_sched_info(struct spu_context *ctx); 252void spu_update_sched_info(struct spu_context *ctx);
236void __spu_update_sched_info(struct spu_context *ctx); 253void __spu_update_sched_info(struct spu_context *ctx);
237int __init spu_sched_init(void); 254int __init spu_sched_init(void);
238void __exit spu_sched_exit(void); 255void spu_sched_exit(void);
239 256
240extern char *isolated_loader; 257extern char *isolated_loader;
241 258
@@ -293,30 +310,34 @@ extern int spufs_coredump_num_notes;
293 * line. 310 * line.
294 */ 311 */
295static inline void spuctx_switch_state(struct spu_context *ctx, 312static inline void spuctx_switch_state(struct spu_context *ctx,
296 enum spuctx_execution_state new_state) 313 enum spu_utilization_state new_state)
297{ 314{
298 WARN_ON(!mutex_is_locked(&ctx->state_mutex)); 315 unsigned long long curtime;
299 316 signed long long delta;
300 if (ctx->stats.execution_state != new_state) { 317 struct timespec ts;
301 unsigned long curtime = jiffies; 318 struct spu *spu;
302 319 enum spu_utilization_state old_state;
303 ctx->stats.times[ctx->stats.execution_state] +=
304 curtime - ctx->stats.tstamp;
305 ctx->stats.tstamp = curtime;
306 ctx->stats.execution_state = new_state;
307 }
308}
309 320
310static inline void spu_switch_state(struct spu *spu, 321 ktime_get_ts(&ts);
311 enum spuctx_execution_state new_state) 322 curtime = timespec_to_ns(&ts);
312{ 323 delta = curtime - ctx->stats.tstamp;
313 if (spu->stats.utilization_state != new_state) {
314 unsigned long curtime = jiffies;
315 324
316 spu->stats.times[spu->stats.utilization_state] += 325 WARN_ON(!mutex_is_locked(&ctx->state_mutex));
317 curtime - spu->stats.tstamp; 326 WARN_ON(delta < 0);
327
328 spu = ctx->spu;
329 old_state = ctx->stats.util_state;
330 ctx->stats.util_state = new_state;
331 ctx->stats.tstamp = curtime;
332
333 /*
334 * Update the physical SPU utilization statistics.
335 */
336 if (spu) {
337 ctx->stats.times[old_state] += delta;
338 spu->stats.times[old_state] += delta;
339 spu->stats.util_state = new_state;
318 spu->stats.tstamp = curtime; 340 spu->stats.tstamp = curtime;
319 spu->stats.utilization_state = new_state;
320 } 341 }
321} 342}
322 343
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 9c506ba08cdc..27ffdae98e5a 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -180,7 +180,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
180 case MFC_CNTL_SUSPEND_COMPLETE: 180 case MFC_CNTL_SUSPEND_COMPLETE:
181 if (csa) { 181 if (csa) {
182 csa->priv2.mfc_control_RW = 182 csa->priv2.mfc_control_RW =
183 in_be64(&priv2->mfc_control_RW) | 183 MFC_CNTL_SUSPEND_MASK |
184 MFC_CNTL_SUSPEND_DMA_QUEUE; 184 MFC_CNTL_SUSPEND_DMA_QUEUE;
185 } 185 }
186 break; 186 break;
@@ -190,9 +190,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
190 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == 190 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
191 MFC_CNTL_SUSPEND_COMPLETE); 191 MFC_CNTL_SUSPEND_COMPLETE);
192 if (csa) { 192 if (csa) {
193 csa->priv2.mfc_control_RW = 193 csa->priv2.mfc_control_RW = 0;
194 in_be64(&priv2->mfc_control_RW) &
195 ~MFC_CNTL_SUSPEND_DMA_QUEUE;
196 } 194 }
197 break; 195 break;
198 } 196 }
@@ -251,16 +249,8 @@ static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
251 * Read MFC_CNTL[Ds]. Update saved copy of 249 * Read MFC_CNTL[Ds]. Update saved copy of
252 * CSA.MFC_CNTL[Ds]. 250 * CSA.MFC_CNTL[Ds].
253 */ 251 */
254 if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) { 252 csa->priv2.mfc_control_RW |=
255 csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; 253 in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
256 csa->suspend_time = get_cycles();
257 out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
258 eieio();
259 csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
260 eieio();
261 } else {
262 csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
263 }
264} 254}
265 255
266static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) 256static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -271,7 +261,8 @@ static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
271 * Write MFC_CNTL[Dh] set to a '1' to halt 261 * Write MFC_CNTL[Dh] set to a '1' to halt
272 * the decrementer. 262 * the decrementer.
273 */ 263 */
274 out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED); 264 out_be64(&priv2->mfc_control_RW,
265 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
275 eieio(); 266 eieio();
276} 267}
277 268
@@ -615,7 +606,7 @@ static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
615static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) 606static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
616{ 607{
617 struct spu_priv2 __iomem *priv2 = spu->priv2; 608 struct spu_priv2 __iomem *priv2 = spu->priv2;
618 u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 609 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
619 int i; 610 int i;
620 611
621 /* Save, Step 42: 612 /* Save, Step 42:
@@ -626,7 +617,7 @@ static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
626 csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); 617 csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);
627 618
628 /* Save the following CH: [0,3,4,24,25,27] */ 619 /* Save the following CH: [0,3,4,24,25,27] */
629 for (i = 0; i < 7; i++) { 620 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
630 idx = ch_indices[i]; 621 idx = ch_indices[i];
631 out_be64(&priv2->spu_chnlcntptr_RW, idx); 622 out_be64(&priv2->spu_chnlcntptr_RW, idx);
632 eieio(); 623 eieio();
@@ -983,13 +974,13 @@ static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
983 */ 974 */
984} 975}
985 976
986static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) 977static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
978 struct spu *spu)
987{ 979{
988 struct spu_priv2 __iomem *priv2 = spu->priv2; 980 struct spu_priv2 __iomem *priv2 = spu->priv2;
989 981
990 /* Restore, Step 7: 982 /* Restore, Step 7:
991 * Restore, Step 47. 983 * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
992 * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
993 * the queue and halt the decrementer. 984 * the queue and halt the decrementer.
994 */ 985 */
995 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | 986 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
@@ -1090,7 +1081,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1090static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) 1081static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
1091{ 1082{
1092 struct spu_priv2 __iomem *priv2 = spu->priv2; 1083 struct spu_priv2 __iomem *priv2 = spu->priv2;
1093 u64 ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 1084 u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1094 u64 idx; 1085 u64 idx;
1095 int i; 1086 int i;
1096 1087
@@ -1102,7 +1093,7 @@ static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
1102 out_be64(&priv2->spu_chnldata_RW, 0UL); 1093 out_be64(&priv2->spu_chnldata_RW, 0UL);
1103 1094
1104 /* Reset the following CH: [0,3,4,24,25,27] */ 1095 /* Reset the following CH: [0,3,4,24,25,27] */
1105 for (i = 0; i < 7; i++) { 1096 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1106 idx = ch_indices[i]; 1097 idx = ch_indices[i];
1107 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1098 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1108 eieio(); 1099 eieio();
@@ -1289,7 +1280,15 @@ static inline void setup_decr(struct spu_state *csa, struct spu *spu)
1289 cycles_t resume_time = get_cycles(); 1280 cycles_t resume_time = get_cycles();
1290 cycles_t delta_time = resume_time - csa->suspend_time; 1281 cycles_t delta_time = resume_time - csa->suspend_time;
1291 1282
1283 csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
1284 if (csa->lscsa->decr.slot[0] < delta_time) {
1285 csa->lscsa->decr_status.slot[0] |=
1286 SPU_DECR_STATUS_WRAPPED;
1287 }
1288
1292 csa->lscsa->decr.slot[0] -= delta_time; 1289 csa->lscsa->decr.slot[0] -= delta_time;
1290 } else {
1291 csa->lscsa->decr_status.slot[0] = 0;
1293 } 1292 }
1294} 1293}
1295 1294
@@ -1398,6 +1397,18 @@ static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
1398 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); 1397 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1399} 1398}
1400 1399
1400static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
1401{
1402 struct spu_priv2 __iomem *priv2 = spu->priv2;
1403
1404 /* Restore, Step 47.
1405 * Write MFC_Cntl[Sc,Sm]='1','0' to suspend
1406 * the queue.
1407 */
1408 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
1409 eieio();
1410}
1411
1401static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) 1412static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1402{ 1413{
1403 /* Restore, Step 49: 1414 /* Restore, Step 49:
@@ -1548,10 +1559,10 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1548 * "wrapped" flag is set, OR in a '1' to 1559 * "wrapped" flag is set, OR in a '1' to
1549 * CSA.SPU_Event_Status[Tm]. 1560 * CSA.SPU_Event_Status[Tm].
1550 */ 1561 */
1551 if (csa->lscsa->decr_status.slot[0] == 1) { 1562 if (csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) {
1552 csa->spu_chnldata_RW[0] |= 0x20; 1563 csa->spu_chnldata_RW[0] |= 0x20;
1553 } 1564 }
1554 if ((csa->lscsa->decr_status.slot[0] == 1) && 1565 if ((csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) &&
1555 (csa->spu_chnlcnt_RW[0] == 0 && 1566 (csa->spu_chnlcnt_RW[0] == 0 &&
1556 ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) && 1567 ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
1557 ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) { 1568 ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
@@ -1562,18 +1573,13 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1562static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) 1573static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
1563{ 1574{
1564 struct spu_priv2 __iomem *priv2 = spu->priv2; 1575 struct spu_priv2 __iomem *priv2 = spu->priv2;
1565 u64 idx, ch_indices[7] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; 1576 u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1566 int i; 1577 int i;
1567 1578
1568 /* Restore, Step 59: 1579 /* Restore, Step 59:
1580 * Restore the following CH: [0,3,4,24,25,27]
1569 */ 1581 */
1570 1582 for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
1571 /* Restore CH 1 without count */
1572 out_be64(&priv2->spu_chnlcntptr_RW, 1);
1573 out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);
1574
1575 /* Restore the following CH: [0,3,4,24,25,27] */
1576 for (i = 0; i < 7; i++) {
1577 idx = ch_indices[i]; 1583 idx = ch_indices[i];
1578 out_be64(&priv2->spu_chnlcntptr_RW, idx); 1584 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1579 eieio(); 1585 eieio();
@@ -1932,7 +1938,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
1932 set_switch_pending(prev, spu); /* Step 5. */ 1938 set_switch_pending(prev, spu); /* Step 5. */
1933 stop_spu_isolate(spu); /* NEW. */ 1939 stop_spu_isolate(spu); /* NEW. */
1934 remove_other_spu_access(prev, spu); /* Step 6. */ 1940 remove_other_spu_access(prev, spu); /* Step 6. */
1935 suspend_mfc(prev, spu); /* Step 7. */ 1941 suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */
1936 wait_suspend_mfc_complete(prev, spu); /* Step 8. */ 1942 wait_suspend_mfc_complete(prev, spu); /* Step 8. */
1937 if (!suspend_spe(prev, spu)) /* Step 9. */ 1943 if (!suspend_spe(prev, spu)) /* Step 9. */
1938 clear_spu_status(prev, spu); /* Step 10. */ 1944 clear_spu_status(prev, spu); /* Step 10. */
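Editor's note: several switch.c hunks above also correct a latent off-by-one. The ch_indices arrays list six channel numbers but were declared [7] and iterated with a hard-coded count of 7, so the loops also touched the zero-filled seventh slot; dropping the explicit bound and using ARRAY_SIZE() ties the loop to the initializer. A minimal illustration of the idiom (plain C, with ARRAY_SIZE defined locally since the kernel macro is not available in user space):

/* Sketch: tie a loop bound to the array initializer with ARRAY_SIZE
 * instead of repeating a magic count that can drift out of sync. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* six channels, as in save_ch_part1()/reset_ch_part1() */
	unsigned long long ch_indices[] = { 0, 3, 4, 24, 25, 27 };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ch_indices); i++)
		printf("channel %llu\n", ch_indices[i]);

	printf("%zu channels\n", ARRAY_SIZE(ch_indices));
	return 0;
}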
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 8e37bdf4dfda..43f0fb88abbc 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp,
47 goto out; 47 goto out;
48 48
49 i = SPUFS_I(filp->f_path.dentry->d_inode); 49 i = SPUFS_I(filp->f_path.dentry->d_inode);
50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 50 ret = spufs_run_spu(i->i_ctx, &npc, &status);
51 51
52 if (put_user(npc, unpc)) 52 if (put_user(npc, unpc))
53 ret = -EFAULT; 53 ret = -EFAULT;
@@ -76,8 +76,8 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
76} 76}
77#endif 77#endif
78 78
79asmlinkage long sys_spu_create(const char __user *pathname, 79asmlinkage long do_spu_create(const char __user *pathname, unsigned int flags,
80 unsigned int flags, mode_t mode) 80 mode_t mode, struct file *neighbor)
81{ 81{
82 char *tmp; 82 char *tmp;
83 int ret; 83 int ret;
@@ -90,7 +90,7 @@ asmlinkage long sys_spu_create(const char __user *pathname,
90 ret = path_lookup(tmp, LOOKUP_PARENT| 90 ret = path_lookup(tmp, LOOKUP_PARENT|
91 LOOKUP_OPEN|LOOKUP_CREATE, &nd); 91 LOOKUP_OPEN|LOOKUP_CREATE, &nd);
92 if (!ret) { 92 if (!ret) {
93 ret = spufs_create(&nd, flags, mode); 93 ret = spufs_create(&nd, flags, mode, neighbor);
94 path_release(&nd); 94 path_release(&nd);
95 } 95 }
96 putname(tmp); 96 putname(tmp);
@@ -99,8 +99,32 @@ asmlinkage long sys_spu_create(const char __user *pathname,
99 return ret; 99 return ret;
100} 100}
101 101
102#ifndef MODULE
103asmlinkage long sys_spu_create(const char __user *pathname, unsigned int flags,
104 mode_t mode, int neighbor_fd)
105{
106 int fput_needed;
107 struct file *neighbor;
108 long ret;
109
110 if (flags & SPU_CREATE_AFFINITY_SPU) {
111 ret = -EBADF;
112 neighbor = fget_light(neighbor_fd, &fput_needed);
113 if (neighbor) {
114 ret = do_spu_create(pathname, flags, mode, neighbor);
115 fput_light(neighbor, fput_needed);
116 }
117 }
118 else {
119 ret = do_spu_create(pathname, flags, mode, NULL);
120 }
121
122 return ret;
123}
124#endif
125
102struct spufs_calls spufs_calls = { 126struct spufs_calls spufs_calls = {
103 .create_thread = sys_spu_create, 127 .create_thread = do_spu_create,
104 .spu_run = do_spu_run, 128 .spu_run = do_spu_run,
105 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
106}; 130};
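Editor's note: the syscalls.c hunk above splits the old sys_spu_create() into do_spu_create(), which takes a struct file * for the affinity neighbour, and a thin syscall wrapper that resolves the new neighbor_fd argument with fget_light()/fput_light() only when SPU_CREATE_AFFINITY_SPU is set. A standalone sketch of that wrapper's control flow (lookup()/put are stand-ins for the kernel's file-table helpers, and the flag value is an assumption):

/* Sketch: resolve an optional "neighbor" fd only when the affinity flag
 * is set, fail with EBADF if the fd is bogus, and drop the reference
 * after the create call. */
#include <stdio.h>
#include <errno.h>
#include <stddef.h>

#define SPU_CREATE_AFFINITY_SPU 0x10	/* assumed value, illustration only */

struct file { int fd; };
static struct file open_files[] = { { 3 }, { 4 } };

static struct file *lookup(int fd)
{
	size_t i;

	for (i = 0; i < sizeof(open_files) / sizeof(open_files[0]); i++)
		if (open_files[i].fd == fd)
			return &open_files[i];
	return NULL;
}

static long do_create(unsigned int flags, struct file *neighbor)
{
	printf("create: flags=%#x neighbor=%s\n",
	       flags, neighbor ? "set" : "none");
	return 0;
}

static long spu_create(unsigned int flags, int neighbor_fd)
{
	long ret;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		struct file *neighbor = lookup(neighbor_fd);

		if (!neighbor)
			return -EBADF;
		ret = do_create(flags, neighbor);
		/* fput_light(neighbor, fput_needed) in the real code */
	} else {
		ret = do_create(flags, NULL);
	}
	return ret;
}

int main(void)
{
	printf("%ld\n", spu_create(0, -1));
	printf("%ld\n", spu_create(SPU_CREATE_AFFINITY_SPU, 3));
	printf("%ld\n", spu_create(SPU_CREATE_AFFINITY_SPU, 99));
	return 0;
}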
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index bec772674e40..2d12f77e46bc 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -59,7 +59,7 @@ config MPC10X_BRIDGE
59config MV64X60 59config MV64X60
60 bool 60 bool
61 select PPC_INDIRECT_PCI 61 select PPC_INDIRECT_PCI
62 select CONFIG_CHECK_CACHE_COHERENCY 62 select CHECK_CACHE_COHERENCY
63 63
64config MPC10X_OPENPIC 64config MPC10X_OPENPIC
65 bool 65 bool
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index a05079b07696..d4fc74f7bb15 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -102,4 +102,40 @@ config PS3_STORAGE
102 depends on PPC_PS3 102 depends on PPC_PS3
103 tristate 103 tristate
104 104
105config PS3_DISK
106 tristate "PS3 Disk Storage Driver"
107 depends on PPC_PS3 && BLOCK
108 select PS3_STORAGE
109 help
110 Include support for the PS3 Disk Storage.
111
112 This support is required to access the PS3 hard disk.
113 In general, all users will say Y or M.
114
115config PS3_ROM
116 tristate "PS3 BD/DVD/CD-ROM Storage Driver"
117 depends on PPC_PS3 && SCSI
118 select PS3_STORAGE
119 help
120 Include support for the PS3 ROM Storage.
121
122 This support is required to access the PS3 BD/DVD/CD-ROM drive.
123 In general, all users will say Y or M.
124 Also make sure to say Y or M to "SCSI CDROM support" later.
125
126config PS3_FLASH
127 tristate "PS3 FLASH ROM Storage Driver"
128 depends on PPC_PS3
129 select PS3_STORAGE
130 help
131 Include support for the PS3 FLASH ROM Storage.
132
133 This support is required to access the PS3 FLASH ROM, which
134 contains the boot loader and some boot options.
135 In general, all users will say Y or M.
136
137 As this driver needs a fixed buffer of 256 KiB of memory, it can
138 be disabled on the kernel command line using "ps3flash=off", to
139 not allocate this fixed buffer.
140
105endmenu 141endmenu
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 29bf83bfb1f0..8b18a1c40092 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -66,24 +66,13 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
66 * device-tree/ibm,hypertas-functions. Ultimately this functionality may 66 * device-tree/ibm,hypertas-functions. Ultimately this functionality may
67 * be moved into prom.c prom_init(). 67 * be moved into prom.c prom_init().
68 */ 68 */
69void __init fw_feature_init(void) 69void __init fw_feature_init(const char *hypertas, unsigned long len)
70{ 70{
71 struct device_node *dn; 71 const char *s;
72 const char *hypertas, *s; 72 int i;
73 int len, i;
74 73
75 DBG(" -> fw_feature_init()\n"); 74 DBG(" -> fw_feature_init()\n");
76 75
77 dn = of_find_node_by_path("/rtas");
78 if (dn == NULL) {
79 printk(KERN_ERR "WARNING! Cannot find RTAS in device-tree!\n");
80 goto out;
81 }
82
83 hypertas = of_get_property(dn, "ibm,hypertas-functions", &len);
84 if (hypertas == NULL)
85 goto out;
86
87 for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) { 76 for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
88 for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) { 77 for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
89 /* check value against table of strings */ 78 /* check value against table of strings */
@@ -98,7 +87,5 @@ void __init fw_feature_init(void)
98 } 87 }
99 } 88 }
100 89
101out:
102 of_node_put(dn);
103 DBG(" <- fw_feature_init()\n"); 90 DBG(" <- fw_feature_init()\n");
104} 91}
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 61e19f78b923..61136d019554 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -10,7 +10,7 @@
10#ifndef _PSERIES_PSERIES_H 10#ifndef _PSERIES_PSERIES_H
11#define _PSERIES_PSERIES_H 11#define _PSERIES_PSERIES_H
12 12
13extern void __init fw_feature_init(void); 13extern void __init fw_feature_init(const char *hypertas, unsigned long len);
14 14
15struct pt_regs; 15struct pt_regs;
16 16
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 59e69f085cb4..f0b7146a110f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -320,8 +320,6 @@ static void __init pSeries_init_early(void)
320{ 320{
321 DBG(" -> pSeries_init_early()\n"); 321 DBG(" -> pSeries_init_early()\n");
322 322
323 fw_feature_init();
324
325 if (firmware_has_feature(FW_FEATURE_LPAR)) 323 if (firmware_has_feature(FW_FEATURE_LPAR))
326 find_udbg_vterm(); 324 find_udbg_vterm();
327 325
@@ -343,14 +341,21 @@ static int __init pSeries_probe_hypertas(unsigned long node,
343 const char *uname, int depth, 341 const char *uname, int depth,
344 void *data) 342 void *data)
345{ 343{
344 const char *hypertas;
345 unsigned long len;
346
346 if (depth != 1 || 347 if (depth != 1 ||
347 (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0)) 348 (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
348 return 0; 349 return 0;
350
351 hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
352 if (!hypertas)
353 return 1;
349 354
350 if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL) 355 powerpc_firmware_features |= FW_FEATURE_LPAR;
351 powerpc_firmware_features |= FW_FEATURE_LPAR; 356 fw_feature_init(hypertas, len);
352 357
353 return 1; 358 return 1;
354} 359}
355 360
356static int __init pSeries_probe(void) 361static int __init pSeries_probe(void)
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index f65078c3d3b3..484eb4e0e9db 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
17mv64x60-$(CONFIG_PCI) += mv64x60_pci.o 17mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
18obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o 18obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o
19obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o 19obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
20obj-$(CONFIG_AXON_RAM) += axonram.o
20 21
21# contains only the suspend handler for time 22# contains only the suspend handler for time
22ifeq ($(CONFIG_RTC_CLASS),) 23ifeq ($(CONFIG_RTC_CLASS),)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
new file mode 100644
index 000000000000..2326d5dc5752
--- /dev/null
+++ b/arch/powerpc/sysdev/axonram.c
@@ -0,0 +1,381 @@
1/*
2 * (C) Copyright IBM Deutschland Entwicklung GmbH 2006
3 *
4 * Author: Maxim Shchetynin <maxim@de.ibm.com>
5 *
6 * Axon DDR2 device driver.
7 * It registers one block device per Axon's DDR2 memory bank found on a system.
8 * Block devices are called axonram?; their major and minor numbers are
9 * available in /proc/devices, /proc/partitions or in /sys/block/axonram?/dev.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/bio.h>
27#include <linux/blkdev.h>
28#include <linux/buffer_head.h>
29#include <linux/device.h>
30#include <linux/errno.h>
31#include <linux/fs.h>
32#include <linux/genhd.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/ioport.h>
36#include <linux/irq.h>
37#include <linux/irqreturn.h>
38#include <linux/kernel.h>
39#include <linux/mm.h>
40#include <linux/mod_devicetable.h>
41#include <linux/module.h>
42#include <linux/slab.h>
43#include <linux/string.h>
44#include <linux/types.h>
45#include <asm/of_device.h>
46#include <asm/of_platform.h>
47#include <asm/page.h>
48#include <asm/prom.h>
49
50#define AXON_RAM_MODULE_NAME "axonram"
51#define AXON_RAM_DEVICE_NAME "axonram"
52#define AXON_RAM_MINORS_PER_DISK 16
53#define AXON_RAM_BLOCK_SHIFT PAGE_SHIFT
54#define AXON_RAM_BLOCK_SIZE 1 << AXON_RAM_BLOCK_SHIFT
55#define AXON_RAM_SECTOR_SHIFT 9
56#define AXON_RAM_SECTOR_SIZE 1 << AXON_RAM_SECTOR_SHIFT
57#define AXON_RAM_IRQ_FLAGS IRQF_SHARED | IRQF_TRIGGER_RISING
58
59struct axon_ram_bank {
60 struct of_device *device;
61 struct gendisk *disk;
62 unsigned int irq_correctable;
63 unsigned int irq_uncorrectable;
64 unsigned long ph_addr;
65 unsigned long io_addr;
66 unsigned long size;
67 unsigned long ecc_counter;
68};
69
70static ssize_t
71axon_ram_sysfs_ecc(struct device *dev, struct device_attribute *attr, char *buf)
72{
73 struct of_device *device = to_of_device(dev);
74 struct axon_ram_bank *bank = device->dev.platform_data;
75
76 BUG_ON(!bank);
77
78 return sprintf(buf, "%ld\n", bank->ecc_counter);
79}
80
81static DEVICE_ATTR(ecc, S_IRUGO, axon_ram_sysfs_ecc, NULL);
82
83/**
84 * axon_ram_irq_handler - interrupt handler for Axon RAM ECC
85 * @irq: interrupt ID
86 * @dev: pointer to of_device
87 */
88static irqreturn_t
89axon_ram_irq_handler(int irq, void *dev)
90{
91 struct of_device *device = dev;
92 struct axon_ram_bank *bank = device->dev.platform_data;
93
94 BUG_ON(!bank);
95
96 if (irq == bank->irq_correctable) {
97 dev_err(&device->dev, "Correctable memory error occured\n");
98 bank->ecc_counter++;
99 return IRQ_HANDLED;
100 } else if (irq == bank->irq_uncorrectable) {
101 dev_err(&device->dev, "Uncorrectable memory error occured\n");
102 panic("Critical ECC error on %s", device->node->full_name);
103 }
104
105 return IRQ_NONE;
106}
107
108/**
109 * axon_ram_make_request - make_request() method for block device
110 * @queue, @bio: see blk_queue_make_request()
111 */
112static int
113axon_ram_make_request(struct request_queue *queue, struct bio *bio)
114{
115 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
116 unsigned long phys_mem, phys_end;
117 void *user_mem;
118 struct bio_vec *vec;
119 unsigned int transfered;
120 unsigned short idx;
121 int rc = 0;
122
123 phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
124 phys_end = bank->io_addr + bank->size;
125 transfered = 0;
126 bio_for_each_segment(vec, bio, idx) {
127 if (unlikely(phys_mem + vec->bv_len > phys_end)) {
128 bio_io_error(bio, bio->bi_size);
129 rc = -ERANGE;
130 break;
131 }
132
133 user_mem = page_address(vec->bv_page) + vec->bv_offset;
134 if (bio_data_dir(bio) == READ)
135 memcpy(user_mem, (void *) phys_mem, vec->bv_len);
136 else
137 memcpy((void *) phys_mem, user_mem, vec->bv_len);
138
139 phys_mem += vec->bv_len;
140 transfered += vec->bv_len;
141 }
142 bio_endio(bio, transfered, 0);
143
144 return rc;
145}
146
147/**
148 * axon_ram_direct_access - direct_access() method for block device
149 * @device, @sector, @data: see block_device_operations method
150 */
151static int
152axon_ram_direct_access(struct block_device *device, sector_t sector,
153 unsigned long *data)
154{
155 struct axon_ram_bank *bank = device->bd_disk->private_data;
156 loff_t offset;
157
158 offset = sector << AXON_RAM_SECTOR_SHIFT;
159 if (offset >= bank->size) {
160 dev_err(&bank->device->dev, "Access outside of address space\n");
161 return -ERANGE;
162 }
163
164 *data = bank->ph_addr + offset;
165
166 return 0;
167}
168
169static struct block_device_operations axon_ram_devops = {
170 .owner = THIS_MODULE,
171 .direct_access = axon_ram_direct_access
172};
173
174/**
175 * axon_ram_probe - probe() method for platform driver
176 * @device, @device_id: see of_platform_driver method
177 */
178static int
179axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
180{
181 static int axon_ram_bank_id = -1;
182 struct axon_ram_bank *bank;
183 struct resource resource;
184 int rc = 0;
185
186 axon_ram_bank_id++;
187
188 dev_info(&device->dev, "Found memory controller on %s\n",
189 device->node->full_name);
190
191 bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
192 if (bank == NULL) {
193 dev_err(&device->dev, "Out of memory\n");
194 rc = -ENOMEM;
195 goto failed;
196 }
197
198 device->dev.platform_data = bank;
199
200 bank->device = device;
201
202 if (of_address_to_resource(device->node, 0, &resource) != 0) {
203 dev_err(&device->dev, "Cannot access device tree\n");
204 rc = -EFAULT;
205 goto failed;
206 }
207
208 bank->size = resource.end - resource.start + 1;
209
210 if (bank->size == 0) {
211 dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
212 AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
213 rc = -ENODEV;
214 goto failed;
215 }
216
217 dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n",
218 AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
219
220 bank->ph_addr = resource.start;
221 bank->io_addr = (unsigned long) ioremap_flags(
222 bank->ph_addr, bank->size, _PAGE_NO_CACHE);
223 if (bank->io_addr == 0) {
224 dev_err(&device->dev, "ioremap() failed\n");
225 rc = -EFAULT;
226 goto failed;
227 }
228
229 bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK);
230 if (bank->disk == NULL) {
231 dev_err(&device->dev, "Cannot register disk\n");
232 rc = -EFAULT;
233 goto failed;
234 }
235
236 bank->disk->first_minor = 0;
237 bank->disk->fops = &axon_ram_devops;
238 bank->disk->private_data = bank;
239 bank->disk->driverfs_dev = &device->dev;
240
241 sprintf(bank->disk->disk_name, "%s%d",
242 AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
243 bank->disk->major = register_blkdev(0, bank->disk->disk_name);
244 if (bank->disk->major < 0) {
245 dev_err(&device->dev, "Cannot register block device\n");
246 rc = -EFAULT;
247 goto failed;
248 }
249
250 bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
251 if (bank->disk->queue == NULL) {
252 dev_err(&device->dev, "Cannot register disk queue\n");
253 rc = -EFAULT;
254 goto failed;
255 }
256
257 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
258 blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
259 blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
260 add_disk(bank->disk);
261
262 bank->irq_correctable = irq_of_parse_and_map(device->node, 0);
263 bank->irq_uncorrectable = irq_of_parse_and_map(device->node, 1);
264 if ((bank->irq_correctable <= 0) || (bank->irq_uncorrectable <= 0)) {
265 dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
266 rc = -EFAULT;
267 goto failed;
268 }
269
270 rc = request_irq(bank->irq_correctable, axon_ram_irq_handler,
271 AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
272 if (rc != 0) {
273 dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
274 bank->irq_correctable = bank->irq_uncorrectable = 0;
275 rc = -EFAULT;
276 goto failed;
277 }
278
279 rc = request_irq(bank->irq_uncorrectable, axon_ram_irq_handler,
280 AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
281 if (rc != 0) {
282 dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
283 bank->irq_uncorrectable = 0;
284 rc = -EFAULT;
285 goto failed;
286 }
287
288 rc = device_create_file(&device->dev, &dev_attr_ecc);
289 if (rc != 0) {
290 dev_err(&device->dev, "Cannot create sysfs file\n");
291 rc = -EFAULT;
292 goto failed;
293 }
294
295 return 0;
296
297failed:
298 if (bank != NULL) {
299 if (bank->irq_uncorrectable > 0)
300 free_irq(bank->irq_uncorrectable, device);
301 if (bank->irq_correctable > 0)
302 free_irq(bank->irq_correctable, device);
303 if (bank->disk != NULL) {
304 if (bank->disk->queue != NULL)
305 blk_cleanup_queue(bank->disk->queue);
306 if (bank->disk->major > 0)
307 unregister_blkdev(bank->disk->major,
308 bank->disk->disk_name);
309 del_gendisk(bank->disk);
310 }
311 device->dev.platform_data = NULL;
312 if (bank->io_addr != 0)
313 iounmap((void __iomem *) bank->io_addr);
314 kfree(bank);
315 }
316
317 return rc;
318}
319
320/**
321 * axon_ram_remove - remove() method for platform driver
322 * @device: see of_platform_driver method
323 */
324static int
325axon_ram_remove(struct of_device *device)
326{
327 struct axon_ram_bank *bank = device->dev.platform_data;
328
329 BUG_ON(!bank || !bank->disk);
330
331 device_remove_file(&device->dev, &dev_attr_ecc);
332 free_irq(bank->irq_uncorrectable, device);
333 free_irq(bank->irq_correctable, device);
334 blk_cleanup_queue(bank->disk->queue);
335 unregister_blkdev(bank->disk->major, bank->disk->disk_name);
336 del_gendisk(bank->disk);
337 iounmap((void __iomem *) bank->io_addr);
338 kfree(bank);
339
340 return 0;
341}
342
343static struct of_device_id axon_ram_device_id[] = {
344 {
345 .type = "dma-memory"
346 },
347 {}
348};
349
350static struct of_platform_driver axon_ram_driver = {
351 .owner = THIS_MODULE,
352 .name = AXON_RAM_MODULE_NAME,
353 .match_table = axon_ram_device_id,
354 .probe = axon_ram_probe,
355 .remove = axon_ram_remove
356};
357
358/**
359 * axon_ram_init
360 */
361static int __init
362axon_ram_init(void)
363{
364 return of_register_platform_driver(&axon_ram_driver);
365}
366
367/**
368 * axon_ram_exit
369 */
370static void __exit
371axon_ram_exit(void)
372{
373 of_unregister_platform_driver(&axon_ram_driver);
374}
375
376module_init(axon_ram_init);
377module_exit(axon_ram_exit);
378
379MODULE_LICENSE("GPL");
380MODULE_AUTHOR("Maxim Shchetynin <maxim@de.ibm.com>");
381MODULE_DESCRIPTION("Axon DDR2 RAM device driver for IBM Cell BE");
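For orientation, the new axonram driver appears to userspace as ordinary block devices named axonram<N> (major/minor numbers in /proc/devices, as the header comment notes), plus a read-only "ecc" sysfs attribute counting correctable errors. A minimal userspace sketch of reading the first sector is below; the device node path /dev/axonram0 is an assumption for illustration, not something created by this patch.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char sector[512];
		/* /dev/axonram0 is an assumed node name; the driver only registers
		 * gendisks called axonram<N> and leaves node creation to userspace. */
		int fd = open("/dev/axonram0", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/axonram0");
			return 1;
		}
		if (read(fd, sector, sizeof(sector)) != (ssize_t) sizeof(sector)) {
			perror("read");
			close(fd);
			return 1;
		}
		printf("read first %zu bytes of axonram0\n", sizeof(sector));
		close(fd);
		return 0;
	}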
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 75aad38179f0..74c64c0d3b71 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -877,6 +877,8 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
877 877
878 if (hw == mpic->spurious_vec) 878 if (hw == mpic->spurious_vec)
879 return -EINVAL; 879 return -EINVAL;
880 if (mpic->protected && test_bit(hw, mpic->protected))
881 return -EINVAL;
880 882
881#ifdef CONFIG_SMP 883#ifdef CONFIG_SMP
882 else if (hw >= mpic->ipi_vecs[0]) { 884 else if (hw >= mpic->ipi_vecs[0]) {
@@ -1034,6 +1036,25 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1034 if (node && of_get_property(node, "big-endian", NULL) != NULL) 1036 if (node && of_get_property(node, "big-endian", NULL) != NULL)
1035 mpic->flags |= MPIC_BIG_ENDIAN; 1037 mpic->flags |= MPIC_BIG_ENDIAN;
1036 1038
1039 /* Look for protected sources */
1040 if (node) {
1041 unsigned int psize, bits, mapsize;
1042 const u32 *psrc =
1043 of_get_property(node, "protected-sources", &psize);
1044 if (psrc) {
1045 psize /= 4;
1046 bits = intvec_top + 1;
1047 mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
1048 mpic->protected = alloc_bootmem(mapsize);
1049 BUG_ON(mpic->protected == NULL);
1050 memset(mpic->protected, 0, mapsize);
1051 for (i = 0; i < psize; i++) {
1052 if (psrc[i] > intvec_top)
1053 continue;
1054 __set_bit(psrc[i], mpic->protected);
1055 }
1056 }
1057 }
1037 1058
1038#ifdef CONFIG_MPIC_WEIRD 1059#ifdef CONFIG_MPIC_WEIRD
1039 mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)]; 1060 mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
@@ -1213,6 +1234,9 @@ void __init mpic_init(struct mpic *mpic)
1213 u32 vecpri = MPIC_VECPRI_MASK | i | 1234 u32 vecpri = MPIC_VECPRI_MASK | i |
1214 (8 << MPIC_VECPRI_PRIORITY_SHIFT); 1235 (8 << MPIC_VECPRI_PRIORITY_SHIFT);
1215 1236
1237 /* check if protected */
1238 if (mpic->protected && test_bit(i, mpic->protected))
1239 continue;
1216 /* init hw */ 1240 /* init hw */
1217 mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); 1241 mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
1218 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1242 mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
@@ -1407,6 +1431,14 @@ unsigned int mpic_get_one_irq(struct mpic *mpic)
1407 mpic_eoi(mpic); 1431 mpic_eoi(mpic);
1408 return NO_IRQ; 1432 return NO_IRQ;
1409 } 1433 }
1434 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1435 if (printk_ratelimit())
1436 printk(KERN_WARNING "%s: Got protected source %d !\n",
1437 mpic->name, (int)src);
1438 mpic_eoi(mpic);
1439 return NO_IRQ;
1440 }
1441
1410 return irq_linear_revmap(mpic->irqhost, src); 1442 return irq_linear_revmap(mpic->irqhost, src);
1411} 1443}
1412 1444
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 85a7c99c1003..2f91b55b7754 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -48,15 +48,13 @@ struct pmi_data {
48 struct work_struct work; 48 struct work_struct work;
49}; 49};
50 50
51static struct pmi_data *data;
51 52
52static int pmi_irq_handler(int irq, void *dev_id) 53static int pmi_irq_handler(int irq, void *dev_id)
53{ 54{
54 struct pmi_data *data;
55 u8 type; 55 u8 type;
56 int rc; 56 int rc;
57 57
58 data = dev_id;
59
60 spin_lock(&data->pmi_spinlock); 58 spin_lock(&data->pmi_spinlock);
61 59
62 type = ioread8(data->pmi_reg + PMI_READ_TYPE); 60 type = ioread8(data->pmi_reg + PMI_READ_TYPE);
@@ -111,16 +109,13 @@ MODULE_DEVICE_TABLE(of, pmi_match);
111 109
112static void pmi_notify_handlers(struct work_struct *work) 110static void pmi_notify_handlers(struct work_struct *work)
113{ 111{
114 struct pmi_data *data;
115 struct pmi_handler *handler; 112 struct pmi_handler *handler;
116 113
117 data = container_of(work, struct pmi_data, work);
118
119 spin_lock(&data->handler_spinlock); 114 spin_lock(&data->handler_spinlock);
120 list_for_each_entry(handler, &data->handler, node) { 115 list_for_each_entry(handler, &data->handler, node) {
121 pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler); 116 pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler);
122 if (handler->type == data->msg.type) 117 if (handler->type == data->msg.type)
123 handler->handle_pmi_message(data->dev, data->msg); 118 handler->handle_pmi_message(data->msg);
124 } 119 }
125 spin_unlock(&data->handler_spinlock); 120 spin_unlock(&data->handler_spinlock);
126} 121}
@@ -129,9 +124,14 @@ static int pmi_of_probe(struct of_device *dev,
129 const struct of_device_id *match) 124 const struct of_device_id *match)
130{ 125{
131 struct device_node *np = dev->node; 126 struct device_node *np = dev->node;
132 struct pmi_data *data;
133 int rc; 127 int rc;
134 128
129 if (data) {
130 printk(KERN_ERR "pmi: driver has already been initialized.\n");
131 rc = -EBUSY;
132 goto out;
133 }
134
135 data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL); 135 data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL);
136 if (!data) { 136 if (!data) {
137 printk(KERN_ERR "pmi: could not allocate memory.\n"); 137 printk(KERN_ERR "pmi: could not allocate memory.\n");
@@ -154,7 +154,6 @@ static int pmi_of_probe(struct of_device *dev,
154 154
155 INIT_WORK(&data->work, pmi_notify_handlers); 155 INIT_WORK(&data->work, pmi_notify_handlers);
156 156
157 dev->dev.driver_data = data;
158 data->dev = dev; 157 data->dev = dev;
159 158
160 data->irq = irq_of_parse_and_map(np, 0); 159 data->irq = irq_of_parse_and_map(np, 0);
@@ -164,7 +163,7 @@ static int pmi_of_probe(struct of_device *dev,
164 goto error_cleanup_iomap; 163 goto error_cleanup_iomap;
165 } 164 }
166 165
167 rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", data); 166 rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL);
168 if (rc) { 167 if (rc) {
169 printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n", 168 printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n",
170 data->irq, rc); 169 data->irq, rc);
@@ -187,12 +186,9 @@ out:
187 186
188static int pmi_of_remove(struct of_device *dev) 187static int pmi_of_remove(struct of_device *dev)
189{ 188{
190 struct pmi_data *data;
191 struct pmi_handler *handler, *tmp; 189 struct pmi_handler *handler, *tmp;
192 190
193 data = dev->dev.driver_data; 191 free_irq(data->irq, NULL);
194
195 free_irq(data->irq, data);
196 iounmap(data->pmi_reg); 192 iounmap(data->pmi_reg);
197 193
198 spin_lock(&data->handler_spinlock); 194 spin_lock(&data->handler_spinlock);
@@ -202,7 +198,8 @@ static int pmi_of_remove(struct of_device *dev)
202 198
203 spin_unlock(&data->handler_spinlock); 199 spin_unlock(&data->handler_spinlock);
204 200
205 kfree(dev->dev.driver_data); 201 kfree(data);
202 data = NULL;
206 203
207 return 0; 204 return 0;
208} 205}
@@ -226,13 +223,13 @@ static void __exit pmi_module_exit(void)
226} 223}
227module_exit(pmi_module_exit); 224module_exit(pmi_module_exit);
228 225
229void pmi_send_message(struct of_device *device, pmi_message_t msg) 226int pmi_send_message(pmi_message_t msg)
230{ 227{
231 struct pmi_data *data;
232 unsigned long flags; 228 unsigned long flags;
233 DECLARE_COMPLETION_ONSTACK(completion); 229 DECLARE_COMPLETION_ONSTACK(completion);
234 230
235 data = device->dev.driver_data; 231 if (!data)
232 return -ENODEV;
236 233
237 mutex_lock(&data->msg_mutex); 234 mutex_lock(&data->msg_mutex);
238 235
@@ -256,30 +253,26 @@ void pmi_send_message(struct of_device *device, pmi_message_t msg)
256 data->completion = NULL; 253 data->completion = NULL;
257 254
258 mutex_unlock(&data->msg_mutex); 255 mutex_unlock(&data->msg_mutex);
256
257 return 0;
259} 258}
260EXPORT_SYMBOL_GPL(pmi_send_message); 259EXPORT_SYMBOL_GPL(pmi_send_message);
261 260
262void pmi_register_handler(struct of_device *device, 261int pmi_register_handler(struct pmi_handler *handler)
263 struct pmi_handler *handler)
264{ 262{
265 struct pmi_data *data;
266 data = device->dev.driver_data;
267
268 if (!data) 263 if (!data)
269 return; 264 return -ENODEV;
270 265
271 spin_lock(&data->handler_spinlock); 266 spin_lock(&data->handler_spinlock);
272 list_add_tail(&handler->node, &data->handler); 267 list_add_tail(&handler->node, &data->handler);
273 spin_unlock(&data->handler_spinlock); 268 spin_unlock(&data->handler_spinlock);
269
270 return 0;
274} 271}
275EXPORT_SYMBOL_GPL(pmi_register_handler); 272EXPORT_SYMBOL_GPL(pmi_register_handler);
276 273
277void pmi_unregister_handler(struct of_device *device, 274void pmi_unregister_handler(struct pmi_handler *handler)
278 struct pmi_handler *handler)
279{ 275{
280 struct pmi_data *data;
281 data = device->dev.driver_data;
282
283 if (!data) 276 if (!data)
284 return; 277 return;
285 278
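To make the interface change above concrete: after this patch a PMI client registers a handler and sends messages without holding an of_device reference, and both pmi_register_handler() and pmi_send_message() return -ENODEV when no PMI device has been probed. A rough sketch of a client under these assumptions follows; the message type value 0x42 and the handler body are made up for illustration.

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <asm/pmi.h>

	/* Callback now takes only the message, matching the change in
	 * pmi_notify_handlers() above. */
	static void my_handle_pmi(pmi_message_t msg)
	{
		pr_debug("pmi: got message type %d\n", msg.type);
	}

	static struct pmi_handler my_pmi_handler = {
		.type			= 0x42,		/* made-up message type */
		.handle_pmi_message	= my_handle_pmi,
	};

	static int __init my_pmi_client_init(void)
	{
		pmi_message_t msg = { .type = 0x42 };
		int rc;

		rc = pmi_register_handler(&my_pmi_handler); /* no of_device argument any more */
		if (rc)
			return rc;

		return pmi_send_message(msg);	/* -ENODEV if no PMI device was found */
	}
	module_init(my_pmi_client_init);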
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
index 78765833f4c0..bfac84fbe780 100644
--- a/arch/powerpc/xmon/nonstdio.c
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -132,3 +132,8 @@ void xmon_printf(const char *format, ...)
132 va_end(args); 132 va_end(args);
133 xmon_write(xmon_outbuf, n); 133 xmon_write(xmon_outbuf, n);
134} 134}
135
136void xmon_puts(const char *str)
137{
138 xmon_write(str, strlen(str));
139}
diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 47cebbd2b1b1..23dd95f4599c 100644
--- a/arch/powerpc/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
@@ -5,10 +5,11 @@
5 5
6extern int xmon_putchar(int c); 6extern int xmon_putchar(int c);
7extern int xmon_getchar(void); 7extern int xmon_getchar(void);
8extern void xmon_puts(const char *);
8extern char *xmon_gets(char *, int); 9extern char *xmon_gets(char *, int);
9extern void xmon_printf(const char *, ...); 10extern void xmon_printf(const char *, ...);
10extern void xmon_map_scc(void); 11extern void xmon_map_scc(void);
11extern int xmon_expect(const char *str, unsigned long timeout); 12extern int xmon_expect(const char *str, unsigned long timeout);
12extern int xmon_write(void *ptr, int nb); 13extern int xmon_write(const void *ptr, int nb);
13extern int xmon_readchar(void); 14extern int xmon_readchar(void);
14extern int xmon_read_poll(void); 15extern int xmon_read_poll(void);
diff --git a/arch/powerpc/xmon/start.c b/arch/powerpc/xmon/start.c
index 712552c4f242..8864de2af382 100644
--- a/arch/powerpc/xmon/start.c
+++ b/arch/powerpc/xmon/start.c
@@ -14,7 +14,7 @@ void xmon_map_scc(void)
14{ 14{
15} 15}
16 16
17int xmon_write(void *ptr, int nb) 17int xmon_write(const void *ptr, int nb)
18{ 18{
19 return udbg_write(ptr, nb); 19 return udbg_write(ptr, nb);
20} 20}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 669e6566ad70..121b04d165d1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -833,7 +833,7 @@ cmds(struct pt_regs *excp)
833 mdelay(2000); 833 mdelay(2000);
834 return cmd; 834 return cmd;
835 case '?': 835 case '?':
836 printf(help_string); 836 xmon_puts(help_string);
837 break; 837 break;
838 case 'b': 838 case 'b':
839 bpt_cmds(); 839 bpt_cmds();
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 032f4b7f4225..d212b1c418a9 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/mutex.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/spinlock.h> 19#include <linux/spinlock.h>
19#include <linux/mv643xx.h> 20#include <linux/mv643xx.h>
@@ -2359,7 +2360,7 @@ mv64460_chip_specific_init(struct mv64x60_handle *bh,
2359/* Export the hotswap register via sysfs for enum event monitoring */ 2360/* Export the hotswap register via sysfs for enum event monitoring */
2360#define VAL_LEN_MAX 11 /* 32-bit hex or dec stringified number + '\n' */ 2361#define VAL_LEN_MAX 11 /* 32-bit hex or dec stringified number + '\n' */
2361 2362
2362DECLARE_MUTEX(mv64xxx_hs_lock); 2363static DEFINE_MUTEX(mv64xxx_hs_lock);
2363 2364
2364static ssize_t 2365static ssize_t
2365mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count) 2366mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
@@ -2372,14 +2373,14 @@ mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
2372 if (count < VAL_LEN_MAX) 2373 if (count < VAL_LEN_MAX)
2373 return -EINVAL; 2374 return -EINVAL;
2374 2375
2375 if (down_interruptible(&mv64xxx_hs_lock)) 2376 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2376 return -ERESTARTSYS; 2377 return -ERESTARTSYS;
2377 save_exclude = mv64x60_pci_exclude_bridge; 2378 save_exclude = mv64x60_pci_exclude_bridge;
2378 mv64x60_pci_exclude_bridge = 0; 2379 mv64x60_pci_exclude_bridge = 0;
2379 early_read_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0), 2380 early_read_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2380 MV64360_PCICFG_CPCI_HOTSWAP, &v); 2381 MV64360_PCICFG_CPCI_HOTSWAP, &v);
2381 mv64x60_pci_exclude_bridge = save_exclude; 2382 mv64x60_pci_exclude_bridge = save_exclude;
2382 up(&mv64xxx_hs_lock); 2383 mutex_unlock(&mv64xxx_hs_lock);
2383 2384
2384 return sprintf(buf, "0x%08x\n", v); 2385 return sprintf(buf, "0x%08x\n", v);
2385} 2386}
@@ -2396,14 +2397,14 @@ mv64xxx_hs_reg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
2396 return -EINVAL; 2397 return -EINVAL;
2397 2398
2398 if (sscanf(buf, "%i", &v) == 1) { 2399 if (sscanf(buf, "%i", &v) == 1) {
2399 if (down_interruptible(&mv64xxx_hs_lock)) 2400 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2400 return -ERESTARTSYS; 2401 return -ERESTARTSYS;
2401 save_exclude = mv64x60_pci_exclude_bridge; 2402 save_exclude = mv64x60_pci_exclude_bridge;
2402 mv64x60_pci_exclude_bridge = 0; 2403 mv64x60_pci_exclude_bridge = 0;
2403 early_write_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0), 2404 early_write_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2404 MV64360_PCICFG_CPCI_HOTSWAP, v); 2405 MV64360_PCICFG_CPCI_HOTSWAP, v);
2405 mv64x60_pci_exclude_bridge = save_exclude; 2406 mv64x60_pci_exclude_bridge = save_exclude;
2406 up(&mv64xxx_hs_lock); 2407 mutex_unlock(&mv64xxx_hs_lock);
2407 } 2408 }
2408 else 2409 else
2409 count = -EINVAL; 2410 count = -EINVAL;
@@ -2433,10 +2434,10 @@ mv64xxx_hs_reg_valid_show(struct device *dev, struct device_attribute *attr,
2433 pdev = container_of(dev, struct platform_device, dev); 2434 pdev = container_of(dev, struct platform_device, dev);
2434 pdp = (struct mv64xxx_pdata *)pdev->dev.platform_data; 2435 pdp = (struct mv64xxx_pdata *)pdev->dev.platform_data;
2435 2436
2436 if (down_interruptible(&mv64xxx_hs_lock)) 2437 if (mutex_lock_interruptible(&mv64xxx_hs_lock))
2437 return -ERESTARTSYS; 2438 return -ERESTARTSYS;
2438 v = pdp->hs_reg_valid; 2439 v = pdp->hs_reg_valid;
2439 up(&mv64xxx_hs_lock); 2440 mutex_unlock(&mv64xxx_hs_lock);
2440 2441
2441 return sprintf(buf, "%i\n", v); 2442 return sprintf(buf, "%i\n", v);
2442} 2443}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d8ed6676ae86..f87f429e0b24 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -178,6 +178,9 @@ config CPU_HAS_PINT_IRQ
178config CPU_HAS_MASKREG_IRQ 178config CPU_HAS_MASKREG_IRQ
179 bool 179 bool
180 180
181config CPU_HAS_INTC_IRQ
182 bool
183
181config CPU_HAS_INTC2_IRQ 184config CPU_HAS_INTC2_IRQ
182 bool 185 bool
183 186
@@ -209,6 +212,7 @@ config SOLUTION_ENGINE
209config SH_SOLUTION_ENGINE 212config SH_SOLUTION_ENGINE
210 bool "SolutionEngine" 213 bool "SolutionEngine"
211 select SOLUTION_ENGINE 214 select SOLUTION_ENGINE
215 select CPU_HAS_IPR_IRQ
212 depends on CPU_SUBTYPE_SH7709 || CPU_SUBTYPE_SH7750 216 depends on CPU_SUBTYPE_SH7709 || CPU_SUBTYPE_SH7750
213 help 217 help
214 Select SolutionEngine if configuring for a Hitachi SH7709 218 Select SolutionEngine if configuring for a Hitachi SH7709
@@ -241,6 +245,7 @@ config SH_7722_SOLUTION_ENGINE
241config SH_7751_SOLUTION_ENGINE 245config SH_7751_SOLUTION_ENGINE
242 bool "SolutionEngine7751" 246 bool "SolutionEngine7751"
243 select SOLUTION_ENGINE 247 select SOLUTION_ENGINE
248 select CPU_HAS_IPR_IRQ
244 depends on CPU_SUBTYPE_SH7751 249 depends on CPU_SUBTYPE_SH7751
245 help 250 help
246 Select 7751 SolutionEngine if configuring for a Hitachi SH7751 251 Select 7751 SolutionEngine if configuring for a Hitachi SH7751
@@ -250,6 +255,7 @@ config SH_7780_SOLUTION_ENGINE
250 bool "SolutionEngine7780" 255 bool "SolutionEngine7780"
251 select SOLUTION_ENGINE 256 select SOLUTION_ENGINE
252 select SYS_SUPPORTS_PCI 257 select SYS_SUPPORTS_PCI
258 select CPU_HAS_INTC2_IRQ
253 depends on CPU_SUBTYPE_SH7780 259 depends on CPU_SUBTYPE_SH7780
254 help 260 help
255 Select 7780 SolutionEngine if configuring for a Renesas SH7780 261 Select 7780 SolutionEngine if configuring for a Renesas SH7780
@@ -317,6 +323,7 @@ config SH_MPC1211
317config SH_SH03 323config SH_SH03
318 bool "Interface CTP/PCI-SH03" 324 bool "Interface CTP/PCI-SH03"
319 depends on CPU_SUBTYPE_SH7751 && BROKEN 325 depends on CPU_SUBTYPE_SH7751 && BROKEN
326 select CPU_HAS_IPR_IRQ
320 select SYS_SUPPORTS_PCI 327 select SYS_SUPPORTS_PCI
321 help 328 help
322 CTP/PCI-SH03 is a CPU module computer that is produced 329 CTP/PCI-SH03 is a CPU module computer that is produced
@@ -326,6 +333,7 @@ config SH_SH03
326config SH_SECUREEDGE5410 333config SH_SECUREEDGE5410
327 bool "SecureEdge5410" 334 bool "SecureEdge5410"
328 depends on CPU_SUBTYPE_SH7751R 335 depends on CPU_SUBTYPE_SH7751R
336 select CPU_HAS_IPR_IRQ
329 select SYS_SUPPORTS_PCI 337 select SYS_SUPPORTS_PCI
330 help 338 help
331 Select SecureEdge5410 if configuring for a SnapGear SH board. 339 Select SecureEdge5410 if configuring for a SnapGear SH board.
@@ -380,6 +388,7 @@ config SH_LANDISK
380config SH_TITAN 388config SH_TITAN
381 bool "TITAN" 389 bool "TITAN"
382 depends on CPU_SUBTYPE_SH7751R 390 depends on CPU_SUBTYPE_SH7751R
391 select CPU_HAS_IPR_IRQ
383 select SYS_SUPPORTS_PCI 392 select SYS_SUPPORTS_PCI
384 help 393 help
385 Select Titan if you are configuring for a Nimble Microsystems 394 Select Titan if you are configuring for a Nimble Microsystems
@@ -388,6 +397,7 @@ config SH_TITAN
388config SH_SHMIN 397config SH_SHMIN
389 bool "SHMIN" 398 bool "SHMIN"
390 depends on CPU_SUBTYPE_SH7706 399 depends on CPU_SUBTYPE_SH7706
400 select CPU_HAS_IPR_IRQ
391 help 401 help
392 Select SHMIN if configuring for the SHMIN board. 402 Select SHMIN if configuring for the SHMIN board.
393 403
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 77fecc62a056..0016609d1eba 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -121,8 +121,7 @@ core-y += $(addprefix arch/sh/boards/, \
121endif 121endif
122 122
123# Companion chips 123# Companion chips
124core-$(CONFIG_HD64461) += arch/sh/cchips/hd6446x/hd64461/ 124core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/
125core-$(CONFIG_HD64465) += arch/sh/cchips/hd6446x/hd64465/
126core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/ 125core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/
127 126
128cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2 127cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2
diff --git a/arch/sh/boards/mpc1211/pci.c b/arch/sh/boards/mpc1211/pci.c
index 4ed1a95c6d56..23849f70f133 100644
--- a/arch/sh/boards/mpc1211/pci.c
+++ b/arch/sh/boards/mpc1211/pci.c
@@ -187,7 +187,7 @@ char * __devinit pcibios_setup(char *str)
187 * are examined. 187 * are examined.
188 */ 188 */
189 189
190void __init pcibios_fixup_bus(struct pci_bus *b) 190void __devinit pcibios_fixup_bus(struct pci_bus *b)
191{ 191{
192 pci_read_bridge_bases(b); 192 pci_read_bridge_bases(b);
193} 193}
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index 5afb864a1ec5..adb529d01bae 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -21,6 +21,58 @@
21#include <asm/clock.h> 21#include <asm/clock.h>
22#include <asm/io.h> 22#include <asm/io.h>
23 23
24static struct resource r8a66597_usb_host_resources[] = {
25 [0] = {
26 .name = "r8a66597_hcd",
27 .start = 0xA4200000,
28 .end = 0xA42000FF,
29 .flags = IORESOURCE_MEM,
30 },
31 [1] = {
32 .name = "r8a66597_hcd",
33 .start = 11, /* irq number */
34 .end = 11,
35 .flags = IORESOURCE_IRQ,
36 },
37};
38
39static struct platform_device r8a66597_usb_host_device = {
40 .name = "r8a66597_hcd",
41 .id = -1,
42 .dev = {
43 .dma_mask = NULL, /* don't use dma */
44 .coherent_dma_mask = 0xffffffff,
45 },
46 .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
47 .resource = r8a66597_usb_host_resources,
48};
49
50static struct resource m66592_usb_peripheral_resources[] = {
51 [0] = {
52 .name = "m66592_udc",
53 .start = 0xb0000000,
54 .end = 0xb00000FF,
55 .flags = IORESOURCE_MEM,
56 },
57 [1] = {
58 .name = "m66592_udc",
59 .start = 9, /* irq number */
60 .end = 9,
61 .flags = IORESOURCE_IRQ,
62 },
63};
64
65static struct platform_device m66592_usb_peripheral_device = {
66 .name = "m66592_udc",
67 .id = -1,
68 .dev = {
69 .dma_mask = NULL, /* don't use dma */
70 .coherent_dma_mask = 0xffffffff,
71 },
72 .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources),
73 .resource = m66592_usb_peripheral_resources,
74};
75
24static struct resource cf_ide_resources[] = { 76static struct resource cf_ide_resources[] = {
25 [0] = { 77 [0] = {
26 .start = PA_AREA5_IO + 0x1000, 78 .start = PA_AREA5_IO + 0x1000,
@@ -81,6 +133,8 @@ static struct platform_device heartbeat_device = {
81}; 133};
82 134
83static struct platform_device *r7780rp_devices[] __initdata = { 135static struct platform_device *r7780rp_devices[] __initdata = {
136 &r8a66597_usb_host_device,
137 &m66592_usb_peripheral_device,
84 &cf_ide_device, 138 &cf_ide_device,
85 &heartbeat_device, 139 &heartbeat_device,
86}; 140};
diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
index 656fda30ef70..e165d85c03b5 100644
--- a/arch/sh/boards/renesas/rts7751r2d/setup.c
+++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
@@ -86,7 +86,8 @@ static struct plat_serial8250_port uart_platform_data[] = {
86 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, 86 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
87 .regshift = 2, 87 .regshift = 2,
88 .uartclk = (9600 * 16), 88 .uartclk = (9600 * 16),
89 } 89 },
90 { 0 },
90}; 91};
91 92
92static struct platform_device uart_device = { 93static struct platform_device uart_device = {
diff --git a/arch/sh/boards/se/7722/irq.c b/arch/sh/boards/se/7722/irq.c
index 26cff0efda40..0b03f3f610b8 100644
--- a/arch/sh/boards/se/7722/irq.c
+++ b/arch/sh/boards/se/7722/irq.c
@@ -16,95 +16,61 @@
16#include <asm/io.h> 16#include <asm/io.h>
17#include <asm/se7722.h> 17#include <asm/se7722.h>
18 18
19#define INTC_INTMSK0 0xFFD00044
20#define INTC_INTMSKCLR0 0xFFD00064
21
22struct se7722_data {
23 unsigned char irq;
24 unsigned char ipr_idx;
25 unsigned char shift;
26 unsigned short priority;
27 unsigned long addr;
28};
29
30
31static void disable_se7722_irq(unsigned int irq) 19static void disable_se7722_irq(unsigned int irq)
32{ 20{
33 struct se7722_data *p = get_irq_chip_data(irq); 21 unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
34 ctrl_outw( ctrl_inw( p->addr ) | p->priority , p->addr ); 22 ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
35} 23}
36 24
37static void enable_se7722_irq(unsigned int irq) 25static void enable_se7722_irq(unsigned int irq)
38{ 26{
39 struct se7722_data *p = get_irq_chip_data(irq); 27 unsigned int bit = irq - SE7722_FPGA_IRQ_BASE;
40 ctrl_outw( ctrl_inw( p->addr ) & ~p->priority , p->addr ); 28 ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
41} 29}
42 30
43static struct irq_chip se7722_irq_chip __read_mostly = { 31static struct irq_chip se7722_irq_chip __read_mostly = {
44 .name = "SE7722", 32 .name = "SE7722-FPGA",
45 .mask = disable_se7722_irq, 33 .mask = disable_se7722_irq,
46 .unmask = enable_se7722_irq, 34 .unmask = enable_se7722_irq,
47 .mask_ack = disable_se7722_irq, 35 .mask_ack = disable_se7722_irq,
48}; 36};
49 37
50static struct se7722_data ipr_irq_table[] = { 38static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
51 /* irq ,idx,sft, priority , addr */
52 { MRSHPC_IRQ0 , 0 , 0 , MRSHPC_BIT0 , IRQ01_MASK } ,
53 { MRSHPC_IRQ1 , 0 , 0 , MRSHPC_BIT1 , IRQ01_MASK } ,
54 { MRSHPC_IRQ2 , 0 , 0 , MRSHPC_BIT2 , IRQ01_MASK } ,
55 { MRSHPC_IRQ3 , 0 , 0 , MRSHPC_BIT3 , IRQ01_MASK } ,
56 { SMC_IRQ , 0 , 0 , SMC_BIT , IRQ01_MASK } ,
57 { EXT_IRQ , 0 , 0 , EXT_BIT , IRQ01_MASK } ,
58};
59
60int se7722_irq_demux(int irq)
61{ 39{
40 unsigned short intv = ctrl_inw(IRQ01_STS);
41 struct irq_desc *ext_desc;
42 unsigned int ext_irq = SE7722_FPGA_IRQ_BASE;
43
44 intv &= (1 << SE7722_FPGA_IRQ_NR) - 1;
62 45
63 if ((irq == IRQ0_IRQ)||(irq == IRQ1_IRQ)) { 46 while (intv) {
64 volatile unsigned short intv = 47 if (intv & 1) {
65 *(volatile unsigned short *)IRQ01_STS; 48 ext_desc = irq_desc + ext_irq;
66 if (irq == IRQ0_IRQ){ 49 handle_level_irq(ext_irq, ext_desc);
67 if(intv & SMC_BIT ) {
68 return SMC_IRQ;
69 } else if(intv & USB_BIT) {
70 return USB_IRQ;
71 } else {
72 printk("intv =%04x\n", intv);
73 return SMC_IRQ;
74 }
75 } else if(irq == IRQ1_IRQ){
76 if(intv & MRSHPC_BIT0) {
77 return MRSHPC_IRQ0;
78 } else if(intv & MRSHPC_BIT1) {
79 return MRSHPC_IRQ1;
80 } else if(intv & MRSHPC_BIT2) {
81 return MRSHPC_IRQ2;
82 } else if(intv & MRSHPC_BIT3) {
83 return MRSHPC_IRQ3;
84 } else {
85 printk("BIT_EXTENTION =%04x\n", intv);
86 return EXT_IRQ;
87 }
88 } 50 }
51 intv >>= 1;
52 ext_irq++;
89 } 53 }
90 return irq;
91
92} 54}
55
93/* 56/*
94 * Initialize IRQ setting 57 * Initialize IRQ setting
95 */ 58 */
96void __init init_se7722_IRQ(void) 59void __init init_se7722_IRQ(void)
97{ 60{
98 int i = 0; 61 int i;
62
63 ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
99 ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */ 64 ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
100 ctrl_outl((3 << ((7 - 0) * 4))|(3 << ((7 - 1) * 4)), INTC_INTPRI0); /* irq0 pri=3,irq1,pri=3 */
101 ctrl_outw((2 << ((7 - 0) * 2))|(2 << ((7 - 1) * 2)), INTC_ICR1); /* irq0,1 low-level irq */
102 65
103 for (i = 0; i < ARRAY_SIZE(ipr_irq_table); i++) { 66 for (i = 0; i < SE7722_FPGA_IRQ_NR; i++)
104 disable_irq_nosync(ipr_irq_table[i].irq); 67 set_irq_chip_and_handler_name(SE7722_FPGA_IRQ_BASE + i,
105 set_irq_chip_and_handler_name( ipr_irq_table[i].irq, &se7722_irq_chip, 68 &se7722_irq_chip,
106 handle_level_irq, "level"); 69 handle_level_irq, "level");
107 set_irq_chip_data( ipr_irq_table[i].irq, &ipr_irq_table[i] ); 70
108 disable_se7722_irq(ipr_irq_table[i].irq); 71 set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
109 } 72 set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
73
74 set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
75 set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
110} 76}
diff --git a/arch/sh/boards/se/7722/setup.c b/arch/sh/boards/se/7722/setup.c
index 6cca6cbc8069..495fc7e2b60f 100644
--- a/arch/sh/boards/se/7722/setup.c
+++ b/arch/sh/boards/se/7722/setup.c
@@ -77,6 +77,7 @@ static struct resource cf_ide_resources[] = {
77 }, 77 },
78 [2] = { 78 [2] = {
79 .start = MRSHPC_IRQ0, 79 .start = MRSHPC_IRQ0,
80 .end = MRSHPC_IRQ0,
80 .flags = IORESOURCE_IRQ, 81 .flags = IORESOURCE_IRQ,
81 }, 82 },
82}; 83};
@@ -140,8 +141,6 @@ static void __init se7722_setup(char **cmdline_p)
140static struct sh_machine_vector mv_se7722 __initmv = { 141static struct sh_machine_vector mv_se7722 __initmv = {
141 .mv_name = "Solution Engine 7722" , 142 .mv_name = "Solution Engine 7722" ,
142 .mv_setup = se7722_setup , 143 .mv_setup = se7722_setup ,
143 .mv_nr_irqs = 109 , 144 .mv_nr_irqs = SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_NR,
144 .mv_init_irq = init_se7722_IRQ, 145 .mv_init_irq = init_se7722_IRQ,
145 .mv_irq_demux = se7722_irq_demux,
146
147}; 146};
diff --git a/arch/sh/cchips/hd6446x/Makefile b/arch/sh/cchips/hd6446x/Makefile
new file mode 100644
index 000000000000..a106dd9db986
--- /dev/null
+++ b/arch/sh/cchips/hd6446x/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_HD64461) += hd64461.o
2obj-$(CONFIG_HD64465) += hd64465/
diff --git a/arch/sh/cchips/hd6446x/hd64461/setup.c b/arch/sh/cchips/hd6446x/hd64461.c
index 4d49b5cbcc13..97f6512aa1b7 100644
--- a/arch/sh/cchips/hd6446x/hd64461/setup.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * $Id: setup.c,v 1.5 2004/03/16 00:07:50 lethal Exp $
3 * Copyright (C) 2000 YAEGASHI Takeshi 2 * Copyright (C) 2000 YAEGASHI Takeshi
4 * Hitachi HD64461 companion chip support 3 * Hitachi HD64461 companion chip support
5 */ 4 */
diff --git a/arch/sh/cchips/hd6446x/hd64461/Makefile b/arch/sh/cchips/hd6446x/hd64461/Makefile
deleted file mode 100644
index bff4b92e388c..000000000000
--- a/arch/sh/cchips/hd6446x/hd64461/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for the HD64461
3#
4
5obj-y := setup.o io.o
6
diff --git a/arch/sh/cchips/hd6446x/hd64461/io.c b/arch/sh/cchips/hd6446x/hd64461/io.c
deleted file mode 100644
index 7909a1b7b512..000000000000
--- a/arch/sh/cchips/hd6446x/hd64461/io.c
+++ /dev/null
@@ -1,150 +0,0 @@
1/*
2 * Copyright (C) 2000 YAEGASHI Takeshi
3 * Typical I/O routines for HD64461 system.
4 */
5
6#include <asm/io.h>
7#include <asm/hd64461.h>
8
9#define MEM_BASE (CONFIG_HD64461_IOBASE - HD64461_STBCR)
10
11static __inline__ unsigned long PORT2ADDR(unsigned long port)
12{
13 /* 16550A: HD64461 internal */
14 if (0x3f8<=port && port<=0x3ff)
15 return CONFIG_HD64461_IOBASE + 0x8000 + ((port-0x3f8)<<1);
16 if (0x2f8<=port && port<=0x2ff)
17 return CONFIG_HD64461_IOBASE + 0x7000 + ((port-0x2f8)<<1);
18
19#ifdef CONFIG_HD64461_ENABLER
20 /* NE2000: HD64461 PCMCIA channel 0 (I/O) */
21 if (0x300<=port && port<=0x31f)
22 return 0xba000000 + port;
23
24 /* ide0: HD64461 PCMCIA channel 1 (memory) */
25 /* On HP690, CF in slot 1 is configured as a memory card
26 device. See CF+ and CompactFlash Specification for the
27 detail of CF's memory mapped addressing. */
28 if (0x1f0<=port && port<=0x1f7) return 0xb5000000 + port;
29 if (port == 0x3f6) return 0xb50001fe;
30 if (port == 0x3f7) return 0xb50001ff;
31
32 /* ide1 */
33 if (0x170<=port && port<=0x177) return 0xba000000 + port;
34 if (port == 0x376) return 0xba000376;
35 if (port == 0x377) return 0xba000377;
36#endif
37
38 /* ??? */
39 if (port < 0xf000) return 0xa0000000 + port;
40 /* PCMCIA channel 0, I/O (0xba000000) */
41 if (port < 0x10000) return 0xba000000 + port - 0xf000;
42
43 /* HD64461 internal devices (0xb0000000) */
44 if (port < 0x20000) return CONFIG_HD64461_IOBASE + port - 0x10000;
45
46 /* PCMCIA channel 0, I/O (0xba000000) */
47 if (port < 0x30000) return 0xba000000 + port - 0x20000;
48
49 /* PCMCIA channel 1, memory (0xb5000000) */
50 if (port < 0x40000) return 0xb5000000 + port - 0x30000;
51
52 /* Whole physical address space (0xa0000000) */
53 return 0xa0000000 + (port & 0x1fffffff);
54}
55
56unsigned char hd64461_inb(unsigned long port)
57{
58 return *(volatile unsigned char*)PORT2ADDR(port);
59}
60
61unsigned char hd64461_inb_p(unsigned long port)
62{
63 unsigned long v = *(volatile unsigned char*)PORT2ADDR(port);
64 ctrl_delay();
65 return v;
66}
67
68unsigned short hd64461_inw(unsigned long port)
69{
70 return *(volatile unsigned short*)PORT2ADDR(port);
71}
72
73unsigned int hd64461_inl(unsigned long port)
74{
75 return *(volatile unsigned long*)PORT2ADDR(port);
76}
77
78void hd64461_outb(unsigned char b, unsigned long port)
79{
80 *(volatile unsigned char*)PORT2ADDR(port) = b;
81}
82
83void hd64461_outb_p(unsigned char b, unsigned long port)
84{
85 *(volatile unsigned char*)PORT2ADDR(port) = b;
86 ctrl_delay();
87}
88
89void hd64461_outw(unsigned short b, unsigned long port)
90{
91 *(volatile unsigned short*)PORT2ADDR(port) = b;
92}
93
94void hd64461_outl(unsigned int b, unsigned long port)
95{
96 *(volatile unsigned long*)PORT2ADDR(port) = b;
97}
98
99void hd64461_insb(unsigned long port, void *buffer, unsigned long count)
100{
101 volatile unsigned char* addr=(volatile unsigned char*)PORT2ADDR(port);
102 unsigned char *buf=buffer;
103 while(count--) *buf++=*addr;
104}
105
106void hd64461_insw(unsigned long port, void *buffer, unsigned long count)
107{
108 volatile unsigned short* addr=(volatile unsigned short*)PORT2ADDR(port);
109 unsigned short *buf=buffer;
110 while(count--) *buf++=*addr;
111}
112
113void hd64461_insl(unsigned long port, void *buffer, unsigned long count)
114{
115 volatile unsigned long* addr=(volatile unsigned long*)PORT2ADDR(port);
116 unsigned long *buf=buffer;
117 while(count--) *buf++=*addr;
118}
119
120void hd64461_outsb(unsigned long port, const void *buffer, unsigned long count)
121{
122 volatile unsigned char* addr=(volatile unsigned char*)PORT2ADDR(port);
123 const unsigned char *buf=buffer;
124 while(count--) *addr=*buf++;
125}
126
127void hd64461_outsw(unsigned long port, const void *buffer, unsigned long count)
128{
129 volatile unsigned short* addr=(volatile unsigned short*)PORT2ADDR(port);
130 const unsigned short *buf=buffer;
131 while(count--) *addr=*buf++;
132}
133
134void hd64461_outsl(unsigned long port, const void *buffer, unsigned long count)
135{
136 volatile unsigned long* addr=(volatile unsigned long*)PORT2ADDR(port);
137 const unsigned long *buf=buffer;
138 while(count--) *addr=*buf++;
139}
140
141unsigned short hd64461_readw(void __iomem *addr)
142{
143 return ctrl_inw(MEM_BASE+(unsigned long __force)addr);
144}
145
146void hd64461_writew(unsigned short b, void __iomem *addr)
147{
148 ctrl_outw(b, MEM_BASE+(unsigned long __force)addr);
149}
150
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index e7f8ddb0ada4..07310fa03250 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -217,7 +217,7 @@ CONFIG_SH_FPU=y
217# CONFIG_SH_DSP is not set 217# CONFIG_SH_DSP is not set
218# CONFIG_SH_STORE_QUEUES is not set 218# CONFIG_SH_STORE_QUEUES is not set
219CONFIG_CPU_HAS_INTEVT=y 219CONFIG_CPU_HAS_INTEVT=y
220CONFIG_CPU_HAS_IPR_IRQ=y 220CONFIG_CPU_HAS_INTC_IRQ=y
221CONFIG_CPU_HAS_SR_RB=y 221CONFIG_CPU_HAS_SR_RB=y
222CONFIG_CPU_HAS_PTEA=y 222CONFIG_CPU_HAS_PTEA=y
223 223
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index be86414dcc87..fa09d68d057a 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -222,7 +222,7 @@ CONFIG_SH_FPU=y
222# CONFIG_SH_DSP is not set 222# CONFIG_SH_DSP is not set
223# CONFIG_SH_STORE_QUEUES is not set 223# CONFIG_SH_STORE_QUEUES is not set
224CONFIG_CPU_HAS_INTEVT=y 224CONFIG_CPU_HAS_INTEVT=y
225CONFIG_CPU_HAS_IPR_IRQ=y 225CONFIG_CPU_HAS_INTC_IRQ=y
226CONFIG_CPU_HAS_SR_RB=y 226CONFIG_CPU_HAS_SR_RB=y
227CONFIG_CPU_HAS_PTEA=y 227CONFIG_CPU_HAS_PTEA=y
228 228
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 17f7402b31d8..ac4de4973b60 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -191,7 +191,7 @@ CONFIG_SH_FPU=y
191CONFIG_SH_STORE_QUEUES=y 191CONFIG_SH_STORE_QUEUES=y
192CONFIG_SPECULATIVE_EXECUTION=y 192CONFIG_SPECULATIVE_EXECUTION=y
193CONFIG_CPU_HAS_INTEVT=y 193CONFIG_CPU_HAS_INTEVT=y
194CONFIG_CPU_HAS_INTC2_IRQ=y 194CONFIG_CPU_HAS_INTC_IRQ=y
195CONFIG_CPU_HAS_SR_RB=y 195CONFIG_CPU_HAS_SR_RB=y
196 196
197# 197#
diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
index 48c6a2194c98..12cc01910cf8 100644
--- a/arch/sh/configs/r7780rp_defconfig
+++ b/arch/sh/configs/r7780rp_defconfig
@@ -241,7 +241,7 @@ CONFIG_SH_FPU=y
241CONFIG_SH_STORE_QUEUES=y 241CONFIG_SH_STORE_QUEUES=y
242CONFIG_SPECULATIVE_EXECUTION=y 242CONFIG_SPECULATIVE_EXECUTION=y
243CONFIG_CPU_HAS_INTEVT=y 243CONFIG_CPU_HAS_INTEVT=y
244CONFIG_CPU_HAS_INTC2_IRQ=y 244CONFIG_CPU_HAS_INTC_IRQ=y
245CONFIG_CPU_HAS_SR_RB=y 245CONFIG_CPU_HAS_SR_RB=y
246 246
247# 247#
diff --git a/arch/sh/configs/rts7751r2d_defconfig b/arch/sh/configs/rts7751r2d_defconfig
index a59bb78bd071..f1e979b1e495 100644
--- a/arch/sh/configs/rts7751r2d_defconfig
+++ b/arch/sh/configs/rts7751r2d_defconfig
@@ -155,7 +155,7 @@ CONFIG_CPU_SH4=y
155# CONFIG_CPU_SUBTYPE_SH7091 is not set 155# CONFIG_CPU_SUBTYPE_SH7091 is not set
156# CONFIG_CPU_SUBTYPE_SH7750R is not set 156# CONFIG_CPU_SUBTYPE_SH7750R is not set
157# CONFIG_CPU_SUBTYPE_SH7750S is not set 157# CONFIG_CPU_SUBTYPE_SH7750S is not set
158CONFIG_CPU_SUBTYPE_SH7751=y 158# CONFIG_CPU_SUBTYPE_SH7751 is not set
159CONFIG_CPU_SUBTYPE_SH7751R=y 159CONFIG_CPU_SUBTYPE_SH7751R=y
160# CONFIG_CPU_SUBTYPE_SH7760 is not set 160# CONFIG_CPU_SUBTYPE_SH7760 is not set
161# CONFIG_CPU_SUBTYPE_SH4_202 is not set 161# CONFIG_CPU_SUBTYPE_SH4_202 is not set
@@ -218,7 +218,7 @@ CONFIG_SH_FPU=y
218# CONFIG_SH_DSP is not set 218# CONFIG_SH_DSP is not set
219# CONFIG_SH_STORE_QUEUES is not set 219# CONFIG_SH_STORE_QUEUES is not set
220CONFIG_CPU_HAS_INTEVT=y 220CONFIG_CPU_HAS_INTEVT=y
221CONFIG_CPU_HAS_IPR_IRQ=y 221CONFIG_CPU_HAS_INTC_IRQ=y
222CONFIG_CPU_HAS_SR_RB=y 222CONFIG_CPU_HAS_SR_RB=y
223CONFIG_CPU_HAS_PTEA=y 223CONFIG_CPU_HAS_PTEA=y
224 224
@@ -280,7 +280,7 @@ CONFIG_ZERO_PAGE_OFFSET=0x00010000
280CONFIG_BOOT_LINK_OFFSET=0x00800000 280CONFIG_BOOT_LINK_OFFSET=0x00800000
281# CONFIG_UBC_WAKEUP is not set 281# CONFIG_UBC_WAKEUP is not set
282CONFIG_CMDLINE_BOOL=y 282CONFIG_CMDLINE_BOOL=y
283CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1" 283CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=bios"
284 284
285# 285#
286# Bus options 286# Bus options
@@ -1323,7 +1323,7 @@ CONFIG_ENABLE_MUST_CHECK=y
1323# CONFIG_DEBUG_KERNEL is not set 1323# CONFIG_DEBUG_KERNEL is not set
1324CONFIG_LOG_BUF_SHIFT=14 1324CONFIG_LOG_BUF_SHIFT=14
1325# CONFIG_DEBUG_BUGVERBOSE is not set 1325# CONFIG_DEBUG_BUGVERBOSE is not set
1326# CONFIG_SH_STANDARD_BIOS is not set 1326CONFIG_SH_STANDARD_BIOS=y
1327CONFIG_EARLY_SCIF_CONSOLE=y 1327CONFIG_EARLY_SCIF_CONSOLE=y
1328CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000 1328CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
1329CONFIG_EARLY_PRINTK=y 1329CONFIG_EARLY_PRINTK=y
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 764b813c4051..8e6a6baf5d27 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -200,7 +200,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
200CONFIG_SH_DSP=y 200CONFIG_SH_DSP=y
201CONFIG_SH_STORE_QUEUES=y 201CONFIG_SH_STORE_QUEUES=y
202CONFIG_CPU_HAS_INTEVT=y 202CONFIG_CPU_HAS_INTEVT=y
203CONFIG_CPU_HAS_IPR_IRQ=y 203CONFIG_CPU_HAS_INTC_IRQ=y
204CONFIG_CPU_HAS_SR_RB=y 204CONFIG_CPU_HAS_SR_RB=y
205CONFIG_CPU_HAS_PTEA=y 205CONFIG_CPU_HAS_PTEA=y
206 206
@@ -565,7 +565,7 @@ CONFIG_SERIO_LIBPS2=y
565# Non-8250 serial port support 565# Non-8250 serial port support
566# 566#
567CONFIG_SERIAL_SH_SCI=y 567CONFIG_SERIAL_SH_SCI=y
568CONFIG_SERIAL_SH_SCI_NR_UARTS=2 568CONFIG_SERIAL_SH_SCI_NR_UARTS=3
569CONFIG_SERIAL_SH_SCI_CONSOLE=y 569CONFIG_SERIAL_SH_SCI_CONSOLE=y
570CONFIG_SERIAL_CORE=y 570CONFIG_SERIAL_CORE=y
571CONFIG_SERIAL_CORE_CONSOLE=y 571CONFIG_SERIAL_CORE_CONSOLE=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 4e6e77fa4ce7..c60b6fd4fc42 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -226,7 +226,7 @@ CONFIG_SH_FPU=y
226# CONFIG_SH_DSP is not set 226# CONFIG_SH_DSP is not set
227# CONFIG_SH_STORE_QUEUES is not set 227# CONFIG_SH_STORE_QUEUES is not set
228CONFIG_CPU_HAS_INTEVT=y 228CONFIG_CPU_HAS_INTEVT=y
229CONFIG_CPU_HAS_IPR_IRQ=y 229CONFIG_CPU_HAS_INTC_IRQ=y
230CONFIG_CPU_HAS_SR_RB=y 230CONFIG_CPU_HAS_SR_RB=y
231CONFIG_CPU_HAS_PTEA=y 231CONFIG_CPU_HAS_PTEA=y
232 232
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index 538661e98793..f68743dc3931 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -218,6 +218,7 @@ CONFIG_SH_FPU=y
218# CONFIG_SH_STORE_QUEUES is not set 218# CONFIG_SH_STORE_QUEUES is not set
219CONFIG_CPU_HAS_INTEVT=y 219CONFIG_CPU_HAS_INTEVT=y
220CONFIG_CPU_HAS_INTC2_IRQ=y 220CONFIG_CPU_HAS_INTC2_IRQ=y
221CONFIG_CPU_HAS_INTC_IRQ=y
221CONFIG_CPU_HAS_SR_RB=y 222CONFIG_CPU_HAS_SR_RB=y
222 223
223# 224#
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index 333898077c7c..ee711431e504 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -5,12 +5,13 @@ config SH_DMA_API
5 5
6config SH_DMA 6config SH_DMA
7 bool "SuperH on-chip DMA controller (DMAC) support" 7 bool "SuperH on-chip DMA controller (DMAC) support"
8 depends on CPU_SH3 || CPU_SH4
8 select SH_DMA_API 9 select SH_DMA_API
9 default n 10 default n
10 11
11config NR_ONCHIP_DMA_CHANNELS 12config NR_ONCHIP_DMA_CHANNELS
13 int
12 depends on SH_DMA 14 depends on SH_DMA
13 int "Number of on-chip DMAC channels"
14 default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R 15 default "8" if CPU_SUBTYPE_SH7750R || CPU_SUBTYPE_SH7751R
15 default "12" if CPU_SUBTYPE_SH7780 16 default "12" if CPU_SUBTYPE_SH7780
16 default "4" 17 default "4"
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
index 23dd6080422f..10c1828c9ff5 100644
--- a/arch/sh/drivers/heartbeat.c
+++ b/arch/sh/drivers/heartbeat.c
@@ -78,7 +78,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
78 hd->bit_pos[i] = i; 78 hd->bit_pos[i] = i;
79 } 79 }
80 80
81 hd->base = (void __iomem *)res->start; 81 hd->base = (void __iomem *)(unsigned long)res->start;
82 82
83 setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd); 83 setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
84 platform_set_drvdata(pdev, hd); 84 platform_set_drvdata(pdev, hd);
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 0e9b532b9fbc..2f65ac72f48a 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PCI_AUTO) += pci-auto.o
7 7
8obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += pci-st40.o 8obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += pci-st40.o
9obj-$(CONFIG_CPU_SUBTYPE_SH7751) += pci-sh7751.o ops-sh4.o 9obj-$(CONFIG_CPU_SUBTYPE_SH7751) += pci-sh7751.o ops-sh4.o
10obj-$(CONFIG_CPU_SUBTYPE_SH7751R) += pci-sh7751.o ops-sh4.o
10obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o 11obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
11obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o 12obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o
12 13
diff --git a/arch/sh/drivers/pci/ops-sh4.c b/arch/sh/drivers/pci/ops-sh4.c
index 54232f13e406..710a3b0306e5 100644
--- a/arch/sh/drivers/pci/ops-sh4.c
+++ b/arch/sh/drivers/pci/ops-sh4.c
@@ -153,7 +153,7 @@ static void __init pci_fixup_ide_bases(struct pci_dev *d)
153} 153}
154DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); 154DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
155 155
156char * __init pcibios_setup(char *str) 156char * __devinit pcibios_setup(char *str)
157{ 157{
158 if (!strcmp(str, "off")) { 158 if (!strcmp(str, "off")) {
159 pci_probe = 0; 159 pci_probe = 0;
diff --git a/arch/sh/drivers/pci/pci-st40.c b/arch/sh/drivers/pci/pci-st40.c
index 543417ff8314..1502a14386b6 100644
--- a/arch/sh/drivers/pci/pci-st40.c
+++ b/arch/sh/drivers/pci/pci-st40.c
@@ -328,7 +328,7 @@ int __init st40pci_init(unsigned memStart, unsigned memSize)
328 return 1; 328 return 1;
329} 329}
330 330
331char * __init pcibios_setup(char *str) 331char * __devinit pcibios_setup(char *str)
332{ 332{
333 return str; 333 return str;
334} 334}
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index d439336d2e18..ccaba368ac9b 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -71,7 +71,7 @@ subsys_initcall(pcibios_init);
71 * Called after each bus is probed, but before its children 71 * Called after each bus is probed, but before its children
72 * are examined. 72 * are examined.
73 */ 73 */
74void __init pcibios_fixup_bus(struct pci_bus *bus) 74void __devinit pcibios_fixup_bus(struct pci_bus *bus)
75{ 75{
76 pci_read_bridge_bases(bus); 76 pci_read_bridge_bases(bus);
77} 77}
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index b3d20c0e021f..725be6de589b 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -138,4 +138,4 @@ module_exit(switch_exit);
138 138
139MODULE_VERSION(DRV_VERSION); 139MODULE_VERSION(DRV_VERSION);
140MODULE_AUTHOR("Paul Mundt"); 140MODULE_AUTHOR("Paul Mundt");
141MODULE_LICENSE("GPLv2"); 141MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 63251549e9a8..92807ffa8e20 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -229,6 +229,22 @@ void clk_recalc_rate(struct clk *clk)
229} 229}
230EXPORT_SYMBOL_GPL(clk_recalc_rate); 230EXPORT_SYMBOL_GPL(clk_recalc_rate);
231 231
232long clk_round_rate(struct clk *clk, unsigned long rate)
233{
234 if (likely(clk->ops && clk->ops->round_rate)) {
235 unsigned long flags, rounded;
236
237 spin_lock_irqsave(&clock_lock, flags);
238 rounded = clk->ops->round_rate(clk, rate);
239 spin_unlock_irqrestore(&clock_lock, flags);
240
241 return rounded;
242 }
243
244 return clk_get_rate(clk);
245}
246EXPORT_SYMBOL_GPL(clk_round_rate);
247
232/* 248/*
233 * Returns a clock. Note that we first try to use device id on the bus 249 * Returns a clock. Note that we first try to use device id on the bus
234 * and clock name. If this fails, we try to use clock name only. 250 * and clock name. If this fails, we try to use clock name only.
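The clk_round_rate() helper added above lets a consumer ask what rate the SH clock framework would actually grant before committing to it, falling back to the clock's current rate when no .round_rate op is provided. A minimal usage sketch follows; the "cpu_clk" lookup name is taken from the cpufreq conversion later in this series and is only illustrative here.

	#include <linux/clk.h>
	#include <linux/err.h>

	/* sketch: round a requested rate, then commit the achievable one */
	struct clk *clk = clk_get(NULL, "cpu_clk");	/* illustrative consumer */
	long rounded;

	if (!IS_ERR(clk)) {
		rounded = clk_round_rate(clk, 150000000);	/* ask for 150 MHz */
		if (rounded > 0)
			clk_set_rate(clk, rounded);
		clk_put(clk);
	}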
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 1c23308cfc25..9ddb446ac930 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -6,4 +6,5 @@ obj-y += imask.o
6obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o 6obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
7obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o 7obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o
8obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o 8obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
9obj-$(CONFIG_CPU_HAS_INTC_IRQ) += intc.o
9obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o 10obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
new file mode 100644
index 000000000000..9345a7130e9e
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/intc.c
@@ -0,0 +1,405 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007 Magnus Damm
5 *
6 * Based on intc2.c and ipr.c
7 *
8 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
9 * Copyright (C) 2000 Kazumoto Kojima
10 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
11 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
12 * Copyright (C) 2005, 2006 Paul Mundt
13 *
14 * This file is subject to the terms and conditions of the GNU General Public
15 * License. See the file "COPYING" in the main directory of this archive
16 * for more details.
17 */
18#include <linux/init.h>
19#include <linux/irq.h>
20#include <linux/module.h>
21#include <linux/io.h>
22#include <linux/interrupt.h>
23
24#define _INTC_MK(fn, idx, bit, value) \
25 ((fn) << 24 | ((value) << 16) | ((idx) << 8) | (bit))
26#define _INTC_FN(h) (h >> 24)
27#define _INTC_VALUE(h) ((h >> 16) & 0xff)
28#define _INTC_IDX(h) ((h >> 8) & 0xff)
29#define _INTC_BIT(h) (h & 0xff)
30
31#define _INTC_PTR(desc, member, data) \
32 (desc->member + _INTC_IDX(data))
33
34static inline struct intc_desc *get_intc_desc(unsigned int irq)
35{
36 struct irq_chip *chip = get_irq_chip(irq);
37 return (void *)((char *)chip - offsetof(struct intc_desc, chip));
38}
39
40static inline unsigned int set_field(unsigned int value,
41 unsigned int field_value,
42 unsigned int width,
43 unsigned int shift)
44{
45 value &= ~(((1 << width) - 1) << shift);
46 value |= field_value << shift;
47 return value;
48}
49
50static inline unsigned int set_prio_field(struct intc_desc *desc,
51 unsigned int value,
52 unsigned int priority,
53 unsigned int data)
54{
55 unsigned int width = _INTC_PTR(desc, prio_regs, data)->field_width;
56
57 return set_field(value, priority, width, _INTC_BIT(data));
58}
59
60static void disable_prio_16(struct intc_desc *desc, unsigned int data)
61{
62 unsigned long addr = _INTC_PTR(desc, prio_regs, data)->reg;
63
64 ctrl_outw(set_prio_field(desc, ctrl_inw(addr), 0, data), addr);
65}
66
67static void enable_prio_16(struct intc_desc *desc, unsigned int data)
68{
69 unsigned long addr = _INTC_PTR(desc, prio_regs, data)->reg;
70 unsigned int prio = _INTC_VALUE(data);
71
72 ctrl_outw(set_prio_field(desc, ctrl_inw(addr), prio, data), addr);
73}
74
75static void disable_prio_32(struct intc_desc *desc, unsigned int data)
76{
77 unsigned long addr = _INTC_PTR(desc, prio_regs, data)->reg;
78
79 ctrl_outl(set_prio_field(desc, ctrl_inl(addr), 0, data), addr);
80}
81
82static void enable_prio_32(struct intc_desc *desc, unsigned int data)
83{
84 unsigned long addr = _INTC_PTR(desc, prio_regs, data)->reg;
85 unsigned int prio = _INTC_VALUE(data);
86
87 ctrl_outl(set_prio_field(desc, ctrl_inl(addr), prio, data), addr);
88}
89
90static void disable_mask_8(struct intc_desc *desc, unsigned int data)
91{
92 ctrl_outb(1 << _INTC_BIT(data),
93 _INTC_PTR(desc, mask_regs, data)->set_reg);
94}
95
96static void enable_mask_8(struct intc_desc *desc, unsigned int data)
97{
98 ctrl_outb(1 << _INTC_BIT(data),
99 _INTC_PTR(desc, mask_regs, data)->clr_reg);
100}
101
102static void disable_mask_32(struct intc_desc *desc, unsigned int data)
103{
104 ctrl_outl(1 << _INTC_BIT(data),
105 _INTC_PTR(desc, mask_regs, data)->set_reg);
106}
107
108static void enable_mask_32(struct intc_desc *desc, unsigned int data)
109{
110 ctrl_outl(1 << _INTC_BIT(data),
111 _INTC_PTR(desc, mask_regs, data)->clr_reg);
112}
113
114enum { REG_FN_ERROR=0,
115 REG_FN_MASK_8, REG_FN_MASK_32,
116 REG_FN_PRIO_16, REG_FN_PRIO_32 };
117
118static struct {
119 void (*enable)(struct intc_desc *, unsigned int);
120 void (*disable)(struct intc_desc *, unsigned int);
121} intc_reg_fns[] = {
122 [REG_FN_MASK_8] = { enable_mask_8, disable_mask_8 },
123 [REG_FN_MASK_32] = { enable_mask_32, disable_mask_32 },
124 [REG_FN_PRIO_16] = { enable_prio_16, disable_prio_16 },
125 [REG_FN_PRIO_32] = { enable_prio_32, disable_prio_32 },
126};
127
128static void intc_enable(unsigned int irq)
129{
130 struct intc_desc *desc = get_intc_desc(irq);
131 unsigned int data = (unsigned int) get_irq_chip_data(irq);
132
133 intc_reg_fns[_INTC_FN(data)].enable(desc, data);
134}
135
136static void intc_disable(unsigned int irq)
137{
138 struct intc_desc *desc = get_intc_desc(irq);
139 unsigned int data = (unsigned int) get_irq_chip_data(irq);
140
141 intc_reg_fns[_INTC_FN(data)].disable(desc, data);
142}
143
144static void set_sense_16(struct intc_desc *desc, unsigned int data)
145{
146 unsigned long addr = _INTC_PTR(desc, sense_regs, data)->reg;
147 unsigned int width = _INTC_PTR(desc, sense_regs, data)->field_width;
148 unsigned int bit = _INTC_BIT(data);
149 unsigned int value = _INTC_VALUE(data);
150
151 ctrl_outw(set_field(ctrl_inw(addr), value, width, bit), addr);
152}
153
154static void set_sense_32(struct intc_desc *desc, unsigned int data)
155{
156 unsigned long addr = _INTC_PTR(desc, sense_regs, data)->reg;
157 unsigned int width = _INTC_PTR(desc, sense_regs, data)->field_width;
158 unsigned int bit = _INTC_BIT(data);
159 unsigned int value = _INTC_VALUE(data);
160
161 ctrl_outl(set_field(ctrl_inl(addr), value, width, bit), addr);
162}
163
164#define VALID(x) (x | 0x80)
165
166static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
167 [IRQ_TYPE_EDGE_FALLING] = VALID(0),
168 [IRQ_TYPE_EDGE_RISING] = VALID(1),
169 [IRQ_TYPE_LEVEL_LOW] = VALID(2),
170 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
171};
172
173static int intc_set_sense(unsigned int irq, unsigned int type)
174{
175 struct intc_desc *desc = get_intc_desc(irq);
176 unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
177 unsigned int i, j, data, bit;
178 intc_enum enum_id = 0;
179
180 for (i = 0; i < desc->nr_vectors; i++) {
181 struct intc_vect *vect = desc->vectors + i;
182
183 if (evt2irq(vect->vect) != irq)
184 continue;
185
186 enum_id = vect->enum_id;
187 break;
188 }
189
190 if (!enum_id || !value)
191 return -EINVAL;
192
193 value ^= VALID(0);
194
195 for (i = 0; i < desc->nr_sense_regs; i++) {
196 struct intc_sense_reg *sr = desc->sense_regs + i;
197
198 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
199 if (sr->enum_ids[j] != enum_id)
200 continue;
201
202 bit = sr->reg_width - ((j + 1) * sr->field_width);
203 data = _INTC_MK(0, i, bit, value);
204
205 switch(sr->reg_width) {
206 case 16:
207 set_sense_16(desc, data);
208 break;
209 case 32:
210 set_sense_32(desc, data);
211 break;
212 }
213
214 return 0;
215 }
216 }
217
218 return -EINVAL;
219}
220
221static unsigned int __init intc_find_mask_handler(unsigned int width)
222{
223 switch (width) {
224 case 8:
225 return REG_FN_MASK_8;
226 case 32:
227 return REG_FN_MASK_32;
228 }
229
230 BUG();
231 return REG_FN_ERROR;
232}
233
234static unsigned int __init intc_find_prio_handler(unsigned int width)
235{
236 switch (width) {
237 case 16:
238 return REG_FN_PRIO_16;
239 case 32:
240 return REG_FN_PRIO_32;
241 }
242
243 BUG();
244 return REG_FN_ERROR;
245}
246
247static intc_enum __init intc_grp_id(struct intc_desc *desc, intc_enum enum_id)
248{
249 struct intc_group *g = desc->groups;
250 unsigned int i, j;
251
252 for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
253 g = desc->groups + i;
254
255 for (j = 0; g->enum_ids[j]; j++) {
256 if (g->enum_ids[j] != enum_id)
257 continue;
258
259 return g->enum_id;
260 }
261 }
262
263 return 0;
264}
265
266static unsigned int __init intc_prio_value(struct intc_desc *desc,
267 intc_enum enum_id, int do_grps)
268{
269 struct intc_prio *p = desc->priorities;
270 unsigned int i;
271
272 for (i = 0; p && enum_id && i < desc->nr_priorities; i++) {
273 p = desc->priorities + i;
274
275 if (p->enum_id != enum_id)
276 continue;
277
278 return p->priority;
279 }
280
281 if (do_grps)
282 return intc_prio_value(desc, intc_grp_id(desc, enum_id), 0);
283
284 /* default to the lowest priority possible if no priority is set
285 * - this needs to be at least 2 for 5-bit priorities on 7780
286 */
287
288 return 2;
289}
290
291static unsigned int __init intc_mask_data(struct intc_desc *desc,
292 intc_enum enum_id, int do_grps)
293{
294 struct intc_mask_reg *mr = desc->mask_regs;
295 unsigned int i, j, fn;
296
297 for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
298 mr = desc->mask_regs + i;
299
300 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
301 if (mr->enum_ids[j] != enum_id)
302 continue;
303
304 fn = intc_find_mask_handler(mr->reg_width);
305 if (fn == REG_FN_ERROR)
306 return 0;
307
308 return _INTC_MK(fn, i, (mr->reg_width - 1) - j, 0);
309 }
310 }
311
312 if (do_grps)
313 return intc_mask_data(desc, intc_grp_id(desc, enum_id), 0);
314
315 return 0;
316}
317
318static unsigned int __init intc_prio_data(struct intc_desc *desc,
319 intc_enum enum_id, int do_grps)
320{
321 struct intc_prio_reg *pr = desc->prio_regs;
322 unsigned int i, j, fn, bit, prio;
323
324 for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
325 pr = desc->prio_regs + i;
326
327 for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
328 if (pr->enum_ids[j] != enum_id)
329 continue;
330
331 fn = intc_find_prio_handler(pr->reg_width);
332 if (fn == REG_FN_ERROR)
333 return 0;
334
335 prio = intc_prio_value(desc, enum_id, 1);
336 bit = pr->reg_width - ((j + 1) * pr->field_width);
337
338 BUG_ON(bit < 0);
339
340 return _INTC_MK(fn, i, bit, prio);
341 }
342 }
343
344 if (do_grps)
345 return intc_prio_data(desc, intc_grp_id(desc, enum_id), 0);
346
347 return 0;
348}
349
350static void __init intc_register_irq(struct intc_desc *desc, intc_enum enum_id,
351 unsigned int irq)
352{
353 unsigned int data[2], primary;
354
355 /* Prefer single interrupt source bitmap over other combinations:
356 * 1. bitmap, single interrupt source
357 * 2. priority, single interrupt source
358 * 3. bitmap, multiple interrupt sources (groups)
359 * 4. priority, multiple interrupt sources (groups)
360 */
361
362 data[0] = intc_mask_data(desc, enum_id, 0);
363 data[1] = intc_prio_data(desc, enum_id, 0);
364
365 primary = 0;
366 if (!data[0] && data[1])
367 primary = 1;
368
369 data[0] = data[0] ? data[0] : intc_mask_data(desc, enum_id, 1);
370 data[1] = data[1] ? data[1] : intc_prio_data(desc, enum_id, 1);
371
372 if (!data[primary])
373 primary ^= 1;
374
375 BUG_ON(!data[primary]); /* must have primary masking method */
376
377 disable_irq_nosync(irq);
378 set_irq_chip_and_handler_name(irq, &desc->chip,
379 handle_level_irq, "level");
380 set_irq_chip_data(irq, (void *)data[primary]);
381
382 /* enable secondary masking method if present */
383 if (data[!primary])
384 intc_reg_fns[_INTC_FN(data[!primary])].enable(desc,
385 data[!primary]);
386
387 /* irq should be disabled by default */
388 desc->chip.mask(irq);
389}
390
391void __init register_intc_controller(struct intc_desc *desc)
392{
393 unsigned int i;
394
395 desc->chip.mask = intc_disable;
396 desc->chip.unmask = intc_enable;
397 desc->chip.mask_ack = intc_disable;
398 desc->chip.set_type = intc_set_sense;
399
400 for (i = 0; i < desc->nr_vectors; i++) {
401 struct intc_vect *vect = desc->vectors + i;
402
403 intc_register_irq(desc, vect->enum_id, evt2irq(vect->vect));
404 }
405}
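Each IRQ registered above carries its register description packed into a single 32-bit chip-data word by _INTC_MK(): function id in bits 31-24, value in 23-16, register index in 15-8 and bit position in 7-0. A worked example of the encoding, with values chosen purely for illustration:

	/* "write priority 3 at bit position 4 of prio_regs[2]" */
	unsigned int h = _INTC_MK(REG_FN_PRIO_16, 2, 4, 3);	/* == 0x03030204 */

	/* the accessors recover the individual fields:
	 *   _INTC_FN(h)    == REG_FN_PRIO_16 (3)
	 *   _INTC_VALUE(h) == 3
	 *   _INTC_IDX(h)   == 2
	 *   _INTC_BIT(h)   == 4
	 */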
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 1a107fe22dde..a979b981e6a3 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -88,7 +88,7 @@ static struct ipr_desc ipr_irq_desc = {
88 }, 88 },
89}; 89};
90 90
91void __init init_IRQ_ipr(void) 91void __init plat_irq_setup(void)
92{ 92{
93 register_ipr_controller(&ipr_irq_desc); 93 register_ipr_controller(&ipr_irq_desc);
94} 94}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index b6e3a6351fa6..deab16500167 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -107,7 +107,7 @@ static struct ipr_desc ipr_irq_desc = {
107 }, 107 },
108}; 108};
109 109
110void __init init_IRQ_ipr(void) 110void __init plat_irq_setup(void)
111{ 111{
112 register_ipr_controller(&ipr_irq_desc); 112 register_ipr_controller(&ipr_irq_desc);
113} 113}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index a55b8ce2c54c..ebd9d06d8bdd 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -92,7 +92,7 @@ static struct ipr_desc ipr_irq_desc = {
92 }, 92 },
93}; 93};
94 94
95void __init init_IRQ_ipr(void) 95void __init plat_irq_setup(void)
96{ 96{
97 register_ipr_controller(&ipr_irq_desc); 97 register_ipr_controller(&ipr_irq_desc);
98} 98}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7709.c b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
index d79ec0c0522f..086f8e2545af 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
@@ -139,7 +139,7 @@ static struct ipr_desc ipr_irq_desc = {
139 }, 139 },
140}; 140};
141 141
142void __init init_IRQ_ipr(void) 142void __init plat_irq_setup(void)
143{ 143{
144 register_ipr_controller(&ipr_irq_desc); 144 register_ipr_controller(&ipr_irq_desc);
145} 145}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index f40e6dac337d..132284893373 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -101,7 +101,7 @@ static struct ipr_desc ipr_irq_desc = {
101 }, 101 },
102}; 102};
103 103
104void __init init_IRQ_ipr(void) 104void __init plat_irq_setup(void)
105{ 105{
106 register_ipr_controller(&ipr_irq_desc); 106 register_ipr_controller(&ipr_irq_desc);
107} 107}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index da153bcdfeb2..f2286de22bd5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -82,88 +82,213 @@ static int __init sh7750_devices_setup(void)
82} 82}
83__initcall(sh7750_devices_setup); 83__initcall(sh7750_devices_setup);
84 84
85static struct ipr_data ipr_irq_table[] = { 85enum {
86 /* IRQ, IPR-idx, shift, priority */ 86 UNUSED = 0,
87 { 16, 0, 12, 2 }, /* TMU0 TUNI*/ 87
88 { 17, 0, 12, 2 }, /* TMU1 TUNI */ 88 /* interrupt sources */
89 { 18, 0, 4, 2 }, /* TMU2 TUNI */ 89 IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
90 { 19, 0, 4, 2 }, /* TMU2 TIPCI */ 90 HUDI, GPIOI,
91 { 27, 1, 12, 2 }, /* WDT ITI */ 91 DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3,
92 { 20, 0, 0, 2 }, /* RTC ATI (alarm) */ 92 DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7,
93 { 21, 0, 0, 2 }, /* RTC PRI (period) */ 93 DMAC_DMAE,
94 { 22, 0, 0, 2 }, /* RTC CUI (carry) */ 94 PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
95 { 23, 1, 4, 3 }, /* SCI ERI */ 95 PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3,
96 { 24, 1, 4, 3 }, /* SCI RXI */ 96 TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI,
97 { 25, 1, 4, 3 }, /* SCI TXI */ 97 RTC_ATI, RTC_PRI, RTC_CUI,
98 { 40, 2, 4, 3 }, /* SCIF ERI */ 98 SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI,
99 { 41, 2, 4, 3 }, /* SCIF RXI */ 99 SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI,
100 { 42, 2, 4, 3 }, /* SCIF BRI */ 100 WDT,
101 { 43, 2, 4, 3 }, /* SCIF TXI */ 101 REF_RCMI, REF_ROVI,
102 { 34, 2, 8, 7 }, /* DMAC DMTE0 */ 102
103 { 35, 2, 8, 7 }, /* DMAC DMTE1 */ 103 /* interrupt groups */
104 { 36, 2, 8, 7 }, /* DMAC DMTE2 */ 104 DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF,
105 { 37, 2, 8, 7 }, /* DMAC DMTE3 */
106 { 38, 2, 8, 7 }, /* DMAC DMAE */
107};
108
109static unsigned long ipr_offsets[] = {
110 0xffd00004UL, /* 0: IPRA */
111 0xffd00008UL, /* 1: IPRB */
112 0xffd0000cUL, /* 2: IPRC */
113 0xffd00010UL, /* 3: IPRD */
114};
115
116static struct ipr_desc ipr_irq_desc = {
117 .ipr_offsets = ipr_offsets,
118 .nr_offsets = ARRAY_SIZE(ipr_offsets),
119
120 .ipr_data = ipr_irq_table,
121 .nr_irqs = ARRAY_SIZE(ipr_irq_table),
122
123 .chip = {
124 .name = "IPR-sh7750",
125 },
126}; 105};
127 106
128#ifdef CONFIG_CPU_SUBTYPE_SH7751 107static struct intc_vect vectors[] = {
129static struct ipr_data ipr_irq_table_sh7751[] = { 108 INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
130 { 44, 2, 8, 7 }, /* DMAC DMTE4 */ 109 INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
131 { 45, 2, 8, 7 }, /* DMAC DMTE5 */ 110 INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
132 { 46, 2, 8, 7 }, /* DMAC DMTE6 */ 111 INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
133 { 47, 2, 8, 7 }, /* DMAC DMTE7 */ 112 INTC_VECT(RTC_CUI, 0x4c0),
134 /* The following use INTC_INPRI00 for masking, which is a 32-bit 113 INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500),
135 register, not a 16-bit register like the IPRx registers, so it 114 INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540),
136 would need special support */ 115 INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720),
137 /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */ 116 INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760),
138 /*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */ 117 INTC_VECT(WDT, 0x560),
118 INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0),
139}; 119};
140 120
141static struct ipr_desc ipr_irq_desc_sh7751 = { 121static struct intc_group groups[] = {
142 .ipr_offsets = ipr_offsets, 122 INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI),
143 .nr_offsets = ARRAY_SIZE(ipr_offsets), 123 INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
124 INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI),
125 INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI),
126 INTC_GROUP(REF, REF_RCMI, REF_ROVI),
127};
144 128
145 .ipr_data = ipr_irq_table_sh7751, 129static struct intc_prio priorities[] = {
146 .nr_irqs = ARRAY_SIZE(ipr_irq_table_sh7751), 130 INTC_PRIO(SCIF, 3),
131 INTC_PRIO(SCI1, 3),
132 INTC_PRIO(DMAC, 7),
133};
147 134
148 .chip = { 135static struct intc_prio_reg prio_registers[] = {
149 .name = "IPR-sh7751", 136 { 0xffd00004, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
150 }, 137 { 0xffd00008, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
138 { 0xffd0000c, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } },
139 { 0xffd00010, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
140 { 0xfe080000, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0,
141 TMU4, TMU3,
142 PCIC1, PCIC0_PCISERR } },
143};
144
145static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
146 priorities, NULL, prio_registers, NULL);
147
148/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
149#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
150 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
151 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
152 defined(CONFIG_CPU_SUBTYPE_SH7091)
153static struct intc_vect vectors_dma4[] = {
154 INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660),
155 INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0),
156 INTC_VECT(DMAC_DMAE, 0x6c0),
157};
158
159static struct intc_group groups_dma4[] = {
160 INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2,
161 DMAC_DMTE3, DMAC_DMAE),
162};
163
164static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
165 vectors_dma4, groups_dma4,
166 priorities, NULL, prio_registers, NULL);
167#endif
168
169/* SH7750R and SH7751R both have 8-channel DMA controllers */
170#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
171static struct intc_vect vectors_dma8[] = {
172 INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660),
173 INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0),
174 INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0),
175 INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0),
176 INTC_VECT(DMAC_DMAE, 0x6c0),
177};
178
179static struct intc_group groups_dma8[] = {
180 INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2,
181 DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5,
182 DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE),
183};
184
185static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
186 vectors_dma8, groups_dma8,
187 priorities, NULL, prio_registers, NULL);
188#endif
189
190/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
191#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
192 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
193 defined(CONFIG_CPU_SUBTYPE_SH7751R)
194static struct intc_vect vectors_tmu34[] = {
195 INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80),
151}; 196};
197
198static struct intc_mask_reg mask_registers[] = {
199 { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
200 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
201 0, 0, 0, 0, 0, 0, TMU4, TMU3,
202 PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
203 PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2,
204 PCIC1_PCIDMA3, PCIC0_PCISERR } },
205};
206
207static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
208 vectors_tmu34, NULL, priorities,
209 mask_registers, prio_registers, NULL);
152#endif 210#endif
153 211
154void __init init_IRQ_ipr(void) 212/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */
213static struct intc_vect vectors_irlm[] = {
214 INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
215 INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
216};
217
218static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
219 priorities, NULL, prio_registers, NULL);
220
221/* SH7751 and SH7751R both have PCI */
222#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
223static struct intc_vect vectors_pci[] = {
224 INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0),
225 INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0),
226 INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60),
227 INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20),
228};
229
230static struct intc_group groups_pci[] = {
231 INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
232 PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3),
233};
234
235static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
236 priorities, mask_registers, prio_registers, NULL);
237#endif
238
239#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
240 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
241 defined(CONFIG_CPU_SUBTYPE_SH7091)
242void __init plat_irq_setup(void)
155{ 243{
156 register_ipr_controller(&ipr_irq_desc); 244 /*
157#ifdef CONFIG_CPU_SUBTYPE_SH7751 245 * same vectors for SH7750, SH7750S and SH7091 except for IRLM,
158 register_ipr_controller(&ipr_irq_desc_sh7751); 246 * see below..
247 */
248 register_intc_controller(&intc_desc);
249 register_intc_controller(&intc_desc_dma4);
250}
159#endif 251#endif
252
253#if defined(CONFIG_CPU_SUBTYPE_SH7750R)
254void __init plat_irq_setup(void)
255{
256 register_intc_controller(&intc_desc);
257 register_intc_controller(&intc_desc_dma8);
258 register_intc_controller(&intc_desc_tmu34);
160} 259}
260#endif
261
262#if defined(CONFIG_CPU_SUBTYPE_SH7751)
263void __init plat_irq_setup(void)
264{
265 register_intc_controller(&intc_desc);
266 register_intc_controller(&intc_desc_dma4);
267 register_intc_controller(&intc_desc_tmu34);
268 register_intc_controller(&intc_desc_pci);
269}
270#endif
271
272#if defined(CONFIG_CPU_SUBTYPE_SH7751R)
273void __init plat_irq_setup(void)
274{
275 register_intc_controller(&intc_desc);
276 register_intc_controller(&intc_desc_dma8);
277 register_intc_controller(&intc_desc_tmu34);
278 register_intc_controller(&intc_desc_pci);
279}
280#endif
161 281
162#define INTC_ICR 0xffd00000UL 282#define INTC_ICR 0xffd00000UL
163#define INTC_ICR_IRLM (1<<7) 283#define INTC_ICR_IRLM (1<<7)
164 284
165/* enable individual interrupt mode for external interupts */ 285/* enable individual interrupt mode for external interupts */
166void ipr_irq_enable_irlm(void) 286void __init ipr_irq_enable_irlm(void)
167{ 287{
288#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7091)
289 BUG(); /* impossible to mask interrupts on SH7750 and SH7091 */
290#endif
291 register_intc_controller(&intc_desc_irlm);
292
168 ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); 293 ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
169} 294}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 3df169755673..47fa27056253 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -109,11 +109,6 @@ static struct intc2_desc intc2_irq_desc __read_mostly = {
109 }, 109 },
110}; 110};
111 111
112void __init init_IRQ_intc2(void)
113{
114 register_intc2_controller(&intc2_irq_desc);
115}
116
117static struct ipr_data ipr_irq_table[] = { 112static struct ipr_data ipr_irq_table[] = {
118 /* IRQ, IPR-idx, shift, priority */ 113 /* IRQ, IPR-idx, shift, priority */
119 { 16, 0, 12, 2 }, /* TMU0 TUNI*/ 114 { 16, 0, 12, 2 }, /* TMU0 TUNI*/
@@ -163,7 +158,8 @@ static struct ipr_desc ipr_irq_desc = {
163 }, 158 },
164}; 159};
165 160
166void __init init_IRQ_ipr(void) 161void __init plat_irq_setup(void)
167{ 162{
163 register_intc2_controller(&intc2_irq_desc);
168 register_ipr_controller(&ipr_irq_desc); 164 register_ipr_controller(&ipr_irq_desc);
169} 165}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 51b386d454de..a0fd8bb21f7c 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -387,9 +387,24 @@ out_err:
387 return err; 387 return err;
388} 388}
389 389
390static long sh7722_frqcr_round_rate(struct clk *clk, unsigned long rate)
391{
392 unsigned long parent_rate = clk->parent->rate;
393 int div;
394
395 /* look for multiplier/divisor pair */
396 div = sh7722_find_divisors(parent_rate, rate);
397 if (div < 0)
398 return clk->rate;
399
400 /* calculate new value of clock rate */
401 return parent_rate * 2 / div;
402}
403
390static struct clk_ops sh7722_frqcr_clk_ops = { 404static struct clk_ops sh7722_frqcr_clk_ops = {
391 .recalc = sh7722_frqcr_recalc, 405 .recalc = sh7722_frqcr_recalc,
392 .set_rate = sh7722_frqcr_set_rate, 406 .set_rate = sh7722_frqcr_set_rate,
407 .round_rate = sh7722_frqcr_round_rate,
393}; 408};
394 409
395/* 410/*
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index a3e159ef6dfe..25b913e07e2c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -19,8 +19,21 @@ static struct plat_sci_port sci_platform_data[] = {
19 .mapbase = 0xffe00000, 19 .mapbase = 0xffe00000,
20 .flags = UPF_BOOT_AUTOCONF, 20 .flags = UPF_BOOT_AUTOCONF,
21 .type = PORT_SCIF, 21 .type = PORT_SCIF,
22 .irqs = { 80, 81, 83, 82 }, 22 .irqs = { 80, 80, 80, 80 },
23 }, { 23 },
24 {
25 .mapbase = 0xffe10000,
26 .flags = UPF_BOOT_AUTOCONF,
27 .type = PORT_SCIF,
28 .irqs = { 81, 81, 81, 81 },
29 },
30 {
31 .mapbase = 0xffe20000,
32 .flags = UPF_BOOT_AUTOCONF,
33 .type = PORT_SCIF,
34 .irqs = { 82, 82, 82, 82 },
35 },
36 {
24 .flags = 0, 37 .flags = 0,
25 } 38 }
26}; 39};
@@ -44,46 +57,145 @@ static int __init sh7722_devices_setup(void)
44} 57}
45__initcall(sh7722_devices_setup); 58__initcall(sh7722_devices_setup);
46 59
47static struct ipr_data ipr_irq_table[] = { 60enum {
48 /* IRQ, IPR-idx, shift, prio */ 61 UNUSED=0,
49 { 16, 0, 12, 2 }, /* TMU0 */ 62
50 { 17, 0, 8, 2 }, /* TMU1 */ 63 /* interrupt sources */
51 { 80, 6, 12, 3 }, /* SCIF ERI */ 64 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
52 { 81, 6, 12, 3 }, /* SCIF RXI */ 65 HUDI,
53 { 82, 6, 12, 3 }, /* SCIF BRI */ 66 SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
54 { 83, 6, 12, 3 }, /* SCIF TXI */ 67 RTC_ATI, RTC_PRI, RTC_CUI,
68 DMAC0, DMAC1, DMAC2, DMAC3,
69 VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
70 VPU, TPU,
71 USB_USBI0, USB_USBI1,
72 DMAC4, DMAC5, DMAC_DADERR,
73 KEYSC,
74 SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
75 FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
76 I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
77 SDHI0, SDHI1, SDHI2, SDHI3,
78 CMT, TSIF, SIU, TWODG,
79 TMU0, TMU1, TMU2,
80 IRDA, JPU, LCDC,
81
82 /* interrupt groups */
83
84 SIM, RTC, DMAC0123, VIOVOU, USB, DMAC45, FLCTL, I2C, SDHI,
55}; 85};
56 86
57static unsigned long ipr_offsets[] = { 87static struct intc_vect vectors[] = {
58 0xa4080000, /* 0: IPRA */ 88 INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
59 0xa4080004, /* 1: IPRB */ 89 INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
60 0xa4080008, /* 2: IPRC */ 90 INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
61 0xa408000c, /* 3: IPRD */ 91 INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
62 0xa4080010, /* 4: IPRE */ 92 INTC_VECT(SIM_ERI, 0x700), INTC_VECT(SIM_RXI, 0x720),
63 0xa4080014, /* 5: IPRF */ 93 INTC_VECT(SIM_TXI, 0x740), INTC_VECT(SIM_TEI, 0x760),
64 0xa4080018, /* 6: IPRG */ 94 INTC_VECT(RTC_ATI, 0x780), INTC_VECT(RTC_PRI, 0x7a0),
65 0xa408001c, /* 7: IPRH */ 95 INTC_VECT(RTC_CUI, 0x7c0),
66 0xa4080020, /* 8: IPRI */ 96 INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
67 0xa4080024, /* 9: IPRJ */ 97 INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
68 0xa4080028, /* 10: IPRK */ 98 INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
69 0xa408002c, /* 11: IPRL */ 99 INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
100 INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9a0),
101 INTC_VECT(USB_USBI0, 0xa20), INTC_VECT(USB_USBI1, 0xa40),
102 INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
103 INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
104 INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20),
105 INTC_VECT(SCIF2, 0xc40), INTC_VECT(SIOF0, 0xc80),
106 INTC_VECT(SIOF1, 0xca0), INTC_VECT(SIO, 0xd00),
107 INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
108 INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
109 INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
110 INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
111 INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
112 INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
113 INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
114 INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
115 INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
116 INTC_VECT(TMU2, 0x440), INTC_VECT(IRDA, 0x480),
117 INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
70}; 118};
71 119
72static struct ipr_desc ipr_irq_desc = { 120static struct intc_group groups[] = {
73 .ipr_offsets = ipr_offsets, 121 INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
74 .nr_offsets = ARRAY_SIZE(ipr_offsets), 122 INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
123 INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
124 INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
125 INTC_GROUP(USB, USB_USBI0, USB_USBI1),
126 INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
127 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
128 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
129 INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
130 INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
131};
75 132
76 .ipr_data = ipr_irq_table, 133static struct intc_prio priorities[] = {
77 .nr_irqs = ARRAY_SIZE(ipr_irq_table), 134 INTC_PRIO(SCIF0, 3),
135 INTC_PRIO(SCIF1, 3),
136 INTC_PRIO(SCIF2, 3),
137 INTC_PRIO(TMU0, 2),
138 INTC_PRIO(TMU1, 2),
139};
78 140
79 .chip = { 141static struct intc_mask_reg mask_registers[] = {
80 .name = "IPR-sh7722", 142 { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
81 }, 143 { } },
144 { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
145 { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
146 { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
147 { 0, 0, 0, VPU, } },
148 { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
149 { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
150 { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
151 { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
152 { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
153 { KEYSC, DMAC_DADERR, DMAC5, DMAC4, 0, SCIF2, SCIF1, SCIF0 } },
154 { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
155 { 0, 0, 0, SIO, 0, 0, SIOF1, SIOF0 } },
156 { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
157 { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
158 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
159 { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
160 { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } },
161 { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
162 { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
163 { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
164 { } },
165 { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
166 { 0, RTC_CUI, RTC_PRI, RTC_ATI, 0, TPU, 0, TSIF } },
167 { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
168 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
82}; 169};
83 170
84void __init init_IRQ_ipr(void) 171static struct intc_prio_reg prio_registers[] = {
172 { 0xa4080000, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, IRDA } },
173 { 0xa4080004, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
174 { 0xa4080008, 16, 4, /* IPRC */ { } },
175 { 0xa408000c, 16, 4, /* IPRD */ { } },
176 { 0xa4080010, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, 0, VPU } },
177 { 0xa4080014, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
178 { 0xa4080018, 16, 4, /* IPRG */ { SCIF0, SCIF1, SCIF2 } },
179 { 0xa408001c, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C } },
180 { 0xa4080020, 16, 4, /* IPRI */ { SIO, 0, TSIF, RTC } },
181 { 0xa4080024, 16, 4, /* IPRJ */ { 0, 0, SIU } },
182 { 0xa4080028, 16, 4, /* IPRK */ { 0, 0, 0, SDHI } },
183 { 0xa408002c, 16, 4, /* IPRL */ { TWODG, 0, TPU } },
184 { 0xa4140010, 32, 4, /* INTPRI00 */
185 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
186};
187
188static struct intc_sense_reg sense_registers[] = {
189 { 0xa414001c, 16, 2, /* ICR1 */
190 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
191};
192
193static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups, priorities,
194 mask_registers, prio_registers, sense_registers);
195
196void __init plat_irq_setup(void)
85{ 197{
86 register_ipr_controller(&ipr_irq_desc); 198 register_intc_controller(&intc_desc);
87} 199}
88 200
89void __init plat_mem_setup(void) 201void __init plat_mem_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index b57c760bffde..a4127ec15203 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -30,7 +30,7 @@ static struct resource rtc_resources[] = {
30 }, 30 },
31 [3] = { 31 [3] = {
32 /* Alarm IRQ */ 32 /* Alarm IRQ */
33 .start = 23, 33 .start = 20,
34 .flags = IORESOURCE_IRQ, 34 .flags = IORESOURCE_IRQ,
35 }, 35 },
36}; 36};
@@ -78,44 +78,205 @@ static int __init sh7780_devices_setup(void)
78} 78}
79__initcall(sh7780_devices_setup); 79__initcall(sh7780_devices_setup);
80 80
81static struct intc2_data intc2_irq_table[] = { 81enum {
82 { 28, 0, 24, 0, 0, 2 }, /* TMU0 */ 82 UNUSED = 0,
83 83
84 { 21, 1, 0, 0, 2, 2 }, 84 /* interrupt sources */
85 { 22, 1, 1, 0, 2, 2 },
86 { 23, 1, 2, 0, 2, 2 },
87 85
88 { 40, 8, 24, 0, 3, 3 }, /* SCIF0 ERI */ 86 IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
89 { 41, 8, 24, 0, 3, 3 }, /* SCIF0 RXI */ 87 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
90 { 42, 8, 24, 0, 3, 3 }, /* SCIF0 BRI */ 88 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
91 { 43, 8, 24, 0, 3, 3 }, /* SCIF0 TXI */ 89 IRL_HHLL, IRL_HHLH, IRL_HHHL,
92 90
93 { 76, 8, 16, 0, 4, 3 }, /* SCIF1 ERI */ 91 IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
94 { 77, 8, 16, 0, 4, 3 }, /* SCIF1 RXI */ 92 RTC_ATI, RTC_PRI, RTC_CUI,
95 { 78, 8, 16, 0, 4, 3 }, /* SCIF1 BRI */ 93 WDT,
96 { 79, 8, 16, 0, 4, 3 }, /* SCIF1 TXI */ 94 TMU0, TMU1, TMU2, TMU2_TICPI,
95 HUDI,
96 DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
97 SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
98 DMAC0_DMINT4, DMAC0_DMINT5, DMAC1_DMINT6, DMAC1_DMINT7,
99 CMT, HAC,
100 PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
101 PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
102 SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
103 SIOF, HSPI,
104 MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
105 DMAC1_DMINT8, DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11,
106 TMU3, TMU4, TMU5,
107 SSI,
108 FLCTL_FLSTE, FLCTL_FLEND, FLCTL_FLTRQ0, FLCTL_FLTRQ1,
109 GPIOI0, GPIOI1, GPIOI2, GPIOI3,
97 110
98 { 64, 0x10, 8, 0, 14, 2 }, /* PCIC0 */ 111 /* interrupt groups */
99 { 65, 0x10, 0, 0, 15, 2 }, /* PCIC1 */ 112
100 { 66, 0x14, 24, 0, 16, 2 }, /* PCIC2 */ 113 RTC, TMU012, DMAC0, SCIF0, DMAC45, DMAC1,
101 { 67, 0x14, 16, 0, 17, 2 }, /* PCIC3 */ 114 PCIC5, SCIF1, MMCIF, TMU345, FLCTL, GPIO,
102 { 68, 0x14, 8, 0, 18, 2 }, /* PCIC4 */
103}; 115};
104 116
105static struct intc2_desc intc2_irq_desc __read_mostly = { 117static struct intc_vect vectors[] = {
106 .prio_base = 0xffd40000, 118 INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
107 .msk_base = 0xffd40038, 119 INTC_VECT(RTC_CUI, 0x4c0),
108 .mskclr_base = 0xffd4003c, 120 INTC_VECT(WDT, 0x560),
121 INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
122 INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
123 INTC_VECT(HUDI, 0x600),
124 INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
125 INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
126 INTC_VECT(DMAC0_DMAE, 0x6c0),
127 INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
128 INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
129 INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
130 INTC_VECT(DMAC1_DMINT6, 0x7c0), INTC_VECT(DMAC1_DMINT7, 0x7e0),
131 INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980),
132 INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
133 INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
134 INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
135 INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
136 INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
137 INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
138 INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
139 INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80),
140 INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
141 INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
142 INTC_VECT(DMAC1_DMINT8, 0xd80), INTC_VECT(DMAC1_DMINT9, 0xda0),
143 INTC_VECT(DMAC1_DMINT10, 0xdc0), INTC_VECT(DMAC1_DMINT11, 0xde0),
144 INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
145 INTC_VECT(TMU5, 0xe40),
146 INTC_VECT(SSI, 0xe80),
147 INTC_VECT(FLCTL_FLSTE, 0xf00), INTC_VECT(FLCTL_FLEND, 0xf20),
148 INTC_VECT(FLCTL_FLTRQ0, 0xf40), INTC_VECT(FLCTL_FLTRQ1, 0xf60),
149 INTC_VECT(GPIOI0, 0xf80), INTC_VECT(GPIOI1, 0xfa0),
150 INTC_VECT(GPIOI2, 0xfc0), INTC_VECT(GPIOI3, 0xfe0),
151};
109 152
110 .intc2_data = intc2_irq_table, 153static struct intc_group groups[] = {
111 .nr_irqs = ARRAY_SIZE(intc2_irq_table), 154 INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
155 INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
156 INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
157 DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
158 INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
159 INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
160 DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
161 INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
162 INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
163 INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
164 INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
165 INTC_GROUP(FLCTL, FLCTL_FLSTE, FLCTL_FLEND,
166 FLCTL_FLTRQ0, FLCTL_FLTRQ1),
167 INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
168};
112 169
113 .chip = { 170static struct intc_prio priorities[] = {
114 .name = "INTC2-sh7780", 171 INTC_PRIO(SCIF0, 3),
115 }, 172 INTC_PRIO(SCIF1, 3),
173};
174
175static struct intc_mask_reg mask_registers[] = {
176 { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
177 { 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
178 SSI, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
179 PCIINTA, PCISERR, HAC, CMT, 0, 0, DMAC1, DMAC0,
180 HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
181};
182
183static struct intc_prio_reg prio_registers[] = {
184 { 0xffd40000, 32, 8, /* INT2PRI0 */ { TMU0, TMU1, TMU2, TMU2_TICPI } },
185 { 0xffd40004, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
186 { 0xffd40008, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
187 { 0xffd4000c, 32, 8, /* INT2PRI3 */ { HUDI, DMAC0, DMAC1 } },
188 { 0xffd40010, 32, 8, /* INT2PRI4 */ { CMT, HAC, PCISERR, PCIINTA, } },
189 { 0xffd40014, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
190 PCIINTD, PCIC5 } },
191 { 0xffd40018, 32, 8, /* INT2PRI6 */ { SIOF, HSPI, MMCIF, SSI } },
192 { 0xffd4001c, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
193};
194
195static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, priorities,
196 mask_registers, prio_registers, NULL);
197
198/* Support for external interrupt pins in IRQ mode */
199
200static struct intc_vect irq_vectors[] = {
201 INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
202 INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
203 INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
204 INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
205};
206
207static struct intc_mask_reg irq_mask_registers[] = {
208 { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
209 { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
210};
211
212static struct intc_prio_reg irq_prio_registers[] = {
213 { 0xffd00010, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
214 IRQ4, IRQ5, IRQ6, IRQ7 } },
116}; 215};
117 216
118void __init init_IRQ_intc2(void) 217static struct intc_sense_reg irq_sense_registers[] = {
218 { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
219 IRQ4, IRQ5, IRQ6, IRQ7 } },
220};
221
222static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
223 NULL, NULL, irq_mask_registers, irq_prio_registers,
224 irq_sense_registers);
225
226/* External interrupt pins in IRL mode */
227
228static struct intc_vect irl_vectors[] = {
229 INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
230 INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
231 INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
232 INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
233 INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
234 INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
235 INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
236 INTC_VECT(IRL_HHHL, 0x3c0),
237};
238
239static struct intc_mask_reg irl3210_mask_registers[] = {
240 { 0xffd00080, 0xffd00084, 32, /* INTMSK2 / INTMSKCLR2 */
241 { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
242 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
243 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
244 IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
245};
246
247static struct intc_mask_reg irl7654_mask_registers[] = {
248 { 0xffd00080, 0xffd00084, 32, /* INTMSK2 / INTMSKCLR2 */
249 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250 IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
251 IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
252 IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
253 IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
254};
255
256static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
257 NULL, NULL, irl7654_mask_registers, NULL, NULL);
258
259static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
260 NULL, NULL, irl3210_mask_registers, NULL, NULL);
261
262void __init plat_irq_setup(void)
119{ 263{
120 register_intc2_controller(&intc2_irq_desc); 264 register_intc_controller(&intc_desc);
265}
266
267void __init plat_irq_setup_pins(int mode)
268{
269 switch (mode) {
270 case IRQ_MODE_IRQ:
271 register_intc_controller(&intc_irq_desc);
272 break;
273 case IRQ_MODE_IRL7654:
274 register_intc_controller(&intc_irl7654_desc);
275 break;
276 case IRQ_MODE_IRL3210:
277 register_intc_controller(&intc_irl3210_desc);
278 break;
279 default:
280 BUG();
281 }
121} 282}
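plat_irq_setup_pins() leaves the choice of external pin mode to the platform. A hedged sketch of how board setup code might select one of the three modes provided here (exactly where the call is made is board-specific and not part of this patch):

	/* individual IRQ mode; an IRL-encoded board would pass
	 * IRQ_MODE_IRL3210 or IRQ_MODE_IRL7654 instead */
	plat_irq_setup_pins(IRQ_MODE_IRQ);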
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index ce10ec5d6914..cf047562e43f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -110,7 +110,7 @@ static struct intc2_desc intc2_irq_desc __read_mostly = {
110 }, 110 },
111}; 111};
112 112
113void __init init_IRQ_intc2(void) 113void __init plat_irq_setup(void)
114{ 114{
115 register_intc2_controller(&intc2_irq_desc); 115 register_intc2_controller(&intc2_irq_desc);
116} 116}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 70683ea12b83..704c064f70dc 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -79,7 +79,7 @@ static struct intc2_desc intc2_irq_desc __read_mostly = {
79 }, 79 },
80}; 80};
81 81
82void __init init_IRQ_intc2(void) 82void __init plat_irq_setup(void)
83{ 83{
84 register_intc2_controller(&intc2_irq_desc); 84 register_intc2_controller(&intc2_irq_desc);
85} 85}
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 47abf6e49dfb..e61890217c50 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -3,89 +3,46 @@
3 * 3 *
4 * cpufreq driver for the SuperH processors. 4 * cpufreq driver for the SuperH processors.
5 * 5 *
6 * Copyright (C) 2002, 2003, 2004, 2005 Paul Mundt 6 * Copyright (C) 2002 - 2007 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown 7 * Copyright (C) 2002 M. R. Brown
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
10 * under the terms of the GNU General Public License as published by the 10 *
11 * Free Software Foundation; either version 2 of the License, or (at your 11 * Copyright (C) 2004-2007 Atmel Corporation
12 * option) any later version. 12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
13 */ 16 */
14#include <linux/types.h> 17#include <linux/types.h>
15#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
16#include <linux/kernel.h> 19#include <linux/kernel.h>
17#include <linux/module.h> 20#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/init.h> 21#include <linux/init.h>
20#include <linux/delay.h> 22#include <linux/err.h>
21#include <linux/cpumask.h> 23#include <linux/cpumask.h>
22#include <linux/smp.h> 24#include <linux/smp.h>
23#include <linux/sched.h> /* set_cpus_allowed() */ 25#include <linux/sched.h> /* set_cpus_allowed() */
26#include <linux/clk.h>
24 27
25#include <asm/processor.h> 28static struct clk *cpuclk;
26#include <asm/watchdog.h>
27#include <asm/freq.h>
28#include <asm/io.h>
29
30/*
31 * For SuperH, each policy change requires that we change the IFC, BFC, and
32 * PFC at the same time. Here we define sane values that won't trash the
33 * system.
34 *
35 * Note the max set is computed at runtime, we use the divisors that we booted
36 * with to setup our maximum operating frequencies.
37 */
38struct clock_set {
39 unsigned int ifc;
40 unsigned int bfc;
41 unsigned int pfc;
42} clock_sets[] = {
43#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH2)
44 { 0, 0, 0 }, /* not implemented yet */
45#elif defined(CONFIG_CPU_SH4)
46 { 4, 8, 8 }, /* min - IFC: 1/4, BFC: 1/8, PFC: 1/8 */
47 { 1, 2, 2 }, /* max - IFC: 1, BFC: 1/2, PFC: 1/2 */
48#endif
49};
50
51#define MIN_CLOCK_SET 0
52#define MAX_CLOCK_SET (ARRAY_SIZE(clock_sets) - 1)
53
54/*
55 * For the time being, we only support two frequencies, which in turn are
56 * aimed at the POWERSAVE and PERFORMANCE policies, which in turn are derived
57 * directly from the respective min/max clock sets. Technically we could
58 * support a wider range of frequencies, but these vary far too much for each
59 * CPU subtype (and we'd have to construct a frequency table for each subtype).
60 *
61 * Maybe something to implement in the future..
62 */
63#define SH_FREQ_MAX 0
64#define SH_FREQ_MIN 1
65
66static struct cpufreq_frequency_table sh_freqs[] = {
67 { SH_FREQ_MAX, 0 },
68 { SH_FREQ_MIN, 0 },
69 { 0, CPUFREQ_TABLE_END },
70};
71 29
72static void sh_cpufreq_update_clocks(unsigned int set) 30static unsigned int sh_cpufreq_get(unsigned int cpu)
73{ 31{
74 current_cpu_data.cpu_clock = current_cpu_data.master_clock / clock_sets[set].ifc; 32 return (clk_get_rate(cpuclk) + 500) / 1000;
75 current_cpu_data.bus_clock = current_cpu_data.master_clock / clock_sets[set].bfc;
76 current_cpu_data.module_clock = current_cpu_data.master_clock / clock_sets[set].pfc;
77 current_cpu_data.loops_per_jiffy = loops_per_jiffy;
78} 33}
79 34
80/* XXX: This needs to be split out per CPU and CPU subtype. */
81/* 35/*
82 * Here we notify other drivers of the proposed change and the final change. 36 * Here we notify other drivers of the proposed change and the final change.
83 */ 37 */
84static int sh_cpufreq_setstate(unsigned int cpu, unsigned int set) 38static int sh_cpufreq_target(struct cpufreq_policy *policy,
39 unsigned int target_freq,
40 unsigned int relation)
85{ 41{
86 unsigned short frqcr = ctrl_inw(FRQCR); 42 unsigned int cpu = policy->cpu;
87 cpumask_t cpus_allowed; 43 cpumask_t cpus_allowed;
88 struct cpufreq_freqs freqs; 44 struct cpufreq_freqs freqs;
45 long freq;
89 46
90 if (!cpu_online(cpu)) 47 if (!cpu_online(cpu))
91 return -ENODEV; 48 return -ENODEV;
@@ -95,125 +52,109 @@ static int sh_cpufreq_setstate(unsigned int cpu, unsigned int set)
95 52
96 BUG_ON(smp_processor_id() != cpu); 53 BUG_ON(smp_processor_id() != cpu);
97 54
98 freqs.cpu = cpu; 55 /* Convert target_freq from kHz to Hz */
99 freqs.old = current_cpu_data.cpu_clock / 1000; 56 freq = clk_round_rate(cpuclk, target_freq * 1000);
100 freqs.new = (current_cpu_data.master_clock / clock_sets[set].ifc) / 1000;
101 57
102 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 58 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
103#if defined(CONFIG_CPU_SH3) 59 return -EINVAL;
104 frqcr |= (newstate & 0x4000) << 14; 60
105 frqcr |= (newstate & 0x000c) << 2; 61 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
106#elif defined(CONFIG_CPU_SH4)
107 /*
108 * FRQCR.PLL2EN is 1, we need to allow the PLL to stabilize by
109 * initializing the WDT.
110 */
111 if (frqcr & (1 << 9)) {
112 __u8 csr;
113
114 /*
115 * Set the overflow period to the highest available,
116 * in this case a 1/4096 division ratio yields a 5.25ms
117 * overflow period. See asm-sh/watchdog.h for more
118 * information and a range of other divisors.
119 */
120 csr = sh_wdt_read_csr();
121 csr |= WTCSR_CKS_4096;
122 sh_wdt_write_csr(csr);
123
124 sh_wdt_write_cnt(0);
125 }
126 frqcr &= 0x0e00; /* Clear ifc, bfc, pfc */
127 frqcr |= get_ifc_value(clock_sets[set].ifc) << 6;
128 frqcr |= get_bfc_value(clock_sets[set].bfc) << 3;
129 frqcr |= get_pfc_value(clock_sets[set].pfc);
130#endif
131 ctrl_outw(frqcr, FRQCR);
132 sh_cpufreq_update_clocks(set);
133 62
63 freqs.cpu = cpu;
64 freqs.old = sh_cpufreq_get(cpu);
65 freqs.new = (freq + 500) / 1000;
66 freqs.flags = 0;
67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
134 set_cpus_allowed(current, cpus_allowed); 69 set_cpus_allowed(current, cpus_allowed);
70 clk_set_rate(cpuclk, freq);
135 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
136 72
73 pr_debug("cpufreq: set frequency %lu Hz\n", freq);
74
137 return 0; 75 return 0;
138} 76}
139 77
140static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) 78static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
141{ 79{
142 unsigned int min_freq, max_freq; 80 printk(KERN_INFO "cpufreq: SuperH CPU frequency driver.\n");
143 unsigned int ifc, bfc, pfc;
144 81
145 if (!cpu_online(policy->cpu)) 82 if (!cpu_online(policy->cpu))
146 return -ENODEV; 83 return -ENODEV;
147 84
148 /* Update our maximum clock set */ 85 cpuclk = clk_get(NULL, "cpu_clk");
149 get_current_frequency_divisors(&ifc, &bfc, &pfc); 86 if (IS_ERR(cpuclk)) {
150 clock_sets[MAX_CLOCK_SET].ifc = ifc; 87 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
151 clock_sets[MAX_CLOCK_SET].bfc = bfc; 88 return PTR_ERR(cpuclk);
152 clock_sets[MAX_CLOCK_SET].pfc = pfc; 89 }
153
154 /* Convert from Hz to kHz */
155 max_freq = current_cpu_data.cpu_clock / 1000;
156 min_freq = (current_cpu_data.master_clock / clock_sets[MIN_CLOCK_SET].ifc) / 1000;
157
158 sh_freqs[SH_FREQ_MAX].frequency = max_freq;
159 sh_freqs[SH_FREQ_MIN].frequency = min_freq;
160 90
161 /* cpuinfo and default policy values */ 91 /* cpuinfo and default policy values */
162 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 92 policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
93 policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
163 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 94 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
164 policy->cur = max_freq;
165 95
166 return cpufreq_frequency_table_cpuinfo(policy, &sh_freqs[0]); 96 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
167} 97 policy->cur = sh_cpufreq_get(policy->cpu);
98 policy->min = policy->cpuinfo.min_freq;
99 policy->max = policy->cpuinfo.max_freq;
168 100
169static int sh_cpufreq_verify(struct cpufreq_policy *policy)
170{
171 return cpufreq_frequency_table_verify(policy, &sh_freqs[0]);
172}
173 101
174static int sh_cpufreq_target(struct cpufreq_policy *policy, 102 /*
175 unsigned int target_freq, 103 * Catch the cases where the clock framework hasn't been wired up
176 unsigned int relation) 104 * properly to support scaling.
177{ 105 */
178 unsigned int set, idx = 0; 106 if (unlikely(policy->min == policy->max)) {
107 printk(KERN_ERR "cpufreq: clock framework rate rounding "
108 "not supported on this CPU.\n");
179 109
180 if (cpufreq_frequency_table_target(policy, &sh_freqs[0], target_freq, relation, &idx)) 110 clk_put(cpuclk);
181 return -EINVAL; 111 return -EINVAL;
112 }
182 113
183 set = (idx == SH_FREQ_MIN) ? MIN_CLOCK_SET : MAX_CLOCK_SET; 114 printk(KERN_INFO "cpufreq: Frequencies - Minimum %u.%03u MHz, "
115 "Maximum %u.%03u MHz.\n",
116 policy->min / 1000, policy->min % 1000,
117 policy->max / 1000, policy->max % 1000);
184 118
185 sh_cpufreq_setstate(policy->cpu, set); 119 return 0;
120}
186 121
122static int sh_cpufreq_verify(struct cpufreq_policy *policy)
123{
124 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
125 policy->cpuinfo.max_freq);
126 return 0;
127}
128
129static int sh_cpufreq_exit(struct cpufreq_policy *policy)
130{
131 clk_put(cpuclk);
187 return 0; 132 return 0;
188} 133}
189 134
190static struct cpufreq_driver sh_cpufreq_driver = { 135static struct cpufreq_driver sh_cpufreq_driver = {
191 .owner = THIS_MODULE, 136 .owner = THIS_MODULE,
192 .name = "SH cpufreq", 137 .name = "sh",
193 .init = sh_cpufreq_cpu_init, 138 .init = sh_cpufreq_cpu_init,
194 .verify = sh_cpufreq_verify, 139 .verify = sh_cpufreq_verify,
195 .target = sh_cpufreq_target, 140 .target = sh_cpufreq_target,
141 .get = sh_cpufreq_get,
142 .exit = sh_cpufreq_exit,
196}; 143};
197 144
198static int __init sh_cpufreq_init(void) 145static int __init sh_cpufreq_module_init(void)
199{ 146{
200 if (!current_cpu_data.cpu_clock) 147 return cpufreq_register_driver(&sh_cpufreq_driver);
201 return -EINVAL;
202 if (cpufreq_register_driver(&sh_cpufreq_driver))
203 return -EINVAL;
204
205 return 0;
206} 148}
207 149
208static void __exit sh_cpufreq_exit(void) 150static void __exit sh_cpufreq_module_exit(void)
209{ 151{
210 cpufreq_unregister_driver(&sh_cpufreq_driver); 152 cpufreq_unregister_driver(&sh_cpufreq_driver);
211} 153}
212 154
213module_init(sh_cpufreq_init); 155module_init(sh_cpufreq_module_init);
214module_exit(sh_cpufreq_exit); 156module_exit(sh_cpufreq_module_exit);
215 157
216MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); 158MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
217MODULE_DESCRIPTION("cpufreq driver for SuperH"); 159MODULE_DESCRIPTION("cpufreq driver for SuperH");
218MODULE_LICENSE("GPL"); 160MODULE_LICENSE("GPL");
219
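The replacement driver above stops programming FRQCR directly and instead asks the clock framework for a legal rate, letting clk_set_rate() do the hardware programming. Below is a minimal sketch of such a .target routine; it is not part of the patch. The "cpu_clk" clock, the cpuclk variable and the two-argument cpufreq_notify_transition() call are taken from the hunks above; the function name and the rest are illustrative, and the relation argument is simply ignored because the clock framework picks the closest rate.

#include <linux/clk.h>
#include <linux/cpufreq.h>

static struct clk *cpuclk;      /* clk_get(NULL, "cpu_clk") in the .init hook */

static int example_cpufreq_target(struct cpufreq_policy *policy,
                                  unsigned int target_freq,
                                  unsigned int relation)
{
        struct cpufreq_freqs freqs;
        long freq;

        /* Let the clock framework round to the nearest supported rate (Hz). */
        freq = clk_round_rate(cpuclk, target_freq * 1000UL);

        freqs.cpu   = policy->cpu;
        freqs.old   = policy->cur;              /* kHz */
        freqs.new   = (freq + 500) / 1000;      /* Hz -> kHz, rounded */
        freqs.flags = 0;

        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        clk_set_rate(cpuclk, freq);             /* reprogram the divider */
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

Note that the driver in the hunks above additionally binds itself to the target CPU with set_cpus_allowed() around the rate change.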
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index 71a3ad7d283e..0bccc0ca5a0f 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -36,7 +36,8 @@ ENTRY(empty_zero_page)
361: 361:
37 .skip PAGE_SIZE - empty_zero_page - 1b 37 .skip PAGE_SIZE - empty_zero_page - 1b
38 38
39 .text 39 .section .text.head, "ax"
40
40/* 41/*
41 * Condition at the entry of _stext: 42 * Condition at the entry of _stext:
42 * 43 *
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 27897798867a..03404987528d 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -253,14 +253,7 @@ void __init init_IRQ(void)
253#ifdef CONFIG_CPU_HAS_PINT_IRQ 253#ifdef CONFIG_CPU_HAS_PINT_IRQ
254 init_IRQ_pint(); 254 init_IRQ_pint();
255#endif 255#endif
256 256 plat_irq_setup();
257#ifdef CONFIG_CPU_HAS_INTC2_IRQ
258 init_IRQ_intc2();
259#endif
260
261#ifdef CONFIG_CPU_HAS_IPR_IRQ
262 init_IRQ_ipr();
263#endif
264 257
265 /* Perform the machine specific initialisation */ 258 /* Perform the machine specific initialisation */
266 if (sh_mv.mv_init_irq) 259 if (sh_mv.mv_init_irq)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index de8e6e2f2c87..c14a3e95d0b1 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/kexec.h> 23#include <linux/kexec.h>
24#include <linux/module.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/page.h> 27#include <asm/page.h>
@@ -78,7 +79,11 @@ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
78static struct resource code_resource = { .name = "Kernel code", }; 79static struct resource code_resource = { .name = "Kernel code", };
79static struct resource data_resource = { .name = "Kernel data", }; 80static struct resource data_resource = { .name = "Kernel data", };
80 81
81unsigned long memory_start, memory_end; 82unsigned long memory_start;
83EXPORT_SYMBOL(memory_start);
84
85unsigned long memory_end;
86EXPORT_SYMBOL(memory_end);
82 87
83static int __init early_parse_mem(char *p) 88static int __init early_parse_mem(char *p)
84{ 89{
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 5b53e10bb9cd..d1bcac4fa269 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -5,7 +5,7 @@
5 * Copyright (C) 2000 Greg Banks, Mitch Davis 5 * Copyright (C) 2000 Greg Banks, Mitch Davis
6 * 6 *
7 */ 7 */
8 8#include <linux/module.h>
9#include <asm/sh_bios.h> 9#include <asm/sh_bios.h>
10 10
11#define BIOS_CALL_CONSOLE_WRITE 0 11#define BIOS_CALL_CONSOLE_WRITE 0
@@ -63,6 +63,7 @@ void sh_bios_gdb_detach(void)
63{ 63{
64 sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); 64 sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
65} 65}
66EXPORT_SYMBOL(sh_bios_gdb_detach);
66 67
67void sh_bios_get_node_addr (unsigned char *node_addr) 68void sh_bios_get_node_addr (unsigned char *node_addr)
68{ 69{
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index c968dcf09eee..37aef0a85197 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -63,10 +63,43 @@ EXPORT_SYMBOL(__const_udelay);
63/* These symbols are generated by the compiler itself */ 63/* These symbols are generated by the compiler itself */
64DECLARE_EXPORT(__udivsi3); 64DECLARE_EXPORT(__udivsi3);
65DECLARE_EXPORT(__sdivsi3); 65DECLARE_EXPORT(__sdivsi3);
66DECLARE_EXPORT(__ashrsi3);
67DECLARE_EXPORT(__ashlsi3);
66DECLARE_EXPORT(__ashrdi3); 68DECLARE_EXPORT(__ashrdi3);
67DECLARE_EXPORT(__ashldi3); 69DECLARE_EXPORT(__ashldi3);
70DECLARE_EXPORT(__ashiftrt_r4_6);
71DECLARE_EXPORT(__ashiftrt_r4_7);
72DECLARE_EXPORT(__ashiftrt_r4_8);
73DECLARE_EXPORT(__ashiftrt_r4_9);
74DECLARE_EXPORT(__ashiftrt_r4_10);
75DECLARE_EXPORT(__ashiftrt_r4_11);
76DECLARE_EXPORT(__ashiftrt_r4_12);
77DECLARE_EXPORT(__ashiftrt_r4_13);
78DECLARE_EXPORT(__ashiftrt_r4_14);
79DECLARE_EXPORT(__ashiftrt_r4_15);
80DECLARE_EXPORT(__ashiftrt_r4_20);
81DECLARE_EXPORT(__ashiftrt_r4_21);
82DECLARE_EXPORT(__ashiftrt_r4_22);
83DECLARE_EXPORT(__ashiftrt_r4_23);
84DECLARE_EXPORT(__ashiftrt_r4_24);
85DECLARE_EXPORT(__ashiftrt_r4_27);
86DECLARE_EXPORT(__ashiftrt_r4_30);
87DECLARE_EXPORT(__lshrsi3);
68DECLARE_EXPORT(__lshrdi3); 88DECLARE_EXPORT(__lshrdi3);
89DECLARE_EXPORT(__movstrSI8);
90DECLARE_EXPORT(__movstrSI12);
69DECLARE_EXPORT(__movstrSI16); 91DECLARE_EXPORT(__movstrSI16);
92DECLARE_EXPORT(__movstrSI20);
93DECLARE_EXPORT(__movstrSI24);
94DECLARE_EXPORT(__movstrSI28);
95DECLARE_EXPORT(__movstrSI32);
96DECLARE_EXPORT(__movstrSI36);
97DECLARE_EXPORT(__movstrSI40);
98DECLARE_EXPORT(__movstrSI44);
99DECLARE_EXPORT(__movstrSI48);
100DECLARE_EXPORT(__movstrSI52);
101DECLARE_EXPORT(__movstrSI56);
102DECLARE_EXPORT(__movstrSI60);
70#if __GNUC__ == 4 103#if __GNUC__ == 4
71DECLARE_EXPORT(__movmem); 104DECLARE_EXPORT(__movmem);
72#else 105#else
@@ -115,7 +148,9 @@ EXPORT_SYMBOL(synchronize_irq);
115#endif 148#endif
116 149
117EXPORT_SYMBOL(csum_partial); 150EXPORT_SYMBOL(csum_partial);
151EXPORT_SYMBOL(csum_partial_copy_generic);
118#ifdef CONFIG_IPV6 152#ifdef CONFIG_IPV6
119EXPORT_SYMBOL(csum_ipv6_magic); 153EXPORT_SYMBOL(csum_ipv6_magic);
120#endif 154#endif
121EXPORT_SYMBOL(clear_page); 155EXPORT_SYMBOL(clear_page);
156EXPORT_SYMBOL(__clear_user);
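The new DECLARE_EXPORT() lines cover routines that gcc itself emits calls to on SH: out-of-line shifts (__ashrsi3, __ashlsi3 and the fixed-count __ashiftrt_r4_N variants) and block moves (__movstrSIn). A module containing such compiler-generated calls fails to resolve at load time unless the symbols are exported. A hypothetical fragment that can trigger them, depending on CPU subtype and optimisation level:

/* Illustrative module code only; whether the helper calls are emitted
 * depends on the SH subtype (barrel shifter or not) and on the -O level.
 */
struct frame {
        unsigned char payload[32];
};

void copy_frame(struct frame *dst, const struct frame *src)
{
        *dst = *src;            /* may be lowered to a __movstrSI32 call */
}

int scale_down(int value, int shift)
{
        return value >> shift;  /* may become __ashrsi3 on parts without a
                                   variable-count shift instruction */
}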
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
index ff5656e60c05..91fb7024e06f 100644
--- a/arch/sh/kernel/syscalls.S
+++ b/arch/sh/kernel/syscalls.S
@@ -358,3 +358,4 @@ ENTRY(sys_call_table)
358 .long sys_signalfd 358 .long sys_signalfd
359 .long sys_timerfd 359 .long sys_timerfd
360 .long sys_eventfd 360 .long sys_eventfd
361 .long sys_fallocate
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 097ebd49f1bf..7aca37d79766 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -80,6 +80,7 @@ static void tmu_set_mode(enum clock_event_mode mode,
80 break; 80 break;
81 case CLOCK_EVT_MODE_UNUSED: 81 case CLOCK_EVT_MODE_UNUSED:
82 case CLOCK_EVT_MODE_SHUTDOWN: 82 case CLOCK_EVT_MODE_SHUTDOWN:
83 case CLOCK_EVT_MODE_RESUME:
83 break; 84 break;
84 } 85 }
85} 86}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 5ba216180b30..9cb95af7b090 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
22 *(.empty_zero_page) 22 *(.empty_zero_page)
23 } = 0 23 } = 0
24 .text : { 24 .text : {
25 *(.text.head)
25 TEXT_TEXT 26 TEXT_TEXT
26 SCHED_TEXT 27 SCHED_TEXT
27 LOCK_TEXT 28 LOCK_TEXT
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 28d79a474cde..70da1c8d407e 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -120,14 +120,14 @@ config CPU_SUBTYPE_SH7712
120config CPU_SUBTYPE_SH7750 120config CPU_SUBTYPE_SH7750
121 bool "Support SH7750 processor" 121 bool "Support SH7750 processor"
122 select CPU_SH4 122 select CPU_SH4
123 select CPU_HAS_IPR_IRQ 123 select CPU_HAS_INTC_IRQ
124 help 124 help
125 Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU. 125 Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
126 126
127config CPU_SUBTYPE_SH7091 127config CPU_SUBTYPE_SH7091
128 bool "Support SH7091 processor" 128 bool "Support SH7091 processor"
129 select CPU_SH4 129 select CPU_SH4
130 select CPU_HAS_IPR_IRQ 130 select CPU_HAS_INTC_IRQ
131 help 131 help
132 Select SH7091 if you have an SH-4 based Sega device (such as 132 Select SH7091 if you have an SH-4 based Sega device (such as
133 the Dreamcast, Naomi, and Naomi 2). 133 the Dreamcast, Naomi, and Naomi 2).
@@ -135,17 +135,17 @@ config CPU_SUBTYPE_SH7091
135config CPU_SUBTYPE_SH7750R 135config CPU_SUBTYPE_SH7750R
136 bool "Support SH7750R processor" 136 bool "Support SH7750R processor"
137 select CPU_SH4 137 select CPU_SH4
138 select CPU_HAS_IPR_IRQ 138 select CPU_HAS_INTC_IRQ
139 139
140config CPU_SUBTYPE_SH7750S 140config CPU_SUBTYPE_SH7750S
141 bool "Support SH7750S processor" 141 bool "Support SH7750S processor"
142 select CPU_SH4 142 select CPU_SH4
143 select CPU_HAS_IPR_IRQ 143 select CPU_HAS_INTC_IRQ
144 144
145config CPU_SUBTYPE_SH7751 145config CPU_SUBTYPE_SH7751
146 bool "Support SH7751 processor" 146 bool "Support SH7751 processor"
147 select CPU_SH4 147 select CPU_SH4
148 select CPU_HAS_IPR_IRQ 148 select CPU_HAS_INTC_IRQ
149 help 149 help
150 Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU, 150 Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
151 or if you have a HD6417751R CPU. 151 or if you have a HD6417751R CPU.
@@ -153,7 +153,7 @@ config CPU_SUBTYPE_SH7751
153config CPU_SUBTYPE_SH7751R 153config CPU_SUBTYPE_SH7751R
154 bool "Support SH7751R processor" 154 bool "Support SH7751R processor"
155 select CPU_SH4 155 select CPU_SH4
156 select CPU_HAS_IPR_IRQ 156 select CPU_HAS_INTC_IRQ
157 157
158config CPU_SUBTYPE_SH7760 158config CPU_SUBTYPE_SH7760
159 bool "Support SH7760 processor" 159 bool "Support SH7760 processor"
@@ -189,7 +189,7 @@ config CPU_SUBTYPE_SH7770
189config CPU_SUBTYPE_SH7780 189config CPU_SUBTYPE_SH7780
190 bool "Support SH7780 processor" 190 bool "Support SH7780 processor"
191 select CPU_SH4A 191 select CPU_SH4A
192 select CPU_HAS_INTC2_IRQ 192 select CPU_HAS_INTC_IRQ
193 193
194config CPU_SUBTYPE_SH7785 194config CPU_SUBTYPE_SH7785
195 bool "Support SH7785 processor" 195 bool "Support SH7785 processor"
@@ -217,7 +217,7 @@ config CPU_SUBTYPE_SH7722
217 bool "Support SH7722 processor" 217 bool "Support SH7722 processor"
218 select CPU_SH4AL_DSP 218 select CPU_SH4AL_DSP
219 select CPU_SHX2 219 select CPU_SHX2
220 select CPU_HAS_IPR_IRQ 220 select CPU_HAS_INTC_IRQ
221 select ARCH_SPARSEMEM_ENABLE 221 select ARCH_SPARSEMEM_ENABLE
222 select SYS_SUPPORTS_NUMA 222 select SYS_SUPPORTS_NUMA
223 223
diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig
index ed035084b053..784434143343 100644
--- a/arch/sh64/configs/cayman_defconfig
+++ b/arch/sh64/configs/cayman_defconfig
@@ -1,11 +1,12 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc1 3# Linux kernel version: 2.6.22
4# Mon May 14 08:43:31 2007 4# Fri Jul 20 12:28:34 2007
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH64=y 7CONFIG_SUPERH64=y
8CONFIG_MMU=y 8CONFIG_MMU=y
9CONFIG_QUICKLIST=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y 10CONFIG_RWSEM_GENERIC_SPINLOCK=y
10CONFIG_GENERIC_FIND_NEXT_BIT=y 11CONFIG_GENERIC_FIND_NEXT_BIT=y
11CONFIG_GENERIC_HWEIGHT=y 12CONFIG_GENERIC_HWEIGHT=y
@@ -32,7 +33,7 @@ CONFIG_SWAP=y
32CONFIG_POSIX_MQUEUE=y 33CONFIG_POSIX_MQUEUE=y
33# CONFIG_BSD_PROCESS_ACCT is not set 34# CONFIG_BSD_PROCESS_ACCT is not set
34# CONFIG_TASKSTATS is not set 35# CONFIG_TASKSTATS is not set
35# CONFIG_UTS_NS is not set 36# CONFIG_USER_NS is not set
36# CONFIG_AUDIT is not set 37# CONFIG_AUDIT is not set
37# CONFIG_IKCONFIG is not set 38# CONFIG_IKCONFIG is not set
38CONFIG_LOG_BUF_SHIFT=14 39CONFIG_LOG_BUF_SHIFT=14
@@ -66,19 +67,12 @@ CONFIG_SLAB=y
66CONFIG_RT_MUTEXES=y 67CONFIG_RT_MUTEXES=y
67# CONFIG_TINY_SHMEM is not set 68# CONFIG_TINY_SHMEM is not set
68CONFIG_BASE_SMALL=0 69CONFIG_BASE_SMALL=0
69
70#
71# Loadable module support
72#
73# CONFIG_MODULES is not set 70# CONFIG_MODULES is not set
74
75#
76# Block layer
77#
78CONFIG_BLOCK=y 71CONFIG_BLOCK=y
79# CONFIG_LBD is not set 72# CONFIG_LBD is not set
80# CONFIG_BLK_DEV_IO_TRACE is not set 73# CONFIG_BLK_DEV_IO_TRACE is not set
81# CONFIG_LSF is not set 74# CONFIG_LSF is not set
75# CONFIG_BLK_DEV_BSG is not set
82 76
83# 77#
84# IO Schedulers 78# IO Schedulers
@@ -156,6 +150,8 @@ CONFIG_FLAT_NODE_MEM_MAP=y
156CONFIG_SPLIT_PTLOCK_CPUS=4 150CONFIG_SPLIT_PTLOCK_CPUS=4
157# CONFIG_RESOURCES_64BIT is not set 151# CONFIG_RESOURCES_64BIT is not set
158CONFIG_ZONE_DMA_FLAG=0 152CONFIG_ZONE_DMA_FLAG=0
153CONFIG_NR_QUICK=1
154CONFIG_VIRT_TO_BUS=y
159 155
160# 156#
161# Bus options (PCI, PCMCIA, EISA, MCA, ISA) 157# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
@@ -175,7 +171,6 @@ CONFIG_SH_PCIDMA_NONCOHERENT=y
175# Executable file formats 171# Executable file formats
176# 172#
177CONFIG_BINFMT_ELF=y 173CONFIG_BINFMT_ELF=y
178# CONFIG_BINFMT_FLAT is not set
179# CONFIG_BINFMT_MISC is not set 174# CONFIG_BINFMT_MISC is not set
180 175
181# 176#
@@ -225,20 +220,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
225# CONFIG_INET6_TUNNEL is not set 220# CONFIG_INET6_TUNNEL is not set
226# CONFIG_NETWORK_SECMARK is not set 221# CONFIG_NETWORK_SECMARK is not set
227# CONFIG_NETFILTER is not set 222# CONFIG_NETFILTER is not set
228
229#
230# DCCP Configuration (EXPERIMENTAL)
231#
232# CONFIG_IP_DCCP is not set 223# CONFIG_IP_DCCP is not set
233
234#
235# SCTP Configuration (EXPERIMENTAL)
236#
237# CONFIG_IP_SCTP is not set 224# CONFIG_IP_SCTP is not set
238
239#
240# TIPC Configuration (EXPERIMENTAL)
241#
242# CONFIG_TIPC is not set 225# CONFIG_TIPC is not set
243# CONFIG_ATM is not set 226# CONFIG_ATM is not set
244# CONFIG_BRIDGE is not set 227# CONFIG_BRIDGE is not set
@@ -274,6 +257,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
274# CONFIG_MAC80211 is not set 257# CONFIG_MAC80211 is not set
275# CONFIG_IEEE80211 is not set 258# CONFIG_IEEE80211 is not set
276# CONFIG_RFKILL is not set 259# CONFIG_RFKILL is not set
260# CONFIG_NET_9P is not set
277 261
278# 262#
279# Device Drivers 263# Device Drivers
@@ -288,26 +272,10 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
288# CONFIG_DEBUG_DRIVER is not set 272# CONFIG_DEBUG_DRIVER is not set
289# CONFIG_DEBUG_DEVRES is not set 273# CONFIG_DEBUG_DEVRES is not set
290# CONFIG_SYS_HYPERVISOR is not set 274# CONFIG_SYS_HYPERVISOR is not set
291
292#
293# Connector - unified userspace <-> kernelspace linker
294#
295# CONFIG_CONNECTOR is not set 275# CONFIG_CONNECTOR is not set
296# CONFIG_MTD is not set 276# CONFIG_MTD is not set
297
298#
299# Parallel port support
300#
301# CONFIG_PARPORT is not set 277# CONFIG_PARPORT is not set
302 278CONFIG_BLK_DEV=y
303#
304# Plug and Play support
305#
306# CONFIG_PNPACPI is not set
307
308#
309# Block devices
310#
311# CONFIG_BLK_CPQ_DA is not set 279# CONFIG_BLK_CPQ_DA is not set
312# CONFIG_BLK_CPQ_CISS_DA is not set 280# CONFIG_BLK_CPQ_CISS_DA is not set
313# CONFIG_BLK_DEV_DAC960 is not set 281# CONFIG_BLK_DEV_DAC960 is not set
@@ -323,18 +291,11 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
323CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 291CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
324# CONFIG_CDROM_PKTCDVD is not set 292# CONFIG_CDROM_PKTCDVD is not set
325# CONFIG_ATA_OVER_ETH is not set 293# CONFIG_ATA_OVER_ETH is not set
326 294CONFIG_MISC_DEVICES=y
327#
328# Misc devices
329#
330# CONFIG_PHANTOM is not set 295# CONFIG_PHANTOM is not set
296# CONFIG_EEPROM_93CX6 is not set
331# CONFIG_SGI_IOC4 is not set 297# CONFIG_SGI_IOC4 is not set
332# CONFIG_TIFM_CORE is not set 298# CONFIG_TIFM_CORE is not set
333# CONFIG_BLINK is not set
334
335#
336# ATA/ATAPI/MFM/RLL support
337#
338# CONFIG_IDE is not set 299# CONFIG_IDE is not set
339 300
340# 301#
@@ -342,6 +303,7 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
342# 303#
343# CONFIG_RAID_ATTRS is not set 304# CONFIG_RAID_ATTRS is not set
344CONFIG_SCSI=y 305CONFIG_SCSI=y
306CONFIG_SCSI_DMA=y
345# CONFIG_SCSI_TGT is not set 307# CONFIG_SCSI_TGT is not set
346# CONFIG_SCSI_NETLINK is not set 308# CONFIG_SCSI_NETLINK is not set
347CONFIG_SCSI_PROC_FS=y 309CONFIG_SCSI_PROC_FS=y
@@ -410,13 +372,8 @@ CONFIG_SCSI_SYM53C8XX_MMIO=y
410# CONFIG_SCSI_DC390T is not set 372# CONFIG_SCSI_DC390T is not set
411# CONFIG_SCSI_NSP32 is not set 373# CONFIG_SCSI_NSP32 is not set
412# CONFIG_SCSI_DEBUG is not set 374# CONFIG_SCSI_DEBUG is not set
413# CONFIG_SCSI_ESP_CORE is not set
414# CONFIG_SCSI_SRP is not set 375# CONFIG_SCSI_SRP is not set
415# CONFIG_ATA is not set 376# CONFIG_ATA is not set
416
417#
418# Multi-device support (RAID and LVM)
419#
420# CONFIG_MD is not set 377# CONFIG_MD is not set
421 378
422# 379#
@@ -432,30 +389,16 @@ CONFIG_SCSI_SYM53C8XX_MMIO=y
432# 389#
433# CONFIG_FIREWIRE is not set 390# CONFIG_FIREWIRE is not set
434# CONFIG_IEEE1394 is not set 391# CONFIG_IEEE1394 is not set
435
436#
437# I2O device support
438#
439# CONFIG_I2O is not set 392# CONFIG_I2O is not set
440
441#
442# Network device support
443#
444CONFIG_NETDEVICES=y 393CONFIG_NETDEVICES=y
394# CONFIG_NETDEVICES_MULTIQUEUE is not set
445# CONFIG_DUMMY is not set 395# CONFIG_DUMMY is not set
446# CONFIG_BONDING is not set 396# CONFIG_BONDING is not set
397# CONFIG_MACVLAN is not set
447# CONFIG_EQUALIZER is not set 398# CONFIG_EQUALIZER is not set
448# CONFIG_TUN is not set 399# CONFIG_TUN is not set
449
450#
451# ARCnet devices
452#
453# CONFIG_ARCNET is not set 400# CONFIG_ARCNET is not set
454# CONFIG_PHYLIB is not set 401# CONFIG_PHYLIB is not set
455
456#
457# Ethernet (10 or 100Mbit)
458#
459CONFIG_NET_ETHERNET=y 402CONFIG_NET_ETHERNET=y
460# CONFIG_MII is not set 403# CONFIG_MII is not set
461# CONFIG_STNIC is not set 404# CONFIG_STNIC is not set
@@ -464,10 +407,6 @@ CONFIG_NET_ETHERNET=y
464# CONFIG_CASSINI is not set 407# CONFIG_CASSINI is not set
465# CONFIG_NET_VENDOR_3COM is not set 408# CONFIG_NET_VENDOR_3COM is not set
466# CONFIG_SMC91X is not set 409# CONFIG_SMC91X is not set
467
468#
469# Tulip family network device support
470#
471CONFIG_NET_TULIP=y 410CONFIG_NET_TULIP=y
472# CONFIG_DE2104X is not set 411# CONFIG_DE2104X is not set
473CONFIG_TULIP=y 412CONFIG_TULIP=y
@@ -510,7 +449,6 @@ CONFIG_NETDEV_1000=y
510# CONFIG_SIS190 is not set 449# CONFIG_SIS190 is not set
511# CONFIG_SKGE is not set 450# CONFIG_SKGE is not set
512# CONFIG_SKY2 is not set 451# CONFIG_SKY2 is not set
513# CONFIG_SK98LIN is not set
514# CONFIG_VIA_VELOCITY is not set 452# CONFIG_VIA_VELOCITY is not set
515# CONFIG_TIGON3 is not set 453# CONFIG_TIGON3 is not set
516# CONFIG_BNX2 is not set 454# CONFIG_BNX2 is not set
@@ -524,11 +462,6 @@ CONFIG_NETDEV_10000=y
524# CONFIG_MYRI10GE is not set 462# CONFIG_MYRI10GE is not set
525# CONFIG_NETXEN_NIC is not set 463# CONFIG_NETXEN_NIC is not set
526# CONFIG_MLX4_CORE is not set 464# CONFIG_MLX4_CORE is not set
527CONFIG_MLX4_DEBUG=y
528
529#
530# Token Ring devices
531#
532# CONFIG_TR is not set 465# CONFIG_TR is not set
533 466
534# 467#
@@ -546,15 +479,7 @@ CONFIG_MLX4_DEBUG=y
546# CONFIG_NETCONSOLE is not set 479# CONFIG_NETCONSOLE is not set
547# CONFIG_NETPOLL is not set 480# CONFIG_NETPOLL is not set
548# CONFIG_NET_POLL_CONTROLLER is not set 481# CONFIG_NET_POLL_CONTROLLER is not set
549
550#
551# ISDN subsystem
552#
553# CONFIG_ISDN is not set 482# CONFIG_ISDN is not set
554
555#
556# Telephony Support
557#
558# CONFIG_PHONE is not set 483# CONFIG_PHONE is not set
559 484
560# 485#
@@ -562,6 +487,7 @@ CONFIG_MLX4_DEBUG=y
562# 487#
563CONFIG_INPUT=y 488CONFIG_INPUT=y
564# CONFIG_INPUT_FF_MEMLESS is not set 489# CONFIG_INPUT_FF_MEMLESS is not set
490# CONFIG_INPUT_POLLDEV is not set
565 491
566# 492#
567# Userland interfaces 493# Userland interfaces
@@ -638,10 +564,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
638CONFIG_UNIX98_PTYS=y 564CONFIG_UNIX98_PTYS=y
639CONFIG_LEGACY_PTYS=y 565CONFIG_LEGACY_PTYS=y
640CONFIG_LEGACY_PTY_COUNT=256 566CONFIG_LEGACY_PTY_COUNT=256
641
642#
643# IPMI
644#
645# CONFIG_IPMI_HANDLER is not set 567# CONFIG_IPMI_HANDLER is not set
646CONFIG_WATCHDOG=y 568CONFIG_WATCHDOG=y
647# CONFIG_WATCHDOG_NOWAYOUT is not set 569# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -658,15 +580,10 @@ CONFIG_WATCHDOG=y
658# CONFIG_PCIPCWATCHDOG is not set 580# CONFIG_PCIPCWATCHDOG is not set
659# CONFIG_WDTPCI is not set 581# CONFIG_WDTPCI is not set
660CONFIG_HW_RANDOM=y 582CONFIG_HW_RANDOM=y
661# CONFIG_GEN_RTC is not set
662# CONFIG_R3964 is not set 583# CONFIG_R3964 is not set
663# CONFIG_APPLICOM is not set 584# CONFIG_APPLICOM is not set
664# CONFIG_DRM is not set 585# CONFIG_DRM is not set
665# CONFIG_RAW_DRIVER is not set 586# CONFIG_RAW_DRIVER is not set
666
667#
668# TPM devices
669#
670# CONFIG_TCG_TPM is not set 587# CONFIG_TCG_TPM is not set
671CONFIG_DEVPORT=y 588CONFIG_DEVPORT=y
672# CONFIG_I2C is not set 589# CONFIG_I2C is not set
@@ -676,20 +593,24 @@ CONFIG_DEVPORT=y
676# 593#
677# CONFIG_SPI is not set 594# CONFIG_SPI is not set
678# CONFIG_SPI_MASTER is not set 595# CONFIG_SPI_MASTER is not set
679
680#
681# Dallas's 1-wire bus
682#
683# CONFIG_W1 is not set 596# CONFIG_W1 is not set
597# CONFIG_POWER_SUPPLY is not set
684CONFIG_HWMON=y 598CONFIG_HWMON=y
685# CONFIG_HWMON_VID is not set 599# CONFIG_HWMON_VID is not set
686# CONFIG_SENSORS_ABITUGURU is not set 600# CONFIG_SENSORS_ABITUGURU is not set
601# CONFIG_SENSORS_ABITUGURU3 is not set
687# CONFIG_SENSORS_F71805F is not set 602# CONFIG_SENSORS_F71805F is not set
603# CONFIG_SENSORS_IT87 is not set
604# CONFIG_SENSORS_PC87360 is not set
688# CONFIG_SENSORS_PC87427 is not set 605# CONFIG_SENSORS_PC87427 is not set
606# CONFIG_SENSORS_SIS5595 is not set
689# CONFIG_SENSORS_SMSC47M1 is not set 607# CONFIG_SENSORS_SMSC47M1 is not set
690# CONFIG_SENSORS_SMSC47B397 is not set 608# CONFIG_SENSORS_SMSC47B397 is not set
609# CONFIG_SENSORS_VIA686A is not set
691# CONFIG_SENSORS_VT1211 is not set 610# CONFIG_SENSORS_VT1211 is not set
611# CONFIG_SENSORS_VT8231 is not set
692# CONFIG_SENSORS_W83627HF is not set 612# CONFIG_SENSORS_W83627HF is not set
613# CONFIG_SENSORS_W83627EHF is not set
693# CONFIG_HWMON_DEBUG_CHIP is not set 614# CONFIG_HWMON_DEBUG_CHIP is not set
694 615
695# 616#
@@ -739,7 +660,6 @@ CONFIG_FB_MODE_HELPERS=y
739# CONFIG_FB_CYBER2000 is not set 660# CONFIG_FB_CYBER2000 is not set
740# CONFIG_FB_ASILIANT is not set 661# CONFIG_FB_ASILIANT is not set
741# CONFIG_FB_IMSTT is not set 662# CONFIG_FB_IMSTT is not set
742# CONFIG_FB_EPSON1355 is not set
743# CONFIG_FB_S1D13XXX is not set 663# CONFIG_FB_S1D13XXX is not set
744# CONFIG_FB_NVIDIA is not set 664# CONFIG_FB_NVIDIA is not set
745# CONFIG_FB_RIVA is not set 665# CONFIG_FB_RIVA is not set
@@ -765,6 +685,7 @@ CONFIG_FB_KYRO=y
765# 685#
766CONFIG_DUMMY_CONSOLE=y 686CONFIG_DUMMY_CONSOLE=y
767CONFIG_FRAMEBUFFER_CONSOLE=y 687CONFIG_FRAMEBUFFER_CONSOLE=y
688# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
768# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set 689# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
769CONFIG_FONTS=y 690CONFIG_FONTS=y
770# CONFIG_FONT_8x8 is not set 691# CONFIG_FONT_8x8 is not set
@@ -789,16 +710,10 @@ CONFIG_LOGO_SUPERH_CLUT224=y
789# Sound 710# Sound
790# 711#
791# CONFIG_SOUND is not set 712# CONFIG_SOUND is not set
792 713CONFIG_HID_SUPPORT=y
793#
794# HID Devices
795#
796CONFIG_HID=y 714CONFIG_HID=y
797# CONFIG_HID_DEBUG is not set 715# CONFIG_HID_DEBUG is not set
798 716CONFIG_USB_SUPPORT=y
799#
800# USB support
801#
802CONFIG_USB_ARCH_HAS_HCD=y 717CONFIG_USB_ARCH_HAS_HCD=y
803CONFIG_USB_ARCH_HAS_OHCI=y 718CONFIG_USB_ARCH_HAS_OHCI=y
804CONFIG_USB_ARCH_HAS_EHCI=y 719CONFIG_USB_ARCH_HAS_EHCI=y
@@ -826,17 +741,9 @@ CONFIG_USB_ARCH_HAS_EHCI=y
826# 741#
827# LED Triggers 742# LED Triggers
828# 743#
829
830#
831# InfiniBand support
832#
833# CONFIG_INFINIBAND is not set 744# CONFIG_INFINIBAND is not set
834 745
835# 746#
836# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
837#
838
839#
840# Real Time Clock 747# Real Time Clock
841# 748#
842# CONFIG_RTC_CLASS is not set 749# CONFIG_RTC_CLASS is not set
@@ -855,6 +762,11 @@ CONFIG_USB_ARCH_HAS_EHCI=y
855# 762#
856 763
857# 764#
765# Userspace I/O
766#
767# CONFIG_UIO is not set
768
769#
858# File systems 770# File systems
859# 771#
860CONFIG_EXT2_FS=y 772CONFIG_EXT2_FS=y
@@ -950,7 +862,6 @@ CONFIG_SUNRPC=y
950# CONFIG_NCP_FS is not set 862# CONFIG_NCP_FS is not set
951# CONFIG_CODA_FS is not set 863# CONFIG_CODA_FS is not set
952# CONFIG_AFS_FS is not set 864# CONFIG_AFS_FS is not set
953# CONFIG_9P_FS is not set
954 865
955# 866#
956# Partition Types 867# Partition Types
@@ -1001,6 +912,7 @@ CONFIG_DEBUG_FS=y
1001CONFIG_DEBUG_KERNEL=y 912CONFIG_DEBUG_KERNEL=y
1002# CONFIG_DEBUG_SHIRQ is not set 913# CONFIG_DEBUG_SHIRQ is not set
1003CONFIG_DETECT_SOFTLOCKUP=y 914CONFIG_DETECT_SOFTLOCKUP=y
915CONFIG_SCHED_DEBUG=y
1004CONFIG_SCHEDSTATS=y 916CONFIG_SCHEDSTATS=y
1005# CONFIG_TIMER_STATS is not set 917# CONFIG_TIMER_STATS is not set
1006# CONFIG_DEBUG_SLAB is not set 918# CONFIG_DEBUG_SLAB is not set
@@ -1017,7 +929,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
1017# CONFIG_DEBUG_LIST is not set 929# CONFIG_DEBUG_LIST is not set
1018CONFIG_FRAME_POINTER=y 930CONFIG_FRAME_POINTER=y
1019CONFIG_FORCED_INLINING=y 931CONFIG_FORCED_INLINING=y
1020# CONFIG_RCU_TORTURE_TEST is not set
1021# CONFIG_FAULT_INJECTION is not set 932# CONFIG_FAULT_INJECTION is not set
1022# CONFIG_EARLY_PRINTK is not set 933# CONFIG_EARLY_PRINTK is not set
1023# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set 934# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set
@@ -1033,10 +944,6 @@ CONFIG_SH64_SR_WATCH=y
1033# 944#
1034# CONFIG_KEYS is not set 945# CONFIG_KEYS is not set
1035# CONFIG_SECURITY is not set 946# CONFIG_SECURITY is not set
1036
1037#
1038# Cryptographic options
1039#
1040# CONFIG_CRYPTO is not set 947# CONFIG_CRYPTO is not set
1041 948
1042# 949#
@@ -1047,6 +954,7 @@ CONFIG_BITREVERSE=y
1047# CONFIG_CRC16 is not set 954# CONFIG_CRC16 is not set
1048# CONFIG_CRC_ITU_T is not set 955# CONFIG_CRC_ITU_T is not set
1049CONFIG_CRC32=y 956CONFIG_CRC32=y
957# CONFIG_CRC7 is not set
1050# CONFIG_LIBCRC32C is not set 958# CONFIG_LIBCRC32C is not set
1051CONFIG_PLIST=y 959CONFIG_PLIST=y
1052CONFIG_HAS_IOMEM=y 960CONFIG_HAS_IOMEM=y
diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S
index f3740ddbc471..186406d3ad9c 100644
--- a/arch/sh64/kernel/head.S
+++ b/arch/sh64/kernel/head.S
@@ -124,7 +124,7 @@ empty_bad_pte_table:
124fpu_in_use: .quad 0 124fpu_in_use: .quad 0
125 125
126 126
127 .section .text, "ax" 127 .section .text.head, "ax"
128 .balign L1_CACHE_BYTES 128 .balign L1_CACHE_BYTES
129/* 129/*
130 * Condition at the entry of __stext: 130 * Condition at the entry of __stext:
diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c
index 3334f99b5835..388bb711f1b0 100644
--- a/arch/sh64/kernel/pci_sh5.c
+++ b/arch/sh64/kernel/pci_sh5.c
@@ -48,7 +48,7 @@ static void __init pci_fixup_ide_bases(struct pci_dev *d)
48} 48}
49DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); 49DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
50 50
51char * __init pcibios_setup(char *str) 51char * __devinit pcibios_setup(char *str)
52{ 52{
53 return str; 53 return str;
54} 54}
@@ -497,7 +497,7 @@ static int __init pcibios_init(void)
497 497
498subsys_initcall(pcibios_init); 498subsys_initcall(pcibios_init);
499 499
500void __init pcibios_fixup_bus(struct pci_bus *bus) 500void __devinit pcibios_fixup_bus(struct pci_bus *bus)
501{ 501{
502 struct pci_dev *dev = bus->self; 502 struct pci_dev *dev = bus->self;
503 int i; 503 int i;
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
index a5c680d29384..abb94c05d07a 100644
--- a/arch/sh64/kernel/syscalls.S
+++ b/arch/sh64/kernel/syscalls.S
@@ -378,3 +378,4 @@ sys_call_table:
378 .long sys_signalfd 378 .long sys_signalfd
379 .long sys_timerfd /* 350 */ 379 .long sys_timerfd /* 350 */
380 .long sys_eventfd 380 .long sys_eventfd
381 .long sys_fallocate
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
index 8ac9c7c5f848..267b4f9af2e1 100644
--- a/arch/sh64/kernel/vmlinux.lds.S
+++ b/arch/sh64/kernel/vmlinux.lds.S
@@ -54,6 +54,7 @@ SECTIONS
54 } = 0 54 } = 0
55 55
56 .text : C_PHYS(.text) { 56 .text : C_PHYS(.text) {
57 *(.text.head)
57 TEXT_TEXT 58 TEXT_TEXT
58 *(.text64) 59 *(.text64)
59 *(.text..SHmedia32) 60 *(.text..SHmedia32)
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
index ff26c02511aa..990857756d44 100644
--- a/arch/sh64/mm/ioremap.c
+++ b/arch/sh64/mm/ioremap.c
@@ -242,7 +242,7 @@ static void shmedia_free_io(struct resource *res)
242 release_resource(res); 242 release_resource(res);
243} 243}
244 244
245static void *sh64_get_page(void) 245static __init_refok void *sh64_get_page(void)
246{ 246{
247 extern int after_bootmem; 247 extern int after_bootmem;
248 void *page; 248 void *page;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 603d83ad65c8..9d327ec59759 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -24,6 +24,9 @@ config GENERIC_ISA_DMA
24config ARCH_NO_VIRT_TO_BUS 24config ARCH_NO_VIRT_TO_BUS
25 def_bool y 25 def_bool y
26 26
27config OF
28 def_bool y
29
27source "init/Kconfig" 30source "init/Kconfig"
28 31
29menu "General machine setup" 32menu "General machine setup"
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index 7bb86b9cdaa3..ac352eb6dff3 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -148,6 +148,7 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
148{ 148{
149 const struct linux_prom_registers *regs; 149 const struct linux_prom_registers *regs;
150 struct linux_ebus_child *child; 150 struct linux_ebus_child *child;
151 struct dev_archdata *sd;
151 const int *irqs; 152 const int *irqs;
152 int i, n, len; 153 int i, n, len;
153 unsigned long baseaddr; 154 unsigned long baseaddr;
@@ -234,6 +235,10 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
234 } 235 }
235 } 236 }
236 237
238 sd = &dev->ofdev.dev.archdata;
239 sd->prom_node = dp;
240 sd->op = &dev->ofdev;
241
237 dev->ofdev.node = dp; 242 dev->ofdev.node = dp;
238 dev->ofdev.dev.parent = &dev->bus->ofdev.dev; 243 dev->ofdev.dev.parent = &dev->bus->ofdev.dev;
239 dev->ofdev.dev.bus = &ebus_bus_type; 244 dev->ofdev.dev.bus = &ebus_bus_type;
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 831f540251f8..eac38388f5fd 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1749,8 +1749,8 @@ fpload:
1749__ndelay: 1749__ndelay:
1750 save %sp, -STACKFRAME_SZ, %sp 1750 save %sp, -STACKFRAME_SZ, %sp
1751 mov %i0, %o0 1751 mov %i0, %o0
1752 call .umul 1752 call .umul ! round multiplier up so large ns ok
1753 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ) 1753 mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
1754 call .umul 1754 call .umul
1755 mov %i1, %o1 ! udelay_val 1755 mov %i1, %o1 ! udelay_val
1756 ba delay_continue 1756 ba delay_continue
@@ -1760,11 +1760,17 @@ __ndelay:
1760__udelay: 1760__udelay:
1761 save %sp, -STACKFRAME_SZ, %sp 1761 save %sp, -STACKFRAME_SZ, %sp
1762 mov %i0, %o0 1762 mov %i0, %o0
1763 sethi %hi(0x10c6), %o1 1763 sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
1764 call .umul 1764 call .umul
1765 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000 1765 or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
1766 call .umul 1766 call .umul
1767 mov %i1, %o1 ! udelay_val 1767 mov %i1, %o1 ! udelay_val
1768 sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
1769 or %g0, %lo(0x028f4b62), %l0
1770 addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
1771 bcs,a 3f
1772 add %o1, 0x01, %o1
17733:
1768 call .umul 1774 call .umul
1769 mov HZ, %o0 ! >>32 earlier for wider range 1775 mov HZ, %o0 ! >>32 earlier for wider range
1770 1776
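The delay multipliers are 32.32 fixed-point scale factors, and the change rounds them up and adds a rounding bias so long delays can no longer come out slightly short. Worked out, assuming the usual sparc32 HZ of 100:

/*
 * __udelay scale: 2^32 / 1 000 000              = 4294.97
 *                 old 0x10C6 = 4294 truncates, new 0x10C7 = 4295 rounds up
 * __ndelay scale: 2^32 / (1 000 000 000 / HZ)   = 2^32 / 10^7 = 429.50
 *                 old 0x1AD  = 429  truncates, new 0x1AE  = 430  rounds up
 * rounding bias:  0x028F4B62 = 42 945 378 ~= 0.009999 * 2^32, added (with
 *                 carry) to the low word of the intermediate product before
 *                 its high word is taken, so the truncation no longer
 *                 shortens the delay.
 */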
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index f257a67bcf93..75b2240ad0f9 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -47,6 +47,8 @@
47#include <asm/cacheflush.h> 47#include <asm/cacheflush.h>
48#include <asm/irq_regs.h> 48#include <asm/irq_regs.h>
49 49
50#include "irq.h"
51
50#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
51#define SMP_NOP2 "nop; nop;\n\t" 53#define SMP_NOP2 "nop; nop;\n\t"
52#define SMP_NOP3 "nop; nop; nop;\n\t" 54#define SMP_NOP3 "nop; nop; nop;\n\t"
@@ -268,7 +270,7 @@ void free_irq(unsigned int irq, void *dev_id)
268 kfree(action); 270 kfree(action);
269 271
270 if (!sparc_irq[cpu_irq].action) 272 if (!sparc_irq[cpu_irq].action)
271 disable_irq(irq); 273 __disable_irq(irq);
272 274
273out_unlock: 275out_unlock:
274 spin_unlock_irqrestore(&irq_action_lock, flags); 276 spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -464,7 +466,7 @@ int request_fast_irq(unsigned int irq,
464 466
465 sparc_irq[cpu_irq].action = action; 467 sparc_irq[cpu_irq].action = action;
466 468
467 enable_irq(irq); 469 __enable_irq(irq);
468 470
469 ret = 0; 471 ret = 0;
470out_unlock: 472out_unlock:
@@ -544,7 +546,7 @@ int request_irq(unsigned int irq,
544 546
545 *actionp = action; 547 *actionp = action;
546 548
547 enable_irq(irq); 549 __enable_irq(irq);
548 550
549 ret = 0; 551 ret = 0;
550out_unlock: 552out_unlock:
@@ -555,6 +557,25 @@ out:
555 557
556EXPORT_SYMBOL(request_irq); 558EXPORT_SYMBOL(request_irq);
557 559
560void disable_irq_nosync(unsigned int irq)
561{
562 return __disable_irq(irq);
563}
564EXPORT_SYMBOL(disable_irq_nosync);
565
566void disable_irq(unsigned int irq)
567{
568 return __disable_irq(irq);
569}
570EXPORT_SYMBOL(disable_irq);
571
572void enable_irq(unsigned int irq)
573{
574 return __enable_irq(irq);
575}
576
577EXPORT_SYMBOL(enable_irq);
578
558/* We really don't need these at all on the Sparc. We only have 579/* We really don't need these at all on the Sparc. We only have
559 * stubs here because they are exported to modules. 580 * stubs here because they are exported to modules.
560 */ 581 */
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
new file mode 100644
index 000000000000..32ef3ebd0a88
--- /dev/null
+++ b/arch/sparc/kernel/irq.h
@@ -0,0 +1,68 @@
1#include <asm/btfixup.h>
2
3/* Dave Redman (djhr@tadpole.co.uk)
4 * changed these to function pointers.. it saves cycles and will allow
5 * the irq dependencies to be split into different files at a later date
6 * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
7 * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Changed these to btfixup entities... It saves cycles :)
9 */
10
11BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
12BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
13BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
14BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
15BTFIXUPDEF_CALL(void, clear_clock_irq, void)
16BTFIXUPDEF_CALL(void, clear_profile_irq, int)
17BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
18
19static inline void __disable_irq(unsigned int irq)
20{
21 BTFIXUP_CALL(disable_irq)(irq);
22}
23
24static inline void __enable_irq(unsigned int irq)
25{
26 BTFIXUP_CALL(enable_irq)(irq);
27}
28
29static inline void disable_pil_irq(unsigned int irq)
30{
31 BTFIXUP_CALL(disable_pil_irq)(irq);
32}
33
34static inline void enable_pil_irq(unsigned int irq)
35{
36 BTFIXUP_CALL(enable_pil_irq)(irq);
37}
38
39static inline void clear_clock_irq(void)
40{
41 BTFIXUP_CALL(clear_clock_irq)();
42}
43
44static inline void clear_profile_irq(int irq)
45{
46 BTFIXUP_CALL(clear_profile_irq)(irq);
47}
48
49static inline void load_profile_irq(int cpu, int limit)
50{
51 BTFIXUP_CALL(load_profile_irq)(cpu, limit);
52}
53
54extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
55
56extern void claim_ticker14(irq_handler_t irq_handler,
57 int irq,
58 unsigned int timeout);
59
60#ifdef CONFIG_SMP
61BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
62BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
63BTFIXUPDEF_CALL(void, set_irq_udt, int)
64
65#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
66#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
67#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
68#endif
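The new header turns the per-platform IRQ hooks into btfixup call slots, which each sun4c/sun4m/sun4d back end patches in at boot. A sketch of how a platform would fill the slots, assuming the usual sparc32 BTFIXUPSET_CALL(slot, function, BTFIXUPCALL_NORM) pattern; the example_* names are made up:

#include <linux/init.h>
#include <asm/btfixup.h>

static void example_disable_irq(unsigned int irq)
{
        /* mask the line in the platform interrupt controller */
}

static void example_enable_irq(unsigned int irq)
{
        /* unmask it again */
}

void __init example_init_IRQ(void)
{
        BTFIXUPSET_CALL(disable_irq, example_disable_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(enable_irq,  example_enable_irq,  BTFIXUPCALL_NORM);
}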
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index fd7f8cb668a3..36383f73d685 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -1,132 +1,13 @@
1#include <linux/string.h> 1#include <linux/string.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/of.h>
3#include <linux/init.h> 4#include <linux/init.h>
4#include <linux/module.h> 5#include <linux/module.h>
5#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
6#include <linux/slab.h> 7#include <linux/slab.h>
7 8#include <linux/errno.h>
8#include <asm/errno.h> 9#include <linux/of_device.h>
9#include <asm/of_device.h> 10#include <linux/of_platform.h>
10
11/**
12 * of_match_device - Tell if an of_device structure has a matching
13 * of_match structure
14 * @ids: array of of device match structures to search in
15 * @dev: the of device structure to match against
16 *
17 * Used by a driver to check whether an of_device present in the
18 * system is in its list of supported devices.
19 */
20const struct of_device_id *of_match_device(const struct of_device_id *matches,
21 const struct of_device *dev)
22{
23 if (!dev->node)
24 return NULL;
25 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
26 int match = 1;
27 if (matches->name[0])
28 match &= dev->node->name
29 && !strcmp(matches->name, dev->node->name);
30 if (matches->type[0])
31 match &= dev->node->type
32 && !strcmp(matches->type, dev->node->type);
33 if (matches->compatible[0])
34 match &= of_device_is_compatible(dev->node,
35 matches->compatible);
36 if (match)
37 return matches;
38 matches++;
39 }
40 return NULL;
41}
42
43static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
44{
45 struct of_device * of_dev = to_of_device(dev);
46 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
47 const struct of_device_id * matches = of_drv->match_table;
48
49 if (!matches)
50 return 0;
51
52 return of_match_device(matches, of_dev) != NULL;
53}
54
55struct of_device *of_dev_get(struct of_device *dev)
56{
57 struct device *tmp;
58
59 if (!dev)
60 return NULL;
61 tmp = get_device(&dev->dev);
62 if (tmp)
63 return to_of_device(tmp);
64 else
65 return NULL;
66}
67
68void of_dev_put(struct of_device *dev)
69{
70 if (dev)
71 put_device(&dev->dev);
72}
73
74
75static int of_device_probe(struct device *dev)
76{
77 int error = -ENODEV;
78 struct of_platform_driver *drv;
79 struct of_device *of_dev;
80 const struct of_device_id *match;
81
82 drv = to_of_platform_driver(dev->driver);
83 of_dev = to_of_device(dev);
84
85 if (!drv->probe)
86 return error;
87
88 of_dev_get(of_dev);
89
90 match = of_match_device(drv->match_table, of_dev);
91 if (match)
92 error = drv->probe(of_dev, match);
93 if (error)
94 of_dev_put(of_dev);
95
96 return error;
97}
98
99static int of_device_remove(struct device *dev)
100{
101 struct of_device * of_dev = to_of_device(dev);
102 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
103
104 if (dev->driver && drv->remove)
105 drv->remove(of_dev);
106 return 0;
107}
108
109static int of_device_suspend(struct device *dev, pm_message_t state)
110{
111 struct of_device * of_dev = to_of_device(dev);
112 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
113 int error = 0;
114
115 if (dev->driver && drv->suspend)
116 error = drv->suspend(of_dev, state);
117 return error;
118}
119
120static int of_device_resume(struct device * dev)
121{
122 struct of_device * of_dev = to_of_device(dev);
123 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
124 int error = 0;
125
126 if (dev->driver && drv->resume)
127 error = drv->resume(of_dev);
128 return error;
129}
130 11
131static int node_match(struct device *dev, void *data) 12static int node_match(struct device *dev, void *data)
132{ 13{
@@ -138,7 +19,7 @@ static int node_match(struct device *dev, void *data)
138 19
139struct of_device *of_find_device_by_node(struct device_node *dp) 20struct of_device *of_find_device_by_node(struct device_node *dp)
140{ 21{
141 struct device *dev = bus_find_device(&of_bus_type, NULL, 22 struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
142 dp, node_match); 23 dp, node_match);
143 24
144 if (dev) 25 if (dev)
@@ -149,38 +30,17 @@ struct of_device *of_find_device_by_node(struct device_node *dp)
149EXPORT_SYMBOL(of_find_device_by_node); 30EXPORT_SYMBOL(of_find_device_by_node);
150 31
151#ifdef CONFIG_PCI 32#ifdef CONFIG_PCI
152struct bus_type ebus_bus_type = { 33struct bus_type ebus_bus_type;
153 .name = "ebus",
154 .match = of_platform_bus_match,
155 .probe = of_device_probe,
156 .remove = of_device_remove,
157 .suspend = of_device_suspend,
158 .resume = of_device_resume,
159};
160EXPORT_SYMBOL(ebus_bus_type); 34EXPORT_SYMBOL(ebus_bus_type);
161#endif 35#endif
162 36
163#ifdef CONFIG_SBUS 37#ifdef CONFIG_SBUS
164struct bus_type sbus_bus_type = { 38struct bus_type sbus_bus_type;
165 .name = "sbus",
166 .match = of_platform_bus_match,
167 .probe = of_device_probe,
168 .remove = of_device_remove,
169 .suspend = of_device_suspend,
170 .resume = of_device_resume,
171};
172EXPORT_SYMBOL(sbus_bus_type); 39EXPORT_SYMBOL(sbus_bus_type);
173#endif 40#endif
174 41
175struct bus_type of_bus_type = { 42struct bus_type of_platform_bus_type;
176 .name = "of", 43EXPORT_SYMBOL(of_platform_bus_type);
177 .match = of_platform_bus_match,
178 .probe = of_device_probe,
179 .remove = of_device_remove,
180 .suspend = of_device_suspend,
181 .resume = of_device_resume,
182};
183EXPORT_SYMBOL(of_bus_type);
184 44
185static inline u64 of_read_addr(const u32 *cell, int size) 45static inline u64 of_read_addr(const u32 *cell, int size)
186{ 46{
@@ -560,11 +420,16 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
560{ 420{
561 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); 421 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
562 const struct linux_prom_irqs *intr; 422 const struct linux_prom_irqs *intr;
423 struct dev_archdata *sd;
563 int len, i; 424 int len, i;
564 425
565 if (!op) 426 if (!op)
566 return NULL; 427 return NULL;
567 428
429 sd = &op->dev.archdata;
430 sd->prom_node = dp;
431 sd->op = op;
432
568 op->node = dp; 433 op->node = dp;
569 434
570 op->clock_freq = of_getintprop_default(dp, "clock-frequency", 435 op->clock_freq = of_getintprop_default(dp, "clock-frequency",
@@ -646,7 +511,7 @@ build_resources:
646 build_device_resources(op, parent); 511 build_device_resources(op, parent);
647 512
648 op->dev.parent = parent; 513 op->dev.parent = parent;
649 op->dev.bus = &of_bus_type; 514 op->dev.bus = &of_platform_bus_type;
650 if (!parent) 515 if (!parent)
651 strcpy(op->dev.bus_id, "root"); 516 strcpy(op->dev.bus_id, "root");
652 else 517 else
@@ -690,14 +555,14 @@ static int __init of_bus_driver_init(void)
690{ 555{
691 int err; 556 int err;
692 557
693 err = bus_register(&of_bus_type); 558 err = of_bus_type_init(&of_platform_bus_type, "of");
694#ifdef CONFIG_PCI 559#ifdef CONFIG_PCI
695 if (!err) 560 if (!err)
696 err = bus_register(&ebus_bus_type); 561 err = of_bus_type_init(&ebus_bus_type, "ebus");
697#endif 562#endif
698#ifdef CONFIG_SBUS 563#ifdef CONFIG_SBUS
699 if (!err) 564 if (!err)
700 err = bus_register(&sbus_bus_type); 565 err = of_bus_type_init(&sbus_bus_type, "sbus");
701#endif 566#endif
702 567
703 if (!err) 568 if (!err)
@@ -735,56 +600,6 @@ void of_unregister_driver(struct of_platform_driver *drv)
735 driver_unregister(&drv->driver); 600 driver_unregister(&drv->driver);
736} 601}
737 602
738
739static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
740{
741 struct of_device *ofdev;
742
743 ofdev = to_of_device(dev);
744 return sprintf(buf, "%s", ofdev->node->full_name);
745}
746
747static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
748
749/**
750 * of_release_dev - free an of device structure when all users of it are finished.
751 * @dev: device that's been disconnected
752 *
753 * Will be called only by the device core when all users of this of device are
754 * done.
755 */
756void of_release_dev(struct device *dev)
757{
758 struct of_device *ofdev;
759
760 ofdev = to_of_device(dev);
761
762 kfree(ofdev);
763}
764
765int of_device_register(struct of_device *ofdev)
766{
767 int rc;
768
769 BUG_ON(ofdev->node == NULL);
770
771 rc = device_register(&ofdev->dev);
772 if (rc)
773 return rc;
774
775 rc = device_create_file(&ofdev->dev, &dev_attr_devspec);
776 if (rc)
777 device_unregister(&ofdev->dev);
778
779 return rc;
780}
781
782void of_device_unregister(struct of_device *ofdev)
783{
784 device_remove_file(&ofdev->dev, &dev_attr_devspec);
785 device_unregister(&ofdev->dev);
786}
787
788struct of_device* of_platform_device_create(struct device_node *np, 603struct of_device* of_platform_device_create(struct device_node *np,
789 const char *bus_id, 604 const char *bus_id,
790 struct device *parent, 605 struct device *parent,
@@ -810,12 +625,6 @@ struct of_device* of_platform_device_create(struct device_node *np,
810 return dev; 625 return dev;
811} 626}
812 627
813EXPORT_SYMBOL(of_match_device);
814EXPORT_SYMBOL(of_register_driver); 628EXPORT_SYMBOL(of_register_driver);
815EXPORT_SYMBOL(of_unregister_driver); 629EXPORT_SYMBOL(of_unregister_driver);
816EXPORT_SYMBOL(of_device_register);
817EXPORT_SYMBOL(of_device_unregister);
818EXPORT_SYMBOL(of_dev_get);
819EXPORT_SYMBOL(of_dev_put);
820EXPORT_SYMBOL(of_platform_device_create); 630EXPORT_SYMBOL(of_platform_device_create);
821EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 791771196905..f2eae457fc9a 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -36,6 +36,7 @@
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/irq_regs.h> 37#include <asm/irq_regs.h>
38 38
39#include "irq.h"
39 40
40/* 41/*
41 * I studied different documents and many live PROMs both from 2.30 42 * I studied different documents and many live PROMs both from 2.30
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index 8c37f8f5adb7..33f7a3ddb104 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -39,6 +39,7 @@
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/psr.h> 40#include <asm/psr.h>
41#include <asm/elf.h> 41#include <asm/elf.h>
42#include <asm/prom.h>
42#include <asm/unistd.h> 43#include <asm/unistd.h>
43 44
44/* 45/*
@@ -150,7 +151,7 @@ void machine_halt(void)
150 local_irq_enable(); 151 local_irq_enable();
151 mdelay(8); 152 mdelay(8);
152 local_irq_disable(); 153 local_irq_disable();
153 if (!serial_console && prom_palette) 154 if (prom_palette)
154 prom_palette (1); 155 prom_palette (1);
155 prom_halt(); 156 prom_halt();
156 panic("Halt failed!"); 157 panic("Halt failed!");
@@ -166,7 +167,7 @@ void machine_restart(char * cmd)
166 167
167 p = strchr (reboot_command, '\n'); 168 p = strchr (reboot_command, '\n');
168 if (p) *p = 0; 169 if (p) *p = 0;
169 if (!serial_console && prom_palette) 170 if (prom_palette)
170 prom_palette (1); 171 prom_palette (1);
171 if (cmd) 172 if (cmd)
172 prom_reboot(cmd); 173 prom_reboot(cmd);
@@ -179,7 +180,8 @@ void machine_restart(char * cmd)
179void machine_power_off(void) 180void machine_power_off(void)
180{ 181{
181#ifdef CONFIG_SUN_AUXIO 182#ifdef CONFIG_SUN_AUXIO
182 if (auxio_power_register && (!serial_console || scons_pwroff)) 183 if (auxio_power_register &&
184 (strcmp(of_console_device->type, "serial") || scons_pwroff))
183 *auxio_power_register |= AUXIO_POWER_OFF; 185 *auxio_power_register |= AUXIO_POWER_OFF;
184#endif 186#endif
185 machine_halt(); 187 machine_halt();
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index eed140b3c739..e3a537650db1 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -25,73 +25,9 @@
25#include <asm/prom.h> 25#include <asm/prom.h>
26#include <asm/oplib.h> 26#include <asm/oplib.h>
27 27
28static struct device_node *allnodes; 28extern struct device_node *allnodes; /* temporary while merging */
29 29
30/* use when traversing tree through the allnext, child, sibling, 30extern rwlock_t devtree_lock; /* temporary while merging */
31 * or parent members of struct device_node.
32 */
33static DEFINE_RWLOCK(devtree_lock);
34
35int of_device_is_compatible(const struct device_node *device,
36 const char *compat)
37{
38 const char* cp;
39 int cplen, l;
40
41 cp = of_get_property(device, "compatible", &cplen);
42 if (cp == NULL)
43 return 0;
44 while (cplen > 0) {
45 if (strncmp(cp, compat, strlen(compat)) == 0)
46 return 1;
47 l = strlen(cp) + 1;
48 cp += l;
49 cplen -= l;
50 }
51
52 return 0;
53}
54EXPORT_SYMBOL(of_device_is_compatible);
55
56struct device_node *of_get_parent(const struct device_node *node)
57{
58 struct device_node *np;
59
60 if (!node)
61 return NULL;
62
63 np = node->parent;
64
65 return np;
66}
67EXPORT_SYMBOL(of_get_parent);
68
69struct device_node *of_get_next_child(const struct device_node *node,
70 struct device_node *prev)
71{
72 struct device_node *next;
73
74 next = prev ? prev->sibling : node->child;
75 for (; next != 0; next = next->sibling) {
76 break;
77 }
78
79 return next;
80}
81EXPORT_SYMBOL(of_get_next_child);
82
83struct device_node *of_find_node_by_path(const char *path)
84{
85 struct device_node *np = allnodes;
86
87 for (; np != 0; np = np->allnext) {
88 if (np->full_name != 0 && strcmp(np->full_name, path) == 0)
89 break;
90 }
91
92 return np;
93}
94EXPORT_SYMBOL(of_find_node_by_path);
95 31
96struct device_node *of_find_node_by_phandle(phandle handle) 32struct device_node *of_find_node_by_phandle(phandle handle)
97{ 33{
@@ -105,81 +41,6 @@ struct device_node *of_find_node_by_phandle(phandle handle)
105} 41}
106EXPORT_SYMBOL(of_find_node_by_phandle); 42EXPORT_SYMBOL(of_find_node_by_phandle);
107 43
108struct device_node *of_find_node_by_name(struct device_node *from,
109 const char *name)
110{
111 struct device_node *np;
112
113 np = from ? from->allnext : allnodes;
114 for (; np != NULL; np = np->allnext)
115 if (np->name != NULL && strcmp(np->name, name) == 0)
116 break;
117
118 return np;
119}
120EXPORT_SYMBOL(of_find_node_by_name);
121
122struct device_node *of_find_node_by_type(struct device_node *from,
123 const char *type)
124{
125 struct device_node *np;
126
127 np = from ? from->allnext : allnodes;
128 for (; np != 0; np = np->allnext)
129 if (np->type != 0 && strcmp(np->type, type) == 0)
130 break;
131
132 return np;
133}
134EXPORT_SYMBOL(of_find_node_by_type);
135
136struct device_node *of_find_compatible_node(struct device_node *from,
137 const char *type, const char *compatible)
138{
139 struct device_node *np;
140
141 np = from ? from->allnext : allnodes;
142 for (; np != 0; np = np->allnext) {
143 if (type != NULL
144 && !(np->type != 0 && strcmp(np->type, type) == 0))
145 continue;
146 if (of_device_is_compatible(np, compatible))
147 break;
148 }
149
150 return np;
151}
152EXPORT_SYMBOL(of_find_compatible_node);
153
154struct property *of_find_property(const struct device_node *np,
155 const char *name,
156 int *lenp)
157{
158 struct property *pp;
159
160 for (pp = np->properties; pp != 0; pp = pp->next) {
161 if (strcasecmp(pp->name, name) == 0) {
162 if (lenp != 0)
163 *lenp = pp->length;
164 break;
165 }
166 }
167 return pp;
168}
169EXPORT_SYMBOL(of_find_property);
170
171/*
172 * Find a property with a given name for a given node
173 * and return the value.
174 */
175const void *of_get_property(const struct device_node *np, const char *name,
176 int *lenp)
177{
178 struct property *pp = of_find_property(np,name,lenp);
179 return pp ? pp->value : NULL;
180}
181EXPORT_SYMBOL(of_get_property);
182
183int of_getintprop_default(struct device_node *np, const char *name, int def) 44int of_getintprop_default(struct device_node *np, const char *name, int def)
184{ 45{
185 struct property *prop; 46 struct property *prop;
@@ -193,36 +54,6 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
193} 54}
194EXPORT_SYMBOL(of_getintprop_default); 55EXPORT_SYMBOL(of_getintprop_default);
195 56
196int of_n_addr_cells(struct device_node *np)
197{
198 const int* ip;
199 do {
200 if (np->parent)
201 np = np->parent;
202 ip = of_get_property(np, "#address-cells", NULL);
203 if (ip != NULL)
204 return *ip;
205 } while (np->parent);
206 /* No #address-cells property for the root node, default to 2 */
207 return 2;
208}
209EXPORT_SYMBOL(of_n_addr_cells);
210
211int of_n_size_cells(struct device_node *np)
212{
213 const int* ip;
214 do {
215 if (np->parent)
216 np = np->parent;
217 ip = of_get_property(np, "#size-cells", NULL);
218 if (ip != NULL)
219 return *ip;
220 } while (np->parent);
221 /* No #size-cells property for the root node, default to 1 */
222 return 1;
223}
224EXPORT_SYMBOL(of_n_size_cells);
225
226int of_set_property(struct device_node *dp, const char *name, void *val, int len) 57int of_set_property(struct device_node *dp, const char *name, void *val, int len)
227{ 58{
228 struct property **prevp; 59 struct property **prevp;
@@ -566,6 +397,135 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
566 return dp; 397 return dp;
567} 398}
568 399
400struct device_node *of_console_device;
401EXPORT_SYMBOL(of_console_device);
402
403char *of_console_path;
404EXPORT_SYMBOL(of_console_path);
405
406char *of_console_options;
407EXPORT_SYMBOL(of_console_options);
408
409extern void restore_current(void);
410
411static void __init of_console_init(void)
412{
413 char *msg = "OF stdout device is: %s\n";
414 struct device_node *dp;
415 unsigned long flags;
416 const char *type;
417 phandle node;
418 int skip, fd;
419
420 of_console_path = prom_early_alloc(256);
421
422 switch (prom_vers) {
423 case PROM_V0:
424 case PROM_SUN4:
425 skip = 0;
426 switch (*romvec->pv_stdout) {
427 case PROMDEV_SCREEN:
428 type = "display";
429 break;
430
431 case PROMDEV_TTYB:
432 skip = 1;
433 /* FALLTHRU */
434
435 case PROMDEV_TTYA:
436 type = "serial";
437 break;
438
439 default:
440 prom_printf("Invalid PROM_V0 stdout value %u\n",
441 *romvec->pv_stdout);
442 prom_halt();
443 }
444
445 for_each_node_by_type(dp, type) {
446 if (!skip--)
447 break;
448 }
449 if (!dp) {
450 prom_printf("Cannot find PROM_V0 console node.\n");
451 prom_halt();
452 }
453 of_console_device = dp;
454
455 strcpy(of_console_path, dp->full_name);
456 if (!strcmp(type, "serial")) {
457 strcat(of_console_path,
458 (skip ? ":b" : ":a"));
459 }
460 break;
461
462 default:
463 case PROM_V2:
464 case PROM_V3:
465 fd = *romvec->pv_v2bootargs.fd_stdout;
466
467 spin_lock_irqsave(&prom_lock, flags);
468 node = (*romvec->pv_v2devops.v2_inst2pkg)(fd);
469 restore_current();
470 spin_unlock_irqrestore(&prom_lock, flags);
471
472 if (!node) {
473 prom_printf("Cannot resolve stdout node from "
474 "instance %08x.\n", fd);
475 prom_halt();
476 }
477 dp = of_find_node_by_phandle(node);
478 type = of_get_property(dp, "device_type", NULL);
479
480 if (!type) {
481 prom_printf("Console stdout lacks "
482 "device_type property.\n");
483 prom_halt();
484 }
485
486 if (strcmp(type, "display") && strcmp(type, "serial")) {
487 prom_printf("Console device_type is neither display "
488 "nor serial.\n");
489 prom_halt();
490 }
491
492 of_console_device = dp;
493
494 if (prom_vers == PROM_V2) {
495 strcpy(of_console_path, dp->full_name);
496 switch (*romvec->pv_stdout) {
497 case PROMDEV_TTYA:
498 strcat(of_console_path, ":a");
499 break;
500 case PROMDEV_TTYB:
501 strcat(of_console_path, ":b");
502 break;
503 }
504 } else {
505 const char *path;
506
507 dp = of_find_node_by_path("/");
508 path = of_get_property(dp, "stdout-path", NULL);
509 if (!path) {
510 prom_printf("No stdout-path in root node.\n");
511 prom_halt();
512 }
513 strcpy(of_console_path, path);
514 }
515 break;
516 }
517
518 of_console_options = strrchr(of_console_path, ':');
519 if (of_console_options) {
520 of_console_options++;
521 if (*of_console_options == '\0')
522 of_console_options = NULL;
523 }
524
525 prom_printf(msg, of_console_path);
526 printk(msg, of_console_path);
527}
528
569void __init prom_build_devicetree(void) 529void __init prom_build_devicetree(void)
570{ 530{
571 struct device_node **nextp; 531 struct device_node **nextp;
@@ -578,6 +538,8 @@ void __init prom_build_devicetree(void)
578 allnodes->child = build_tree(allnodes, 538 allnodes->child = build_tree(allnodes,
579 prom_getchild(allnodes->node), 539 prom_getchild(allnodes->node),
580 &nextp); 540 &nextp);
541 of_console_init();
542
581 printk("PROM: Built device tree with %u bytes of memory.\n", 543 printk("PROM: Built device tree with %u bytes of memory.\n",
582 prom_early_allocated); 544 prom_early_allocated);
583} 545}
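of_console_init() above records the OpenBoot console device and path, then splits any trailing ":<options>" suffix into of_console_options. A minimal user-space model of that final split (the path string is just an example value):

```c
/* Minimal model of the option split at the end of of_console_init():
 * everything after the last ':' becomes the console options, and an
 * empty suffix is treated as "no options". */
#include <stdio.h>
#include <string.h>

static void split_console_path(char *path, char **options)
{
	char *opt = strrchr(path, ':');

	*options = NULL;
	if (opt) {
		opt++;
		if (*opt != '\0')
			*options = opt;	/* points into path, as in the kernel code */
	}
}

int main(void)
{
	char path[] = "/obio/zs@0,100000:a";	/* made-up example path */
	char *options;

	split_console_path(path, &options);
	printf("path=%s options=%s\n", path, options ? options : "(none)");
	return 0;
}
```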
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 64c0ed98820a..f8228383895a 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -146,31 +146,6 @@ static void __init process_switch(char c)
146 } 146 }
147} 147}
148 148
149static void __init process_console(char *commands)
150{
151 serial_console = 0;
152 commands += 8;
153 /* Linux-style serial */
154 if (!strncmp(commands, "ttyS", 4))
155 serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
156 else if (!strncmp(commands, "tty", 3)) {
157 char c = *(commands + 3);
158 /* Solaris-style serial */
159 if (c == 'a' || c == 'b')
160 serial_console = c - 'a' + 1;
161 /* else Linux-style fbcon, not serial */
162 }
163#if defined(CONFIG_PROM_CONSOLE)
164 if (!strncmp(commands, "prom", 4)) {
165 char *p;
166
167 for (p = commands - 8; *p && *p != ' '; p++)
168 *p = ' ';
169 conswitchp = &prom_con;
170 }
171#endif
172}
173
174static void __init boot_flags_init(char *commands) 149static void __init boot_flags_init(char *commands)
175{ 150{
176 while (*commands) { 151 while (*commands) {
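The removed process_console() handled "console=" arguments itself: "ttyS<N>" (Linux style) selects serial port N, stored as N+1 so that 0 can still mean a framebuffer console, while Solaris-style "ttya"/"ttyb" map to 1 and 2. A stand-alone sketch of that parsing logic:

```c
/* User-space model of the console= parsing being removed above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_console_arg(const char *arg)
{
	int serial_console = 0;

	if (!strncmp(arg, "ttyS", 4))
		serial_console = (int)strtoul(arg + 4, NULL, 10) + 1;
	else if (!strncmp(arg, "tty", 3)) {
		char c = arg[3];

		/* Solaris-style names; anything else stays on the fbcon. */
		if (c == 'a' || c == 'b')
			serial_console = c - 'a' + 1;
	}
	return serial_console;
}

int main(void)
{
	printf("console=ttyS0 -> %d\n", parse_console_arg("ttyS0"));
	printf("console=ttyb  -> %d\n", parse_console_arg("ttyb"));
	printf("console=tty0  -> %d\n", parse_console_arg("tty0"));
	return 0;
}
```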
@@ -187,9 +162,7 @@ static void __init boot_flags_init(char *commands)
187 process_switch(*commands++); 162 process_switch(*commands++);
188 continue; 163 continue;
189 } 164 }
190 if (!strncmp(commands, "console=", 8)) { 165 if (!strncmp(commands, "mem=", 4)) {
191 process_console(commands);
192 } else if (!strncmp(commands, "mem=", 4)) {
193 /* 166 /*
194 * "mem=XXX[kKmM] overrides the PROM-reported 167 * "mem=XXX[kKmM] overrides the PROM-reported
195 * memory size. 168 * memory size.
@@ -341,41 +314,6 @@ void __init setup_arch(char **cmdline_p)
341 smp_setup_cpu_possible_map(); 314 smp_setup_cpu_possible_map();
342} 315}
343 316
344static int __init set_preferred_console(void)
345{
346 int idev, odev;
347
348 /* The user has requested a console so this is already set up. */
349 if (serial_console >= 0)
350 return -EBUSY;
351
352 idev = prom_query_input_device();
353 odev = prom_query_output_device();
354 if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
355 serial_console = 0;
356 } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
357 serial_console = 1;
358 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
359 serial_console = 2;
360 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OTTYA) {
361 prom_printf("MrCoffee ttya\n");
362 serial_console = 1;
363 } else if (idev == PROMDEV_I_UNK && odev == PROMDEV_OSCREEN) {
364 serial_console = 0;
365 prom_printf("MrCoffee keyboard\n");
366 } else {
367 prom_printf("Confusing console (idev %d, odev %d)\n",
368 idev, odev);
369 serial_console = 1;
370 }
371
372 if (serial_console)
373 return add_preferred_console("ttyS", serial_console - 1, NULL);
374
375 return -ENODEV;
376}
377console_initcall(set_preferred_console);
378
379extern char *sparc_cpu_type; 317extern char *sparc_cpu_type;
380extern char *sparc_fpu_type; 318extern char *sparc_fpu_type;
381 319
@@ -461,7 +399,6 @@ void sun_do_break(void)
461 prom_cmdline(); 399 prom_cmdline();
462} 400}
463 401
464int serial_console = -1;
465int stop_a_enabled = 1; 402int stop_a_enabled = 1;
466 403
467static int __init topology_init(void) 404static int __init topology_init(void)
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 4fea3ac7bff0..6724ab90f82b 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -33,6 +33,8 @@
33#include <asm/tlbflush.h> 33#include <asm/tlbflush.h>
34#include <asm/cpudata.h> 34#include <asm/cpudata.h>
35 35
36#include "irq.h"
37
36int smp_num_cpus = 1; 38int smp_num_cpus = 1;
37volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; 39volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
38unsigned char boot_cpu_id = 0; 40unsigned char boot_cpu_id = 0;
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index d8e008a04e2b..55bac516dfe2 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -154,8 +154,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
154#else 154#else
155EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id)); 155EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
156#endif 156#endif
157EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
158EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
159EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea)); 157EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
160EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea)); 158EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
161EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl)); 159EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 009e891a4329..c6ac9fc52563 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include "irq.h"
21 22
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/processor.h> 24#include <asm/processor.h>
@@ -40,6 +41,20 @@ static struct resource sun4c_timer_eb = { "sun4c_timer" };
40static struct resource sun4c_intr_eb = { "sun4c_intr" }; 41static struct resource sun4c_intr_eb = { "sun4c_intr" };
41#endif 42#endif
42 43
44/*
45 * Bit field defines for the interrupt registers on various
46 * Sparc machines.
47 */
48
49/* The sun4c interrupt register. */
50#define SUN4C_INT_ENABLE 0x01 /* Allow interrupts. */
51#define SUN4C_INT_E14 0x80 /* Enable level 14 IRQ. */
52#define SUN4C_INT_E10 0x20 /* Enable level 10 IRQ. */
53#define SUN4C_INT_E8 0x10 /* Enable level 8 IRQ. */
54#define SUN4C_INT_E6 0x08 /* Enable level 6 IRQ. */
55#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
56#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
57
43/* Pointer to the interrupt enable byte 58/* Pointer to the interrupt enable byte
44 * 59 *
45 * Dave Redman (djhr@tadpole.co.uk) 60 * Dave Redman (djhr@tadpole.co.uk)
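For reference, the SUN4C_INT_* values moved into this file above are individual bits of one interrupt-enable byte; a trivial illustration of combining them (the register write itself is not shown):

```c
/* Illustrative user-space combination of the SUN4C_INT_* bits above. */
#include <stdio.h>

#define SUN4C_INT_ENABLE  0x01
#define SUN4C_INT_E14     0x80
#define SUN4C_INT_E10     0x20

int main(void)
{
	/* Enable interrupts globally plus the level-10 and level-14 sources. */
	unsigned char mask = SUN4C_INT_ENABLE | SUN4C_INT_E10 | SUN4C_INT_E14;

	printf("interrupt enable byte: 0x%02x\n", mask);
	return 0;
}
```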
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 396797e20c39..e0efab2a6bef 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -39,6 +39,8 @@
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/irq_regs.h> 40#include <asm/irq_regs.h>
41 41
42#include "irq.h"
43
42/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ 44/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
43/* #define DISTRIBUTE_IRQS */ 45/* #define DISTRIBUTE_IRQS */
44 46
@@ -188,7 +190,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
188 kfree(action); 190 kfree(action);
189 191
190 if (!(*actionp)) 192 if (!(*actionp))
191 disable_irq(irq); 193 __disable_irq(irq);
192 194
193out_unlock: 195out_unlock:
194 spin_unlock_irqrestore(&irq_action_lock, flags); 196 spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -346,7 +348,7 @@ int sun4d_request_irq(unsigned int irq,
346 else 348 else
347 *actionp = action; 349 *actionp = action;
348 350
349 enable_irq(irq); 351 __enable_irq(irq);
350 352
351 ret = 0; 353 ret = 0;
352out_unlock: 354out_unlock:
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 098c94f1a322..89a6de95070c 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -36,6 +36,7 @@
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#include <asm/cpudata.h> 37#include <asm/cpudata.h>
38 38
39#include "irq.h"
39#define IRQ_CROSS_CALL 15 40#define IRQ_CROSS_CALL 15
40 41
41extern ctxd_t *srmmu_ctx_table_phys; 42extern ctxd_t *srmmu_ctx_table_phys;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 91a803ea88be..b92d6d2d5b04 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -38,11 +38,85 @@
38#include <asm/sbus.h> 38#include <asm/sbus.h>
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40 40
41#include "irq.h"
42
43/* On the sun4m, just like the timers, we have both per-cpu and master
44 * interrupt registers.
45 */
46
47/* These registers are used for sending/receiving irqs from/to
48 * different cpu's.
49 */
50struct sun4m_intreg_percpu {
51 unsigned int tbt; /* Interrupts still pending for this cpu. */
52
53 /* These next two registers are WRITE-ONLY and are only
54 * "on bit" sensitive, "off bits" written have NO affect.
55 */
56 unsigned int clear; /* Clear this cpus irqs here. */
57 unsigned int set; /* Set this cpus irqs here. */
58 unsigned char space[PAGE_SIZE - 12];
59};
60
61/*
62 * djhr
63 * Actually the clear and set fields in this struct are misleading..
64 * according to the SLAVIO manual (and the same applies for the SEC)
65 * the clear field clears bits in the mask which will ENABLE that IRQ
66 * the set field sets bits in the mask to DISABLE the IRQ.
67 *
68 * Also the undirected_xx address in the SLAVIO is defined as
69 * RESERVED and write only..
70 *
71 * DAVEM_NOTE: The SLAVIO only specifies behavior on uniprocessor
72 * sun4m machines, for MP the layout makes more sense.
73 */
74struct sun4m_intregs {
75 struct sun4m_intreg_percpu cpu_intregs[SUN4M_NCPUS];
76 unsigned int tbt; /* IRQ's that are still pending. */
77 unsigned int irqs; /* Master IRQ bits. */
78
 79 /* Again, like the above, these two registers are WRITE-ONLY. */
80 unsigned int clear; /* Clear master IRQ's by setting bits here. */
81 unsigned int set; /* Set master IRQ's by setting bits here. */
82
83 /* This register is both READ and WRITE. */
84 unsigned int undirected_target; /* Which cpu gets undirected irqs. */
85};
86
41static unsigned long dummy; 87static unsigned long dummy;
42 88
43struct sun4m_intregs *sun4m_interrupts; 89struct sun4m_intregs *sun4m_interrupts;
44unsigned long *irq_rcvreg = &dummy; 90unsigned long *irq_rcvreg = &dummy;
45 91
92/* Dave Redman (djhr@tadpole.co.uk)
93 * The sun4m interrupt registers.
94 */
95#define SUN4M_INT_ENABLE 0x80000000
96#define SUN4M_INT_E14 0x00000080
97#define SUN4M_INT_E10 0x00080000
98
99#define SUN4M_HARD_INT(x) (0x000000001 << (x))
100#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
101
102#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
103#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
104#define SUN4M_INT_M2S_WRITE 0x20000000 /* write buffer error */
105#define SUN4M_INT_ECC 0x10000000 /* ecc memory error */
106#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
107#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
108#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
109#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
110#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
111#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
112#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
113#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
114#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
115#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
116
117#define SUN4M_INT_SBUS(x) (1 << (x+7))
118#define SUN4M_INT_VME(x) (1 << (x))
119
46/* These tables only apply for interrupts greater than 15.. 120/* These tables only apply for interrupts greater than 15..
47 * 121 *
48 * any intr value below 0x10 is considered to be a soft-int 122 * any intr value below 0x10 is considered to be a soft-int
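The comment block added above notes that the per-cpu "clear" and "set" registers have inverted meanings: writing a bit to "clear" removes it from the mask and so enables that interrupt, while writing it to "set" masks (disables) it. A small user-space model of those semantics, with a made-up struct standing in for the hardware register:

```c
/* Model of the inverted clear/set semantics described above; the struct
 * is a stand-in, not the real SLAVIO register layout. */
#include <stdio.h>

#define SUN4M_INT_FLOPPY 0x00400000
#define SUN4M_INT_SERIAL 0x00008000

struct intreg_model {
	unsigned int mask;		/* 1 bit == interrupt masked (disabled) */
};

static void write_clear(struct intreg_model *r, unsigned int bits)
{
	r->mask &= ~bits;		/* enables the interrupts named in bits */
}

static void write_set(struct intreg_model *r, unsigned int bits)
{
	r->mask |= bits;		/* disables the interrupts named in bits */
}

int main(void)
{
	struct intreg_model r = { ~0u };	/* start with everything masked */

	write_clear(&r, SUN4M_INT_SERIAL);	/* enable serial */
	write_set(&r, SUN4M_INT_FLOPPY);	/* keep floppy disabled */
	printf("mask = 0x%08x (serial %s)\n", r.mask,
	       (r.mask & SUN4M_INT_SERIAL) ? "masked" : "enabled");
	return 0;
}
```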
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 63ed19bfd028..730eb5796f8e 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -31,6 +31,8 @@
31#include <asm/oplib.h> 31#include <asm/oplib.h>
32#include <asm/cpudata.h> 32#include <asm/cpudata.h>
33 33
34#include "irq.h"
35
34#define IRQ_RESCHEDULE 13 36#define IRQ_RESCHEDULE 13
35#define IRQ_STOP_CPU 14 37#define IRQ_STOP_CPU 14
36#define IRQ_CROSS_CALL 15 38#define IRQ_CROSS_CALL 15
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 90b52d4dab9a..55722840859c 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -1,8 +1,7 @@
1/* $Id: systbls.S,v 1.103 2002/02/08 03:57:14 davem Exp $ 1/* systbls.S: System call entry point tables for OS compatibility.
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also. 2 * The native Linux system call table lives here also.
4 * 3 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
6 * 5 *
7 * Based upon preliminary work which is: 6 * Based upon preliminary work which is:
8 * 7 *
@@ -80,7 +79,7 @@ sys_call_table:
80/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare 79/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
81/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy 80/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
82/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 81/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
83/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd 82/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate
84 83
85#ifdef CONFIG_SUNOS_EMUL 84#ifdef CONFIG_SUNOS_EMUL
86 /* Now the SunOS syscall table. */ 85 /* Now the SunOS syscall table. */
@@ -198,6 +197,6 @@ sunos_sys_table:
198 .long sunos_nosys, sunos_nosys, sunos_nosys 197 .long sunos_nosys, sunos_nosys, sunos_nosys
199 .long sunos_nosys 198 .long sunos_nosys
200/*310*/ .long sunos_nosys, sunos_nosys, sunos_nosys 199/*310*/ .long sunos_nosys, sunos_nosys, sunos_nosys
201 .long sunos_nosys 200 .long sunos_nosys, sunos_nosys
202 201
203#endif 202#endif
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
index f1a7bd19e04f..707bfda86570 100644
--- a/arch/sparc/kernel/tick14.c
+++ b/arch/sparc/kernel/tick14.c
@@ -25,6 +25,8 @@
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/io.h> 26#include <asm/io.h>
27 27
28#include "irq.h"
29
28extern unsigned long lvl14_save[5]; 30extern unsigned long lvl14_save[5];
29static unsigned long *linux_lvl14 = NULL; 31static unsigned long *linux_lvl14 = NULL;
30static unsigned long obp_lvl14[4]; 32static unsigned long obp_lvl14[4];
@@ -62,7 +64,7 @@ void claim_ticker14(irq_handler_t handler,
62 64
63 /* first we copy the obp handler instructions 65 /* first we copy the obp handler instructions
64 */ 66 */
65 disable_irq(irq_nr); 67 __disable_irq(irq_nr);
66 if (!handler) 68 if (!handler)
67 return; 69 return;
68 70
@@ -79,6 +81,6 @@ void claim_ticker14(irq_handler_t handler,
79 NULL)) { 81 NULL)) {
80 install_linux_ticker(); 82 install_linux_ticker();
81 load_profile_irq(cpu, timeout); 83 load_profile_irq(cpu, timeout);
82 enable_irq(irq_nr); 84 __enable_irq(irq_nr);
83 } 85 }
84} 86}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 7b4612da74a6..6a2513321620 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -44,6 +44,8 @@
44#include <asm/of_device.h> 44#include <asm/of_device.h>
45#include <asm/irq_regs.h> 45#include <asm/irq_regs.h>
46 46
47#include "irq.h"
48
47DEFINE_SPINLOCK(rtc_lock); 49DEFINE_SPINLOCK(rtc_lock);
48enum sparc_clock_type sp_clock_typ; 50enum sparc_clock_type sp_clock_typ;
49DEFINE_SPINLOCK(mostek_lock); 51DEFINE_SPINLOCK(mostek_lock);
@@ -354,7 +356,7 @@ static struct of_platform_driver clock_driver = {
354/* Probe for the mostek real time clock chip. */ 356/* Probe for the mostek real time clock chip. */
355static int __init clock_init(void) 357static int __init clock_init(void)
356{ 358{
357 return of_register_driver(&clock_driver, &of_bus_type); 359 return of_register_driver(&clock_driver, &of_platform_bus_type);
358} 360}
359 361
360/* Must be after subsys_initcall() so that busses are probed. Must 362/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index a532922e2e35..a1bef07755a9 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -308,6 +308,9 @@ extern void sun4c_paging_init(void);
308extern void srmmu_paging_init(void); 308extern void srmmu_paging_init(void);
309extern void device_scan(void); 309extern void device_scan(void);
310 310
311pgprot_t PAGE_SHARED __read_mostly;
312EXPORT_SYMBOL(PAGE_SHARED);
313
311void __init paging_init(void) 314void __init paging_init(void)
312{ 315{
313 switch(sparc_cpu_model) { 316 switch(sparc_cpu_model) {
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index ca26232da7ab..17b485f2825c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -2154,7 +2154,7 @@ void __init ld_mmu_srmmu(void)
2154 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); 2154 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
2155 2155
2156 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); 2156 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
2157 BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); 2157 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
2158 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); 2158 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
2159 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); 2159 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
2160 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); 2160 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index bdd835fba02e..a57a366e339a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -2155,7 +2155,7 @@ void __init ld_mmu_sun4c(void)
2155 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE); 2155 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
2156 2156
2157 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE)); 2157 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
2158 BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED)); 2158 PAGE_SHARED = pgprot_val(SUN4C_PAGE_SHARED);
2159 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY)); 2159 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
2160 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY)); 2160 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
2161 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL)); 2161 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
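The two MMU hunks above replace the BTFIXUP-patched page_shared value with an ordinary exported global that the MMU-specific setup code fills in at boot. A rough sketch of that pattern, with placeholder types and values rather than the real SRMMU/sun4c protection bits:

```c
/* Sketch of a boot-time-initialized global replacing a patched constant. */
#include <stdio.h>

typedef unsigned long pgprot_model_t;

/* One global, defined once and visible to all users (the kernel
 * additionally exports it to modules with EXPORT_SYMBOL). */
static pgprot_model_t page_shared_model;

static void ld_mmu_srmmu_model(void) { page_shared_model = 0x0e; }	/* placeholder */
static void ld_mmu_sun4c_model(void) { page_shared_model = 0x56; }	/* placeholder */

int main(void)
{
	int use_srmmu = 1;

	if (use_srmmu)
		ld_mmu_srmmu_model();
	else
		ld_mmu_sun4c_model();

	printf("PAGE_SHARED (model) = 0x%02lx\n", page_shared_model);
	return 0;
}
```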
diff --git a/arch/sparc/prom/console.c b/arch/sparc/prom/console.c
index 4e6e41d3291d..8d1cfb0d5068 100644
--- a/arch/sparc/prom/console.c
+++ b/arch/sparc/prom/console.c
@@ -102,119 +102,3 @@ prom_putchar(char c)
102 while(prom_nbputchar(c) == -1) ; 102 while(prom_nbputchar(c) == -1) ;
103 return; 103 return;
104} 104}
105
106/* Query for input device type */
107enum prom_input_device
108prom_query_input_device(void)
109{
110 unsigned long flags;
111 int st_p;
112 char propb[64];
113 char *p;
114 int propl;
115
116 switch(prom_vers) {
117 case PROM_V0:
118 case PROM_V2:
119 case PROM_SUN4:
120 default:
121 switch(*romvec->pv_stdin) {
122 case PROMDEV_KBD: return PROMDEV_IKBD;
123 case PROMDEV_TTYA: return PROMDEV_ITTYA;
124 case PROMDEV_TTYB: return PROMDEV_ITTYB;
125 default:
126 return PROMDEV_I_UNK;
127 };
128 case PROM_V3:
129 spin_lock_irqsave(&prom_lock, flags);
130 st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
131 restore_current();
132 spin_unlock_irqrestore(&prom_lock, flags);
133 if(prom_node_has_property(st_p, "keyboard"))
134 return PROMDEV_IKBD;
135 if (prom_getproperty(st_p, "name", propb, sizeof(propb)) != -1) {
136 if(strncmp(propb, "keyboard", sizeof("serial")) == 0)
137 return PROMDEV_IKBD;
138 }
139 if (prom_getproperty(st_p, "device_type", propb, sizeof(propb)) != -1) {
140 if(strncmp(propb, "serial", sizeof("serial")))
141 return PROMDEV_I_UNK;
142 }
143 propl = prom_getproperty(prom_root_node, "stdin-path", propb, sizeof(propb));
144 if(propl > 2) {
145 p = propb;
146 while(*p) p++; p -= 2;
147 if(p[0] == ':') {
148 if(p[1] == 'a')
149 return PROMDEV_ITTYA;
150 else if(p[1] == 'b')
151 return PROMDEV_ITTYB;
152 }
153 }
154 return PROMDEV_I_UNK;
155 }
156}
157
158/* Query for output device type */
159
160enum prom_output_device
161prom_query_output_device(void)
162{
163 unsigned long flags;
164 int st_p;
165 char propb[64];
166 char *p;
167 int propl;
168
169 switch(prom_vers) {
170 case PROM_V0:
171 case PROM_SUN4:
172 switch(*romvec->pv_stdin) {
173 case PROMDEV_SCREEN: return PROMDEV_OSCREEN;
174 case PROMDEV_TTYA: return PROMDEV_OTTYA;
175 case PROMDEV_TTYB: return PROMDEV_OTTYB;
176 };
177 break;
178 case PROM_V2:
179 case PROM_V3:
180 spin_lock_irqsave(&prom_lock, flags);
181 st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
182 restore_current();
183 spin_unlock_irqrestore(&prom_lock, flags);
184 propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
185 if (propl == sizeof("display") &&
186 strncmp("display", propb, sizeof("display")) == 0)
187 {
188 return PROMDEV_OSCREEN;
189 }
190 if(prom_vers == PROM_V3) {
191 if(propl >= 0 &&
192 strncmp("serial", propb, sizeof("serial")) != 0)
193 return PROMDEV_O_UNK;
194 propl = prom_getproperty(prom_root_node, "stdout-path",
195 propb, sizeof(propb));
196 if(propl == CON_SIZE_JMC &&
197 strncmp(propb, con_name_jmc, CON_SIZE_JMC) == 0)
198 return PROMDEV_OTTYA;
199 if(propl > 2) {
200 p = propb;
201 while(*p) p++; p-= 2;
202 if(p[0]==':') {
203 if(p[1] == 'a')
204 return PROMDEV_OTTYA;
205 else if(p[1] == 'b')
206 return PROMDEV_OTTYB;
207 }
208 }
209 } else {
210 switch(*romvec->pv_stdin) {
211 case PROMDEV_TTYA: return PROMDEV_OTTYA;
212 case PROMDEV_TTYB: return PROMDEV_OTTYB;
213 };
214 }
215 break;
216 default:
217 ;
218 };
219 return PROMDEV_O_UNK;
220}
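The removed prom_query_output_device() classified a V3 stdout-path by inspecting its last two characters, ":a" or ":b". A minimal stand-alone model of that suffix test (the example path is made up):

```c
/* Model of the stdout-path suffix check in the removed PROM query code. */
#include <stdio.h>
#include <string.h>

static char classify_stdout_path(const char *path)
{
	size_t len = strlen(path);

	if (len > 2 && path[len - 2] == ':' &&
	    (path[len - 1] == 'a' || path[len - 1] == 'b'))
		return path[len - 1];
	return 0;		/* not a ttya/ttyb style path */
}

int main(void)
{
	printf("/sbus/zs@f1000000:b -> tty%c\n",
	       classify_stdout_path("/sbus/zs@f1000000:b"));
	return 0;
}
```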
diff --git a/arch/sparc/prom/misc.c b/arch/sparc/prom/misc.c
index 1942c7c05cb1..37cff5f54704 100644
--- a/arch/sparc/prom/misc.c
+++ b/arch/sparc/prom/misc.c
@@ -58,7 +58,7 @@ prom_cmdline(void)
58 extern void install_linux_ticker(void); 58 extern void install_linux_ticker(void);
59 unsigned long flags; 59 unsigned long flags;
60 60
61 if(!serial_console && prom_palette) 61 if (prom_palette)
62 prom_palette (1); 62 prom_palette (1);
63 spin_lock_irqsave(&prom_lock, flags); 63 spin_lock_irqsave(&prom_lock, flags);
64 install_obp_ticker(); 64 install_obp_ticker();
@@ -69,7 +69,7 @@ prom_cmdline(void)
69#ifdef CONFIG_SUN_AUXIO 69#ifdef CONFIG_SUN_AUXIO
70 set_auxio(AUXIO_LED, 0); 70 set_auxio(AUXIO_LED, 0);
71#endif 71#endif
72 if(!serial_console && prom_palette) 72 if (prom_palette)
73 prom_palette (0); 73 prom_palette (0);
74} 74}
75 75
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index df6ee71894d1..33dabf588bdd 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -23,6 +23,10 @@ config GENERIC_TIME
23 bool 23 bool
24 default y 24 default y
25 25
26config GENERIC_CMOS_UPDATE
27 bool
28 default y
29
26config GENERIC_CLOCKEVENTS 30config GENERIC_CLOCKEVENTS
27 bool 31 bool
28 default y 32 default y
@@ -65,6 +69,9 @@ config AUDIT_ARCH
65config ARCH_NO_VIRT_TO_BUS 69config ARCH_NO_VIRT_TO_BUS
66 def_bool y 70 def_bool y
67 71
72config OF
73 def_bool y
74
68choice 75choice
69 prompt "Kernel page size" 76 prompt "Kernel page size"
70 default SPARC64_PAGE_SIZE_8KB 77 default SPARC64_PAGE_SIZE_8KB
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 45ebf91a280c..10e301970a44 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22 3# Linux kernel version: 2.6.22
4# Tue Jul 17 01:19:52 2007 4# Thu Jul 19 21:30:37 2007
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -16,6 +16,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
16# CONFIG_ARCH_HAS_ILOG2_U32 is not set 16# CONFIG_ARCH_HAS_ILOG2_U32 is not set
17# CONFIG_ARCH_HAS_ILOG2_U64 is not set 17# CONFIG_ARCH_HAS_ILOG2_U64 is not set
18CONFIG_AUDIT_ARCH=y 18CONFIG_AUDIT_ARCH=y
19CONFIG_ARCH_NO_VIRT_TO_BUS=y
19CONFIG_SPARC64_PAGE_SIZE_8KB=y 20CONFIG_SPARC64_PAGE_SIZE_8KB=y
20# CONFIG_SPARC64_PAGE_SIZE_64KB is not set 21# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
21# CONFIG_SPARC64_PAGE_SIZE_512KB is not set 22# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
@@ -148,7 +149,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
148CONFIG_RESOURCES_64BIT=y 149CONFIG_RESOURCES_64BIT=y
149CONFIG_ZONE_DMA_FLAG=0 150CONFIG_ZONE_DMA_FLAG=0
150CONFIG_NR_QUICK=1 151CONFIG_NR_QUICK=1
151CONFIG_VIRT_TO_BUS=y
152CONFIG_SBUS=y 152CONFIG_SBUS=y
153CONFIG_SBUSCHAR=y 153CONFIG_SBUSCHAR=y
154CONFIG_SUN_AUXIO=y 154CONFIG_SUN_AUXIO=y
@@ -317,7 +317,6 @@ CONFIG_CONNECTOR=m
317# CONFIG_PARPORT is not set 317# CONFIG_PARPORT is not set
318CONFIG_BLK_DEV=y 318CONFIG_BLK_DEV=y
319# CONFIG_BLK_DEV_FD is not set 319# CONFIG_BLK_DEV_FD is not set
320# CONFIG_BLK_CPQ_DA is not set
321# CONFIG_BLK_CPQ_CISS_DA is not set 320# CONFIG_BLK_CPQ_CISS_DA is not set
322# CONFIG_BLK_DEV_DAC960 is not set 321# CONFIG_BLK_DEV_DAC960 is not set
323# CONFIG_BLK_DEV_UMEM is not set 322# CONFIG_BLK_DEV_UMEM is not set
@@ -470,10 +469,6 @@ CONFIG_ISCSI_TCP=m
470# CONFIG_SCSI_SUNESP is not set 469# CONFIG_SCSI_SUNESP is not set
471# CONFIG_SCSI_SRP is not set 470# CONFIG_SCSI_SRP is not set
472# CONFIG_ATA is not set 471# CONFIG_ATA is not set
473
474#
475# Multi-device support (RAID and LVM)
476#
477CONFIG_MD=y 472CONFIG_MD=y
478CONFIG_BLK_DEV_MD=m 473CONFIG_BLK_DEV_MD=m
479CONFIG_MD_LINEAR=m 474CONFIG_MD_LINEAR=m
@@ -610,10 +605,6 @@ CONFIG_SLHC=m
610# CONFIG_NETCONSOLE is not set 605# CONFIG_NETCONSOLE is not set
611# CONFIG_NETPOLL is not set 606# CONFIG_NETPOLL is not set
612# CONFIG_NET_POLL_CONTROLLER is not set 607# CONFIG_NET_POLL_CONTROLLER is not set
613
614#
615# ISDN subsystem
616#
617# CONFIG_ISDN is not set 608# CONFIG_ISDN is not set
618# CONFIG_PHONE is not set 609# CONFIG_PHONE is not set
619 610
@@ -782,6 +773,7 @@ CONFIG_I2C_ALGOBIT=y
782CONFIG_HWMON=y 773CONFIG_HWMON=y
783# CONFIG_HWMON_VID is not set 774# CONFIG_HWMON_VID is not set
784# CONFIG_SENSORS_ABITUGURU is not set 775# CONFIG_SENSORS_ABITUGURU is not set
776# CONFIG_SENSORS_ABITUGURU3 is not set
785# CONFIG_SENSORS_AD7418 is not set 777# CONFIG_SENSORS_AD7418 is not set
786# CONFIG_SENSORS_ADM1021 is not set 778# CONFIG_SENSORS_ADM1021 is not set
787# CONFIG_SENSORS_ADM1025 is not set 779# CONFIG_SENSORS_ADM1025 is not set
@@ -808,11 +800,13 @@ CONFIG_HWMON=y
808# CONFIG_SENSORS_LM87 is not set 800# CONFIG_SENSORS_LM87 is not set
809# CONFIG_SENSORS_LM90 is not set 801# CONFIG_SENSORS_LM90 is not set
810# CONFIG_SENSORS_LM92 is not set 802# CONFIG_SENSORS_LM92 is not set
803# CONFIG_SENSORS_LM93 is not set
811# CONFIG_SENSORS_MAX1619 is not set 804# CONFIG_SENSORS_MAX1619 is not set
812# CONFIG_SENSORS_MAX6650 is not set 805# CONFIG_SENSORS_MAX6650 is not set
813# CONFIG_SENSORS_PC87360 is not set 806# CONFIG_SENSORS_PC87360 is not set
814# CONFIG_SENSORS_PC87427 is not set 807# CONFIG_SENSORS_PC87427 is not set
815# CONFIG_SENSORS_SIS5595 is not set 808# CONFIG_SENSORS_SIS5595 is not set
809# CONFIG_SENSORS_DME1737 is not set
816# CONFIG_SENSORS_SMSC47M1 is not set 810# CONFIG_SENSORS_SMSC47M1 is not set
817# CONFIG_SENSORS_SMSC47M192 is not set 811# CONFIG_SENSORS_SMSC47M192 is not set
818# CONFIG_SENSORS_SMSC47B397 is not set 812# CONFIG_SENSORS_SMSC47B397 is not set
@@ -906,6 +900,7 @@ CONFIG_FB_RADEON_I2C=y
906# CONFIG_PROM_CONSOLE is not set 900# CONFIG_PROM_CONSOLE is not set
907CONFIG_DUMMY_CONSOLE=y 901CONFIG_DUMMY_CONSOLE=y
908CONFIG_FRAMEBUFFER_CONSOLE=y 902CONFIG_FRAMEBUFFER_CONSOLE=y
903CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
909# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set 904# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
910CONFIG_FONTS=y 905CONFIG_FONTS=y
911# CONFIG_FONT_8x8 is not set 906# CONFIG_FONT_8x8 is not set
@@ -1196,6 +1191,11 @@ CONFIG_USB_STORAGE=m
1196# 1191#
1197 1192
1198# 1193#
1194# Userspace I/O
1195#
1196# CONFIG_UIO is not set
1197
1198#
1199# Misc Linux/SPARC drivers 1199# Misc Linux/SPARC drivers
1200# 1200#
1201CONFIG_SUN_OPENPROMIO=m 1201CONFIG_SUN_OPENPROMIO=m
@@ -1385,6 +1385,7 @@ CONFIG_SCHEDSTATS=y
1385# CONFIG_DEBUG_MUTEXES is not set 1385# CONFIG_DEBUG_MUTEXES is not set
1386# CONFIG_DEBUG_LOCK_ALLOC is not set 1386# CONFIG_DEBUG_LOCK_ALLOC is not set
1387# CONFIG_PROVE_LOCKING is not set 1387# CONFIG_PROVE_LOCKING is not set
1388# CONFIG_LOCK_STAT is not set
1388# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1389# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1389# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1390# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1390# CONFIG_DEBUG_KOBJECT is not set 1391# CONFIG_DEBUG_KOBJECT is not set
@@ -1461,6 +1462,7 @@ CONFIG_CRC_CCITT=m
1461CONFIG_CRC16=m 1462CONFIG_CRC16=m
1462# CONFIG_CRC_ITU_T is not set 1463# CONFIG_CRC_ITU_T is not set
1463CONFIG_CRC32=y 1464CONFIG_CRC32=y
1465# CONFIG_CRC7 is not set
1464CONFIG_LIBCRC32C=m 1466CONFIG_LIBCRC32C=m
1465CONFIG_ZLIB_INFLATE=y 1467CONFIG_ZLIB_INFLATE=y
1466CONFIG_ZLIB_DEFLATE=y 1468CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 826118ee53d5..7b379761e9f8 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -155,7 +155,7 @@ static struct of_platform_driver auxio_driver = {
155 155
156static int __init auxio_init(void) 156static int __init auxio_init(void)
157{ 157{
158 return of_register_driver(&auxio_driver, &of_bus_type); 158 return of_register_driver(&auxio_driver, &of_platform_bus_type);
159} 159}
160 160
161/* Must be after subsys_initcall() so that busses are probed. Must 161/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index fa1f04d756a2..9f472a79d37e 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -13,11 +13,11 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/kthread.h> 15#include <linux/kthread.h>
16#include <linux/reboot.h>
16#include <linux/cpu.h> 17#include <linux/cpu.h>
17 18
18#include <asm/ldc.h> 19#include <asm/ldc.h>
19#include <asm/vio.h> 20#include <asm/vio.h>
20#include <asm/power.h>
21#include <asm/mdesc.h> 21#include <asm/mdesc.h>
22#include <asm/head.h> 22#include <asm/head.h>
23#include <asm/irq.h> 23#include <asm/irq.h>
@@ -124,10 +124,11 @@ struct ds_data_nack {
124 __u64 result; 124 __u64 result;
125}; 125};
126 126
127struct ds_info;
127struct ds_cap_state { 128struct ds_cap_state {
128 __u64 handle; 129 __u64 handle;
129 130
130 void (*data)(struct ldc_channel *lp, 131 void (*data)(struct ds_info *dp,
131 struct ds_cap_state *cp, 132 struct ds_cap_state *cp,
132 void *buf, int len); 133 void *buf, int len);
133 134
@@ -139,27 +140,27 @@ struct ds_cap_state {
139#define CAP_STATE_REGISTERED 0x02 140#define CAP_STATE_REGISTERED 0x02
140}; 141};
141 142
142static void md_update_data(struct ldc_channel *lp, struct ds_cap_state *cp, 143static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
143 void *buf, int len); 144 void *buf, int len);
144static void domain_shutdown_data(struct ldc_channel *lp, 145static void domain_shutdown_data(struct ds_info *dp,
145 struct ds_cap_state *cp, 146 struct ds_cap_state *cp,
146 void *buf, int len); 147 void *buf, int len);
147static void domain_panic_data(struct ldc_channel *lp, 148static void domain_panic_data(struct ds_info *dp,
148 struct ds_cap_state *cp, 149 struct ds_cap_state *cp,
149 void *buf, int len); 150 void *buf, int len);
150#ifdef CONFIG_HOTPLUG_CPU 151#ifdef CONFIG_HOTPLUG_CPU
151static void dr_cpu_data(struct ldc_channel *lp, 152static void dr_cpu_data(struct ds_info *dp,
152 struct ds_cap_state *cp, 153 struct ds_cap_state *cp,
153 void *buf, int len); 154 void *buf, int len);
154#endif 155#endif
155static void ds_pri_data(struct ldc_channel *lp, 156static void ds_pri_data(struct ds_info *dp,
156 struct ds_cap_state *cp, 157 struct ds_cap_state *cp,
157 void *buf, int len); 158 void *buf, int len);
158static void ds_var_data(struct ldc_channel *lp, 159static void ds_var_data(struct ds_info *dp,
159 struct ds_cap_state *cp, 160 struct ds_cap_state *cp,
160 void *buf, int len); 161 void *buf, int len);
161 162
162struct ds_cap_state ds_states[] = { 163struct ds_cap_state ds_states_template[] = {
163 { 164 {
164 .service_id = "md-update", 165 .service_id = "md-update",
165 .data = md_update_data, 166 .data = md_update_data,
@@ -200,30 +201,38 @@ struct ds_info {
200#define DS_HS_START 0x01 201#define DS_HS_START 0x01
201#define DS_HS_DONE 0x02 202#define DS_HS_DONE 0x02
202 203
204 u64 id;
205
203 void *rcv_buf; 206 void *rcv_buf;
204 int rcv_buf_len; 207 int rcv_buf_len;
208
209 struct ds_cap_state *ds_states;
210 int num_ds_states;
211
212 struct ds_info *next;
205}; 213};
206 214
207static struct ds_info *ds_info; 215static struct ds_info *ds_info_list;
208 216
209static struct ds_cap_state *find_cap(u64 handle) 217static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
210{ 218{
211 unsigned int index = handle >> 32; 219 unsigned int index = handle >> 32;
212 220
213 if (index >= ARRAY_SIZE(ds_states)) 221 if (index >= dp->num_ds_states)
214 return NULL; 222 return NULL;
215 return &ds_states[index]; 223 return &dp->ds_states[index];
216} 224}
217 225
218static struct ds_cap_state *find_cap_by_string(const char *name) 226static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
227 const char *name)
219{ 228{
220 int i; 229 int i;
221 230
222 for (i = 0; i < ARRAY_SIZE(ds_states); i++) { 231 for (i = 0; i < dp->num_ds_states; i++) {
223 if (strcmp(ds_states[i].service_id, name)) 232 if (strcmp(dp->ds_states[i].service_id, name))
224 continue; 233 continue;
225 234
226 return &ds_states[i]; 235 return &dp->ds_states[i];
227 } 236 }
228 return NULL; 237 return NULL;
229} 238}
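find_cap() above shows the capability-handle scheme: the service index sits in the upper 32 bits of the 64-bit handle, so a lookup is a shift plus a bounds check against the per-port table, and ds_probe() later in this patch assigns handle i<<32 to slot i. A stand-alone sketch of that encoding with simplified types:

```c
/* Model of the DS capability-handle encoding; the table contents are
 * illustrative, not the full ds_states_template. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct cap_model {
	uint64_t handle;
	const char *service_id;
};

static struct cap_model caps[] = {
	{ 0, "md-update" },
	{ 0, "domain-shutdown" },
	{ 0, "var-config" },
};

static struct cap_model *find_cap_model(uint64_t handle)
{
	uint64_t index = handle >> 32;

	if (index >= sizeof(caps) / sizeof(caps[0]))
		return NULL;
	return &caps[index];
}

int main(void)
{
	size_t i;

	/* Mirrors ds_probe(): handle i<<32 names slot i in the table. */
	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		caps[i].handle = (uint64_t)i << 32;

	printf("handle 0x%llx -> %s\n",
	       (unsigned long long)caps[2].handle,
	       find_cap_model(caps[2].handle)->service_id);
	return 0;
}
```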
@@ -264,10 +273,11 @@ struct ds_md_update_res {
264 __u32 result; 273 __u32 result;
265}; 274};
266 275
267static void md_update_data(struct ldc_channel *lp, 276static void md_update_data(struct ds_info *dp,
268 struct ds_cap_state *dp, 277 struct ds_cap_state *cp,
269 void *buf, int len) 278 void *buf, int len)
270{ 279{
280 struct ldc_channel *lp = dp->lp;
271 struct ds_data *dpkt = buf; 281 struct ds_data *dpkt = buf;
272 struct ds_md_update_req *rp; 282 struct ds_md_update_req *rp;
273 struct { 283 struct {
@@ -277,14 +287,14 @@ static void md_update_data(struct ldc_channel *lp,
277 287
278 rp = (struct ds_md_update_req *) (dpkt + 1); 288 rp = (struct ds_md_update_req *) (dpkt + 1);
279 289
280 printk(KERN_INFO PFX "Machine description update.\n"); 290 printk(KERN_INFO "ds-%lu: Machine description update.\n", dp->id);
281 291
282 mdesc_update(); 292 mdesc_update();
283 293
284 memset(&pkt, 0, sizeof(pkt)); 294 memset(&pkt, 0, sizeof(pkt));
285 pkt.data.tag.type = DS_DATA; 295 pkt.data.tag.type = DS_DATA;
286 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); 296 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
287 pkt.data.handle = dp->handle; 297 pkt.data.handle = cp->handle;
288 pkt.res.req_num = rp->req_num; 298 pkt.res.req_num = rp->req_num;
289 pkt.res.result = DS_OK; 299 pkt.res.result = DS_OK;
290 300
@@ -302,10 +312,11 @@ struct ds_shutdown_res {
302 char reason[1]; 312 char reason[1];
303}; 313};
304 314
305static void domain_shutdown_data(struct ldc_channel *lp, 315static void domain_shutdown_data(struct ds_info *dp,
306 struct ds_cap_state *dp, 316 struct ds_cap_state *cp,
307 void *buf, int len) 317 void *buf, int len)
308{ 318{
319 struct ldc_channel *lp = dp->lp;
309 struct ds_data *dpkt = buf; 320 struct ds_data *dpkt = buf;
310 struct ds_shutdown_req *rp; 321 struct ds_shutdown_req *rp;
311 struct { 322 struct {
@@ -315,20 +326,20 @@ static void domain_shutdown_data(struct ldc_channel *lp,
315 326
316 rp = (struct ds_shutdown_req *) (dpkt + 1); 327 rp = (struct ds_shutdown_req *) (dpkt + 1);
317 328
318 printk(KERN_ALERT PFX "Shutdown request from " 329 printk(KERN_ALERT "ds-%lu: Shutdown request from "
319 "LDOM manager received.\n"); 330 "LDOM manager received.\n", dp->id);
320 331
321 memset(&pkt, 0, sizeof(pkt)); 332 memset(&pkt, 0, sizeof(pkt));
322 pkt.data.tag.type = DS_DATA; 333 pkt.data.tag.type = DS_DATA;
323 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); 334 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
324 pkt.data.handle = dp->handle; 335 pkt.data.handle = cp->handle;
325 pkt.res.req_num = rp->req_num; 336 pkt.res.req_num = rp->req_num;
326 pkt.res.result = DS_OK; 337 pkt.res.result = DS_OK;
327 pkt.res.reason[0] = 0; 338 pkt.res.reason[0] = 0;
328 339
329 ds_send(lp, &pkt, sizeof(pkt)); 340 ds_send(lp, &pkt, sizeof(pkt));
330 341
331 wake_up_powerd(); 342 orderly_poweroff(true);
332} 343}
333 344
334struct ds_panic_req { 345struct ds_panic_req {
@@ -341,10 +352,11 @@ struct ds_panic_res {
341 char reason[1]; 352 char reason[1];
342}; 353};
343 354
344static void domain_panic_data(struct ldc_channel *lp, 355static void domain_panic_data(struct ds_info *dp,
345 struct ds_cap_state *dp, 356 struct ds_cap_state *cp,
346 void *buf, int len) 357 void *buf, int len)
347{ 358{
359 struct ldc_channel *lp = dp->lp;
348 struct ds_data *dpkt = buf; 360 struct ds_data *dpkt = buf;
349 struct ds_panic_req *rp; 361 struct ds_panic_req *rp;
350 struct { 362 struct {
@@ -354,13 +366,13 @@ static void domain_panic_data(struct ldc_channel *lp,
354 366
355 rp = (struct ds_panic_req *) (dpkt + 1); 367 rp = (struct ds_panic_req *) (dpkt + 1);
356 368
357 printk(KERN_ALERT PFX "Panic request from " 369 printk(KERN_ALERT "ds-%lu: Panic request from "
358 "LDOM manager received.\n"); 370 "LDOM manager received.\n", dp->id);
359 371
360 memset(&pkt, 0, sizeof(pkt)); 372 memset(&pkt, 0, sizeof(pkt));
361 pkt.data.tag.type = DS_DATA; 373 pkt.data.tag.type = DS_DATA;
362 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); 374 pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
363 pkt.data.handle = dp->handle; 375 pkt.data.handle = cp->handle;
364 pkt.res.req_num = rp->req_num; 376 pkt.res.req_num = rp->req_num;
365 pkt.res.result = DS_OK; 377 pkt.res.result = DS_OK;
366 pkt.res.reason[0] = 0; 378 pkt.res.reason[0] = 0;
@@ -403,10 +415,11 @@ struct dr_cpu_resp_entry {
403 __u32 str_off; 415 __u32 str_off;
404}; 416};
405 417
406static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data) 418static void __dr_cpu_send_error(struct ds_info *dp,
419 struct ds_cap_state *cp,
420 struct ds_data *data)
407{ 421{
408 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); 422 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
409 struct ds_info *dp = ds_info;
410 struct { 423 struct {
411 struct ds_data data; 424 struct ds_data data;
412 struct dr_cpu_tag tag; 425 struct dr_cpu_tag tag;
@@ -428,12 +441,14 @@ static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
428 __ds_send(dp->lp, &pkt, msg_len); 441 __ds_send(dp->lp, &pkt, msg_len);
429} 442}
430 443
431static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data) 444static void dr_cpu_send_error(struct ds_info *dp,
445 struct ds_cap_state *cp,
446 struct ds_data *data)
432{ 447{
433 unsigned long flags; 448 unsigned long flags;
434 449
435 spin_lock_irqsave(&ds_lock, flags); 450 spin_lock_irqsave(&ds_lock, flags);
436 __dr_cpu_send_error(cp, data); 451 __dr_cpu_send_error(dp, cp, data);
437 spin_unlock_irqrestore(&ds_lock, flags); 452 spin_unlock_irqrestore(&ds_lock, flags);
438} 453}
439 454
@@ -511,7 +526,9 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
511 } 526 }
512} 527}
513 528
514static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num, 529static int dr_cpu_configure(struct ds_info *dp,
530 struct ds_cap_state *cp,
531 u64 req_num,
515 cpumask_t *mask) 532 cpumask_t *mask)
516{ 533{
517 struct ds_data *resp; 534 struct ds_data *resp;
@@ -533,7 +550,8 @@ static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
533 for_each_cpu_mask(cpu, *mask) { 550 for_each_cpu_mask(cpu, *mask) {
534 int err; 551 int err;
535 552
536 printk(KERN_INFO PFX "Starting cpu %d...\n", cpu); 553 printk(KERN_INFO "ds-%lu: Starting cpu %d...\n",
554 dp->id, cpu);
537 err = cpu_up(cpu); 555 err = cpu_up(cpu);
538 if (err) { 556 if (err) {
539 __u32 res = DR_CPU_RES_FAILURE; 557 __u32 res = DR_CPU_RES_FAILURE;
@@ -548,14 +566,14 @@ static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
548 res = DR_CPU_RES_CPU_NOT_RESPONDING; 566 res = DR_CPU_RES_CPU_NOT_RESPONDING;
549 } 567 }
550 568
551 printk(KERN_INFO PFX "CPU startup failed err=%d\n", 569 printk(KERN_INFO "ds-%lu: CPU startup failed err=%d\n",
552 err); 570 dp->id, err);
553 dr_cpu_mark(resp, cpu, ncpus, res, stat); 571 dr_cpu_mark(resp, cpu, ncpus, res, stat);
554 } 572 }
555 } 573 }
556 574
557 spin_lock_irqsave(&ds_lock, flags); 575 spin_lock_irqsave(&ds_lock, flags);
558 __ds_send(ds_info->lp, resp, resp_len); 576 __ds_send(dp->lp, resp, resp_len);
559 spin_unlock_irqrestore(&ds_lock, flags); 577 spin_unlock_irqrestore(&ds_lock, flags);
560 578
561 kfree(resp); 579 kfree(resp);
@@ -566,7 +584,9 @@ static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
566 return 0; 584 return 0;
567} 585}
568 586
569static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num, 587static int dr_cpu_unconfigure(struct ds_info *dp,
588 struct ds_cap_state *cp,
589 u64 req_num,
570 cpumask_t *mask) 590 cpumask_t *mask)
571{ 591{
572 struct ds_data *resp; 592 struct ds_data *resp;
@@ -586,8 +606,8 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
586 for_each_cpu_mask(cpu, *mask) { 606 for_each_cpu_mask(cpu, *mask) {
587 int err; 607 int err;
588 608
589 printk(KERN_INFO PFX "CPU[%d]: Shutting down cpu %d...\n", 609 printk(KERN_INFO "ds-%lu: Shutting down cpu %d...\n",
590 smp_processor_id(), cpu); 610 dp->id, cpu);
591 err = cpu_down(cpu); 611 err = cpu_down(cpu);
592 if (err) 612 if (err)
593 dr_cpu_mark(resp, cpu, ncpus, 613 dr_cpu_mark(resp, cpu, ncpus,
@@ -596,7 +616,7 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
596 } 616 }
597 617
598 spin_lock_irqsave(&ds_lock, flags); 618 spin_lock_irqsave(&ds_lock, flags);
599 __ds_send(ds_info->lp, resp, resp_len); 619 __ds_send(dp->lp, resp, resp_len);
600 spin_unlock_irqrestore(&ds_lock, flags); 620 spin_unlock_irqrestore(&ds_lock, flags);
601 621
602 kfree(resp); 622 kfree(resp);
@@ -604,7 +624,7 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
604 return 0; 624 return 0;
605} 625}
606 626
607static void dr_cpu_data(struct ldc_channel *lp, 627static void dr_cpu_data(struct ds_info *dp,
608 struct ds_cap_state *cp, 628 struct ds_cap_state *cp,
609 void *buf, int len) 629 void *buf, int len)
610{ 630{
@@ -623,7 +643,7 @@ static void dr_cpu_data(struct ldc_channel *lp,
623 break; 643 break;
624 644
625 default: 645 default:
626 dr_cpu_send_error(cp, data); 646 dr_cpu_send_error(dp, cp, data);
627 return; 647 return;
628 } 648 }
629 649
@@ -639,12 +659,12 @@ static void dr_cpu_data(struct ldc_channel *lp,
639 } 659 }
640 660
641 if (tag->type == DR_CPU_CONFIGURE) 661 if (tag->type == DR_CPU_CONFIGURE)
642 err = dr_cpu_configure(cp, req_num, &mask); 662 err = dr_cpu_configure(dp, cp, req_num, &mask);
643 else 663 else
644 err = dr_cpu_unconfigure(cp, req_num, &mask); 664 err = dr_cpu_unconfigure(dp, cp, req_num, &mask);
645 665
646 if (err) 666 if (err)
647 dr_cpu_send_error(cp, data); 667 dr_cpu_send_error(dp, cp, data);
648} 668}
649#endif /* CONFIG_HOTPLUG_CPU */ 669#endif /* CONFIG_HOTPLUG_CPU */
650 670
@@ -656,8 +676,8 @@ struct ds_pri_msg {
656#define DS_PRI_UPDATE 0x02 676#define DS_PRI_UPDATE 0x02
657}; 677};
658 678
659static void ds_pri_data(struct ldc_channel *lp, 679static void ds_pri_data(struct ds_info *dp,
660 struct ds_cap_state *dp, 680 struct ds_cap_state *cp,
661 void *buf, int len) 681 void *buf, int len)
662{ 682{
663 struct ds_data *dpkt = buf; 683 struct ds_data *dpkt = buf;
@@ -665,8 +685,8 @@ static void ds_pri_data(struct ldc_channel *lp,
665 685
666 rp = (struct ds_pri_msg *) (dpkt + 1); 686 rp = (struct ds_pri_msg *) (dpkt + 1);
667 687
668 printk(KERN_INFO PFX "PRI REQ [%lx:%lx], len=%d\n", 688 printk(KERN_INFO "ds-%lu: PRI REQ [%lx:%lx], len=%d\n",
669 rp->req_num, rp->type, len); 689 dp->id, rp->req_num, rp->type, len);
670} 690}
671 691
672struct ds_var_hdr { 692struct ds_var_hdr {
@@ -701,8 +721,8 @@ static DEFINE_MUTEX(ds_var_mutex);
701static int ds_var_doorbell; 721static int ds_var_doorbell;
702static int ds_var_response; 722static int ds_var_response;
703 723
704static void ds_var_data(struct ldc_channel *lp, 724static void ds_var_data(struct ds_info *dp,
705 struct ds_cap_state *dp, 725 struct ds_cap_state *cp,
706 void *buf, int len) 726 void *buf, int len)
707{ 727{
708 struct ds_data *dpkt = buf; 728 struct ds_data *dpkt = buf;
@@ -721,14 +741,35 @@ static void ds_var_data(struct ldc_channel *lp,
721 741
722void ldom_set_var(const char *var, const char *value) 742void ldom_set_var(const char *var, const char *value)
723{ 743{
724 struct ds_info *dp = ds_info;
725 struct ds_cap_state *cp; 744 struct ds_cap_state *cp;
745 struct ds_info *dp;
746 unsigned long flags;
726 747
727 cp = find_cap_by_string("var-config"); 748 spin_lock_irqsave(&ds_lock, flags);
728 if (cp->state != CAP_STATE_REGISTERED) 749 cp = NULL;
729 cp = find_cap_by_string("var-config-backup"); 750 for (dp = ds_info_list; dp; dp = dp->next) {
751 struct ds_cap_state *tmp;
730 752
731 if (cp->state == CAP_STATE_REGISTERED) { 753 tmp = find_cap_by_string(dp, "var-config");
754 if (tmp && tmp->state == CAP_STATE_REGISTERED) {
755 cp = tmp;
756 break;
757 }
758 }
759 if (!cp) {
760 for (dp = ds_info_list; dp; dp = dp->next) {
761 struct ds_cap_state *tmp;
762
763 tmp = find_cap_by_string(dp, "var-config-backup");
764 if (tmp && tmp->state == CAP_STATE_REGISTERED) {
765 cp = tmp;
766 break;
767 }
768 }
769 }
770 spin_unlock_irqrestore(&ds_lock, flags);
771
772 if (cp) {
732 union { 773 union {
733 struct { 774 struct {
734 struct ds_data data; 775 struct ds_data data;
@@ -736,7 +777,6 @@ void ldom_set_var(const char *var, const char *value)
736 } header; 777 } header;
737 char all[512]; 778 char all[512];
738 } pkt; 779 } pkt;
739 unsigned long flags;
740 char *base, *p; 780 char *base, *p;
741 int msg_len, loops; 781 int msg_len, loops;
742 782
@@ -777,9 +817,9 @@ void ldom_set_var(const char *var, const char *value)
777 817
778 if (ds_var_doorbell == 0 || 818 if (ds_var_doorbell == 0 ||
779 ds_var_response != DS_VAR_SUCCESS) 819 ds_var_response != DS_VAR_SUCCESS)
780 printk(KERN_ERR PFX "var-config [%s:%s] " 820 printk(KERN_ERR "ds-%lu: var-config [%s:%s] "
781 "failed, response(%d).\n", 821 "failed, response(%d).\n",
782 var, value, 822 dp->id, var, value,
783 ds_var_response); 823 ds_var_response);
784 } else { 824 } else {
785 printk(KERN_ERR PFX "var-config not registered so " 825 printk(KERN_ERR PFX "var-config not registered so "
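The rewritten ldom_set_var() above no longer assumes a single global DS port: it walks every entry on ds_info_list looking for a registered "var-config" service and only then falls back to "var-config-backup". A simplified model of that two-pass search over a linked list of ports:

```c
/* Two-pass service lookup over a list of ports; the types are stand-ins
 * for struct ds_info / struct ds_cap_state. */
#include <stdio.h>
#include <string.h>

struct port_model {
	const char *registered_service;	/* NULL if nothing registered */
	struct port_model *next;
};

static struct port_model *find_service(struct port_model *head,
				       const char *name)
{
	struct port_model *p;

	for (p = head; p; p = p->next)
		if (p->registered_service &&
		    strcmp(p->registered_service, name) == 0)
			return p;
	return NULL;
}

int main(void)
{
	struct port_model b = { "var-config-backup", NULL };
	struct port_model a = { NULL, &b };
	struct port_model *hit;

	hit = find_service(&a, "var-config");
	if (!hit)
		hit = find_service(&a, "var-config-backup");

	printf("using %s\n", hit ? hit->registered_service : "(none)");
	return 0;
}
```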
@@ -811,8 +851,8 @@ void ldom_power_off(void)
811 851
812static void ds_conn_reset(struct ds_info *dp) 852static void ds_conn_reset(struct ds_info *dp)
813{ 853{
814 printk(KERN_ERR PFX "ds_conn_reset() from %p\n", 854 printk(KERN_ERR "ds-%lu: ds_conn_reset() from %p\n",
815 __builtin_return_address(0)); 855 dp->id, __builtin_return_address(0));
816} 856}
817 857
818static int register_services(struct ds_info *dp) 858static int register_services(struct ds_info *dp)
@@ -820,12 +860,12 @@ static int register_services(struct ds_info *dp)
820 struct ldc_channel *lp = dp->lp; 860 struct ldc_channel *lp = dp->lp;
821 int i; 861 int i;
822 862
823 for (i = 0; i < ARRAY_SIZE(ds_states); i++) { 863 for (i = 0; i < dp->num_ds_states; i++) {
824 struct { 864 struct {
825 struct ds_reg_req req; 865 struct ds_reg_req req;
826 u8 id_buf[256]; 866 u8 id_buf[256];
827 } pbuf; 867 } pbuf;
828 struct ds_cap_state *cp = &ds_states[i]; 868 struct ds_cap_state *cp = &dp->ds_states[i];
829 int err, msg_len; 869 int err, msg_len;
830 u64 new_count; 870 u64 new_count;
831 871
@@ -870,28 +910,26 @@ static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
870 910
871 if (pkt->type == DS_REG_ACK) { 911 if (pkt->type == DS_REG_ACK) {
872 struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt; 912 struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
873 struct ds_cap_state *cp = find_cap(ap->handle); 913 struct ds_cap_state *cp = find_cap(dp, ap->handle);
874 914
875 if (!cp) { 915 if (!cp) {
876 printk(KERN_ERR PFX "REG ACK for unknown handle %lx\n", 916 printk(KERN_ERR "ds-%lu: REG ACK for unknown "
877 ap->handle); 917 "handle %lx\n", dp->id, ap->handle);
878 return 0; 918 return 0;
879 } 919 }
880 printk(KERN_INFO PFX "Registered %s service.\n", 920 printk(KERN_INFO "ds-%lu: Registered %s service.\n",
881 cp->service_id); 921 dp->id, cp->service_id);
882 cp->state = CAP_STATE_REGISTERED; 922 cp->state = CAP_STATE_REGISTERED;
883 } else if (pkt->type == DS_REG_NACK) { 923 } else if (pkt->type == DS_REG_NACK) {
884 struct ds_reg_nack *np = (struct ds_reg_nack *) pkt; 924 struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
885 struct ds_cap_state *cp = find_cap(np->handle); 925 struct ds_cap_state *cp = find_cap(dp, np->handle);
886 926
887 if (!cp) { 927 if (!cp) {
888 printk(KERN_ERR PFX "REG NACK for " 928 printk(KERN_ERR "ds-%lu: REG NACK for "
889 "unknown handle %lx\n", 929 "unknown handle %lx\n",
890 np->handle); 930 dp->id, np->handle);
891 return 0; 931 return 0;
892 } 932 }
893 printk(KERN_INFO PFX "Could not register %s service\n",
894 cp->service_id);
895 cp->state = CAP_STATE_UNKNOWN; 933 cp->state = CAP_STATE_UNKNOWN;
896 } 934 }
897 935
@@ -922,6 +960,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ds_wait);
922 960
923struct ds_queue_entry { 961struct ds_queue_entry {
924 struct list_head list; 962 struct list_head list;
963 struct ds_info *dp;
925 int req_len; 964 int req_len;
926 int __pad; 965 int __pad;
927 u64 req[0]; 966 u64 req[0];
@@ -930,7 +969,6 @@ struct ds_queue_entry {
930static void process_ds_work(void) 969static void process_ds_work(void)
931{ 970{
932 struct ds_queue_entry *qp, *tmp; 971 struct ds_queue_entry *qp, *tmp;
933 static struct ds_info *dp;
934 unsigned long flags; 972 unsigned long flags;
935 LIST_HEAD(todo); 973 LIST_HEAD(todo);
936 974
@@ -939,22 +977,22 @@ static void process_ds_work(void)
939 INIT_LIST_HEAD(&ds_work_list); 977 INIT_LIST_HEAD(&ds_work_list);
940 spin_unlock_irqrestore(&ds_lock, flags); 978 spin_unlock_irqrestore(&ds_lock, flags);
941 979
942 dp = ds_info;
943
944 list_for_each_entry_safe(qp, tmp, &todo, list) { 980 list_for_each_entry_safe(qp, tmp, &todo, list) {
945 struct ds_data *dpkt = (struct ds_data *) qp->req; 981 struct ds_data *dpkt = (struct ds_data *) qp->req;
946 struct ds_cap_state *cp = find_cap(dpkt->handle); 982 struct ds_info *dp = qp->dp;
983 struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
947 int req_len = qp->req_len; 984 int req_len = qp->req_len;
948 985
949 if (!cp) { 986 if (!cp) {
950 printk(KERN_ERR PFX "Data for unknown handle %lu\n", 987 printk(KERN_ERR "ds-%lu: Data for unknown "
951 dpkt->handle); 988 "handle %lu\n",
989 dp->id, dpkt->handle);
952 990
953 spin_lock_irqsave(&ds_lock, flags); 991 spin_lock_irqsave(&ds_lock, flags);
954 __send_ds_nack(dp, dpkt->handle); 992 __send_ds_nack(dp, dpkt->handle);
955 spin_unlock_irqrestore(&ds_lock, flags); 993 spin_unlock_irqrestore(&ds_lock, flags);
956 } else { 994 } else {
957 cp->data(dp->lp, cp, dpkt, req_len); 995 cp->data(dp, cp, dpkt, req_len);
958 } 996 }
959 997
960 list_del(&qp->list); 998 list_del(&qp->list);
@@ -990,6 +1028,7 @@ static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
990 if (!qp) { 1028 if (!qp) {
991 __send_ds_nack(dp, dpkt->handle); 1029 __send_ds_nack(dp, dpkt->handle);
992 } else { 1030 } else {
1031 qp->dp = dp;
993 memcpy(&qp->req, pkt, len); 1032 memcpy(&qp->req, pkt, len);
994 list_add_tail(&qp->list, &ds_work_list); 1033 list_add_tail(&qp->list, &ds_work_list);
995 wake_up(&ds_wait); 1034 wake_up(&ds_wait);
@@ -1019,8 +1058,8 @@ static void ds_reset(struct ds_info *dp)
1019 1058
1020 dp->hs_state = 0; 1059 dp->hs_state = 0;
1021 1060
1022 for (i = 0; i < ARRAY_SIZE(ds_states); i++) { 1061 for (i = 0; i < dp->num_ds_states; i++) {
1023 struct ds_cap_state *cp = &ds_states[i]; 1062 struct ds_cap_state *cp = &dp->ds_states[i];
1024 1063
1025 cp->state = CAP_STATE_UNKNOWN; 1064 cp->state = CAP_STATE_UNKNOWN;
1026 } 1065 }
@@ -1048,7 +1087,8 @@ static void ds_event(void *arg, int event)
1048 } 1087 }
1049 1088
1050 if (event != LDC_EVENT_DATA_READY) { 1089 if (event != LDC_EVENT_DATA_READY) {
1051 printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); 1090 printk(KERN_WARNING "ds-%lu: Unexpected LDC event %d\n",
1091 dp->id, event);
1052 spin_unlock_irqrestore(&ds_lock, flags); 1092 spin_unlock_irqrestore(&ds_lock, flags);
1053 return; 1093 return;
1054 } 1094 }
@@ -1099,9 +1139,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
1099 .mtu = 4096, 1139 .mtu = 4096,
1100 .mode = LDC_MODE_STREAM, 1140 .mode = LDC_MODE_STREAM,
1101 }; 1141 };
1142 struct mdesc_handle *hp;
1102 struct ldc_channel *lp; 1143 struct ldc_channel *lp;
1103 struct ds_info *dp; 1144 struct ds_info *dp;
1104 int err; 1145 const u64 *val;
1146 int err, i;
1105 1147
1106 if (ds_version_printed++ == 0) 1148 if (ds_version_printed++ == 0)
1107 printk(KERN_INFO "%s", version); 1149 printk(KERN_INFO "%s", version);
@@ -1111,19 +1153,37 @@ static int __devinit ds_probe(struct vio_dev *vdev,
1111 if (!dp) 1153 if (!dp)
1112 goto out_err; 1154 goto out_err;
1113 1155
1156 hp = mdesc_grab();
1157 val = mdesc_get_property(hp, vdev->mp, "id", NULL);
1158 if (val)
1159 dp->id = *val;
1160 mdesc_release(hp);
1161
1114 dp->rcv_buf = kzalloc(4096, GFP_KERNEL); 1162 dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
1115 if (!dp->rcv_buf) 1163 if (!dp->rcv_buf)
1116 goto out_free_dp; 1164 goto out_free_dp;
1117 1165
1118 dp->rcv_buf_len = 4096; 1166 dp->rcv_buf_len = 4096;
1119 1167
1168 dp->ds_states = kzalloc(sizeof(ds_states_template),
1169 GFP_KERNEL);
1170 if (!dp->ds_states)
1171 goto out_free_rcv_buf;
1172
1173 memcpy(dp->ds_states, ds_states_template,
1174 sizeof(ds_states_template));
1175 dp->num_ds_states = ARRAY_SIZE(ds_states_template);
1176
1177 for (i = 0; i < dp->num_ds_states; i++)
1178 dp->ds_states[i].handle = ((u64)i << 32);
1179
1120 ds_cfg.tx_irq = vdev->tx_irq; 1180 ds_cfg.tx_irq = vdev->tx_irq;
1121 ds_cfg.rx_irq = vdev->rx_irq; 1181 ds_cfg.rx_irq = vdev->rx_irq;
1122 1182
1123 lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); 1183 lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
1124 if (IS_ERR(lp)) { 1184 if (IS_ERR(lp)) {
1125 err = PTR_ERR(lp); 1185 err = PTR_ERR(lp);
1126 goto out_free_rcv_buf; 1186 goto out_free_ds_states;
1127 } 1187 }
1128 dp->lp = lp; 1188 dp->lp = lp;
1129 1189
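In the ds_probe() hunk above each DS device now gets its own writable copy of ds_states_template, so registration state on one port can no longer leak into another. A user-space sketch of that copy-the-template pattern (malloc/memcpy standing in for kzalloc, and the template entries are abbreviated):

```c
/* Per-device copy of a read-only capability template. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cap_template { const char *service_id; int state; };

static const struct cap_template template[] = {
	{ "md-update", 0 },
	{ "domain-shutdown", 0 },
};

struct device_model {
	struct cap_template *caps;
	size_t ncaps;
};

static int device_init(struct device_model *dev)
{
	dev->caps = malloc(sizeof(template));
	if (!dev->caps)
		return -1;
	memcpy(dev->caps, template, sizeof(template));
	dev->ncaps = sizeof(template) / sizeof(template[0]);
	return 0;
}

int main(void)
{
	struct device_model a, b;

	if (device_init(&a) || device_init(&b))
		return 1;

	a.caps[0].state = 2;	/* registering on port a ... */
	printf("port b md-update state: %d\n", b.caps[0].state); /* ... stays 0 */
	free(a.caps);
	free(b.caps);
	return 0;
}
```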
@@ -1131,15 +1191,19 @@ static int __devinit ds_probe(struct vio_dev *vdev,
1131 if (err) 1191 if (err)
1132 goto out_free_ldc; 1192 goto out_free_ldc;
1133 1193
1134 ds_info = dp; 1194 spin_lock_irq(&ds_lock);
1135 1195 dp->next = ds_info_list;
1136 start_powerd(); 1196 ds_info_list = dp;
1197 spin_unlock_irq(&ds_lock);
1137 1198
1138 return err; 1199 return err;
1139 1200
1140out_free_ldc: 1201out_free_ldc:
1141 ldc_free(dp->lp); 1202 ldc_free(dp->lp);
1142 1203
1204out_free_ds_states:
1205 kfree(dp->ds_states);
1206
1143out_free_rcv_buf: 1207out_free_rcv_buf:
1144 kfree(dp->rcv_buf); 1208 kfree(dp->rcv_buf);
1145 1209
@@ -1174,11 +1238,6 @@ static struct vio_driver ds_driver = {
1174 1238
1175static int __init ds_init(void) 1239static int __init ds_init(void)
1176{ 1240{
1177 int i;
1178
1179 for (i = 0; i < ARRAY_SIZE(ds_states); i++)
1180 ds_states[i].handle = ((u64)i << 32);
1181
1182 kthread_run(ds_thread, NULL, "kldomd"); 1241 kthread_run(ds_thread, NULL, "kldomd");
1183 1242
1184 return vio_register_driver(&ds_driver); 1243 return vio_register_driver(&ds_driver);
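
The ds.c hunks above replace the single global ds_states[] array with per-port state: each domain-services port gets its own copy of a shared template, each capability is stamped with a handle that encodes its index, and the ports are linked into a list under ds_lock. Below is a minimal userspace sketch of that allocation pattern; the names (my_cap, my_port, port_create) are illustrative stand-ins, not the kernel's.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct my_cap {                 /* stand-in for ds_cap_state */
	uint64_t handle;
	int      state;
};

static const struct my_cap cap_template[] = {
	{ .state = 0 }, { .state = 0 }, { .state = 0 },
};

struct my_port {                /* stand-in for ds_info */
	uint64_t        id;
	struct my_cap  *caps;
	int             num_caps;
	struct my_port *next;
};

/* Copy the shared template into per-port storage and stamp each
 * capability with a handle encoding its index, mirroring the
 * "handle = (u64)i << 32" step in the patch above. */
static struct my_port *port_create(uint64_t id, struct my_port **list)
{
	struct my_port *p = calloc(1, sizeof(*p));
	int i;

	if (!p)
		return NULL;
	p->id = id;
	p->caps = malloc(sizeof(cap_template));
	if (!p->caps) {
		free(p);
		return NULL;
	}
	memcpy(p->caps, cap_template, sizeof(cap_template));
	p->num_caps = sizeof(cap_template) / sizeof(cap_template[0]);
	for (i = 0; i < p->num_caps; i++)
		p->caps[i].handle = (uint64_t)i << 32;

	p->next = *list;        /* head insertion, like ds_info_list */
	*list = p;
	return p;
}
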
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index ad55a9bb50dd..6d2956179cde 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -362,6 +362,7 @@ static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
362static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev) 362static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
363{ 363{
364 struct linux_ebus_child *child; 364 struct linux_ebus_child *child;
365 struct dev_archdata *sd;
365 struct of_device *op; 366 struct of_device *op;
366 int i, len; 367 int i, len;
367 368
@@ -387,6 +388,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
387 dev->irqs[i] = op->irqs[i]; 388 dev->irqs[i] = op->irqs[i];
388 } 389 }
389 390
391 sd = &dev->ofdev.dev.archdata;
392 sd->prom_node = dp;
393 sd->op = &dev->ofdev;
394
390 dev->ofdev.node = dp; 395 dev->ofdev.node = dp;
391 dev->ofdev.dev.parent = &dev->bus->ofdev.dev; 396 dev->ofdev.dev.parent = &dev->bus->ofdev.dev;
392 dev->ofdev.dev.bus = &ebus_bus_type; 397 dev->ofdev.dev.bus = &ebus_bus_type;
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 77259526cb15..35feacb6b8ec 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -458,7 +458,6 @@ tlb_fixup_done:
458 or %g6, %lo(init_thread_union), %g6 458 or %g6, %lo(init_thread_union), %g6
459 ldx [%g6 + TI_TASK], %g4 459 ldx [%g6 + TI_TASK], %g4
460 mov %sp, %l6 460 mov %sp, %l6
461 mov %o4, %l7
462 461
463 wr %g0, ASI_P, %asi 462 wr %g0, ASI_P, %asi
464 mov 1, %g1 463 mov 1, %g1
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8cb3358674f5..db31bf6b42db 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -87,7 +87,11 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
87 */ 87 */
88#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist) 88#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
89 89
90static unsigned int virt_to_real_irq_table[NR_IRQS]; 90static struct {
91 unsigned int irq;
92 unsigned int dev_handle;
93 unsigned int dev_ino;
94} virt_to_real_irq_table[NR_IRQS];
91 95
92static unsigned char virt_irq_alloc(unsigned int real_irq) 96static unsigned char virt_irq_alloc(unsigned int real_irq)
93{ 97{
@@ -96,7 +100,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
96 BUILD_BUG_ON(NR_IRQS >= 256); 100 BUILD_BUG_ON(NR_IRQS >= 256);
97 101
98 for (ent = 1; ent < NR_IRQS; ent++) { 102 for (ent = 1; ent < NR_IRQS; ent++) {
99 if (!virt_to_real_irq_table[ent]) 103 if (!virt_to_real_irq_table[ent].irq)
100 break; 104 break;
101 } 105 }
102 if (ent >= NR_IRQS) { 106 if (ent >= NR_IRQS) {
@@ -104,7 +108,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
104 return 0; 108 return 0;
105 } 109 }
106 110
107 virt_to_real_irq_table[ent] = real_irq; 111 virt_to_real_irq_table[ent].irq = real_irq;
108 112
109 return ent; 113 return ent;
110} 114}
@@ -117,8 +121,8 @@ static void virt_irq_free(unsigned int virt_irq)
117 if (virt_irq >= NR_IRQS) 121 if (virt_irq >= NR_IRQS)
118 return; 122 return;
119 123
120 real_irq = virt_to_real_irq_table[virt_irq]; 124 real_irq = virt_to_real_irq_table[virt_irq].irq;
121 virt_to_real_irq_table[virt_irq] = 0; 125 virt_to_real_irq_table[virt_irq].irq = 0;
122 126
123 __bucket(real_irq)->virt_irq = 0; 127 __bucket(real_irq)->virt_irq = 0;
124} 128}
@@ -126,7 +130,7 @@ static void virt_irq_free(unsigned int virt_irq)
126 130
127static unsigned int virt_to_real_irq(unsigned char virt_irq) 131static unsigned int virt_to_real_irq(unsigned char virt_irq)
128{ 132{
129 return virt_to_real_irq_table[virt_irq]; 133 return virt_to_real_irq_table[virt_irq].irq;
130} 134}
131 135
132/* 136/*
@@ -336,15 +340,15 @@ static void sun4v_irq_enable(unsigned int virt_irq)
336 340
337 err = sun4v_intr_settarget(ino, cpuid); 341 err = sun4v_intr_settarget(ino, cpuid);
338 if (err != HV_EOK) 342 if (err != HV_EOK)
339 printk("sun4v_intr_settarget(%x,%lu): err(%d)\n", 343 printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
340 ino, cpuid, err); 344 "err(%d)\n", ino, cpuid, err);
341 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); 345 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
342 if (err != HV_EOK) 346 if (err != HV_EOK)
343 printk("sun4v_intr_setstate(%x): " 347 printk(KERN_ERR "sun4v_intr_setstate(%x): "
344 "err(%d)\n", ino, err); 348 "err(%d)\n", ino, err);
345 err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); 349 err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
346 if (err != HV_EOK) 350 if (err != HV_EOK)
347 printk("sun4v_intr_setenabled(%x): err(%d)\n", 351 printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
348 ino, err); 352 ino, err);
349 } 353 }
350} 354}
@@ -362,8 +366,8 @@ static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
362 366
363 err = sun4v_intr_settarget(ino, cpuid); 367 err = sun4v_intr_settarget(ino, cpuid);
364 if (err != HV_EOK) 368 if (err != HV_EOK)
365 printk("sun4v_intr_settarget(%x,%lu): err(%d)\n", 369 printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
366 ino, cpuid, err); 370 "err(%d)\n", ino, cpuid, err);
367 } 371 }
368} 372}
369 373
@@ -377,7 +381,7 @@ static void sun4v_irq_disable(unsigned int virt_irq)
377 381
378 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); 382 err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
379 if (err != HV_EOK) 383 if (err != HV_EOK)
380 printk("sun4v_intr_setenabled(%x): " 384 printk(KERN_ERR "sun4v_intr_setenabled(%x): "
381 "err(%d)\n", ino, err); 385 "err(%d)\n", ino, err);
382 } 386 }
383} 387}
@@ -410,7 +414,7 @@ static void sun4v_irq_end(unsigned int virt_irq)
410 414
411 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); 415 err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
412 if (err != HV_EOK) 416 if (err != HV_EOK)
413 printk("sun4v_intr_setstate(%x): " 417 printk(KERN_ERR "sun4v_intr_setstate(%x): "
414 "err(%d)\n", ino, err); 418 "err(%d)\n", ino, err);
415 } 419 }
416} 420}
@@ -418,7 +422,6 @@ static void sun4v_irq_end(unsigned int virt_irq)
418static void sun4v_virq_enable(unsigned int virt_irq) 422static void sun4v_virq_enable(unsigned int virt_irq)
419{ 423{
420 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); 424 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
421 unsigned int ino = bucket - &ivector_table[0];
422 425
423 if (likely(bucket)) { 426 if (likely(bucket)) {
424 unsigned long cpuid, dev_handle, dev_ino; 427 unsigned long cpuid, dev_handle, dev_ino;
@@ -426,24 +429,24 @@ static void sun4v_virq_enable(unsigned int virt_irq)
426 429
427 cpuid = irq_choose_cpu(virt_irq); 430 cpuid = irq_choose_cpu(virt_irq);
428 431
429 dev_handle = ino & IMAP_IGN; 432 dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
430 dev_ino = ino & IMAP_INO; 433 dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
431 434
432 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); 435 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
433 if (err != HV_EOK) 436 if (err != HV_EOK)
434 printk("sun4v_vintr_set_target(%lx,%lx,%lu): " 437 printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
435 "err(%d)\n", 438 "err(%d)\n",
436 dev_handle, dev_ino, cpuid, err); 439 dev_handle, dev_ino, cpuid, err);
437 err = sun4v_vintr_set_state(dev_handle, dev_ino, 440 err = sun4v_vintr_set_state(dev_handle, dev_ino,
438 HV_INTR_STATE_IDLE); 441 HV_INTR_STATE_IDLE);
439 if (err != HV_EOK) 442 if (err != HV_EOK)
440 printk("sun4v_vintr_set_state(%lx,%lx," 443 printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
441 "HV_INTR_STATE_IDLE): err(%d)\n", 444 "HV_INTR_STATE_IDLE): err(%d)\n",
442 dev_handle, dev_ino, err); 445 dev_handle, dev_ino, err);
443 err = sun4v_vintr_set_valid(dev_handle, dev_ino, 446 err = sun4v_vintr_set_valid(dev_handle, dev_ino,
444 HV_INTR_ENABLED); 447 HV_INTR_ENABLED);
445 if (err != HV_EOK) 448 if (err != HV_EOK)
446 printk("sun4v_vintr_set_state(%lx,%lx," 449 printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
447 "HV_INTR_ENABLED): err(%d)\n", 450 "HV_INTR_ENABLED): err(%d)\n",
448 dev_handle, dev_ino, err); 451 dev_handle, dev_ino, err);
449 } 452 }
@@ -452,7 +455,6 @@ static void sun4v_virq_enable(unsigned int virt_irq)
452static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) 455static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
453{ 456{
454 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); 457 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
455 unsigned int ino = bucket - &ivector_table[0];
456 458
457 if (likely(bucket)) { 459 if (likely(bucket)) {
458 unsigned long cpuid, dev_handle, dev_ino; 460 unsigned long cpuid, dev_handle, dev_ino;
@@ -460,12 +462,12 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
460 462
461 cpuid = irq_choose_cpu(virt_irq); 463 cpuid = irq_choose_cpu(virt_irq);
462 464
463 dev_handle = ino & IMAP_IGN; 465 dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
464 dev_ino = ino & IMAP_INO; 466 dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
465 467
466 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); 468 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
467 if (err != HV_EOK) 469 if (err != HV_EOK)
468 printk("sun4v_vintr_set_target(%lx,%lx,%lu): " 470 printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
469 "err(%d)\n", 471 "err(%d)\n",
470 dev_handle, dev_ino, cpuid, err); 472 dev_handle, dev_ino, cpuid, err);
471 } 473 }
@@ -474,19 +476,18 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
474static void sun4v_virq_disable(unsigned int virt_irq) 476static void sun4v_virq_disable(unsigned int virt_irq)
475{ 477{
476 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); 478 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
477 unsigned int ino = bucket - &ivector_table[0];
478 479
479 if (likely(bucket)) { 480 if (likely(bucket)) {
480 unsigned long dev_handle, dev_ino; 481 unsigned long dev_handle, dev_ino;
481 int err; 482 int err;
482 483
483 dev_handle = ino & IMAP_IGN; 484 dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
484 dev_ino = ino & IMAP_INO; 485 dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
485 486
486 err = sun4v_vintr_set_valid(dev_handle, dev_ino, 487 err = sun4v_vintr_set_valid(dev_handle, dev_ino,
487 HV_INTR_DISABLED); 488 HV_INTR_DISABLED);
488 if (err != HV_EOK) 489 if (err != HV_EOK)
489 printk("sun4v_vintr_set_state(%lx,%lx," 490 printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
490 "HV_INTR_DISABLED): err(%d)\n", 491 "HV_INTR_DISABLED): err(%d)\n",
491 dev_handle, dev_ino, err); 492 dev_handle, dev_ino, err);
492 } 493 }
@@ -495,7 +496,6 @@ static void sun4v_virq_disable(unsigned int virt_irq)
495static void sun4v_virq_end(unsigned int virt_irq) 496static void sun4v_virq_end(unsigned int virt_irq)
496{ 497{
497 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); 498 struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
498 unsigned int ino = bucket - &ivector_table[0];
499 struct irq_desc *desc = irq_desc + virt_irq; 499 struct irq_desc *desc = irq_desc + virt_irq;
500 500
501 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) 501 if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -505,13 +505,13 @@ static void sun4v_virq_end(unsigned int virt_irq)
505 unsigned long dev_handle, dev_ino; 505 unsigned long dev_handle, dev_ino;
506 int err; 506 int err;
507 507
508 dev_handle = ino & IMAP_IGN; 508 dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
509 dev_ino = ino & IMAP_INO; 509 dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
510 510
511 err = sun4v_vintr_set_state(dev_handle, dev_ino, 511 err = sun4v_vintr_set_state(dev_handle, dev_ino,
512 HV_INTR_STATE_IDLE); 512 HV_INTR_STATE_IDLE);
513 if (err != HV_EOK) 513 if (err != HV_EOK)
514 printk("sun4v_vintr_set_state(%lx,%lx," 514 printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
515 "HV_INTR_STATE_IDLE): err(%d)\n", 515 "HV_INTR_STATE_IDLE): err(%d)\n",
516 dev_handle, dev_ino, err); 516 dev_handle, dev_ino, err);
517 } 517 }
@@ -700,11 +700,12 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
700unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) 700unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
701{ 701{
702 unsigned long sysino, hv_err; 702 unsigned long sysino, hv_err;
703 unsigned int virq;
703 704
704 BUG_ON(devhandle & ~IMAP_IGN); 705 BUG_ON(devhandle & devino);
705 BUG_ON(devino & ~IMAP_INO);
706 706
707 sysino = devhandle | devino; 707 sysino = devhandle | devino;
708 BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
708 709
709 hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino); 710 hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
710 if (hv_err) { 711 if (hv_err) {
@@ -713,7 +714,12 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
713 prom_halt(); 714 prom_halt();
714 } 715 }
715 716
716 return sun4v_build_common(sysino, &sun4v_virq); 717 virq = sun4v_build_common(sysino, &sun4v_virq);
718
719 virt_to_real_irq_table[virq].dev_handle = devhandle;
720 virt_to_real_irq_table[virq].dev_ino = devino;
721
722 return virq;
717} 723}
718 724
719#ifdef CONFIG_PCI_MSI 725#ifdef CONFIG_PCI_MSI
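
The irq.c changes widen virt_to_real_irq_table from a bare array of real-irq numbers into an array of structs that also record the sun4v device handle and ino captured in sun4v_build_virq(), so the virq enable/disable/end paths look those values up instead of deriving them from the bucket index. A minimal sketch of that table shape, with hypothetical names (vmap, MAX_VIRQ):

#define MAX_VIRQ 256

/* One slot per virtual irq: the backing "real" irq plus the
 * hypervisor coordinates recorded when the virq is built. */
static struct {
	unsigned int irq;
	unsigned int dev_handle;
	unsigned int dev_ino;
} vmap[MAX_VIRQ];

/* Allocate the first free slot; slot 0 stays unused as "invalid". */
static int virq_alloc(unsigned int real_irq)
{
	int ent;

	for (ent = 1; ent < MAX_VIRQ; ent++)
		if (!vmap[ent].irq)
			break;
	if (ent >= MAX_VIRQ)
		return -1;
	vmap[ent].irq = real_irq;
	return ent;
}

/* Record devhandle/devino at build time so the enable/disable paths
 * can read them back instead of masking an ino value. */
static void virq_bind(int virq, unsigned int devhandle, unsigned int devino)
{
	vmap[virq].dev_handle = devhandle;
	vmap[virq].dev_ino    = devino;
}
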
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 6a6882e57ff2..1a1043fcf97d 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -79,6 +79,7 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
79 79
80 while (dp) { 80 while (dp) {
81 struct sparc_isa_device *isa_dev; 81 struct sparc_isa_device *isa_dev;
82 struct dev_archdata *sd;
82 83
83 isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL); 84 isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
84 if (!isa_dev) { 85 if (!isa_dev) {
@@ -86,6 +87,10 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
86 return; 87 return;
87 } 88 }
88 89
90 sd = &isa_dev->ofdev.dev.archdata;
91 sd->prom_node = dp;
92 sd->op = &isa_dev->ofdev;
93
89 isa_dev->ofdev.node = dp; 94 isa_dev->ofdev.node = dp;
90 isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev; 95 isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev;
91 isa_dev->ofdev.dev.bus = &isa_bus_type; 96 isa_dev->ofdev.dev.bus = &isa_bus_type;
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 302ba5e5a0bb..cce4d0ddf5d5 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -83,7 +83,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
83 hp->handle_size = handle_size; 83 hp->handle_size = handle_size;
84} 84}
85 85
86static struct mdesc_handle *mdesc_bootmem_alloc(unsigned int mdesc_size) 86static struct mdesc_handle * __init mdesc_bootmem_alloc(unsigned int mdesc_size)
87{ 87{
88 struct mdesc_handle *hp; 88 struct mdesc_handle *hp;
89 unsigned int handle_size, alloc_size; 89 unsigned int handle_size, alloc_size;
@@ -123,7 +123,7 @@ static void mdesc_bootmem_free(struct mdesc_handle *hp)
123 } 123 }
124} 124}
125 125
126static struct mdesc_mem_ops bootmem_mdesc_memops = { 126static struct mdesc_mem_ops bootmem_mdesc_ops = {
127 .alloc = mdesc_bootmem_alloc, 127 .alloc = mdesc_bootmem_alloc,
128 .free = mdesc_bootmem_free, 128 .free = mdesc_bootmem_free,
129}; 129};
@@ -231,6 +231,25 @@ void mdesc_register_notifier(struct mdesc_notifier_client *client)
231 mutex_unlock(&mdesc_mutex); 231 mutex_unlock(&mdesc_mutex);
232} 232}
233 233
234static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
235{
236 const u64 *id;
237 u64 a;
238
239 id = NULL;
240 mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
241 u64 target;
242
243 target = mdesc_arc_target(hp, a);
244 id = mdesc_get_property(hp, target,
245 "cfg-handle", NULL);
246 if (id)
247 break;
248 }
249
250 return id;
251}
252
234/* Run 'func' on nodes which are in A but not in B. */ 253/* Run 'func' on nodes which are in A but not in B. */
235static void invoke_on_missing(const char *name, 254static void invoke_on_missing(const char *name,
236 struct mdesc_handle *a, 255 struct mdesc_handle *a,
@@ -240,13 +259,42 @@ static void invoke_on_missing(const char *name,
240 u64 node; 259 u64 node;
241 260
242 mdesc_for_each_node_by_name(a, node, name) { 261 mdesc_for_each_node_by_name(a, node, name) {
243 const u64 *id = mdesc_get_property(a, node, "id", NULL); 262 int found = 0, is_vdc_port = 0;
244 int found = 0; 263 const char *name_prop;
264 const u64 *id;
245 u64 fnode; 265 u64 fnode;
246 266
267 name_prop = mdesc_get_property(a, node, "name", NULL);
268 if (name_prop && !strcmp(name_prop, "vdc-port")) {
269 is_vdc_port = 1;
270 id = parent_cfg_handle(a, node);
271 } else
272 id = mdesc_get_property(a, node, "id", NULL);
273
274 if (!id) {
275 printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
276 (name_prop ? name_prop : name));
277 continue;
278 }
279
247 mdesc_for_each_node_by_name(b, fnode, name) { 280 mdesc_for_each_node_by_name(b, fnode, name) {
248 const u64 *fid = mdesc_get_property(b, fnode, 281 const u64 *fid;
249 "id", NULL); 282
283 if (is_vdc_port) {
284 name_prop = mdesc_get_property(b, fnode,
285 "name", NULL);
286 if (!name_prop ||
287 strcmp(name_prop, "vdc-port"))
288 continue;
289 fid = parent_cfg_handle(b, fnode);
290 if (!fid) {
291 printk(KERN_ERR "MD: Cannot find ID "
292 "for vdc-port node.\n");
293 continue;
294 }
295 } else
296 fid = mdesc_get_property(b, fnode,
297 "id", NULL);
250 298
251 if (*id == *fid) { 299 if (*id == *fid) {
252 found = 1; 300 found = 1;
@@ -812,7 +860,7 @@ void __init sun4v_mdesc_init(void)
812 860
813 printk("MDESC: Size is %lu bytes.\n", len); 861 printk("MDESC: Size is %lu bytes.\n", len);
814 862
815 hp = mdesc_alloc(len, &bootmem_mdesc_memops); 863 hp = mdesc_alloc(len, &bootmem_mdesc_ops);
816 if (hp == NULL) { 864 if (hp == NULL) {
817 prom_printf("MDESC: alloc of %lu bytes failed.\n", len); 865 prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
818 prom_halt(); 866 prom_halt();
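
The mdesc.c hunk teaches invoke_on_missing() that "vdc-port" nodes are matched by the "cfg-handle" property of a node reached over a back arc (parent_cfg_handle()), while every other node type is still matched by its own "id" property. A compressed, self-contained sketch of that matching decision; the toy_node model below is purely illustrative of the real mdesc arc walk:

#include <stdint.h>
#include <string.h>

/* Toy node model; the real code walks MDESC_ARC_TYPE_BACK arcs with
 * mdesc_for_each_arc() to reach the parent that owns "cfg-handle". */
struct toy_node {
	const char     *name;
	const uint64_t *id;          /* "id" property, if present */
	const uint64_t *parent_cfg;  /* parent's "cfg-handle", if present */
};

/* Key used when diffing two machine descriptions: vdc-port nodes are
 * identified by the parent's cfg-handle, everything else by "id". */
static const uint64_t *match_key(const struct toy_node *n)
{
	if (n->name && strcmp(n->name, "vdc-port") == 0)
		return n->parent_cfg;
	return n->id;
}

/* Two nodes describe the same device iff their keys exist and agree. */
static int same_device(const struct toy_node *a, const struct toy_node *b)
{
	const uint64_t *ka = match_key(a), *kb = match_key(b);

	return ka && kb && *ka == *kb;
}
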
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 6676b93219dc..4cc77485f536 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -1,132 +1,13 @@
1#include <linux/string.h> 1#include <linux/string.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/of.h>
3#include <linux/init.h> 4#include <linux/init.h>
4#include <linux/module.h> 5#include <linux/module.h>
5#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
6#include <linux/slab.h> 7#include <linux/slab.h>
7 8#include <linux/errno.h>
8#include <asm/errno.h> 9#include <linux/of_device.h>
9#include <asm/of_device.h> 10#include <linux/of_platform.h>
10
11/**
12 * of_match_device - Tell if an of_device structure has a matching
13 * of_match structure
14 * @ids: array of of device match structures to search in
15 * @dev: the of device structure to match against
16 *
17 * Used by a driver to check whether an of_device present in the
18 * system is in its list of supported devices.
19 */
20const struct of_device_id *of_match_device(const struct of_device_id *matches,
21 const struct of_device *dev)
22{
23 if (!dev->node)
24 return NULL;
25 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
26 int match = 1;
27 if (matches->name[0])
28 match &= dev->node->name
29 && !strcmp(matches->name, dev->node->name);
30 if (matches->type[0])
31 match &= dev->node->type
32 && !strcmp(matches->type, dev->node->type);
33 if (matches->compatible[0])
34 match &= of_device_is_compatible(dev->node,
35 matches->compatible);
36 if (match)
37 return matches;
38 matches++;
39 }
40 return NULL;
41}
42
43static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
44{
45 struct of_device * of_dev = to_of_device(dev);
46 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
47 const struct of_device_id * matches = of_drv->match_table;
48
49 if (!matches)
50 return 0;
51
52 return of_match_device(matches, of_dev) != NULL;
53}
54
55struct of_device *of_dev_get(struct of_device *dev)
56{
57 struct device *tmp;
58
59 if (!dev)
60 return NULL;
61 tmp = get_device(&dev->dev);
62 if (tmp)
63 return to_of_device(tmp);
64 else
65 return NULL;
66}
67
68void of_dev_put(struct of_device *dev)
69{
70 if (dev)
71 put_device(&dev->dev);
72}
73
74
75static int of_device_probe(struct device *dev)
76{
77 int error = -ENODEV;
78 struct of_platform_driver *drv;
79 struct of_device *of_dev;
80 const struct of_device_id *match;
81
82 drv = to_of_platform_driver(dev->driver);
83 of_dev = to_of_device(dev);
84
85 if (!drv->probe)
86 return error;
87
88 of_dev_get(of_dev);
89
90 match = of_match_device(drv->match_table, of_dev);
91 if (match)
92 error = drv->probe(of_dev, match);
93 if (error)
94 of_dev_put(of_dev);
95
96 return error;
97}
98
99static int of_device_remove(struct device *dev)
100{
101 struct of_device * of_dev = to_of_device(dev);
102 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
103
104 if (dev->driver && drv->remove)
105 drv->remove(of_dev);
106 return 0;
107}
108
109static int of_device_suspend(struct device *dev, pm_message_t state)
110{
111 struct of_device * of_dev = to_of_device(dev);
112 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
113 int error = 0;
114
115 if (dev->driver && drv->suspend)
116 error = drv->suspend(of_dev, state);
117 return error;
118}
119
120static int of_device_resume(struct device * dev)
121{
122 struct of_device * of_dev = to_of_device(dev);
123 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
124 int error = 0;
125
126 if (dev->driver && drv->resume)
127 error = drv->resume(of_dev);
128 return error;
129}
130 11
131void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) 12void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
132{ 13{
@@ -163,7 +44,7 @@ static int node_match(struct device *dev, void *data)
163 44
164struct of_device *of_find_device_by_node(struct device_node *dp) 45struct of_device *of_find_device_by_node(struct device_node *dp)
165{ 46{
166 struct device *dev = bus_find_device(&of_bus_type, NULL, 47 struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
167 dp, node_match); 48 dp, node_match);
168 49
169 if (dev) 50 if (dev)
@@ -174,48 +55,20 @@ struct of_device *of_find_device_by_node(struct device_node *dp)
174EXPORT_SYMBOL(of_find_device_by_node); 55EXPORT_SYMBOL(of_find_device_by_node);
175 56
176#ifdef CONFIG_PCI 57#ifdef CONFIG_PCI
177struct bus_type isa_bus_type = { 58struct bus_type isa_bus_type;
178 .name = "isa",
179 .match = of_platform_bus_match,
180 .probe = of_device_probe,
181 .remove = of_device_remove,
182 .suspend = of_device_suspend,
183 .resume = of_device_resume,
184};
185EXPORT_SYMBOL(isa_bus_type); 59EXPORT_SYMBOL(isa_bus_type);
186 60
187struct bus_type ebus_bus_type = { 61struct bus_type ebus_bus_type;
188 .name = "ebus",
189 .match = of_platform_bus_match,
190 .probe = of_device_probe,
191 .remove = of_device_remove,
192 .suspend = of_device_suspend,
193 .resume = of_device_resume,
194};
195EXPORT_SYMBOL(ebus_bus_type); 62EXPORT_SYMBOL(ebus_bus_type);
196#endif 63#endif
197 64
198#ifdef CONFIG_SBUS 65#ifdef CONFIG_SBUS
199struct bus_type sbus_bus_type = { 66struct bus_type sbus_bus_type;
200 .name = "sbus",
201 .match = of_platform_bus_match,
202 .probe = of_device_probe,
203 .remove = of_device_remove,
204 .suspend = of_device_suspend,
205 .resume = of_device_resume,
206};
207EXPORT_SYMBOL(sbus_bus_type); 67EXPORT_SYMBOL(sbus_bus_type);
208#endif 68#endif
209 69
210struct bus_type of_bus_type = { 70struct bus_type of_platform_bus_type;
211 .name = "of", 71EXPORT_SYMBOL(of_platform_bus_type);
212 .match = of_platform_bus_match,
213 .probe = of_device_probe,
214 .remove = of_device_remove,
215 .suspend = of_device_suspend,
216 .resume = of_device_resume,
217};
218EXPORT_SYMBOL(of_bus_type);
219 72
220static inline u64 of_read_addr(const u32 *cell, int size) 73static inline u64 of_read_addr(const u32 *cell, int size)
221{ 74{
@@ -899,11 +752,16 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
899{ 752{
900 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL); 753 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
901 const unsigned int *irq; 754 const unsigned int *irq;
755 struct dev_archdata *sd;
902 int len, i; 756 int len, i;
903 757
904 if (!op) 758 if (!op)
905 return NULL; 759 return NULL;
906 760
761 sd = &op->dev.archdata;
762 sd->prom_node = dp;
763 sd->op = op;
764
907 op->node = dp; 765 op->node = dp;
908 766
909 op->clock_freq = of_getintprop_default(dp, "clock-frequency", 767 op->clock_freq = of_getintprop_default(dp, "clock-frequency",
@@ -933,7 +791,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
933 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]); 791 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
934 792
935 op->dev.parent = parent; 793 op->dev.parent = parent;
936 op->dev.bus = &of_bus_type; 794 op->dev.bus = &of_platform_bus_type;
937 if (!parent) 795 if (!parent)
938 strcpy(op->dev.bus_id, "root"); 796 strcpy(op->dev.bus_id, "root");
939 else 797 else
@@ -977,16 +835,16 @@ static int __init of_bus_driver_init(void)
977{ 835{
978 int err; 836 int err;
979 837
980 err = bus_register(&of_bus_type); 838 err = of_bus_type_init(&of_platform_bus_type, "of");
981#ifdef CONFIG_PCI 839#ifdef CONFIG_PCI
982 if (!err) 840 if (!err)
983 err = bus_register(&isa_bus_type); 841 err = of_bus_type_init(&isa_bus_type, "isa");
984 if (!err) 842 if (!err)
985 err = bus_register(&ebus_bus_type); 843 err = of_bus_type_init(&ebus_bus_type, "ebus");
986#endif 844#endif
987#ifdef CONFIG_SBUS 845#ifdef CONFIG_SBUS
988 if (!err) 846 if (!err)
989 err = bus_register(&sbus_bus_type); 847 err = of_bus_type_init(&sbus_bus_type, "sbus");
990#endif 848#endif
991 849
992 if (!err) 850 if (!err)
@@ -1020,61 +878,13 @@ int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
1020 /* register with core */ 878 /* register with core */
1021 return driver_register(&drv->driver); 879 return driver_register(&drv->driver);
1022} 880}
881EXPORT_SYMBOL(of_register_driver);
1023 882
1024void of_unregister_driver(struct of_platform_driver *drv) 883void of_unregister_driver(struct of_platform_driver *drv)
1025{ 884{
1026 driver_unregister(&drv->driver); 885 driver_unregister(&drv->driver);
1027} 886}
1028 887EXPORT_SYMBOL(of_unregister_driver);
1029
1030static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1031{
1032 struct of_device *ofdev;
1033
1034 ofdev = to_of_device(dev);
1035 return sprintf(buf, "%s", ofdev->node->full_name);
1036}
1037
1038static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
1039
1040/**
1041 * of_release_dev - free an of device structure when all users of it are finished.
1042 * @dev: device that's been disconnected
1043 *
1044 * Will be called only by the device core when all users of this of device are
1045 * done.
1046 */
1047void of_release_dev(struct device *dev)
1048{
1049 struct of_device *ofdev;
1050
1051 ofdev = to_of_device(dev);
1052
1053 kfree(ofdev);
1054}
1055
1056int of_device_register(struct of_device *ofdev)
1057{
1058 int rc;
1059
1060 BUG_ON(ofdev->node == NULL);
1061
1062 rc = device_register(&ofdev->dev);
1063 if (rc)
1064 return rc;
1065
1066 rc = device_create_file(&ofdev->dev, &dev_attr_devspec);
1067 if (rc)
1068 device_unregister(&ofdev->dev);
1069
1070 return rc;
1071}
1072
1073void of_device_unregister(struct of_device *ofdev)
1074{
1075 device_remove_file(&ofdev->dev, &dev_attr_devspec);
1076 device_unregister(&ofdev->dev);
1077}
1078 888
1079struct of_device* of_platform_device_create(struct device_node *np, 889struct of_device* of_platform_device_create(struct device_node *np,
1080 const char *bus_id, 890 const char *bus_id,
@@ -1100,13 +910,4 @@ struct of_device* of_platform_device_create(struct device_node *np,
1100 910
1101 return dev; 911 return dev;
1102} 912}
1103
1104EXPORT_SYMBOL(of_match_device);
1105EXPORT_SYMBOL(of_register_driver);
1106EXPORT_SYMBOL(of_unregister_driver);
1107EXPORT_SYMBOL(of_device_register);
1108EXPORT_SYMBOL(of_device_unregister);
1109EXPORT_SYMBOL(of_dev_get);
1110EXPORT_SYMBOL(of_dev_put);
1111EXPORT_SYMBOL(of_platform_device_create); 913EXPORT_SYMBOL(of_platform_device_create);
1112EXPORT_SYMBOL(of_release_dev);
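
The of_device.c rewrite drops the sparc64-private bus match/probe/suspend plumbing in favour of the shared OF code: the bus objects become bare struct bus_type instances initialised through of_bus_type_init(), devices carry prom_node/op in dev.archdata, and drivers register against of_platform_bus_type. A minimal registration sketch as it looks after this change; the of_platform_driver field names (.name, .match_table, .probe) follow the layout implied by the removed match/probe code above and should be read as era-specific assumptions.

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static int __devinit demo_probe(struct of_device *op,
				const struct of_device_id *match)
{
	/* op->dev.archdata.prom_node is filled in by the bus scan now. */
	return 0;
}

static const struct of_device_id demo_match[] = {
	{ .name = "demo-device" },  /* illustrative node name */
	{},
};

static struct of_platform_driver demo_driver = {
	.name		= "demo",
	.match_table	= demo_match,
	.probe		= demo_probe,
};

static int __init demo_init(void)
{
	/* Register against the generic bus the patch switches everyone to. */
	return of_register_driver(&demo_driver, &of_platform_bus_type);
}
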
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 6b3fe2c1d65e..639cf06ca372 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1129,7 +1129,7 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1129} 1129}
1130#endif /* !(CONFIG_PCI_MSI) */ 1130#endif /* !(CONFIG_PCI_MSI) */
1131 1131
1132static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) 1132static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
1133{ 1133{
1134 struct pci_pbm_info *pbm; 1134 struct pci_pbm_info *pbm;
1135 1135
@@ -1163,7 +1163,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
1163 pci_sun4v_msi_init(pbm); 1163 pci_sun4v_msi_init(pbm);
1164} 1164}
1165 1165
1166void sun4v_pci_init(struct device_node *dp, char *model_name) 1166void __init sun4v_pci_init(struct device_node *dp, char *model_name)
1167{ 1167{
1168 static int hvapi_negotiated = 0; 1168 static int hvapi_negotiated = 0;
1169 struct pci_controller_info *p; 1169 struct pci_controller_info *p;
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 8dd4294ad21e..881a09ee4c4c 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -12,13 +12,13 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/pm.h> 13#include <linux/pm.h>
14#include <linux/syscalls.h> 14#include <linux/syscalls.h>
15#include <linux/reboot.h>
15 16
16#include <asm/system.h> 17#include <asm/system.h>
17#include <asm/auxio.h> 18#include <asm/auxio.h>
18#include <asm/prom.h> 19#include <asm/prom.h>
19#include <asm/of_device.h> 20#include <asm/of_device.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include <asm/power.h>
22#include <asm/sstate.h> 22#include <asm/sstate.h>
23 23
24#include <linux/unistd.h> 24#include <linux/unistd.h>
@@ -31,20 +31,9 @@ int scons_pwroff = 1;
31 31
32static void __iomem *power_reg; 32static void __iomem *power_reg;
33 33
34static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
35static int button_pressed;
36
37void wake_up_powerd(void)
38{
39 if (button_pressed == 0) {
40 button_pressed = 1;
41 wake_up(&powerd_wait);
42 }
43}
44
45static irqreturn_t power_handler(int irq, void *dev_id) 34static irqreturn_t power_handler(int irq, void *dev_id)
46{ 35{
47 wake_up_powerd(); 36 orderly_poweroff(true);
48 37
49 /* FIXME: Check registers for status... */ 38 /* FIXME: Check registers for status... */
50 return IRQ_HANDLED; 39 return IRQ_HANDLED;
@@ -57,7 +46,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
57void machine_power_off(void) 46void machine_power_off(void)
58{ 47{
59 sstate_poweroff(); 48 sstate_poweroff();
60 if (!serial_console || scons_pwroff) { 49 if (strcmp(of_console_device->type, "serial") || scons_pwroff) {
61 if (power_reg) { 50 if (power_reg) {
62 /* Both register bits seem to have the 51 /* Both register bits seem to have the
63 * same effect, so until I figure out 52 * same effect, so until I figure out
@@ -77,48 +66,6 @@ void machine_power_off(void)
77void (*pm_power_off)(void) = machine_power_off; 66void (*pm_power_off)(void) = machine_power_off;
78EXPORT_SYMBOL(pm_power_off); 67EXPORT_SYMBOL(pm_power_off);
79 68
80static int powerd(void *__unused)
81{
82 static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
83 char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
84 DECLARE_WAITQUEUE(wait, current);
85
86 daemonize("powerd");
87
88 add_wait_queue(&powerd_wait, &wait);
89
90 for (;;) {
91 set_task_state(current, TASK_INTERRUPTIBLE);
92 if (button_pressed)
93 break;
94 flush_signals(current);
95 schedule();
96 }
97 __set_current_state(TASK_RUNNING);
98 remove_wait_queue(&powerd_wait, &wait);
99
100 /* Ok, down we go... */
101 button_pressed = 0;
102 if (kernel_execve("/sbin/shutdown", argv, envp) < 0) {
103 printk(KERN_ERR "powerd: shutdown execution failed\n");
104 machine_power_off();
105 }
106 return 0;
107}
108
109int start_powerd(void)
110{
111 int err;
112
113 err = kernel_thread(powerd, NULL, CLONE_FS);
114 if (err < 0)
115 printk(KERN_ERR "power: Failed to start power daemon.\n");
116 else
117 printk(KERN_INFO "power: powerd running.\n");
118
119 return err;
120}
121
122static int __init has_button_interrupt(unsigned int irq, struct device_node *dp) 69static int __init has_button_interrupt(unsigned int irq, struct device_node *dp)
123{ 70{
124 if (irq == 0xffffffff) 71 if (irq == 0xffffffff)
@@ -136,20 +83,15 @@ static int __devinit power_probe(struct of_device *op, const struct of_device_id
136 83
137 power_reg = of_ioremap(res, 0, 0x4, "power"); 84 power_reg = of_ioremap(res, 0, 0x4, "power");
138 85
139 printk("%s: Control reg at %lx ... ", 86 printk(KERN_INFO "%s: Control reg at %lx\n",
140 op->node->name, res->start); 87 op->node->name, res->start);
141 88
142 poweroff_method = machine_halt; /* able to use the standard halt */ 89 poweroff_method = machine_halt; /* able to use the standard halt */
143 90
144 if (has_button_interrupt(irq, op->node)) { 91 if (has_button_interrupt(irq, op->node)) {
145 if (start_powerd() < 0)
146 return 0;
147
148 if (request_irq(irq, 92 if (request_irq(irq,
149 power_handler, 0, "power", NULL) < 0) 93 power_handler, 0, "power", NULL) < 0)
150 printk(KERN_ERR "power: Cannot setup IRQ handler.\n"); 94 printk(KERN_ERR "power: Cannot setup IRQ handler.\n");
151 } else {
152 printk(KERN_INFO "power: Not using powerd.\n");
153 } 95 }
154 96
155 return 0; 97 return 0;
@@ -170,6 +112,6 @@ static struct of_platform_driver power_driver = {
170 112
171void __init power_init(void) 113void __init power_init(void)
172{ 114{
173 of_register_driver(&power_driver, &of_bus_type); 115 of_register_driver(&power_driver, &of_platform_bus_type);
174 return; 116 return;
175} 117}
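
power.c stops spawning its own "powerd" kernel thread that exec'ed /sbin/shutdown and instead calls orderly_poweroff(true) straight from the power-button interrupt handler; the true argument asks the kernel to fall back to a forced power-off if the userspace helper fails. A minimal sketch of that handler shape (demo_power_isr is an illustrative name):

#include <linux/interrupt.h>
#include <linux/reboot.h>

/* Defer all shutdown policy to orderly_poweroff(), which runs the
 * userspace poweroff helper and forces a power-off on failure. */
static irqreturn_t demo_power_isr(int irq, void *dev_id)
{
	orderly_poweroff(true);
	return IRQ_HANDLED;
}
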
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 93557507ec9f..fd7899ba1d70 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -119,7 +119,7 @@ extern void (*prom_keyboard)(void);
119void machine_halt(void) 119void machine_halt(void)
120{ 120{
121 sstate_halt(); 121 sstate_halt();
122 if (!serial_console && prom_palette) 122 if (prom_palette)
123 prom_palette (1); 123 prom_palette (1);
124 if (prom_keyboard) 124 if (prom_keyboard)
125 prom_keyboard(); 125 prom_keyboard();
@@ -130,7 +130,7 @@ void machine_halt(void)
130void machine_alt_power_off(void) 130void machine_alt_power_off(void)
131{ 131{
132 sstate_poweroff(); 132 sstate_poweroff();
133 if (!serial_console && prom_palette) 133 if (prom_palette)
134 prom_palette(1); 134 prom_palette(1);
135 if (prom_keyboard) 135 if (prom_keyboard)
136 prom_keyboard(); 136 prom_keyboard();
@@ -145,7 +145,7 @@ void machine_restart(char * cmd)
145 sstate_reboot(); 145 sstate_reboot();
146 p = strchr (reboot_command, '\n'); 146 p = strchr (reboot_command, '\n');
147 if (p) *p = 0; 147 if (p) *p = 0;
148 if (!serial_console && prom_palette) 148 if (prom_palette)
149 prom_palette (1); 149 prom_palette (1);
150 if (prom_keyboard) 150 if (prom_keyboard)
151 prom_keyboard(); 151 prom_keyboard();
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 5d220302cd50..f4e0a9ad9be3 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -30,73 +30,9 @@
30#include <asm/upa.h> 30#include <asm/upa.h>
31#include <asm/smp.h> 31#include <asm/smp.h>
32 32
33static struct device_node *allnodes; 33extern struct device_node *allnodes; /* temporary while merging */
34 34
35/* use when traversing tree through the allnext, child, sibling, 35extern rwlock_t devtree_lock; /* temporary while merging */
36 * or parent members of struct device_node.
37 */
38static DEFINE_RWLOCK(devtree_lock);
39
40int of_device_is_compatible(const struct device_node *device,
41 const char *compat)
42{
43 const char* cp;
44 int cplen, l;
45
46 cp = of_get_property(device, "compatible", &cplen);
47 if (cp == NULL)
48 return 0;
49 while (cplen > 0) {
50 if (strncmp(cp, compat, strlen(compat)) == 0)
51 return 1;
52 l = strlen(cp) + 1;
53 cp += l;
54 cplen -= l;
55 }
56
57 return 0;
58}
59EXPORT_SYMBOL(of_device_is_compatible);
60
61struct device_node *of_get_parent(const struct device_node *node)
62{
63 struct device_node *np;
64
65 if (!node)
66 return NULL;
67
68 np = node->parent;
69
70 return np;
71}
72EXPORT_SYMBOL(of_get_parent);
73
74struct device_node *of_get_next_child(const struct device_node *node,
75 struct device_node *prev)
76{
77 struct device_node *next;
78
79 next = prev ? prev->sibling : node->child;
80 for (; next != 0; next = next->sibling) {
81 break;
82 }
83
84 return next;
85}
86EXPORT_SYMBOL(of_get_next_child);
87
88struct device_node *of_find_node_by_path(const char *path)
89{
90 struct device_node *np = allnodes;
91
92 for (; np != 0; np = np->allnext) {
93 if (np->full_name != 0 && strcmp(np->full_name, path) == 0)
94 break;
95 }
96
97 return np;
98}
99EXPORT_SYMBOL(of_find_node_by_path);
100 36
101struct device_node *of_find_node_by_phandle(phandle handle) 37struct device_node *of_find_node_by_phandle(phandle handle)
102{ 38{
@@ -110,81 +46,6 @@ struct device_node *of_find_node_by_phandle(phandle handle)
110} 46}
111EXPORT_SYMBOL(of_find_node_by_phandle); 47EXPORT_SYMBOL(of_find_node_by_phandle);
112 48
113struct device_node *of_find_node_by_name(struct device_node *from,
114 const char *name)
115{
116 struct device_node *np;
117
118 np = from ? from->allnext : allnodes;
119 for (; np != NULL; np = np->allnext)
120 if (np->name != NULL && strcmp(np->name, name) == 0)
121 break;
122
123 return np;
124}
125EXPORT_SYMBOL(of_find_node_by_name);
126
127struct device_node *of_find_node_by_type(struct device_node *from,
128 const char *type)
129{
130 struct device_node *np;
131
132 np = from ? from->allnext : allnodes;
133 for (; np != 0; np = np->allnext)
134 if (np->type != 0 && strcmp(np->type, type) == 0)
135 break;
136
137 return np;
138}
139EXPORT_SYMBOL(of_find_node_by_type);
140
141struct device_node *of_find_compatible_node(struct device_node *from,
142 const char *type, const char *compatible)
143{
144 struct device_node *np;
145
146 np = from ? from->allnext : allnodes;
147 for (; np != 0; np = np->allnext) {
148 if (type != NULL
149 && !(np->type != 0 && strcmp(np->type, type) == 0))
150 continue;
151 if (of_device_is_compatible(np, compatible))
152 break;
153 }
154
155 return np;
156}
157EXPORT_SYMBOL(of_find_compatible_node);
158
159struct property *of_find_property(const struct device_node *np,
160 const char *name,
161 int *lenp)
162{
163 struct property *pp;
164
165 for (pp = np->properties; pp != 0; pp = pp->next) {
166 if (strcasecmp(pp->name, name) == 0) {
167 if (lenp != 0)
168 *lenp = pp->length;
169 break;
170 }
171 }
172 return pp;
173}
174EXPORT_SYMBOL(of_find_property);
175
176/*
177 * Find a property with a given name for a given node
178 * and return the value.
179 */
180const void *of_get_property(const struct device_node *np, const char *name,
181 int *lenp)
182{
183 struct property *pp = of_find_property(np,name,lenp);
184 return pp ? pp->value : NULL;
185}
186EXPORT_SYMBOL(of_get_property);
187
188int of_getintprop_default(struct device_node *np, const char *name, int def) 49int of_getintprop_default(struct device_node *np, const char *name, int def)
189{ 50{
190 struct property *prop; 51 struct property *prop;
@@ -198,36 +59,6 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
198} 59}
199EXPORT_SYMBOL(of_getintprop_default); 60EXPORT_SYMBOL(of_getintprop_default);
200 61
201int of_n_addr_cells(struct device_node *np)
202{
203 const int* ip;
204 do {
205 if (np->parent)
206 np = np->parent;
207 ip = of_get_property(np, "#address-cells", NULL);
208 if (ip != NULL)
209 return *ip;
210 } while (np->parent);
211 /* No #address-cells property for the root node, default to 2 */
212 return 2;
213}
214EXPORT_SYMBOL(of_n_addr_cells);
215
216int of_n_size_cells(struct device_node *np)
217{
218 const int* ip;
219 do {
220 if (np->parent)
221 np = np->parent;
222 ip = of_get_property(np, "#size-cells", NULL);
223 if (ip != NULL)
224 return *ip;
225 } while (np->parent);
226 /* No #size-cells property for the root node, default to 1 */
227 return 1;
228}
229EXPORT_SYMBOL(of_n_size_cells);
230
231int of_set_property(struct device_node *dp, const char *name, void *val, int len) 62int of_set_property(struct device_node *dp, const char *name, void *val, int len)
232{ 63{
233 struct property **prevp; 64 struct property **prevp;
@@ -1815,6 +1646,60 @@ static void __init of_fill_in_cpu_data(void)
1815 smp_fill_in_sib_core_maps(); 1646 smp_fill_in_sib_core_maps();
1816} 1647}
1817 1648
1649struct device_node *of_console_device;
1650EXPORT_SYMBOL(of_console_device);
1651
1652char *of_console_path;
1653EXPORT_SYMBOL(of_console_path);
1654
1655char *of_console_options;
1656EXPORT_SYMBOL(of_console_options);
1657
1658static void __init of_console_init(void)
1659{
1660 char *msg = "OF stdout device is: %s\n";
1661 struct device_node *dp;
1662 const char *type;
1663 phandle node;
1664
1665 of_console_path = prom_early_alloc(256);
1666 if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
1667 prom_printf("Cannot obtain path of stdout.\n");
1668 prom_halt();
1669 }
1670 of_console_options = strrchr(of_console_path, ':');
1671 if (of_console_options) {
1672 of_console_options++;
1673 if (*of_console_options == '\0')
1674 of_console_options = NULL;
1675 }
1676
1677 node = prom_inst2pkg(prom_stdout);
1678 if (!node) {
1679 prom_printf("Cannot resolve stdout node from "
1680 "instance %08x.\n", prom_stdout);
1681 prom_halt();
1682 }
1683
1684 dp = of_find_node_by_phandle(node);
1685 type = of_get_property(dp, "device_type", NULL);
1686 if (!type) {
1687 prom_printf("Console stdout lacks device_type property.\n");
1688 prom_halt();
1689 }
1690
1691 if (strcmp(type, "display") && strcmp(type, "serial")) {
1692 prom_printf("Console device_type is neither display "
1693 "nor serial.\n");
1694 prom_halt();
1695 }
1696
1697 of_console_device = dp;
1698
1699 prom_printf(msg, of_console_path);
1700 printk(msg, of_console_path);
1701}
1702
1818void __init prom_build_devicetree(void) 1703void __init prom_build_devicetree(void)
1819{ 1704{
1820 struct device_node **nextp; 1705 struct device_node **nextp;
@@ -1827,6 +1712,8 @@ void __init prom_build_devicetree(void)
1827 allnodes->child = build_tree(allnodes, 1712 allnodes->child = build_tree(allnodes,
1828 prom_getchild(allnodes->node), 1713 prom_getchild(allnodes->node),
1829 &nextp); 1714 &nextp);
1715 of_console_init();
1716
1830 printk("PROM: Built device tree with %u bytes of memory.\n", 1717 printk("PROM: Built device tree with %u bytes of memory.\n",
1831 prom_early_allocated); 1718 prom_early_allocated);
1832 1719
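
prom.c now resolves the firmware stdout into of_console_device, of_console_path and of_console_options while the device tree is built; the options string is whatever follows the last ':' in the OBP path, with an empty string treated as "no options". A tiny self-contained illustration of that split (the example path is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Illustrative OBP console path: device component, ':', options. */
	char path[] = "/pci@1f,0/ebus@0/serial@0,3f8:a";
	char *opts = strrchr(path, ':');

	/* Same rule as of_console_init(): options start after the last
	 * ':' and an empty string counts as no options at all. */
	if (opts) {
		opts++;
		if (*opts == '\0')
			opts = NULL;
	}

	printf("options=%s\n", opts ? opts : "(none)");
	return 0;
}
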
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index aafde3dd9fd4..0f5be828ee92 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -133,33 +133,6 @@ static void __init process_switch(char c)
133 } 133 }
134} 134}
135 135
136static void __init process_console(char *commands)
137{
138 serial_console = 0;
139 commands += 8;
140 /* Linux-style serial */
141 if (!strncmp(commands, "ttyS", 4))
142 serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
143 else if (!strncmp(commands, "tty", 3)) {
144 char c = *(commands + 3);
145 /* Solaris-style serial */
146 if (c == 'a' || c == 'b') {
147 serial_console = c - 'a' + 1;
148 prom_printf ("Using /dev/tty%c as console.\n", c);
149 }
150 /* else Linux-style fbcon, not serial */
151 }
152#if defined(CONFIG_PROM_CONSOLE)
153 if (!strncmp(commands, "prom", 4)) {
154 char *p;
155
156 for (p = commands - 8; *p && *p != ' '; p++)
157 *p = ' ';
158 conswitchp = &prom_con;
159 }
160#endif
161}
162
163static void __init boot_flags_init(char *commands) 136static void __init boot_flags_init(char *commands)
164{ 137{
165 while (*commands) { 138 while (*commands) {
@@ -176,9 +149,7 @@ static void __init boot_flags_init(char *commands)
176 process_switch(*commands++); 149 process_switch(*commands++);
177 continue; 150 continue;
178 } 151 }
179 if (!strncmp(commands, "console=", 8)) { 152 if (!strncmp(commands, "mem=", 4)) {
180 process_console(commands);
181 } else if (!strncmp(commands, "mem=", 4)) {
182 /* 153 /*
183 * "mem=XXX[kKmM]" overrides the PROM-reported 154 * "mem=XXX[kKmM]" overrides the PROM-reported
184 * memory size. 155 * memory size.
@@ -378,44 +349,6 @@ void __init setup_arch(char **cmdline_p)
378 paging_init(); 349 paging_init();
379} 350}
380 351
381static int __init set_preferred_console(void)
382{
383 int idev, odev;
384
385 /* The user has requested a console so this is already set up. */
386 if (serial_console >= 0)
387 return -EBUSY;
388
389 idev = prom_query_input_device();
390 odev = prom_query_output_device();
391 if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
392 serial_console = 0;
393 } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
394 serial_console = 1;
395 } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
396 serial_console = 2;
397 } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
398 serial_console = 3;
399 } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) {
400 /* sunhv_console_init() doesn't check the serial_console
401 * value anyways...
402 */
403 serial_console = 4;
404 return add_preferred_console("ttyHV", 0, NULL);
405 } else {
406 prom_printf("Inconsistent console: "
407 "input %d, output %d\n",
408 idev, odev);
409 prom_halt();
410 }
411
412 if (serial_console)
413 return add_preferred_console("ttyS", serial_console - 1, NULL);
414
415 return -ENODEV;
416}
417console_initcall(set_preferred_console);
418
419/* BUFFER is PAGE_SIZE bytes long. */ 352/* BUFFER is PAGE_SIZE bytes long. */
420 353
421extern char *sparc_cpu_type; 354extern char *sparc_cpu_type;
@@ -508,5 +441,4 @@ void sun_do_break(void)
508 prom_cmdline(); 441 prom_cmdline();
509} 442}
510 443
511int serial_console = -1;
512int stop_a_enabled = 1; 444int stop_a_enabled = 1;
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 719d676c2ddc..d270c2f0be0f 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -280,6 +280,7 @@ EXPORT_SYMBOL(sys_getgid);
280EXPORT_SYMBOL(svr4_getcontext); 280EXPORT_SYMBOL(svr4_getcontext);
281EXPORT_SYMBOL(svr4_setcontext); 281EXPORT_SYMBOL(svr4_setcontext);
282EXPORT_SYMBOL(compat_sys_ioctl); 282EXPORT_SYMBOL(compat_sys_ioctl);
283EXPORT_SYMBOL(sys_ioctl);
283EXPORT_SYMBOL(sparc32_open); 284EXPORT_SYMBOL(sparc32_open);
284#endif 285#endif
285 286
@@ -330,7 +331,6 @@ EXPORT_SYMBOL(VISenter);
330 331
331/* for input/keybdev */ 332/* for input/keybdev */
332EXPORT_SYMBOL(sun_do_break); 333EXPORT_SYMBOL(sun_do_break);
333EXPORT_SYMBOL(serial_console);
334EXPORT_SYMBOL(stop_a_enabled); 334EXPORT_SYMBOL(stop_a_enabled);
335 335
336#ifdef CONFIG_DEBUG_BUGVERBOSE 336#ifdef CONFIG_DEBUG_BUGVERBOSE
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index abd83129b2e7..e8dce90d05d4 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,8 +1,7 @@
1/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $ 1/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
2 * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
3 * 2 *
4 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 3 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
6 * 5 *
7 * These routines maintain argument size conversion between 32bit and 64bit 6 * These routines maintain argument size conversion between 32bit and 64bit
8 * environment. 7 * environment.
@@ -1028,3 +1027,10 @@ long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_lo
1028 (nb_high << 32) | nb_low, 1027 (nb_high << 32) | nb_low,
1029 flags); 1028 flags);
1030} 1029}
1030
1031asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
1032 u32 lenhi, u32 lenlo)
1033{
1034 return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
1035 ((loff_t)lenhi << 32) | lenlo);
1036}
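
The sys_sparc32.c addition is the standard compat shim: a 64-bit loff_t arrives from 32-bit userspace as two 32-bit arguments and is reassembled as (hi << 32) | lo before calling the native syscall. A tiny standalone illustration of the reassembly (userspace C, illustrative names):

#include <assert.h>
#include <stdint.h>

/* Rebuild a 64-bit offset from the high/low 32-bit halves a compat
 * syscall receives in separate argument slots. */
static int64_t join_halves(uint32_t hi, uint32_t lo)
{
	return ((int64_t)hi << 32) | lo;
}

int main(void)
{
	/* 0x1_0000_2000 split across two registers and rebuilt. */
	assert(join_halves(0x1, 0x2000) == 0x100002000LL);
	return 0;
}
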
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 8765e32155a0..06d10907d8ce 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -1,8 +1,7 @@
1/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $ 1/* systbls.S: System call entry point tables for OS compatibility.
2 * systbls.S: System call entry point tables for OS compatibility.
3 * The native Linux system call table lives here also. 2 * The native Linux system call table lives here also.
4 * 3 *
5 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1995, 1996, 2007 David S. Miller (davem@davemloft.net)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 5 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * 6 *
8 * Based upon preliminary work which is: 7 * Based upon preliminary work which is:
@@ -81,7 +80,7 @@ sys_call_table32:
81 .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare 80 .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
82/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy 81/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
83 .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait 82 .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
84/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd 83/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd, compat_sys_fallocate
85 84
86#endif /* CONFIG_COMPAT */ 85#endif /* CONFIG_COMPAT */
87 86
@@ -153,7 +152,7 @@ sys_call_table:
153 .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare 152 .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
154/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy 153/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
155 .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 154 .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
156/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd 155/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate
157 156
158#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ 157#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
159 defined(CONFIG_SOLARIS_EMUL_MODULE) 158 defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -272,6 +271,6 @@ sunos_sys_table:
272 .word sunos_nosys, sunos_nosys, sunos_nosys 271 .word sunos_nosys, sunos_nosys, sunos_nosys
273 .word sunos_nosys 272 .word sunos_nosys
274/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys 273/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys
275 .word sunos_nosys 274 .word sunos_nosys, sunos_nosys
276 275
277#endif 276#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 62e316ab1339..49063ca2efcd 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -403,58 +403,9 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
403 403
404static unsigned long timer_ticks_per_nsec_quotient __read_mostly; 404static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
405 405
406#define TICK_SIZE (tick_nsec / 1000) 406int update_persistent_clock(struct timespec now)
407
408#define USEC_AFTER 500000
409#define USEC_BEFORE 500000
410
411static void sync_cmos_clock(unsigned long dummy);
412
413static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
414
415static void sync_cmos_clock(unsigned long dummy)
416{
417 struct timeval now, next;
418 int fail = 1;
419
420 /*
421 * If we have an externally synchronized Linux clock, then update
422 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
423 * called as close as possible to 500 ms before the new second starts.
424 * This code is run on a timer. If the clock is set, that timer
425 * may not expire at the correct time. Thus, we adjust...
426 */
427 if (!ntp_synced())
428 /*
429 * Not synced, exit, do not restart a timer (if one is
430 * running, let it run out).
431 */
432 return;
433
434 do_gettimeofday(&now);
435 if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
436 now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
437 fail = set_rtc_mmss(now.tv_sec);
438
439 next.tv_usec = USEC_AFTER - now.tv_usec;
440 if (next.tv_usec <= 0)
441 next.tv_usec += USEC_PER_SEC;
442
443 if (!fail)
444 next.tv_sec = 659;
445 else
446 next.tv_sec = 0;
447
448 if (next.tv_usec >= USEC_PER_SEC) {
449 next.tv_sec++;
450 next.tv_usec -= USEC_PER_SEC;
451 }
452 mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
453}
454
455void notify_arch_cmos_timer(void)
456{ 407{
457 mod_timer(&sync_cmos_timer, jiffies + 1); 408 return set_rtc_mmss(now.tv_sec);
458} 409}
459 410
460/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ 411/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
@@ -835,7 +786,7 @@ static int __init clock_init(void)
835 return 0; 786 return 0;
836 } 787 }
837 788
838 return of_register_driver(&clock_driver, &of_bus_type); 789 return of_register_driver(&clock_driver, &of_platform_bus_type);
839} 790}
840 791
841/* Must be after subsys_initcall() so that busses are probed. Must 792/* Must be after subsys_initcall() so that busses are probed. Must
@@ -931,6 +882,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
931{ 882{
932 switch (mode) { 883 switch (mode) {
933 case CLOCK_EVT_MODE_ONESHOT: 884 case CLOCK_EVT_MODE_ONESHOT:
885 case CLOCK_EVT_MODE_RESUME:
934 break; 886 break;
935 887
936 case CLOCK_EVT_MODE_SHUTDOWN: 888 case CLOCK_EVT_MODE_SHUTDOWN:
@@ -1434,6 +1386,78 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
1434 1386
1435 return 0; 1387 return 0;
1436} 1388}
1389
1390static void cmos_get_rtc_time(struct rtc_time *rtc_tm)
1391{
1392 unsigned char ctrl;
1393
1394 rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
1395 rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
1396 rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
1397 rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
1398 rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
1399 rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
1400 rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
1401
1402 ctrl = CMOS_READ(RTC_CONTROL);
1403 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1404 BCD_TO_BIN(rtc_tm->tm_sec);
1405 BCD_TO_BIN(rtc_tm->tm_min);
1406 BCD_TO_BIN(rtc_tm->tm_hour);
1407 BCD_TO_BIN(rtc_tm->tm_mday);
1408 BCD_TO_BIN(rtc_tm->tm_mon);
1409 BCD_TO_BIN(rtc_tm->tm_year);
1410 BCD_TO_BIN(rtc_tm->tm_wday);
1411 }
1412
1413 if (rtc_tm->tm_year <= 69)
1414 rtc_tm->tm_year += 100;
1415
1416 rtc_tm->tm_mon--;
1417}
1418
1419static int cmos_set_rtc_time(struct rtc_time *rtc_tm)
1420{
1421 unsigned char mon, day, hrs, min, sec;
1422 unsigned char save_control, save_freq_select;
1423 unsigned int yrs;
1424
1425 yrs = rtc_tm->tm_year;
1426 mon = rtc_tm->tm_mon + 1;
1427 day = rtc_tm->tm_mday;
1428 hrs = rtc_tm->tm_hour;
1429 min = rtc_tm->tm_min;
1430 sec = rtc_tm->tm_sec;
1431
1432 if (yrs >= 100)
1433 yrs -= 100;
1434
1435 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1436 BIN_TO_BCD(sec);
1437 BIN_TO_BCD(min);
1438 BIN_TO_BCD(hrs);
1439 BIN_TO_BCD(day);
1440 BIN_TO_BCD(mon);
1441 BIN_TO_BCD(yrs);
1442 }
1443
1444 save_control = CMOS_READ(RTC_CONTROL);
1445 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
1446 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1447 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
1448
1449 CMOS_WRITE(yrs, RTC_YEAR);
1450 CMOS_WRITE(mon, RTC_MONTH);
1451 CMOS_WRITE(day, RTC_DAY_OF_MONTH);
1452 CMOS_WRITE(hrs, RTC_HOURS);
1453 CMOS_WRITE(min, RTC_MINUTES);
1454 CMOS_WRITE(sec, RTC_SECONDS);
1455
1456 CMOS_WRITE(save_control, RTC_CONTROL);
1457 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1458
1459 return 0;
1460}
1437#endif /* CONFIG_PCI */ 1461#endif /* CONFIG_PCI */
1438 1462
1439struct mini_rtc_ops { 1463struct mini_rtc_ops {
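cmos_get_rtc_time()/cmos_set_rtc_time() above convert every time field between BCD and binary whenever the chip is not in binary mode (or RTC_ALWAYS_BCD is set). The standalone snippet below illustrates that conversion; the macro bodies are the conventional definitions and are assumed to match the kernel's.

#include <stdio.h>

#define BCD_TO_BIN(val) ((val) = ((val) & 0x0f) + ((val) >> 4) * 10)
#define BIN_TO_BCD(val) ((val) = (((val) / 10) << 4) + (val) % 10)

int main(void)
{
    unsigned char sec = 0x59;   /* BCD for 59 seconds, as read from the chip */
    unsigned char min = 42;     /* binary 42 minutes, about to be written    */

    BCD_TO_BIN(sec);
    BIN_TO_BCD(min);

    printf("sec=%u (binary), min=0x%02x (BCD)\n", sec, min);
    return 0;
}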
@@ -1456,6 +1480,11 @@ static struct mini_rtc_ops bq4802_rtc_ops = {
1456 .get_rtc_time = bq4802_get_rtc_time, 1480 .get_rtc_time = bq4802_get_rtc_time,
1457 .set_rtc_time = bq4802_set_rtc_time, 1481 .set_rtc_time = bq4802_set_rtc_time,
1458}; 1482};
1483
1484static struct mini_rtc_ops cmos_rtc_ops = {
1485 .get_rtc_time = cmos_get_rtc_time,
1486 .set_rtc_time = cmos_set_rtc_time,
1487};
1459#endif /* CONFIG_PCI */ 1488#endif /* CONFIG_PCI */
1460 1489
1461static struct mini_rtc_ops *mini_rtc_ops; 1490static struct mini_rtc_ops *mini_rtc_ops;
@@ -1583,6 +1612,8 @@ static int __init rtc_mini_init(void)
1583#ifdef CONFIG_PCI 1612#ifdef CONFIG_PCI
1584 else if (bq4802_regs) 1613 else if (bq4802_regs)
1585 mini_rtc_ops = &bq4802_rtc_ops; 1614 mini_rtc_ops = &bq4802_rtc_ops;
1615 else if (ds1287_regs)
1616 mini_rtc_ops = &cmos_rtc_ops;
1586#endif /* CONFIG_PCI */ 1617#endif /* CONFIG_PCI */
1587 else 1618 else
1588 return -ENODEV; 1619 return -ENODEV;
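rtc_mini_init() above now selects cmos_rtc_ops when ds1287 registers were discovered, falling back to -ENODEV when no supported chip is present; all later reads and writes then go through the two function pointers in mini_rtc_ops. Here is a hedged standalone sketch of that ops-table pattern; the probe flag and stub callbacks are hypothetical.

#include <stdio.h>
#include <stddef.h>

struct rtc_tm { int sec, min, hour; };

struct mini_rtc_ops {
    void (*get_time)(struct rtc_tm *t);
    int  (*set_time)(struct rtc_tm *t);
};

static void cmos_get(struct rtc_tm *t) { t->sec = 30; t->min = 15; t->hour = 7; }
static int  cmos_set(struct rtc_tm *t) { (void)t; return 0; }

static struct mini_rtc_ops cmos_ops = { .get_time = cmos_get, .set_time = cmos_set };

static struct mini_rtc_ops *rtc_ops;    /* selected once at probe time */

static int rtc_init(int have_cmos)
{
    if (have_cmos)
        rtc_ops = &cmos_ops;    /* ds1287/CMOS-style chip found */
    else
        return -1;              /* no supported RTC: bail out (stands for -ENODEV) */
    return 0;
}

int main(void)
{
    struct rtc_tm t;

    if (rtc_init(1) == 0) {
        rtc_ops->get_time(&t);
        printf("%02d:%02d:%02d\n", t.hour, t.min, t.sec);
    }
    return 0;
}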
diff --git a/arch/sparc64/kernel/vio.c b/arch/sparc64/kernel/vio.c
index 8d3cc4fdb557..3685daf5157f 100644
--- a/arch/sparc64/kernel/vio.c
+++ b/arch/sparc64/kernel/vio.c
@@ -103,9 +103,9 @@ static ssize_t devspec_show(struct device *dev,
103 struct vio_dev *vdev = to_vio_dev(dev); 103 struct vio_dev *vdev = to_vio_dev(dev);
104 const char *str = "none"; 104 const char *str = "none";
105 105
106 if (!strcmp(vdev->type, "network")) 106 if (!strcmp(vdev->type, "vnet-port"))
107 str = "vnet"; 107 str = "vnet";
108 else if (!strcmp(vdev->type, "block")) 108 else if (!strcmp(vdev->type, "vdc-port"))
109 str = "vdisk"; 109 str = "vdisk";
110 110
111 return sprintf(buf, "%s\n", str); 111 return sprintf(buf, "%s\n", str);
@@ -205,7 +205,8 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
205 struct device_node *dp; 205 struct device_node *dp;
206 struct vio_dev *vdev; 206 struct vio_dev *vdev;
207 int err, tlen, clen; 207 int err, tlen, clen;
208 const u64 *id; 208 const u64 *id, *cfg_handle;
209 u64 a;
209 210
210 type = mdesc_get_property(hp, mp, "device-type", &tlen); 211 type = mdesc_get_property(hp, mp, "device-type", &tlen);
211 if (!type) { 212 if (!type) {
@@ -221,6 +222,19 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
221 return NULL; 222 return NULL;
222 } 223 }
223 224
225 id = mdesc_get_property(hp, mp, "id", NULL);
226
227 cfg_handle = NULL;
228 mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
229 u64 target;
230
231 target = mdesc_arc_target(hp, a);
232 cfg_handle = mdesc_get_property(hp, target,
233 "cfg-handle", NULL);
234 if (cfg_handle)
235 break;
236 }
237
224 bus_id_name = type; 238 bus_id_name = type;
225 if (!strcmp(type, "domain-services-port")) 239 if (!strcmp(type, "domain-services-port"))
226 bus_id_name = "ds"; 240 bus_id_name = "ds";
@@ -260,13 +274,19 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
260 274
261 vio_fill_channel_info(hp, mp, vdev); 275 vio_fill_channel_info(hp, mp, vdev);
262 276
263 id = mdesc_get_property(hp, mp, "id", NULL); 277 if (!id) {
264 if (!id)
265 snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s", 278 snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s",
266 bus_id_name); 279 bus_id_name);
267 else 280 vdev->dev_no = ~(u64)0;
281 } else if (!cfg_handle) {
268 snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s-%lu", 282 snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s-%lu",
269 bus_id_name, *id); 283 bus_id_name, *id);
284 vdev->dev_no = *id;
285 } else {
286 snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s-%lu-%lu",
287 bus_id_name, *cfg_handle, *id);
288 vdev->dev_no = *cfg_handle;
289 }
270 290
271 vdev->dev.parent = parent; 291 vdev->dev.parent = parent;
272 vdev->dev.bus = &vio_bus_type; 292 vdev->dev.bus = &vio_bus_type;
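The vio_create_one() hunk above builds the device bus_id from up to three parts: the (possibly aliased) type name, the per-device "id" property, and the parent's "cfg-handle" found by walking the back arcs, which also seeds dev_no. A standalone sketch of that three-way naming decision follows; BUS_ID_SIZE and the sample values are illustrative.

#include <stdio.h>

#define BUS_ID_SIZE 20

static void make_bus_id(char *buf, const char *name,
                        const unsigned long *id,
                        const unsigned long *cfg_handle)
{
    if (!id)                            /* no "id": bare type name */
        snprintf(buf, BUS_ID_SIZE, "%s", name);
    else if (!cfg_handle)               /* id only: type-id */
        snprintf(buf, BUS_ID_SIZE, "%s-%lu", name, *id);
    else                                /* both: type-cfg-id, unique per port */
        snprintf(buf, BUS_ID_SIZE, "%s-%lu-%lu", name, *cfg_handle, *id);
}

int main(void)
{
    char buf[BUS_ID_SIZE];
    unsigned long id = 0, cfg = 5;

    make_bus_id(buf, "vdc-port", &id, &cfg);
    printf("%s\n", buf);                /* vdc-port-5-0 */
    return 0;
}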
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index 7c25c54cefdc..3fafa9a8b50b 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -73,88 +73,3 @@ prom_puts(const char *s, int len)
73 P1275_INOUT(3,1), 73 P1275_INOUT(3,1),
74 prom_stdout, s, P1275_SIZE(len)); 74 prom_stdout, s, P1275_SIZE(len));
75} 75}
76
77/* Query for input device type */
78enum prom_input_device
79prom_query_input_device(void)
80{
81 int st_p;
82 char propb[64];
83
84 st_p = prom_inst2pkg(prom_stdin);
85 if(prom_node_has_property(st_p, "keyboard"))
86 return PROMDEV_IKBD;
87 prom_getproperty(st_p, "device_type", propb, sizeof(propb));
88 if(strncmp(propb, "serial", 6))
89 return PROMDEV_I_UNK;
90 /* FIXME: Is there any better way how to find out? */
91 memset(propb, 0, sizeof(propb));
92 st_p = prom_finddevice ("/options");
93 prom_getproperty(st_p, "input-device", propb, sizeof(propb));
94
95 /*
96 * If we get here with propb == 'keyboard', we are on ttya, as
97 * the PROM defaulted to this due to 'no input device'.
98 */
99 if (!strncmp(propb, "keyboard", 8))
100 return PROMDEV_ITTYA;
101
102 if (!strncmp (propb, "rsc", 3))
103 return PROMDEV_IRSC;
104
105 if (!strncmp (propb, "virtual-console", 3))
106 return PROMDEV_IVCONS;
107
108 if (strncmp (propb, "tty", 3) || !propb[3])
109 return PROMDEV_I_UNK;
110
111 switch (propb[3]) {
112 case 'a': return PROMDEV_ITTYA;
113 case 'b': return PROMDEV_ITTYB;
114 default: return PROMDEV_I_UNK;
115 }
116}
117
118/* Query for output device type */
119
120enum prom_output_device
121prom_query_output_device(void)
122{
123 int st_p;
124 char propb[64];
125 int propl;
126
127 st_p = prom_inst2pkg(prom_stdout);
128 propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
129 if (propl >= 0 && propl == sizeof("display") &&
130 strncmp("display", propb, sizeof("display")) == 0)
131 return PROMDEV_OSCREEN;
132 if(strncmp("serial", propb, 6))
133 return PROMDEV_O_UNK;
134 /* FIXME: Is there any better way how to find out? */
135 memset(propb, 0, sizeof(propb));
136 st_p = prom_finddevice ("/options");
137 prom_getproperty(st_p, "output-device", propb, sizeof(propb));
138
139 /*
140 * If we get here with propb == 'screen', we are on ttya, as
141 * the PROM defaulted to this due to 'no input device'.
142 */
143 if (!strncmp(propb, "screen", 6))
144 return PROMDEV_OTTYA;
145
146 if (!strncmp (propb, "rsc", 3))
147 return PROMDEV_ORSC;
148
149 if (!strncmp (propb, "virtual-console", 3))
150 return PROMDEV_OVCONS;
151
152 if (strncmp (propb, "tty", 3) || !propb[3])
153 return PROMDEV_O_UNK;
154
155 switch (propb[3]) {
156 case 'a': return PROMDEV_OTTYA;
157 case 'b': return PROMDEV_OTTYB;
158 default: return PROMDEV_O_UNK;
159 }
160}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 33c5b7da31e5..68c83ad04ad9 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -72,7 +72,7 @@ void prom_cmdline(void)
72 72
73 local_irq_save(flags); 73 local_irq_save(flags);
74 74
75 if (!serial_console && prom_palette) 75 if (prom_palette)
76 prom_palette(1); 76 prom_palette(1);
77 77
78#ifdef CONFIG_SMP 78#ifdef CONFIG_SMP
@@ -85,7 +85,7 @@ void prom_cmdline(void)
85 smp_release(); 85 smp_release();
86#endif 86#endif
87 87
88 if (!serial_console && prom_palette) 88 if (prom_palette)
89 prom_palette(0); 89 prom_palette(0);
90 90
91 local_irq_restore(flags); 91 local_irq_restore(flags);
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index 17b7ecfe7ca9..b2c5b12c9818 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -304,3 +304,11 @@ prom_pathtoinode(const char *path)
304 if (node == -1) return 0; 304 if (node == -1) return 0;
305 return node; 305 return node;
306} 306}
307
308int prom_ihandle2path(int handle, char *buffer, int bufsize)
309{
310 return p1275_cmd("instance-to-path",
311 P1275_ARG(1,P1275_ARG_OUT_BUF)|
312 P1275_INOUT(3, 1),
313 handle, buffer, P1275_SIZE(bufsize));
314}
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 14bf8ce3ea23..45f82ae6d389 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -32,6 +32,10 @@ config GENERIC_TIME_VSYSCALL
32 bool 32 bool
33 default y 33 default y
34 34
35config GENERIC_CMOS_UPDATE
36 bool
37 default y
38
35config ZONE_DMA32 39config ZONE_DMA32
36 bool 40 bool
37 default y 41 default y
@@ -56,6 +60,14 @@ config ZONE_DMA
56 bool 60 bool
57 default y 61 default y
58 62
63config QUICKLIST
64 bool
65 default y
66
67config NR_QUICK
68 int
69 default 2
70
59config ISA 71config ISA
60 bool 72 bool
61 73
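Selecting GENERIC_CMOS_UPDATE above hands the periodic RTC writeback to the generic timekeeping code, so the architecture no longer runs its own sync timer and instead only supplies the persistent-clock write hook. The sketch below shows the shape of that hook under the assumption that the interface of this era is update_persistent_clock() calling an arch helper such as set_rtc_mmss(); the stub here only prints.

#include <stdio.h>

struct ts { long tv_sec; long tv_nsec; };

static int set_rtc_mmss(unsigned long nowtime)   /* stand-in for the arch RTC helper */
{
    printf("writing %lu to the CMOS RTC\n", nowtime);
    return 0;
}

int update_persistent_clock(struct ts now)
{
    /* Invoked shortly before a second boundary; only whole seconds matter. */
    return set_rtc_mmss((unsigned long)now.tv_sec);
}

int main(void)
{
    struct ts now = { 1184899200, 0 };
    return update_persistent_clock(now);
}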
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 29617ae3926d..128561d3e876 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -76,7 +76,8 @@ head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kern
76libs-y += arch/x86_64/lib/ 76libs-y += arch/x86_64/lib/
77core-y += arch/x86_64/kernel/ \ 77core-y += arch/x86_64/kernel/ \
78 arch/x86_64/mm/ \ 78 arch/x86_64/mm/ \
79 arch/x86_64/crypto/ 79 arch/x86_64/crypto/ \
80 arch/x86_64/vdso/
80core-$(CONFIG_IA32_EMULATION) += arch/x86_64/ia32/ 81core-$(CONFIG_IA32_EMULATION) += arch/x86_64/ia32/
81drivers-$(CONFIG_PCI) += arch/x86_64/pci/ 82drivers-$(CONFIG_PCI) += arch/x86_64/pci/
82drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/ 83drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
diff --git a/arch/x86_64/boot/compressed/Makefile b/arch/x86_64/boot/compressed/Makefile
index c9f2da7496c1..877c0bdbbc67 100644
--- a/arch/x86_64/boot/compressed/Makefile
+++ b/arch/x86_64/boot/compressed/Makefile
@@ -3,8 +3,6 @@
3# 3#
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6# Note all the files here are compiled/linked as 32bit executables.
7#
8 6
9targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
10 8
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 40178e5c3104..b7c4cd04bfc3 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,19 +1,22 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc2 3# Linux kernel version: 2.6.22-git14
4# Mon May 21 13:23:40 2007 4# Fri Jul 20 09:53:15 2007
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
8CONFIG_X86=y 8CONFIG_X86=y
9CONFIG_GENERIC_TIME=y 9CONFIG_GENERIC_TIME=y
10CONFIG_GENERIC_TIME_VSYSCALL=y 10CONFIG_GENERIC_TIME_VSYSCALL=y
11CONFIG_GENERIC_CMOS_UPDATE=y
11CONFIG_ZONE_DMA32=y 12CONFIG_ZONE_DMA32=y
12CONFIG_LOCKDEP_SUPPORT=y 13CONFIG_LOCKDEP_SUPPORT=y
13CONFIG_STACKTRACE_SUPPORT=y 14CONFIG_STACKTRACE_SUPPORT=y
14CONFIG_SEMAPHORE_SLEEPERS=y 15CONFIG_SEMAPHORE_SLEEPERS=y
15CONFIG_MMU=y 16CONFIG_MMU=y
16CONFIG_ZONE_DMA=y 17CONFIG_ZONE_DMA=y
18CONFIG_QUICKLIST=y
19CONFIG_NR_QUICK=2
17CONFIG_RWSEM_GENERIC_SPINLOCK=y 20CONFIG_RWSEM_GENERIC_SPINLOCK=y
18CONFIG_GENERIC_HWEIGHT=y 21CONFIG_GENERIC_HWEIGHT=y
19CONFIG_GENERIC_CALIBRATE_DELAY=y 22CONFIG_GENERIC_CALIBRATE_DELAY=y
@@ -44,19 +47,18 @@ CONFIG_LOCALVERSION=""
44CONFIG_LOCALVERSION_AUTO=y 47CONFIG_LOCALVERSION_AUTO=y
45CONFIG_SWAP=y 48CONFIG_SWAP=y
46CONFIG_SYSVIPC=y 49CONFIG_SYSVIPC=y
47# CONFIG_IPC_NS is not set
48CONFIG_SYSVIPC_SYSCTL=y 50CONFIG_SYSVIPC_SYSCTL=y
49CONFIG_POSIX_MQUEUE=y 51CONFIG_POSIX_MQUEUE=y
50# CONFIG_BSD_PROCESS_ACCT is not set 52# CONFIG_BSD_PROCESS_ACCT is not set
51# CONFIG_TASKSTATS is not set 53# CONFIG_TASKSTATS is not set
52# CONFIG_UTS_NS is not set 54# CONFIG_USER_NS is not set
53# CONFIG_AUDIT is not set 55# CONFIG_AUDIT is not set
54CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
55CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
56CONFIG_LOG_BUF_SHIFT=18 58CONFIG_LOG_BUF_SHIFT=18
57# CONFIG_CPUSETS is not set 59# CONFIG_CPUSETS is not set
58CONFIG_SYSFS_DEPRECATED=y 60CONFIG_SYSFS_DEPRECATED=y
59# CONFIG_RELAY is not set 61CONFIG_RELAY=y
60CONFIG_BLK_DEV_INITRD=y 62CONFIG_BLK_DEV_INITRD=y
61CONFIG_INITRAMFS_SOURCE="" 63CONFIG_INITRAMFS_SOURCE=""
62CONFIG_CC_OPTIMIZE_FOR_SIZE=y 64CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -86,10 +88,6 @@ CONFIG_SLAB=y
86CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
87# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
88CONFIG_BASE_SMALL=0 90CONFIG_BASE_SMALL=0
89
90#
91# Loadable module support
92#
93CONFIG_MODULES=y 91CONFIG_MODULES=y
94CONFIG_MODULE_UNLOAD=y 92CONFIG_MODULE_UNLOAD=y
95CONFIG_MODULE_FORCE_UNLOAD=y 93CONFIG_MODULE_FORCE_UNLOAD=y
@@ -97,12 +95,9 @@ CONFIG_MODULE_FORCE_UNLOAD=y
97# CONFIG_MODULE_SRCVERSION_ALL is not set 95# CONFIG_MODULE_SRCVERSION_ALL is not set
98# CONFIG_KMOD is not set 96# CONFIG_KMOD is not set
99CONFIG_STOP_MACHINE=y 97CONFIG_STOP_MACHINE=y
100
101#
102# Block layer
103#
104CONFIG_BLOCK=y 98CONFIG_BLOCK=y
105# CONFIG_BLK_DEV_IO_TRACE is not set 99# CONFIG_BLK_DEV_IO_TRACE is not set
100# CONFIG_BLK_DEV_BSG is not set
106 101
107# 102#
108# IO Schedulers 103# IO Schedulers
@@ -165,9 +160,12 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
165CONFIG_MIGRATION=y 160CONFIG_MIGRATION=y
166CONFIG_RESOURCES_64BIT=y 161CONFIG_RESOURCES_64BIT=y
167CONFIG_ZONE_DMA_FLAG=1 162CONFIG_ZONE_DMA_FLAG=1
163CONFIG_BOUNCE=y
164CONFIG_VIRT_TO_BUS=y
168CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y 165CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
169CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y 166CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y
170CONFIG_NR_CPUS=32 167CONFIG_NR_CPUS=32
168CONFIG_PHYSICAL_ALIGN=0x200000
171CONFIG_HOTPLUG_CPU=y 169CONFIG_HOTPLUG_CPU=y
172CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 170CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
173CONFIG_HPET_TIMER=y 171CONFIG_HPET_TIMER=y
@@ -180,7 +178,7 @@ CONFIG_X86_MCE_INTEL=y
180CONFIG_X86_MCE_AMD=y 178CONFIG_X86_MCE_AMD=y
181# CONFIG_KEXEC is not set 179# CONFIG_KEXEC is not set
182# CONFIG_CRASH_DUMP is not set 180# CONFIG_CRASH_DUMP is not set
183CONFIG_RELOCATABLE=y 181# CONFIG_RELOCATABLE is not set
184CONFIG_PHYSICAL_START=0x200000 182CONFIG_PHYSICAL_START=0x200000
185CONFIG_SECCOMP=y 183CONFIG_SECCOMP=y
186# CONFIG_CC_STACKPROTECTOR is not set 184# CONFIG_CC_STACKPROTECTOR is not set
@@ -201,7 +199,6 @@ CONFIG_GENERIC_PENDING_IRQ=y
201CONFIG_PM=y 199CONFIG_PM=y
202# CONFIG_PM_LEGACY is not set 200# CONFIG_PM_LEGACY is not set
203# CONFIG_PM_DEBUG is not set 201# CONFIG_PM_DEBUG is not set
204# CONFIG_PM_SYSFS_DEPRECATED is not set
205CONFIG_SOFTWARE_SUSPEND=y 202CONFIG_SOFTWARE_SUSPEND=y
206CONFIG_PM_STD_PARTITION="" 203CONFIG_PM_STD_PARTITION=""
207CONFIG_SUSPEND_SMP=y 204CONFIG_SUSPEND_SMP=y
@@ -248,7 +245,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
248# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 245# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
249CONFIG_CPU_FREQ_GOV_USERSPACE=y 246CONFIG_CPU_FREQ_GOV_USERSPACE=y
250CONFIG_CPU_FREQ_GOV_ONDEMAND=y 247CONFIG_CPU_FREQ_GOV_ONDEMAND=y
251# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set 248CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
252 249
253# 250#
254# CPUFreq processor drivers 251# CPUFreq processor drivers
@@ -351,20 +348,8 @@ CONFIG_IPV6_SIT=y
351# CONFIG_IPV6_MULTIPLE_TABLES is not set 348# CONFIG_IPV6_MULTIPLE_TABLES is not set
352# CONFIG_NETWORK_SECMARK is not set 349# CONFIG_NETWORK_SECMARK is not set
353# CONFIG_NETFILTER is not set 350# CONFIG_NETFILTER is not set
354
355#
356# DCCP Configuration (EXPERIMENTAL)
357#
358# CONFIG_IP_DCCP is not set 351# CONFIG_IP_DCCP is not set
359
360#
361# SCTP Configuration (EXPERIMENTAL)
362#
363# CONFIG_IP_SCTP is not set 352# CONFIG_IP_SCTP is not set
364
365#
366# TIPC Configuration (EXPERIMENTAL)
367#
368# CONFIG_TIPC is not set 353# CONFIG_TIPC is not set
369# CONFIG_ATM is not set 354# CONFIG_ATM is not set
370# CONFIG_BRIDGE is not set 355# CONFIG_BRIDGE is not set
@@ -401,6 +386,7 @@ CONFIG_IPV6_SIT=y
401# CONFIG_MAC80211 is not set 386# CONFIG_MAC80211 is not set
402# CONFIG_IEEE80211 is not set 387# CONFIG_IEEE80211 is not set
403# CONFIG_RFKILL is not set 388# CONFIG_RFKILL is not set
389# CONFIG_NET_9P is not set
404 390
405# 391#
406# Device Drivers 392# Device Drivers
@@ -415,21 +401,9 @@ CONFIG_FW_LOADER=y
415# CONFIG_DEBUG_DRIVER is not set 401# CONFIG_DEBUG_DRIVER is not set
416# CONFIG_DEBUG_DEVRES is not set 402# CONFIG_DEBUG_DEVRES is not set
417# CONFIG_SYS_HYPERVISOR is not set 403# CONFIG_SYS_HYPERVISOR is not set
418
419#
420# Connector - unified userspace <-> kernelspace linker
421#
422# CONFIG_CONNECTOR is not set 404# CONFIG_CONNECTOR is not set
423# CONFIG_MTD is not set 405# CONFIG_MTD is not set
424
425#
426# Parallel port support
427#
428# CONFIG_PARPORT is not set 406# CONFIG_PARPORT is not set
429
430#
431# Plug and Play support
432#
433CONFIG_PNP=y 407CONFIG_PNP=y
434# CONFIG_PNP_DEBUG is not set 408# CONFIG_PNP_DEBUG is not set
435 409
@@ -437,10 +411,7 @@ CONFIG_PNP=y
437# Protocols 411# Protocols
438# 412#
439CONFIG_PNPACPI=y 413CONFIG_PNPACPI=y
440 414CONFIG_BLK_DEV=y
441#
442# Block devices
443#
444CONFIG_BLK_DEV_FD=y 415CONFIG_BLK_DEV_FD=y
445# CONFIG_BLK_CPQ_DA is not set 416# CONFIG_BLK_CPQ_DA is not set
446# CONFIG_BLK_CPQ_CISS_DA is not set 417# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -458,17 +429,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
458CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 429CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
459# CONFIG_CDROM_PKTCDVD is not set 430# CONFIG_CDROM_PKTCDVD is not set
460# CONFIG_ATA_OVER_ETH is not set 431# CONFIG_ATA_OVER_ETH is not set
461 432CONFIG_MISC_DEVICES=y
462#
463# Misc devices
464#
465# CONFIG_IBM_ASM is not set 433# CONFIG_IBM_ASM is not set
466# CONFIG_PHANTOM is not set 434# CONFIG_PHANTOM is not set
435# CONFIG_EEPROM_93CX6 is not set
467# CONFIG_SGI_IOC4 is not set 436# CONFIG_SGI_IOC4 is not set
468# CONFIG_TIFM_CORE is not set 437# CONFIG_TIFM_CORE is not set
469# CONFIG_SONY_LAPTOP is not set 438# CONFIG_SONY_LAPTOP is not set
470# CONFIG_THINKPAD_ACPI is not set 439# CONFIG_THINKPAD_ACPI is not set
471# CONFIG_BLINK is not set
472CONFIG_IDE=y 440CONFIG_IDE=y
473CONFIG_BLK_DEV_IDE=y 441CONFIG_BLK_DEV_IDE=y
474 442
@@ -539,6 +507,7 @@ CONFIG_BLK_DEV_IDEDMA=y
539# 507#
540# CONFIG_RAID_ATTRS is not set 508# CONFIG_RAID_ATTRS is not set
541CONFIG_SCSI=y 509CONFIG_SCSI=y
510CONFIG_SCSI_DMA=y
542# CONFIG_SCSI_TGT is not set 511# CONFIG_SCSI_TGT is not set
543CONFIG_SCSI_NETLINK=y 512CONFIG_SCSI_NETLINK=y
544# CONFIG_SCSI_PROC_FS is not set 513# CONFIG_SCSI_PROC_FS is not set
@@ -590,11 +559,9 @@ CONFIG_AIC79XX_DEBUG_MASK=0
590# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set 559# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
591# CONFIG_SCSI_AIC94XX is not set 560# CONFIG_SCSI_AIC94XX is not set
592# CONFIG_SCSI_ARCMSR is not set 561# CONFIG_SCSI_ARCMSR is not set
593CONFIG_MEGARAID_NEWGEN=y 562# CONFIG_MEGARAID_NEWGEN is not set
594CONFIG_MEGARAID_MM=y
595CONFIG_MEGARAID_MAILBOX=y
596# CONFIG_MEGARAID_LEGACY is not set 563# CONFIG_MEGARAID_LEGACY is not set
597CONFIG_MEGARAID_SAS=y 564# CONFIG_MEGARAID_SAS is not set
598# CONFIG_SCSI_HPTIOP is not set 565# CONFIG_SCSI_HPTIOP is not set
599# CONFIG_SCSI_BUSLOGIC is not set 566# CONFIG_SCSI_BUSLOGIC is not set
600# CONFIG_SCSI_DMX3191D is not set 567# CONFIG_SCSI_DMX3191D is not set
@@ -614,7 +581,6 @@ CONFIG_MEGARAID_SAS=y
614# CONFIG_SCSI_DC395x is not set 581# CONFIG_SCSI_DC395x is not set
615# CONFIG_SCSI_DC390T is not set 582# CONFIG_SCSI_DC390T is not set
616# CONFIG_SCSI_DEBUG is not set 583# CONFIG_SCSI_DEBUG is not set
617# CONFIG_SCSI_ESP_CORE is not set
618# CONFIG_SCSI_SRP is not set 584# CONFIG_SCSI_SRP is not set
619CONFIG_ATA=y 585CONFIG_ATA=y
620# CONFIG_ATA_NONSTANDARD is not set 586# CONFIG_ATA_NONSTANDARD is not set
@@ -671,10 +637,6 @@ CONFIG_SATA_VIA=y
671# CONFIG_PATA_SIS is not set 637# CONFIG_PATA_SIS is not set
672# CONFIG_PATA_VIA is not set 638# CONFIG_PATA_VIA is not set
673# CONFIG_PATA_WINBOND is not set 639# CONFIG_PATA_WINBOND is not set
674
675#
676# Multi-device support (RAID and LVM)
677#
678CONFIG_MD=y 640CONFIG_MD=y
679# CONFIG_BLK_DEV_MD is not set 641# CONFIG_BLK_DEV_MD is not set
680CONFIG_BLK_DEV_DM=y 642CONFIG_BLK_DEV_DM=y
@@ -692,7 +654,7 @@ CONFIG_BLK_DEV_DM=y
692CONFIG_FUSION=y 654CONFIG_FUSION=y
693CONFIG_FUSION_SPI=y 655CONFIG_FUSION_SPI=y
694# CONFIG_FUSION_FC is not set 656# CONFIG_FUSION_FC is not set
695CONFIG_FUSION_SAS=y 657# CONFIG_FUSION_SAS is not set
696CONFIG_FUSION_MAX_SGE=128 658CONFIG_FUSION_MAX_SGE=128
697# CONFIG_FUSION_CTL is not set 659# CONFIG_FUSION_CTL is not set
698 660
@@ -710,7 +672,10 @@ CONFIG_IEEE1394=y
710# 672#
711# Controllers 673# Controllers
712# 674#
713# CONFIG_IEEE1394_PCILYNX is not set 675
676#
677# Texas Instruments PCILynx requires I2C
678#
714CONFIG_IEEE1394_OHCI1394=y 679CONFIG_IEEE1394_OHCI1394=y
715 680
716# 681#
@@ -722,32 +687,19 @@ CONFIG_IEEE1394_OHCI1394=y
722# CONFIG_IEEE1394_ETH1394 is not set 687# CONFIG_IEEE1394_ETH1394 is not set
723# CONFIG_IEEE1394_DV1394 is not set 688# CONFIG_IEEE1394_DV1394 is not set
724CONFIG_IEEE1394_RAWIO=y 689CONFIG_IEEE1394_RAWIO=y
725
726#
727# I2O device support
728#
729# CONFIG_I2O is not set 690# CONFIG_I2O is not set
730# CONFIG_MACINTOSH_DRIVERS is not set 691CONFIG_MACINTOSH_DRIVERS=y
731 692# CONFIG_MAC_EMUMOUSEBTN is not set
732#
733# Network device support
734#
735CONFIG_NETDEVICES=y 693CONFIG_NETDEVICES=y
694CONFIG_NETDEVICES_MULTIQUEUE=y
736# CONFIG_DUMMY is not set 695# CONFIG_DUMMY is not set
737# CONFIG_BONDING is not set 696# CONFIG_BONDING is not set
697# CONFIG_MACVLAN is not set
738# CONFIG_EQUALIZER is not set 698# CONFIG_EQUALIZER is not set
739CONFIG_TUN=y 699CONFIG_TUN=y
740# CONFIG_NET_SB1000 is not set 700# CONFIG_NET_SB1000 is not set
741
742#
743# ARCnet devices
744#
745# CONFIG_ARCNET is not set 701# CONFIG_ARCNET is not set
746# CONFIG_PHYLIB is not set 702# CONFIG_PHYLIB is not set
747
748#
749# Ethernet (10 or 100Mbit)
750#
751CONFIG_NET_ETHERNET=y 703CONFIG_NET_ETHERNET=y
752CONFIG_MII=y 704CONFIG_MII=y
753# CONFIG_HAPPYMEAL is not set 705# CONFIG_HAPPYMEAL is not set
@@ -756,10 +708,6 @@ CONFIG_MII=y
756CONFIG_NET_VENDOR_3COM=y 708CONFIG_NET_VENDOR_3COM=y
757CONFIG_VORTEX=y 709CONFIG_VORTEX=y
758# CONFIG_TYPHOON is not set 710# CONFIG_TYPHOON is not set
759
760#
761# Tulip family network device support
762#
763CONFIG_NET_TULIP=y 711CONFIG_NET_TULIP=y
764# CONFIG_DE2104X is not set 712# CONFIG_DE2104X is not set
765CONFIG_TULIP=y 713CONFIG_TULIP=y
@@ -773,7 +721,8 @@ CONFIG_TULIP=y
773# CONFIG_HP100 is not set 721# CONFIG_HP100 is not set
774CONFIG_NET_PCI=y 722CONFIG_NET_PCI=y
775# CONFIG_PCNET32 is not set 723# CONFIG_PCNET32 is not set
776# CONFIG_AMD8111_ETH is not set 724CONFIG_AMD8111_ETH=y
725# CONFIG_AMD8111E_NAPI is not set
777# CONFIG_ADAPTEC_STARFIRE is not set 726# CONFIG_ADAPTEC_STARFIRE is not set
778CONFIG_B44=y 727CONFIG_B44=y
779CONFIG_FORCEDETH=y 728CONFIG_FORCEDETH=y
@@ -808,7 +757,6 @@ CONFIG_E1000=y
808# CONFIG_SIS190 is not set 757# CONFIG_SIS190 is not set
809# CONFIG_SKGE is not set 758# CONFIG_SKGE is not set
810# CONFIG_SKY2 is not set 759# CONFIG_SKY2 is not set
811# CONFIG_SK98LIN is not set
812# CONFIG_VIA_VELOCITY is not set 760# CONFIG_VIA_VELOCITY is not set
813CONFIG_TIGON3=y 761CONFIG_TIGON3=y
814CONFIG_BNX2=y 762CONFIG_BNX2=y
@@ -823,10 +771,6 @@ CONFIG_S2IO=m
823# CONFIG_MYRI10GE is not set 771# CONFIG_MYRI10GE is not set
824# CONFIG_NETXEN_NIC is not set 772# CONFIG_NETXEN_NIC is not set
825# CONFIG_MLX4_CORE is not set 773# CONFIG_MLX4_CORE is not set
826
827#
828# Token Ring devices
829#
830# CONFIG_TR is not set 774# CONFIG_TR is not set
831 775
832# 776#
@@ -855,15 +799,7 @@ CONFIG_NETCONSOLE=y
855CONFIG_NETPOLL=y 799CONFIG_NETPOLL=y
856# CONFIG_NETPOLL_TRAP is not set 800# CONFIG_NETPOLL_TRAP is not set
857CONFIG_NET_POLL_CONTROLLER=y 801CONFIG_NET_POLL_CONTROLLER=y
858
859#
860# ISDN subsystem
861#
862# CONFIG_ISDN is not set 802# CONFIG_ISDN is not set
863
864#
865# Telephony Support
866#
867# CONFIG_PHONE is not set 803# CONFIG_PHONE is not set
868 804
869# 805#
@@ -871,6 +807,7 @@ CONFIG_NET_POLL_CONTROLLER=y
871# 807#
872CONFIG_INPUT=y 808CONFIG_INPUT=y
873# CONFIG_INPUT_FF_MEMLESS is not set 809# CONFIG_INPUT_FF_MEMLESS is not set
810# CONFIG_INPUT_POLLDEV is not set
874 811
875# 812#
876# Userland interfaces 813# Userland interfaces
@@ -936,6 +873,7 @@ CONFIG_HW_CONSOLE=y
936# 873#
937CONFIG_SERIAL_8250=y 874CONFIG_SERIAL_8250=y
938CONFIG_SERIAL_8250_CONSOLE=y 875CONFIG_SERIAL_8250_CONSOLE=y
876CONFIG_FIX_EARLYCON_MEM=y
939CONFIG_SERIAL_8250_PCI=y 877CONFIG_SERIAL_8250_PCI=y
940CONFIG_SERIAL_8250_PNP=y 878CONFIG_SERIAL_8250_PNP=y
941CONFIG_SERIAL_8250_NR_UARTS=4 879CONFIG_SERIAL_8250_NR_UARTS=4
@@ -951,16 +889,11 @@ CONFIG_SERIAL_CORE_CONSOLE=y
951CONFIG_UNIX98_PTYS=y 889CONFIG_UNIX98_PTYS=y
952CONFIG_LEGACY_PTYS=y 890CONFIG_LEGACY_PTYS=y
953CONFIG_LEGACY_PTY_COUNT=256 891CONFIG_LEGACY_PTY_COUNT=256
954
955#
956# IPMI
957#
958# CONFIG_IPMI_HANDLER is not set 892# CONFIG_IPMI_HANDLER is not set
959# CONFIG_WATCHDOG is not set 893# CONFIG_WATCHDOG is not set
960CONFIG_HW_RANDOM=y 894CONFIG_HW_RANDOM=y
961CONFIG_HW_RANDOM_INTEL=y 895CONFIG_HW_RANDOM_INTEL=y
962CONFIG_HW_RANDOM_AMD=y 896CONFIG_HW_RANDOM_AMD=y
963# CONFIG_HW_RANDOM_GEODE is not set
964# CONFIG_NVRAM is not set 897# CONFIG_NVRAM is not set
965CONFIG_RTC=y 898CONFIG_RTC=y
966# CONFIG_R3964 is not set 899# CONFIG_R3964 is not set
@@ -979,127 +912,19 @@ CONFIG_HPET=y
979# CONFIG_HPET_RTC_IRQ is not set 912# CONFIG_HPET_RTC_IRQ is not set
980CONFIG_HPET_MMAP=y 913CONFIG_HPET_MMAP=y
981# CONFIG_HANGCHECK_TIMER is not set 914# CONFIG_HANGCHECK_TIMER is not set
982
983#
984# TPM devices
985#
986# CONFIG_TCG_TPM is not set 915# CONFIG_TCG_TPM is not set
987# CONFIG_TELCLOCK is not set 916# CONFIG_TELCLOCK is not set
988CONFIG_DEVPORT=y 917CONFIG_DEVPORT=y
989CONFIG_I2C=m 918# CONFIG_I2C is not set
990CONFIG_I2C_BOARDINFO=y
991CONFIG_I2C_CHARDEV=m
992
993#
994# I2C Algorithms
995#
996# CONFIG_I2C_ALGOBIT is not set
997# CONFIG_I2C_ALGOPCF is not set
998# CONFIG_I2C_ALGOPCA is not set
999
1000#
1001# I2C Hardware Bus support
1002#
1003# CONFIG_I2C_ALI1535 is not set
1004# CONFIG_I2C_ALI1563 is not set
1005# CONFIG_I2C_ALI15X3 is not set
1006# CONFIG_I2C_AMD756 is not set
1007# CONFIG_I2C_AMD8111 is not set
1008# CONFIG_I2C_I801 is not set
1009# CONFIG_I2C_I810 is not set
1010# CONFIG_I2C_PIIX4 is not set
1011# CONFIG_I2C_NFORCE2 is not set
1012# CONFIG_I2C_OCORES is not set
1013# CONFIG_I2C_PARPORT_LIGHT is not set
1014# CONFIG_I2C_PROSAVAGE is not set
1015# CONFIG_I2C_SAVAGE4 is not set
1016# CONFIG_I2C_SIMTEC is not set
1017# CONFIG_I2C_SIS5595 is not set
1018# CONFIG_I2C_SIS630 is not set
1019# CONFIG_I2C_SIS96X is not set
1020# CONFIG_I2C_STUB is not set
1021# CONFIG_I2C_TINY_USB is not set
1022# CONFIG_I2C_VIA is not set
1023# CONFIG_I2C_VIAPRO is not set
1024# CONFIG_I2C_VOODOO3 is not set
1025
1026#
1027# Miscellaneous I2C Chip support
1028#
1029# CONFIG_SENSORS_DS1337 is not set
1030# CONFIG_SENSORS_DS1374 is not set
1031# CONFIG_SENSORS_EEPROM is not set
1032# CONFIG_SENSORS_PCF8574 is not set
1033# CONFIG_SENSORS_PCA9539 is not set
1034# CONFIG_SENSORS_PCF8591 is not set
1035# CONFIG_SENSORS_MAX6875 is not set
1036# CONFIG_I2C_DEBUG_CORE is not set
1037# CONFIG_I2C_DEBUG_ALGO is not set
1038# CONFIG_I2C_DEBUG_BUS is not set
1039# CONFIG_I2C_DEBUG_CHIP is not set
1040 919
1041# 920#
1042# SPI support 921# SPI support
1043# 922#
1044# CONFIG_SPI is not set 923# CONFIG_SPI is not set
1045# CONFIG_SPI_MASTER is not set 924# CONFIG_SPI_MASTER is not set
1046
1047#
1048# Dallas's 1-wire bus
1049#
1050# CONFIG_W1 is not set 925# CONFIG_W1 is not set
1051CONFIG_HWMON=y 926# CONFIG_POWER_SUPPLY is not set
1052# CONFIG_HWMON_VID is not set 927# CONFIG_HWMON is not set
1053# CONFIG_SENSORS_ABITUGURU is not set
1054# CONFIG_SENSORS_AD7418 is not set
1055# CONFIG_SENSORS_ADM1021 is not set
1056# CONFIG_SENSORS_ADM1025 is not set
1057# CONFIG_SENSORS_ADM1026 is not set
1058# CONFIG_SENSORS_ADM1029 is not set
1059# CONFIG_SENSORS_ADM1031 is not set
1060# CONFIG_SENSORS_ADM9240 is not set
1061# CONFIG_SENSORS_K8TEMP is not set
1062# CONFIG_SENSORS_ASB100 is not set
1063# CONFIG_SENSORS_ATXP1 is not set
1064# CONFIG_SENSORS_DS1621 is not set
1065# CONFIG_SENSORS_F71805F is not set
1066# CONFIG_SENSORS_FSCHER is not set
1067# CONFIG_SENSORS_FSCPOS is not set
1068# CONFIG_SENSORS_GL518SM is not set
1069# CONFIG_SENSORS_GL520SM is not set
1070CONFIG_SENSORS_CORETEMP=y
1071# CONFIG_SENSORS_IT87 is not set
1072# CONFIG_SENSORS_LM63 is not set
1073# CONFIG_SENSORS_LM75 is not set
1074# CONFIG_SENSORS_LM77 is not set
1075# CONFIG_SENSORS_LM78 is not set
1076# CONFIG_SENSORS_LM80 is not set
1077# CONFIG_SENSORS_LM83 is not set
1078# CONFIG_SENSORS_LM85 is not set
1079# CONFIG_SENSORS_LM87 is not set
1080# CONFIG_SENSORS_LM90 is not set
1081# CONFIG_SENSORS_LM92 is not set
1082# CONFIG_SENSORS_MAX1619 is not set
1083# CONFIG_SENSORS_MAX6650 is not set
1084# CONFIG_SENSORS_PC87360 is not set
1085# CONFIG_SENSORS_PC87427 is not set
1086# CONFIG_SENSORS_SIS5595 is not set
1087# CONFIG_SENSORS_SMSC47M1 is not set
1088# CONFIG_SENSORS_SMSC47M192 is not set
1089CONFIG_SENSORS_SMSC47B397=m
1090# CONFIG_SENSORS_VIA686A is not set
1091# CONFIG_SENSORS_VT1211 is not set
1092# CONFIG_SENSORS_VT8231 is not set
1093# CONFIG_SENSORS_W83781D is not set
1094# CONFIG_SENSORS_W83791D is not set
1095# CONFIG_SENSORS_W83792D is not set
1096# CONFIG_SENSORS_W83793 is not set
1097# CONFIG_SENSORS_W83L785TS is not set
1098# CONFIG_SENSORS_W83627HF is not set
1099# CONFIG_SENSORS_W83627EHF is not set
1100# CONFIG_SENSORS_HDAPS is not set
1101# CONFIG_SENSORS_APPLESMC is not set
1102# CONFIG_HWMON_DEBUG_CHIP is not set
1103 928
1104# 929#
1105# Multifunction device drivers 930# Multifunction device drivers
@@ -1149,15 +974,11 @@ CONFIG_SOUND=y
1149# Open Sound System 974# Open Sound System
1150# 975#
1151CONFIG_SOUND_PRIME=y 976CONFIG_SOUND_PRIME=y
1152# CONFIG_OSS_OBSOLETE is not set
1153# CONFIG_SOUND_TRIDENT is not set 977# CONFIG_SOUND_TRIDENT is not set
1154# CONFIG_SOUND_MSNDCLAS is not set 978# CONFIG_SOUND_MSNDCLAS is not set
1155# CONFIG_SOUND_MSNDPIN is not set 979# CONFIG_SOUND_MSNDPIN is not set
1156# CONFIG_SOUND_OSS is not set 980# CONFIG_SOUND_OSS is not set
1157 981CONFIG_HID_SUPPORT=y
1158#
1159# HID Devices
1160#
1161CONFIG_HID=y 982CONFIG_HID=y
1162# CONFIG_HID_DEBUG is not set 983# CONFIG_HID_DEBUG is not set
1163 984
@@ -1168,10 +989,7 @@ CONFIG_USB_HID=y
1168# CONFIG_USB_HIDINPUT_POWERBOOK is not set 989# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1169# CONFIG_HID_FF is not set 990# CONFIG_HID_FF is not set
1170# CONFIG_USB_HIDDEV is not set 991# CONFIG_USB_HIDDEV is not set
1171 992CONFIG_USB_SUPPORT=y
1172#
1173# USB support
1174#
1175CONFIG_USB_ARCH_HAS_HCD=y 993CONFIG_USB_ARCH_HAS_HCD=y
1176CONFIG_USB_ARCH_HAS_OHCI=y 994CONFIG_USB_ARCH_HAS_OHCI=y
1177CONFIG_USB_ARCH_HAS_EHCI=y 995CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1185,6 +1003,7 @@ CONFIG_USB_DEVICEFS=y
1185# CONFIG_USB_DEVICE_CLASS is not set 1003# CONFIG_USB_DEVICE_CLASS is not set
1186# CONFIG_USB_DYNAMIC_MINORS is not set 1004# CONFIG_USB_DYNAMIC_MINORS is not set
1187# CONFIG_USB_SUSPEND is not set 1005# CONFIG_USB_SUSPEND is not set
1006# CONFIG_USB_PERSIST is not set
1188# CONFIG_USB_OTG is not set 1007# CONFIG_USB_OTG is not set
1189 1008
1190# 1009#
@@ -1194,7 +1013,6 @@ CONFIG_USB_EHCI_HCD=y
1194# CONFIG_USB_EHCI_SPLIT_ISO is not set 1013# CONFIG_USB_EHCI_SPLIT_ISO is not set
1195# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1014# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1196# CONFIG_USB_EHCI_TT_NEWSCHED is not set 1015# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1197# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1198# CONFIG_USB_ISP116X_HCD is not set 1016# CONFIG_USB_ISP116X_HCD is not set
1199CONFIG_USB_OHCI_HCD=y 1017CONFIG_USB_OHCI_HCD=y
1200# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1018# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1202,6 +1020,7 @@ CONFIG_USB_OHCI_HCD=y
1202CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1020CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1203CONFIG_USB_UHCI_HCD=y 1021CONFIG_USB_UHCI_HCD=y
1204# CONFIG_USB_SL811_HCD is not set 1022# CONFIG_USB_SL811_HCD is not set
1023# CONFIG_USB_R8A66597_HCD is not set
1205 1024
1206# 1025#
1207# USB Device Class drivers 1026# USB Device Class drivers
@@ -1292,15 +1111,7 @@ CONFIG_USB_MON=y
1292# 1111#
1293# LED Triggers 1112# LED Triggers
1294# 1113#
1295
1296#
1297# InfiniBand support
1298#
1299# CONFIG_INFINIBAND is not set 1114# CONFIG_INFINIBAND is not set
1300
1301#
1302# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1303#
1304# CONFIG_EDAC is not set 1115# CONFIG_EDAC is not set
1305 1116
1306# 1117#
@@ -1320,11 +1131,13 @@ CONFIG_USB_MON=y
1320# 1131#
1321# DMA Devices 1132# DMA Devices
1322# 1133#
1134CONFIG_VIRTUALIZATION=y
1135# CONFIG_KVM is not set
1323 1136
1324# 1137#
1325# Virtualization 1138# Userspace I/O
1326# 1139#
1327# CONFIG_KVM is not set 1140# CONFIG_UIO is not set
1328 1141
1329# 1142#
1330# Firmware Drivers 1143# Firmware Drivers
@@ -1332,6 +1145,7 @@ CONFIG_USB_MON=y
1332# CONFIG_EDD is not set 1145# CONFIG_EDD is not set
1333# CONFIG_DELL_RBU is not set 1146# CONFIG_DELL_RBU is not set
1334# CONFIG_DCDBAS is not set 1147# CONFIG_DCDBAS is not set
1148CONFIG_DMIID=y
1335 1149
1336# 1150#
1337# File systems 1151# File systems
@@ -1447,7 +1261,6 @@ CONFIG_SUNRPC=y
1447# CONFIG_NCP_FS is not set 1261# CONFIG_NCP_FS is not set
1448# CONFIG_CODA_FS is not set 1262# CONFIG_CODA_FS is not set
1449# CONFIG_AFS_FS is not set 1263# CONFIG_AFS_FS is not set
1450# CONFIG_9P_FS is not set
1451 1264
1452# 1265#
1453# Partition Types 1266# Partition Types
@@ -1524,8 +1337,9 @@ CONFIG_DEBUG_FS=y
1524CONFIG_DEBUG_KERNEL=y 1337CONFIG_DEBUG_KERNEL=y
1525# CONFIG_DEBUG_SHIRQ is not set 1338# CONFIG_DEBUG_SHIRQ is not set
1526CONFIG_DETECT_SOFTLOCKUP=y 1339CONFIG_DETECT_SOFTLOCKUP=y
1340# CONFIG_SCHED_DEBUG is not set
1527# CONFIG_SCHEDSTATS is not set 1341# CONFIG_SCHEDSTATS is not set
1528# CONFIG_TIMER_STATS is not set 1342CONFIG_TIMER_STATS=y
1529# CONFIG_DEBUG_SLAB is not set 1343# CONFIG_DEBUG_SLAB is not set
1530# CONFIG_DEBUG_RT_MUTEXES is not set 1344# CONFIG_DEBUG_RT_MUTEXES is not set
1531# CONFIG_RT_MUTEX_TESTER is not set 1345# CONFIG_RT_MUTEX_TESTER is not set
@@ -1533,6 +1347,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
1533# CONFIG_DEBUG_MUTEXES is not set 1347# CONFIG_DEBUG_MUTEXES is not set
1534# CONFIG_DEBUG_LOCK_ALLOC is not set 1348# CONFIG_DEBUG_LOCK_ALLOC is not set
1535# CONFIG_PROVE_LOCKING is not set 1349# CONFIG_PROVE_LOCKING is not set
1350# CONFIG_LOCK_STAT is not set
1536# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1351# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1537# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1352# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1538# CONFIG_DEBUG_KOBJECT is not set 1353# CONFIG_DEBUG_KOBJECT is not set
@@ -1541,8 +1356,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
1541# CONFIG_DEBUG_VM is not set 1356# CONFIG_DEBUG_VM is not set
1542# CONFIG_DEBUG_LIST is not set 1357# CONFIG_DEBUG_LIST is not set
1543# CONFIG_FRAME_POINTER is not set 1358# CONFIG_FRAME_POINTER is not set
1544CONFIG_UNWIND_INFO=y
1545CONFIG_STACK_UNWIND=y
1546# CONFIG_FORCED_INLINING is not set 1359# CONFIG_FORCED_INLINING is not set
1547# CONFIG_RCU_TORTURE_TEST is not set 1360# CONFIG_RCU_TORTURE_TEST is not set
1548# CONFIG_LKDTM is not set 1361# CONFIG_LKDTM is not set
@@ -1557,10 +1370,6 @@ CONFIG_DEBUG_STACKOVERFLOW=y
1557# 1370#
1558# CONFIG_KEYS is not set 1371# CONFIG_KEYS is not set
1559# CONFIG_SECURITY is not set 1372# CONFIG_SECURITY is not set
1560
1561#
1562# Cryptographic options
1563#
1564# CONFIG_CRYPTO is not set 1373# CONFIG_CRYPTO is not set
1565 1374
1566# 1375#
@@ -1571,6 +1380,7 @@ CONFIG_BITREVERSE=y
1571# CONFIG_CRC16 is not set 1380# CONFIG_CRC16 is not set
1572# CONFIG_CRC_ITU_T is not set 1381# CONFIG_CRC_ITU_T is not set
1573CONFIG_CRC32=y 1382CONFIG_CRC32=y
1383# CONFIG_CRC7 is not set
1574# CONFIG_LIBCRC32C is not set 1384# CONFIG_LIBCRC32C is not set
1575CONFIG_ZLIB_INFLATE=y 1385CONFIG_ZLIB_INFLATE=y
1576CONFIG_PLIST=y 1386CONFIG_PLIST=y
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index ed56a8806eab..b70f3e7cf06c 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -38,6 +38,7 @@
38 38
39int sysctl_vsyscall32 = 1; 39int sysctl_vsyscall32 = 1;
40 40
41#undef ARCH_DLINFO
41#define ARCH_DLINFO do { \ 42#define ARCH_DLINFO do { \
42 if (sysctl_vsyscall32) { \ 43 if (sysctl_vsyscall32) { \
43 NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \ 44 NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
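The ia32_binfmt.c change above inserts an #undef before redefining ARCH_DLINFO, so the compat loader's auxv setup cleanly overrides the native definition instead of provoking a macro-redefinition warning. A tiny standalone illustration of that override pattern:

#include <stdio.h>

#define GREETING "native"

/* Later, a compat path wants its own definition of the same macro: */
#undef GREETING
#define GREETING "compat"

int main(void)
{
    printf("%s\n", GREETING);   /* prints "compat" */
    return 0;
}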
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 3f66e970d86f..938278697e20 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -104,7 +104,7 @@ ENTRY(ia32_sysenter_target)
104 pushq %rax 104 pushq %rax
105 CFI_ADJUST_CFA_OFFSET 8 105 CFI_ADJUST_CFA_OFFSET 8
106 cld 106 cld
107 SAVE_ARGS 0,0,0 107 SAVE_ARGS 0,0,1
108 /* no need to do an access_ok check here because rbp has been 108 /* no need to do an access_ok check here because rbp has been
109 32bit zero extended */ 109 32bit zero extended */
1101: movl (%rbp),%r9d 1101: movl (%rbp),%r9d
@@ -294,7 +294,7 @@ ia32_badarg:
294 */ 294 */
295 295
296ENTRY(ia32_syscall) 296ENTRY(ia32_syscall)
297 CFI_STARTPROC simple 297 CFI_STARTPROC32 simple
298 CFI_SIGNAL_FRAME 298 CFI_SIGNAL_FRAME
299 CFI_DEF_CFA rsp,SS+8-RIP 299 CFI_DEF_CFA rsp,SS+8-RIP
300 /*CFI_REL_OFFSET ss,SS-RIP*/ 300 /*CFI_REL_OFFSET ss,SS-RIP*/
@@ -330,6 +330,7 @@ ia32_sysret:
330 330
331ia32_tracesys: 331ia32_tracesys:
332 SAVE_REST 332 SAVE_REST
333 CLEAR_RREGS
333 movq $-ENOSYS,RAX(%rsp) /* really needed? */ 334 movq $-ENOSYS,RAX(%rsp) /* really needed? */
334 movq %rsp,%rdi /* &pt_regs -> arg1 */ 335 movq %rsp,%rdi /* &pt_regs -> arg1 */
335 call syscall_trace_enter 336 call syscall_trace_enter
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index a3d450d6c15b..8f681cae7bf7 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -20,7 +20,7 @@
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <asm/e820.h> 21#include <asm/e820.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/proto.h> 23#include <asm/iommu.h>
24#include <asm/pci-direct.h> 24#include <asm/pci-direct.h>
25#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/k8.h> 26#include <asm/k8.h>
@@ -214,7 +214,7 @@ void __init iommu_hole_init(void)
214 if (iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) 214 if (iommu_aperture_disabled || !fix_aperture || !early_pci_allowed())
215 return; 215 return;
216 216
217 printk("Checking aperture...\n"); 217 printk(KERN_INFO "Checking aperture...\n");
218 218
219 fix = 0; 219 fix = 0;
220 for (num = 24; num < 32; num++) { 220 for (num = 24; num < 32; num++) {
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 1b0e07bb8728..900ff38d68de 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -92,8 +92,9 @@ unsigned int safe_apic_wait_icr_idle(void)
92void enable_NMI_through_LVT0 (void * dummy) 92void enable_NMI_through_LVT0 (void * dummy)
93{ 93{
94 unsigned int v; 94 unsigned int v;
95 95
96 v = APIC_DM_NMI; /* unmask and set to NMI */ 96 /* unmask and set to NMI */
97 v = APIC_DM_NMI;
97 apic_write(APIC_LVT0, v); 98 apic_write(APIC_LVT0, v);
98} 99}
99 100
@@ -120,7 +121,7 @@ void ack_bad_irq(unsigned int irq)
120 * holds up an irq slot - in excessive cases (when multiple 121 * holds up an irq slot - in excessive cases (when multiple
121 * unexpected vectors occur) that might lock up the APIC 122 * unexpected vectors occur) that might lock up the APIC
122 * completely. 123 * completely.
123 * But don't ack when the APIC is disabled. -AK 124 * But don't ack when the APIC is disabled. -AK
124 */ 125 */
125 if (!disable_apic) 126 if (!disable_apic)
126 ack_APIC_irq(); 127 ack_APIC_irq();
@@ -616,7 +617,7 @@ early_param("apic", apic_set_verbosity);
616 * Detect and enable local APICs on non-SMP boards. 617 * Detect and enable local APICs on non-SMP boards.
617 * Original code written by Keir Fraser. 618 * Original code written by Keir Fraser.
618 * On AMD64 we trust the BIOS - if it says no APIC it is likely 619 * On AMD64 we trust the BIOS - if it says no APIC it is likely
619 * not correctly set up (usually the APIC timer won't work etc.) 620 * not correctly set up (usually the APIC timer won't work etc.)
620 */ 621 */
621 622
622static int __init detect_init_APIC (void) 623static int __init detect_init_APIC (void)
@@ -789,13 +790,13 @@ static void setup_APIC_timer(unsigned int clocks)
789 local_irq_save(flags); 790 local_irq_save(flags);
790 791
791 /* wait for irq slice */ 792 /* wait for irq slice */
792 if (hpet_address && hpet_use_timer) { 793 if (hpet_address && hpet_use_timer) {
793 int trigger = hpet_readl(HPET_T0_CMP); 794 int trigger = hpet_readl(HPET_T0_CMP);
794 while (hpet_readl(HPET_COUNTER) >= trigger) 795 while (hpet_readl(HPET_COUNTER) >= trigger)
795 /* do nothing */ ; 796 /* do nothing */ ;
796 while (hpet_readl(HPET_COUNTER) < trigger) 797 while (hpet_readl(HPET_COUNTER) < trigger)
797 /* do nothing */ ; 798 /* do nothing */ ;
798 } else { 799 } else {
799 int c1, c2; 800 int c1, c2;
800 outb_p(0x00, 0x43); 801 outb_p(0x00, 0x43);
801 c2 = inb_p(0x40); 802 c2 = inb_p(0x40);
@@ -881,10 +882,10 @@ static unsigned int calibration_result;
881 882
882void __init setup_boot_APIC_clock (void) 883void __init setup_boot_APIC_clock (void)
883{ 884{
884 if (disable_apic_timer) { 885 if (disable_apic_timer) {
885 printk(KERN_INFO "Disabling APIC timer\n"); 886 printk(KERN_INFO "Disabling APIC timer\n");
886 return; 887 return;
887 } 888 }
888 889
889 printk(KERN_INFO "Using local APIC timer interrupts.\n"); 890 printk(KERN_INFO "Using local APIC timer interrupts.\n");
890 using_apic_timer = 1; 891 using_apic_timer = 1;
@@ -990,8 +991,8 @@ int setup_profiling_timer(unsigned int multiplier)
990 return -EINVAL; 991 return -EINVAL;
991} 992}
992 993
993void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, 994void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
994 unsigned char msg_type, unsigned char mask) 995 unsigned char msg_type, unsigned char mask)
995{ 996{
996 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE; 997 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
997 unsigned int v = (mask << 16) | (msg_type << 8) | vector; 998 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
@@ -1128,20 +1129,6 @@ asmlinkage void smp_spurious_interrupt(void)
1128 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) 1129 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1129 ack_APIC_irq(); 1130 ack_APIC_irq();
1130 1131
1131#if 0
1132 static unsigned long last_warning;
1133 static unsigned long skipped;
1134
1135 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1136 if (time_before(last_warning+30*HZ,jiffies)) {
1137 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
1138 smp_processor_id(), skipped);
1139 last_warning = jiffies;
1140 skipped = 0;
1141 } else {
1142 skipped++;
1143 }
1144#endif
1145 irq_exit(); 1132 irq_exit();
1146} 1133}
1147 1134
@@ -1173,11 +1160,11 @@ asmlinkage void smp_error_interrupt(void)
1173 7: Illegal register address 1160 7: Illegal register address
1174 */ 1161 */
1175 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n", 1162 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1176 smp_processor_id(), v , v1); 1163 smp_processor_id(), v , v1);
1177 irq_exit(); 1164 irq_exit();
1178} 1165}
1179 1166
1180int disable_apic; 1167int disable_apic;
1181 1168
1182/* 1169/*
1183 * This initializes the IO-APIC and APIC hardware if this is 1170 * This initializes the IO-APIC and APIC hardware if this is
@@ -1185,11 +1172,11 @@ int disable_apic;
1185 */ 1172 */
1186int __init APIC_init_uniprocessor (void) 1173int __init APIC_init_uniprocessor (void)
1187{ 1174{
1188 if (disable_apic) { 1175 if (disable_apic) {
1189 printk(KERN_INFO "Apic disabled\n"); 1176 printk(KERN_INFO "Apic disabled\n");
1190 return -1; 1177 return -1;
1191 } 1178 }
1192 if (!cpu_has_apic) { 1179 if (!cpu_has_apic) {
1193 disable_apic = 1; 1180 disable_apic = 1;
1194 printk(KERN_INFO "Apic disabled by BIOS\n"); 1181 printk(KERN_INFO "Apic disabled by BIOS\n");
1195 return -1; 1182 return -1;
@@ -1211,8 +1198,8 @@ int __init APIC_init_uniprocessor (void)
1211 return 0; 1198 return 0;
1212} 1199}
1213 1200
1214static __init int setup_disableapic(char *str) 1201static __init int setup_disableapic(char *str)
1215{ 1202{
1216 disable_apic = 1; 1203 disable_apic = 1;
1217 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1204 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1218 return 0; 1205 return 0;
@@ -1220,10 +1207,10 @@ static __init int setup_disableapic(char *str)
1220early_param("disableapic", setup_disableapic); 1207early_param("disableapic", setup_disableapic);
1221 1208
1222/* same as disableapic, for compatibility */ 1209/* same as disableapic, for compatibility */
1223static __init int setup_nolapic(char *str) 1210static __init int setup_nolapic(char *str)
1224{ 1211{
1225 return setup_disableapic(str); 1212 return setup_disableapic(str);
1226} 1213}
1227early_param("nolapic", setup_nolapic); 1214early_param("nolapic", setup_nolapic);
1228 1215
1229static int __init parse_lapic_timer_c2_ok(char *arg) 1216static int __init parse_lapic_timer_c2_ok(char *arg)
@@ -1233,13 +1220,13 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1233} 1220}
1234early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 1221early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1235 1222
1236static __init int setup_noapictimer(char *str) 1223static __init int setup_noapictimer(char *str)
1237{ 1224{
1238 if (str[0] != ' ' && str[0] != 0) 1225 if (str[0] != ' ' && str[0] != 0)
1239 return 0; 1226 return 0;
1240 disable_apic_timer = 1; 1227 disable_apic_timer = 1;
1241 return 1; 1228 return 1;
1242} 1229}
1243 1230
1244static __init int setup_apicmaintimer(char *str) 1231static __init int setup_apicmaintimer(char *str)
1245{ 1232{
@@ -1264,5 +1251,5 @@ static __init int setup_apicpmtimer(char *s)
1264} 1251}
1265__setup("apicpmtimer", setup_apicpmtimer); 1252__setup("apicpmtimer", setup_apicpmtimer);
1266 1253
1267__setup("noapictimer", setup_noapictimer); 1254__setup("noapictimer", setup_noapictimer);
1268 1255
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 13c6c37610e0..0f4d5e209e9b 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -194,37 +194,6 @@ unsigned long __init e820_end_of_ram(void)
194} 194}
195 195
196/* 196/*
197 * Find the hole size in the range.
198 */
199unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
200{
201 unsigned long ram = 0;
202 int i;
203
204 for (i = 0; i < e820.nr_map; i++) {
205 struct e820entry *ei = &e820.map[i];
206 unsigned long last, addr;
207
208 if (ei->type != E820_RAM ||
209 ei->addr+ei->size <= start ||
210 ei->addr >= end)
211 continue;
212
213 addr = round_up(ei->addr, PAGE_SIZE);
214 if (addr < start)
215 addr = start;
216
217 last = round_down(ei->addr + ei->size, PAGE_SIZE);
218 if (last >= end)
219 last = end;
220
221 if (last > addr)
222 ram += last - addr;
223 }
224 return ((end - start) - ram);
225}
226
227/*
228 * Mark e820 reserved areas as busy for the resource manager. 197 * Mark e820 reserved areas as busy for the resource manager.
229 */ 198 */
230void __init e820_reserve_resources(void) 199void __init e820_reserve_resources(void)
@@ -289,47 +258,61 @@ void __init e820_mark_nosave_regions(void)
289 } 258 }
290} 259}
291 260
292/* Walk the e820 map and register active regions within a node */ 261/*
293void __init 262 * Finds an active region in the address range from start_pfn to end_pfn and
294e820_register_active_regions(int nid, unsigned long start_pfn, 263 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
295 unsigned long end_pfn) 264 */
265static int __init e820_find_active_region(const struct e820entry *ei,
266 unsigned long start_pfn,
267 unsigned long end_pfn,
268 unsigned long *ei_startpfn,
269 unsigned long *ei_endpfn)
296{ 270{
297 int i; 271 *ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
298 unsigned long ei_startpfn, ei_endpfn; 272 *ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE) >> PAGE_SHIFT;
299 for (i = 0; i < e820.nr_map; i++) {
300 struct e820entry *ei = &e820.map[i];
301 ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
302 ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
303 >> PAGE_SHIFT;
304 273
305 /* Skip map entries smaller than a page */ 274 /* Skip map entries smaller than a page */
306 if (ei_startpfn >= ei_endpfn) 275 if (*ei_startpfn >= *ei_endpfn)
307 continue; 276 return 0;
308 277
309 /* Check if end_pfn_map should be updated */ 278 /* Check if end_pfn_map should be updated */
310 if (ei->type != E820_RAM && ei_endpfn > end_pfn_map) 279 if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
311 end_pfn_map = ei_endpfn; 280 end_pfn_map = *ei_endpfn;
312 281
313 /* Skip if map is outside the node */ 282 /* Skip if map is outside the node */
314 if (ei->type != E820_RAM || 283 if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
315 ei_endpfn <= start_pfn || 284 *ei_startpfn >= end_pfn)
316 ei_startpfn >= end_pfn) 285 return 0;
317 continue;
318 286
319 /* Check for overlaps */ 287 /* Check for overlaps */
320 if (ei_startpfn < start_pfn) 288 if (*ei_startpfn < start_pfn)
321 ei_startpfn = start_pfn; 289 *ei_startpfn = start_pfn;
322 if (ei_endpfn > end_pfn) 290 if (*ei_endpfn > end_pfn)
323 ei_endpfn = end_pfn; 291 *ei_endpfn = end_pfn;
324 292
325 /* Obey end_user_pfn to save on memmap */ 293 /* Obey end_user_pfn to save on memmap */
326 if (ei_startpfn >= end_user_pfn) 294 if (*ei_startpfn >= end_user_pfn)
327 continue; 295 return 0;
328 if (ei_endpfn > end_user_pfn) 296 if (*ei_endpfn > end_user_pfn)
329 ei_endpfn = end_user_pfn; 297 *ei_endpfn = end_user_pfn;
330 298
331 add_active_range(nid, ei_startpfn, ei_endpfn); 299 return 1;
332 } 300}
301
302/* Walk the e820 map and register active regions within a node */
303void __init
304e820_register_active_regions(int nid, unsigned long start_pfn,
305 unsigned long end_pfn)
306{
307 unsigned long ei_startpfn;
308 unsigned long ei_endpfn;
309 int i;
310
311 for (i = 0; i < e820.nr_map; i++)
312 if (e820_find_active_region(&e820.map[i],
313 start_pfn, end_pfn,
314 &ei_startpfn, &ei_endpfn))
315 add_active_range(nid, ei_startpfn, ei_endpfn);
333} 316}
334 317
335/* 318/*
@@ -350,12 +333,35 @@ void __init add_memory_region(unsigned long start, unsigned long size, int type)
350 e820.nr_map++; 333 e820.nr_map++;
351} 334}
352 335
336/*
337 * Find the hole size (in bytes) in the memory range.
338 * @start: starting address of the memory range to scan
339 * @end: ending address of the memory range to scan
340 */
341unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
342{
343 unsigned long start_pfn = start >> PAGE_SHIFT;
344 unsigned long end_pfn = end >> PAGE_SHIFT;
345 unsigned long ei_startpfn;
346 unsigned long ei_endpfn;
347 unsigned long ram = 0;
348 int i;
349
350 for (i = 0; i < e820.nr_map; i++) {
351 if (e820_find_active_region(&e820.map[i],
352 start_pfn, end_pfn,
353 &ei_startpfn, &ei_endpfn))
354 ram += ei_endpfn - ei_startpfn;
355 }
356 return end - start - (ram << PAGE_SHIFT);
357}
358
353void __init e820_print_map(char *who) 359void __init e820_print_map(char *who)
354{ 360{
355 int i; 361 int i;
356 362
357 for (i = 0; i < e820.nr_map; i++) { 363 for (i = 0; i < e820.nr_map; i++) {
358 printk(" %s: %016Lx - %016Lx ", who, 364 printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
359 (unsigned long long) e820.map[i].addr, 365 (unsigned long long) e820.map[i].addr,
360 (unsigned long long) (e820.map[i].addr + e820.map[i].size)); 366 (unsigned long long) (e820.map[i].addr + e820.map[i].size));
361 switch (e820.map[i].type) { 367 switch (e820.map[i].type) {
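The e820.c rework above pulls the clamping of a RAM entry against a pfn range into e820_find_active_region() and rebuilds e820_hole_size() on top of it: sum the RAM pages the entries contribute inside [start, end) and subtract that from the span. The standalone sketch below reproduces that computation over a made-up two-entry map; it omits the per-entry page-alignment rounding the real helper performs.

#include <stdio.h>

#define PAGE_SHIFT 12
#define E820_RAM   1

struct entry { unsigned long addr, size; int type; };

static unsigned long hole_size(const struct entry *map, int n,
                               unsigned long start, unsigned long end)
{
    unsigned long start_pfn = start >> PAGE_SHIFT;
    unsigned long end_pfn = end >> PAGE_SHIFT;
    unsigned long ram = 0;
    int i;

    for (i = 0; i < n; i++) {
        unsigned long s = map[i].addr >> PAGE_SHIFT;
        unsigned long e = (map[i].addr + map[i].size) >> PAGE_SHIFT;

        if (map[i].type != E820_RAM || e <= start_pfn || s >= end_pfn)
            continue;
        if (s < start_pfn)              /* clamp to the queried range */
            s = start_pfn;
        if (e > end_pfn)
            e = end_pfn;
        ram += e - s;
    }
    return (end - start) - (ram << PAGE_SHIFT);
}

int main(void)
{
    struct entry map[] = {
        { 0x00000000, 0x0009f000, E820_RAM },
        { 0x00100000, 0x3ff00000, E820_RAM },
    };

    printf("hole below 1MB: %lu bytes\n", hole_size(map, 2, 0, 0x100000));
    return 0;
}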
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 990d9c218a5d..13aa4fd728f3 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -14,6 +14,7 @@
14#include <linux/pci_ids.h> 14#include <linux/pci_ids.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include <asm/proto.h> 16#include <asm/proto.h>
17#include <asm/iommu.h>
17#include <asm/dma.h> 18#include <asm/dma.h>
18 19
19static void __init via_bugs(void) 20static void __init via_bugs(void)
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index a67f87bf4015..830cfc6ee8cb 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -282,7 +282,7 @@ sysret_careful:
282sysret_signal: 282sysret_signal:
283 TRACE_IRQS_ON 283 TRACE_IRQS_ON
284 sti 284 sti
285 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 285 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
286 jz 1f 286 jz 1f
287 287
288 /* Really a signal */ 288 /* Really a signal */
@@ -375,7 +375,7 @@ int_very_careful:
375 jmp int_restore_rest 375 jmp int_restore_rest
376 376
377int_signal: 377int_signal:
378 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx 378 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
379 jz 1f 379 jz 1f
380 movq %rsp,%rdi # &ptregs -> arg1 380 movq %rsp,%rdi # &ptregs -> arg1
381 xorl %esi,%esi # oldset -> arg2 381 xorl %esi,%esi # oldset -> arg2
@@ -599,7 +599,7 @@ retint_careful:
599 jmp retint_check 599 jmp retint_check
600 600
601retint_signal: 601retint_signal:
602 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 602 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
603 jz retint_swapgs 603 jz retint_swapgs
604 TRACE_IRQS_ON 604 TRACE_IRQS_ON
605 sti 605 sti
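The entry.S hunks above add _TIF_MCE_NOTIFY to each work mask tested on the return-to-user paths, so a deferred machine-check notification forces the same slow path as a pending signal. A hedged C-level sketch of the equivalent test follows; the bit positions are illustrative, not the real thread_info flag values.

#include <stdio.h>

#define TIF_SIGPENDING     (1u << 2)
#define TIF_NOTIFY_RESUME  (1u << 1)
#define TIF_SINGLESTEP     (1u << 4)
#define TIF_MCE_NOTIFY     (1u << 10)

#define RETURN_WORK_MASK (TIF_SIGPENDING | TIF_NOTIFY_RESUME | \
                          TIF_SINGLESTEP | TIF_MCE_NOTIFY)

static void do_notify_resume(unsigned int flags)
{
    if (flags & TIF_MCE_NOTIFY)
        printf("deliver deferred machine-check notification\n");
    if (flags & TIF_SIGPENDING)
        printf("deliver pending signal\n");
}

int main(void)
{
    unsigned int thread_flags = TIF_MCE_NOTIFY;

    /* Equivalent of the testl $(...),%edx / jz fast-path check. */
    if (thread_flags & RETURN_WORK_MASK)
        do_notify_resume(thread_flags);
    return 0;
}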
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 941c84baecc8..e89abcdbdde8 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -25,7 +25,7 @@
25 */ 25 */
26 26
27 .text 27 .text
28 .section .bootstrap.text 28 .section .text.head
29 .code64 29 .code64
30 .globl startup_64 30 .globl startup_64
31startup_64: 31startup_64:
@@ -243,10 +243,16 @@ ENTRY(secondary_startup_64)
243 lretq 243 lretq
244 244
245 /* SMP bootup changes these two */ 245 /* SMP bootup changes these two */
246#ifndef CONFIG_HOTPLUG_CPU
247 .pushsection .init.data
248#endif
246 .align 8 249 .align 8
247 .globl initial_code 250 .globl initial_code
248initial_code: 251initial_code:
249 .quad x86_64_start_kernel 252 .quad x86_64_start_kernel
253#ifndef CONFIG_HOTPLUG_CPU
254 .popsection
255#endif
250 .globl init_rsp 256 .globl init_rsp
251init_rsp: 257init_rsp:
252 .quad init_thread_union+THREAD_SIZE-8 258 .quad init_thread_union+THREAD_SIZE-8
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index b8286968662d..e2d1b912e154 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -133,7 +133,7 @@ struct clocksource clocksource_hpet = {
133 .vread = vread_hpet, 133 .vread = vread_hpet,
134}; 134};
135 135
136int hpet_arch_init(void) 136int __init hpet_arch_init(void)
137{ 137{
138 unsigned int id; 138 unsigned int id;
139 u64 tmp; 139 u64 tmp;
@@ -190,7 +190,7 @@ int hpet_reenable(void)
190 */ 190 */
191 191
192#define TICK_COUNT 100000000 192#define TICK_COUNT 100000000
193#define TICK_MIN 5000 193#define SMI_THRESHOLD 50000
194#define MAX_TRIES 5 194#define MAX_TRIES 5
195 195
196/* 196/*
@@ -205,7 +205,7 @@ static void __init read_hpet_tsc(int *hpet, int *tsc)
205 tsc1 = get_cycles_sync(); 205 tsc1 = get_cycles_sync();
206 hpet1 = hpet_readl(HPET_COUNTER); 206 hpet1 = hpet_readl(HPET_COUNTER);
207 tsc2 = get_cycles_sync(); 207 tsc2 = get_cycles_sync();
208 if (tsc2 - tsc1 > TICK_MIN) 208 if ((tsc2 - tsc1) < SMI_THRESHOLD)
209 break; 209 break;
210 } 210 }
211 *hpet = hpet1; 211 *hpet = hpet1;
@@ -439,7 +439,7 @@ int hpet_rtc_dropped_irq(void)
439 return 1; 439 return 1;
440} 440}
441 441
442irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs) 442irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
443{ 443{
444 struct rtc_time curr_time; 444 struct rtc_time curr_time;
445 unsigned long rtc_int_flag = 0; 445 unsigned long rtc_int_flag = 0;
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 4b326655b208..948cae646099 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -444,24 +444,6 @@ void __init init_ISA_irqs (void)
444 } 444 }
445} 445}
446 446
447void apic_timer_interrupt(void);
448void spurious_interrupt(void);
449void error_interrupt(void);
450void reschedule_interrupt(void);
451void call_function_interrupt(void);
452void irq_move_cleanup_interrupt(void);
453void invalidate_interrupt0(void);
454void invalidate_interrupt1(void);
455void invalidate_interrupt2(void);
456void invalidate_interrupt3(void);
457void invalidate_interrupt4(void);
458void invalidate_interrupt5(void);
459void invalidate_interrupt6(void);
460void invalidate_interrupt7(void);
461void thermal_interrupt(void);
462void threshold_interrupt(void);
463void i8254_timer_resume(void);
464
465static void setup_timer_hardware(void) 447static void setup_timer_hardware(void)
466{ 448{
467 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 449 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 1c6c6f724573..050141c0602b 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -152,6 +152,32 @@ static inline void io_apic_modify(unsigned int apic, unsigned int value)
152 writel(value, &io_apic->data); 152 writel(value, &io_apic->data);
153} 153}
154 154
155static int io_apic_level_ack_pending(unsigned int irq)
156{
157 struct irq_pin_list *entry;
158 unsigned long flags;
159 int pending = 0;
160
161 spin_lock_irqsave(&ioapic_lock, flags);
162 entry = irq_2_pin + irq;
163 for (;;) {
164 unsigned int reg;
165 int pin;
166
167 pin = entry->pin;
168 if (pin == -1)
169 break;
170 reg = io_apic_read(entry->apic, 0x10 + pin*2);
171 /* Is the remote IRR bit set? */
172 pending |= (reg >> 14) & 1;
173 if (!entry->next)
174 break;
175 entry = irq_2_pin + entry->next;
176 }
177 spin_unlock_irqrestore(&ioapic_lock, flags);
178 return pending;
179}
180
155/* 181/*
156 * Synchronize the IO-APIC and the CPU by doing 182 * Synchronize the IO-APIC and the CPU by doing
157 * a dummy read from the IO-APIC 183 * a dummy read from the IO-APIC
@@ -1418,9 +1444,37 @@ static void ack_apic_level(unsigned int irq)
1418 ack_APIC_irq(); 1444 ack_APIC_irq();
1419 1445
1420 /* Now we can move and renable the irq */ 1446 /* Now we can move and renable the irq */
1421 move_masked_irq(irq); 1447 if (unlikely(do_unmask_irq)) {
1422 if (unlikely(do_unmask_irq)) 1448 /* Only migrate the irq if the ack has been received.
1449 *
1450 * On rare occasions the broadcast level triggered ack gets
1451 * delayed going to ioapics, and if we reprogram the
1452 * vector while Remote IRR is still set the irq will never
1453 * fire again.
1454 *
1455 * To prevent this scenario we read the Remote IRR bit
1456 * of the ioapic. This has two effects.
1457 * - On any sane system the read of the ioapic will
1458 * flush writes (and acks) going to the ioapic from
1459 * this cpu.
1460 * - We get to see if the ACK has actually been delivered.
1461 *
1462 * Based on failed experiments of reprogramming the
1463 * ioapic entry from outside of irq context starting
1464 * with masking the ioapic entry and then polling until
1465 * Remote IRR was clear before reprogramming the
1466 * ioapic I don't trust the Remote IRR bit to be
 1467 * completely accurate.
1468 *
1469 * However there appears to be no other way to plug
1470 * this race, so if the Remote IRR bit is not
1471 * accurate and is causing problems then it is a hardware bug
1472 * and you can go talk to the chipset vendor about it.
1473 */
1474 if (!io_apic_level_ack_pending(irq))
1475 move_masked_irq(irq);
1423 unmask_IO_APIC_irq(irq); 1476 unmask_IO_APIC_irq(irq);
1477 }
1424} 1478}
1425 1479
1426static struct irq_chip ioapic_chip __read_mostly = { 1480static struct irq_chip ioapic_chip __read_mostly = {
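The new io_apic_level_ack_pending() above walks the irq's pin list and ORs together bit 14 (Remote IRR) of each redirection-table low dword; ack_apic_level() then skips move_masked_irq() while any pin still has the level-triggered EOI outstanding, which is exactly the race the long comment describes. A minimal sketch of just the bit test (the register value and helper name are illustrative; actually reading the redirection entry is omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Remote IRR is bit 14 of the low dword of an I/O APIC redirection
     * entry (I/O APIC register 0x10 + 2*pin). */
    static bool redir_entry_ack_pending(uint32_t redir_low)
    {
        return (redir_low >> 14) & 1;
    }

    int main(void)
    {
        uint32_t entry = 1u << 14;   /* made-up value with Remote IRR set */
        printf("ack pending: %d\n", redir_entry_ack_pending(entry));
        return 0;
    }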
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index d4a0d0ac9935..a30e004682e2 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -39,9 +39,9 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/kdebug.h> 40#include <linux/kdebug.h>
41 41
42#include <asm/cacheflush.h>
43#include <asm/pgtable.h> 42#include <asm/pgtable.h>
44#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/alternative.h>
45 45
46void jprobe_return_end(void); 46void jprobe_return_end(void);
47static void __kprobes arch_copy_kprobe(struct kprobe *p); 47static void __kprobes arch_copy_kprobe(struct kprobe *p);
@@ -209,16 +209,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
209 209
210void __kprobes arch_arm_kprobe(struct kprobe *p) 210void __kprobes arch_arm_kprobe(struct kprobe *p)
211{ 211{
212 *p->addr = BREAKPOINT_INSTRUCTION; 212 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
213 flush_icache_range((unsigned long) p->addr,
214 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
215} 213}
216 214
217void __kprobes arch_disarm_kprobe(struct kprobe *p) 215void __kprobes arch_disarm_kprobe(struct kprobe *p)
218{ 216{
219 *p->addr = p->opcode; 217 text_poke(p->addr, &p->opcode, 1);
220 flush_icache_range((unsigned long) p->addr,
221 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
222} 218}
223 219
224void __kprobes arch_remove_kprobe(struct kprobe *p) 220void __kprobes arch_remove_kprobe(struct kprobe *p)
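arch_arm_kprobe() and arch_disarm_kprobe() now go through text_poke() instead of storing the breakpoint byte directly and flushing the icache by hand. The (unsigned char []){BREAKPOINT_INSTRUCTION} construct is a C99 compound literal: it materialises a one-byte array so a pointer can be passed. A userspace sketch of that idiom, with memcpy() standing in for text_poke() (the real primitive also copes with write-protected text and cache coherency):

    #include <stdio.h>
    #include <string.h>

    static void poke(unsigned char *addr, const void *opcode, size_t len)
    {
        memcpy(addr, opcode, len);   /* stand-in for text_poke() */
    }

    int main(void)
    {
        unsigned char code[4] = { 0x90, 0x90, 0x90, 0x90 };   /* NOPs */

        /* arm: plant INT3 (0xcc) via a one-element compound literal */
        poke(&code[0], ((unsigned char[]){ 0xcc }), 1);
        printf("armed: first byte 0x%02x\n", code[0]);

        /* disarm: restore the saved original byte */
        poke(&code[0], &(unsigned char){ 0x90 }, 1);
        printf("disarmed: first byte 0x%02x\n", code[0]);
        return 0;
    }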
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index f3fb8174559e..a66d607f5b92 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/poll.h>
22#include <linux/thread_info.h>
21#include <linux/ctype.h> 23#include <linux/ctype.h>
22#include <linux/kmod.h> 24#include <linux/kmod.h>
23#include <linux/kdebug.h> 25#include <linux/kdebug.h>
@@ -26,6 +28,7 @@
26#include <asm/mce.h> 28#include <asm/mce.h>
27#include <asm/uaccess.h> 29#include <asm/uaccess.h>
28#include <asm/smp.h> 30#include <asm/smp.h>
31#include <asm/idle.h>
29 32
30#define MISC_MCELOG_MINOR 227 33#define MISC_MCELOG_MINOR 227
31#define NR_BANKS 6 34#define NR_BANKS 6
@@ -34,13 +37,17 @@ atomic_t mce_entry;
34 37
35static int mce_dont_init; 38static int mce_dont_init;
36 39
37/* 0: always panic, 1: panic if deadlock possible, 2: try to avoid panic, 40/*
38 3: never panic or exit (for testing only) */ 41 * Tolerant levels:
42 * 0: always panic on uncorrected errors, log corrected errors
43 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
44 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
45 * 3: never panic or SIGBUS, log all errors (for testing only)
46 */
39static int tolerant = 1; 47static int tolerant = 1;
40static int banks; 48static int banks;
41static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL }; 49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
42static unsigned long console_logged; 50static unsigned long notify_user;
43static int notify_user;
44static int rip_msr; 51static int rip_msr;
45static int mce_bootlog = 1; 52static int mce_bootlog = 1;
46static atomic_t mce_events; 53static atomic_t mce_events;
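The rewritten comment enumerates four tolerance levels; later in do_machine_check() they gate whether an uncorrected error panics the box, kills the offending task, or is only logged. A condensed sketch of that mapping (simplified: the real decision also consults panic_on_oops and whether the fault hit user space):

    enum mce_action { MCE_LOG_ONLY, MCE_KILL_TASK, MCE_PANIC };

    /* no_way_out: cannot safely continue; kill_it: a single task could be
     * killed instead of panicking. Both come from the per-bank scan. */
    static enum mce_action decide(int tolerant, int no_way_out, int kill_it)
    {
        if (no_way_out && tolerant < 3)
            return MCE_PANIC;
        if (kill_it && tolerant < 3)
            return MCE_KILL_TASK;
        return MCE_LOG_ONLY;
    }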
@@ -48,6 +55,8 @@ static atomic_t mce_events;
48static char trigger[128]; 55static char trigger[128];
49static char *trigger_argv[2] = { trigger, NULL }; 56static char *trigger_argv[2] = { trigger, NULL };
50 57
58static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
59
51/* 60/*
52 * Lockless MCE logging infrastructure. 61 * Lockless MCE logging infrastructure.
53 * This avoids deadlocks on printk locks without having to break locks. Also 62 * This avoids deadlocks on printk locks without having to break locks. Also
@@ -94,8 +103,7 @@ void mce_log(struct mce *mce)
94 mcelog.entry[entry].finished = 1; 103 mcelog.entry[entry].finished = 1;
95 wmb(); 104 wmb();
96 105
97 if (!test_and_set_bit(0, &console_logged)) 106 set_bit(0, &notify_user);
98 notify_user = 1;
99} 107}
100 108
101static void print_mce(struct mce *m) 109static void print_mce(struct mce *m)
@@ -128,6 +136,7 @@ static void print_mce(struct mce *m)
128static void mce_panic(char *msg, struct mce *backup, unsigned long start) 136static void mce_panic(char *msg, struct mce *backup, unsigned long start)
129{ 137{
130 int i; 138 int i;
139
131 oops_begin(); 140 oops_begin();
132 for (i = 0; i < MCE_LOG_LEN; i++) { 141 for (i = 0; i < MCE_LOG_LEN; i++) {
133 unsigned long tsc = mcelog.entry[i].tsc; 142 unsigned long tsc = mcelog.entry[i].tsc;
@@ -139,10 +148,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
139 } 148 }
140 if (backup) 149 if (backup)
141 print_mce(backup); 150 print_mce(backup);
142 if (tolerant >= 3) 151 panic(msg);
143 printk("Fake panic: %s\n", msg);
144 else
145 panic(msg);
146} 152}
147 153
148static int mce_available(struct cpuinfo_x86 *c) 154static int mce_available(struct cpuinfo_x86 *c)
@@ -167,17 +173,6 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
167 } 173 }
168} 174}
169 175
170static void do_mce_trigger(void)
171{
172 static atomic_t mce_logged;
173 int events = atomic_read(&mce_events);
174 if (events != atomic_read(&mce_logged) && trigger[0]) {
175 /* Small race window, but should be harmless. */
176 atomic_set(&mce_logged, events);
177 call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
178 }
179}
180
181/* 176/*
182 * The actual machine check handler 177 * The actual machine check handler
183 */ 178 */
@@ -185,11 +180,19 @@ static void do_mce_trigger(void)
185void do_machine_check(struct pt_regs * regs, long error_code) 180void do_machine_check(struct pt_regs * regs, long error_code)
186{ 181{
187 struct mce m, panicm; 182 struct mce m, panicm;
188 int nowayout = (tolerant < 1);
189 int kill_it = 0;
190 u64 mcestart = 0; 183 u64 mcestart = 0;
191 int i; 184 int i;
192 int panicm_found = 0; 185 int panicm_found = 0;
186 /*
187 * If no_way_out gets set, there is no safe way to recover from this
188 * MCE. If tolerant is cranked up, we'll try anyway.
189 */
190 int no_way_out = 0;
191 /*
192 * If kill_it gets set, there might be a way to recover from this
193 * error.
194 */
195 int kill_it = 0;
193 196
194 atomic_inc(&mce_entry); 197 atomic_inc(&mce_entry);
195 198
@@ -201,8 +204,9 @@ void do_machine_check(struct pt_regs * regs, long error_code)
201 memset(&m, 0, sizeof(struct mce)); 204 memset(&m, 0, sizeof(struct mce));
202 m.cpu = smp_processor_id(); 205 m.cpu = smp_processor_id();
203 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); 206 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
207 /* if the restart IP is not valid, we're done for */
204 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 208 if (!(m.mcgstatus & MCG_STATUS_RIPV))
205 kill_it = 1; 209 no_way_out = 1;
206 210
207 rdtscll(mcestart); 211 rdtscll(mcestart);
208 barrier(); 212 barrier();
@@ -221,10 +225,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
221 continue; 225 continue;
222 226
223 if (m.status & MCI_STATUS_EN) { 227 if (m.status & MCI_STATUS_EN) {
224 /* In theory _OVER could be a nowayout too, but 228 /* if PCC was set, there's no way out */
225 assume any overflowed errors were no fatal. */ 229 no_way_out |= !!(m.status & MCI_STATUS_PCC);
226 nowayout |= !!(m.status & MCI_STATUS_PCC); 230 /*
227 kill_it |= !!(m.status & MCI_STATUS_UC); 231 * If this error was uncorrectable and there was
232 * an overflow, we're in trouble. If no overflow,
233 * we might get away with just killing a task.
234 */
235 if (m.status & MCI_STATUS_UC) {
236 if (tolerant < 1 || m.status & MCI_STATUS_OVER)
237 no_way_out = 1;
238 kill_it = 1;
239 }
228 } 240 }
229 241
230 if (m.status & MCI_STATUS_MISCV) 242 if (m.status & MCI_STATUS_MISCV)
@@ -235,7 +247,6 @@ void do_machine_check(struct pt_regs * regs, long error_code)
235 mce_get_rip(&m, regs); 247 mce_get_rip(&m, regs);
236 if (error_code >= 0) 248 if (error_code >= 0)
237 rdtscll(m.tsc); 249 rdtscll(m.tsc);
238 wrmsrl(MSR_IA32_MC0_STATUS + i*4, 0);
239 if (error_code != -2) 250 if (error_code != -2)
240 mce_log(&m); 251 mce_log(&m);
241 252
@@ -251,45 +262,59 @@ void do_machine_check(struct pt_regs * regs, long error_code)
251 } 262 }
252 263
253 /* Never do anything final in the polling timer */ 264 /* Never do anything final in the polling timer */
254 if (!regs) { 265 if (!regs)
255 /* Normal interrupt context here. Call trigger for any new
256 events. */
257 do_mce_trigger();
258 goto out; 266 goto out;
259 }
260 267
261 /* If we didn't find an uncorrectable error, pick 268 /* If we didn't find an uncorrectable error, pick
262 the last one (shouldn't happen, just being safe). */ 269 the last one (shouldn't happen, just being safe). */
263 if (!panicm_found) 270 if (!panicm_found)
264 panicm = m; 271 panicm = m;
265 if (nowayout) 272
273 /*
274 * If we have decided that we just CAN'T continue, and the user
275 * has not set tolerant to an insane level, give up and die.
276 */
277 if (no_way_out && tolerant < 3)
266 mce_panic("Machine check", &panicm, mcestart); 278 mce_panic("Machine check", &panicm, mcestart);
267 if (kill_it) { 279
280 /*
281 * If the error seems to be unrecoverable, something should be
282 * done. Try to kill as little as possible. If we can kill just
283 * one task, do that. If the user has set the tolerance very
284 * high, don't try to do anything at all.
285 */
286 if (kill_it && tolerant < 3) {
268 int user_space = 0; 287 int user_space = 0;
269 288
270 if (m.mcgstatus & MCG_STATUS_RIPV) 289 /*
290 * If the EIPV bit is set, it means the saved IP is the
291 * instruction which caused the MCE.
292 */
293 if (m.mcgstatus & MCG_STATUS_EIPV)
271 user_space = panicm.rip && (panicm.cs & 3); 294 user_space = panicm.rip && (panicm.cs & 3);
272 295
273 /* When the machine was in user space and the CPU didn't get 296 /*
274 confused it's normally not necessary to panic, unless you 297 * If we know that the error was in user space, send a
275 are paranoid (tolerant == 0) 298 * SIGBUS. Otherwise, panic if tolerance is low.
276 299 *
277 RED-PEN could be more tolerant for MCEs in idle, 300 * do_exit() takes an awful lot of locks and has a slight
278 but most likely they occur at boot anyways, where 301 * risk of deadlocking.
279 it is best to just halt the machine. */ 302 */
280 if ((!user_space && (panic_on_oops || tolerant < 2)) || 303 if (user_space) {
281 (unsigned)current->pid <= 1)
282 mce_panic("Uncorrected machine check", &panicm, mcestart);
283
284 /* do_exit takes an awful lot of locks and has as
285 slight risk of deadlocking. If you don't want that
286 don't set tolerant >= 2 */
287 if (tolerant < 3)
288 do_exit(SIGBUS); 304 do_exit(SIGBUS);
305 } else if (panic_on_oops || tolerant < 2) {
306 mce_panic("Uncorrected machine check",
307 &panicm, mcestart);
308 }
289 } 309 }
290 310
311 /* notify userspace ASAP */
312 set_thread_flag(TIF_MCE_NOTIFY);
313
291 out: 314 out:
292 /* Last thing done in the machine check exception to clear state. */ 315 /* the last thing we do is clear state */
316 for (i = 0; i < banks; i++)
317 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
293 wrmsrl(MSR_IA32_MCG_STATUS, 0); 318 wrmsrl(MSR_IA32_MCG_STATUS, 0);
294 out2: 319 out2:
295 atomic_dec(&mce_entry); 320 atomic_dec(&mce_entry);
@@ -344,37 +369,69 @@ static void mcheck_timer(struct work_struct *work)
344 on_each_cpu(mcheck_check_cpu, NULL, 1, 1); 369 on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
345 370
346 /* 371 /*
347 * It's ok to read stale data here for notify_user and 372 * Alert userspace if needed. If we logged an MCE, reduce the
348 * console_logged as we'll simply get the updated versions 373 * polling interval, otherwise increase the polling interval.
349 * on the next mcheck_timer execution and atomic operations
350 * on console_logged act as synchronization for notify_user
351 * writes.
352 */ 374 */
353 if (notify_user && console_logged) { 375 if (mce_notify_user()) {
376 next_interval = max(next_interval/2, HZ/100);
377 } else {
378 next_interval = min(next_interval*2,
379 (int)round_jiffies_relative(check_interval*HZ));
380 }
381
382 schedule_delayed_work(&mcheck_work, next_interval);
383}
384
385/*
386 * This is only called from process context. This is where we do
387 * anything we need to alert userspace about new MCEs. This is called
388 * directly from the poller and also from entry.S and idle, thanks to
389 * TIF_MCE_NOTIFY.
390 */
391int mce_notify_user(void)
392{
393 clear_thread_flag(TIF_MCE_NOTIFY);
394 if (test_and_clear_bit(0, &notify_user)) {
354 static unsigned long last_print; 395 static unsigned long last_print;
355 unsigned long now = jiffies; 396 unsigned long now = jiffies;
356 397
357 /* if we logged an MCE, reduce the polling interval */ 398 wake_up_interruptible(&mce_wait);
358 next_interval = max(next_interval/2, HZ/100); 399 if (trigger[0])
359 notify_user = 0; 400 call_usermodehelper(trigger, trigger_argv, NULL,
360 clear_bit(0, &console_logged); 401 UMH_NO_WAIT);
402
361 if (time_after_eq(now, last_print + (check_interval*HZ))) { 403 if (time_after_eq(now, last_print + (check_interval*HZ))) {
362 last_print = now; 404 last_print = now;
363 printk(KERN_INFO "Machine check events logged\n"); 405 printk(KERN_INFO "Machine check events logged\n");
364 } 406 }
365 } else { 407
366 next_interval = min(next_interval*2, check_interval*HZ); 408 return 1;
367 } 409 }
410 return 0;
411}
368 412
369 schedule_delayed_work(&mcheck_work, next_interval); 413/* see if the idle task needs to notify userspace */
414static int
415mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
416{
417 /* IDLE_END should be safe - interrupts are back on */
418 if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
419 mce_notify_user();
420
421 return NOTIFY_OK;
370} 422}
371 423
424static struct notifier_block mce_idle_notifier = {
425 .notifier_call = mce_idle_callback,
426};
372 427
373static __init int periodic_mcheck_init(void) 428static __init int periodic_mcheck_init(void)
374{ 429{
375 next_interval = check_interval * HZ; 430 next_interval = check_interval * HZ;
376 if (next_interval) 431 if (next_interval)
377 schedule_delayed_work(&mcheck_work, next_interval); 432 schedule_delayed_work(&mcheck_work,
433 round_jiffies_relative(next_interval));
434 idle_notifier_register(&mce_idle_notifier);
378 return 0; 435 return 0;
379} 436}
380__initcall(periodic_mcheck_init); 437__initcall(periodic_mcheck_init);
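mcheck_timer() now adapts its period: whenever mce_notify_user() reports fresh events the polling interval is halved (bounded below by HZ/100), otherwise it is doubled back up towards the configured check_interval, with round_jiffies_relative() batching the wakeups. A self-contained sketch of the back-off arithmetic, with HZ and the interval as illustrative constants:

    #include <stdio.h>

    #define HZ 250                    /* illustrative */
    #define CHECK_INTERVAL (5 * 60)   /* seconds, illustrative */

    static int next_interval = CHECK_INTERVAL * HZ;

    static void adjust_interval(int saw_events)
    {
        if (saw_events)
            next_interval = next_interval / 2 > HZ / 100
                          ? next_interval / 2 : HZ / 100;
        else
            next_interval = next_interval * 2 < CHECK_INTERVAL * HZ
                          ? next_interval * 2 : CHECK_INTERVAL * HZ;
    }

    int main(void)
    {
        adjust_interval(1);
        printf("after events: %d jiffies\n", next_interval);
        adjust_interval(0);
        printf("after a quiet period: %d jiffies\n", next_interval);
        return 0;
    }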
@@ -465,6 +522,40 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
465 * Character device to read and clear the MCE log. 522 * Character device to read and clear the MCE log.
466 */ 523 */
467 524
525static DEFINE_SPINLOCK(mce_state_lock);
526static int open_count; /* #times opened */
527static int open_exclu; /* already open exclusive? */
528
529static int mce_open(struct inode *inode, struct file *file)
530{
531 spin_lock(&mce_state_lock);
532
533 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
534 spin_unlock(&mce_state_lock);
535 return -EBUSY;
536 }
537
538 if (file->f_flags & O_EXCL)
539 open_exclu = 1;
540 open_count++;
541
542 spin_unlock(&mce_state_lock);
543
544 return nonseekable_open(inode, file);
545}
546
547static int mce_release(struct inode *inode, struct file *file)
548{
549 spin_lock(&mce_state_lock);
550
551 open_count--;
552 open_exclu = 0;
553
554 spin_unlock(&mce_state_lock);
555
556 return 0;
557}
558
468static void collect_tscs(void *data) 559static void collect_tscs(void *data)
469{ 560{
470 unsigned long *cpu_tsc = (unsigned long *)data; 561 unsigned long *cpu_tsc = (unsigned long *)data;
@@ -532,6 +623,14 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
532 return err ? -EFAULT : buf - ubuf; 623 return err ? -EFAULT : buf - ubuf;
533} 624}
534 625
626static unsigned int mce_poll(struct file *file, poll_table *wait)
627{
628 poll_wait(file, &mce_wait, wait);
629 if (rcu_dereference(mcelog.next))
630 return POLLIN | POLLRDNORM;
631 return 0;
632}
633
535static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 634static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
536{ 635{
537 int __user *p = (int __user *)arg; 636 int __user *p = (int __user *)arg;
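With .poll wired up, a logging daemon can sleep in poll() on /dev/mcelog and read only when records are queued instead of waking on a timer. A minimal userspace consumer, assuming only that the device node exists and that read() returns raw struct mce records (the record layout comes from the kernel's mce.h and is not reproduced here):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        struct pollfd pfd;
        ssize_t n;

        pfd.fd = open("/dev/mcelog", O_RDONLY);
        if (pfd.fd < 0) {
            perror("open /dev/mcelog");
            return 1;
        }
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
            n = read(pfd.fd, buf, sizeof(buf));   /* raw struct mce records */
            if (n <= 0)
                break;
            printf("read %zd bytes of machine check records\n", n);
        }
        close(pfd.fd);
        return 0;
    }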
@@ -555,7 +654,10 @@ static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned
555} 654}
556 655
557static const struct file_operations mce_chrdev_ops = { 656static const struct file_operations mce_chrdev_ops = {
657 .open = mce_open,
658 .release = mce_release,
558 .read = mce_read, 659 .read = mce_read,
660 .poll = mce_poll,
559 .ioctl = mce_ioctl, 661 .ioctl = mce_ioctl,
560}; 662};
561 663
@@ -565,6 +667,20 @@ static struct miscdevice mce_log_device = {
565 &mce_chrdev_ops, 667 &mce_chrdev_ops,
566}; 668};
567 669
670static unsigned long old_cr4 __initdata;
671
672void __init stop_mce(void)
673{
674 old_cr4 = read_cr4();
675 clear_in_cr4(X86_CR4_MCE);
676}
677
678void __init restart_mce(void)
679{
680 if (old_cr4 & X86_CR4_MCE)
681 set_in_cr4(X86_CR4_MCE);
682}
683
568/* 684/*
569 * Old style boot options parsing. Only for compatibility. 685 * Old style boot options parsing. Only for compatibility.
570 */ 686 */
@@ -620,7 +736,8 @@ static void mce_restart(void)
620 on_each_cpu(mce_init, NULL, 1, 1); 736 on_each_cpu(mce_init, NULL, 1, 1);
621 next_interval = check_interval * HZ; 737 next_interval = check_interval * HZ;
622 if (next_interval) 738 if (next_interval)
623 schedule_delayed_work(&mcheck_work, next_interval); 739 schedule_delayed_work(&mcheck_work,
740 round_jiffies_relative(next_interval));
624} 741}
625 742
626static struct sysdev_class mce_sysclass = { 743static struct sysdev_class mce_sysclass = {
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 03356e64f9c8..2f8a7f18b0fe 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -157,9 +157,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
157 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20; 157 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
158 wrmsr(address, low, high); 158 wrmsr(address, low, high);
159 159
160 setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD, 160 setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
161 THRESHOLD_APIC_VECTOR, 161 THRESHOLD_APIC_VECTOR,
162 K8_APIC_EXT_INT_MSG_FIX, 0); 162 K8_APIC_EXT_INT_MSG_FIX, 0);
163 163
164 threshold_defaults.address = address; 164 threshold_defaults.address = address;
165 threshold_restart_bank(&threshold_defaults, 0, 0); 165 threshold_restart_bank(&threshold_defaults, 0, 0);
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 61ae57eb9e4c..8bf0ca03ac8e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -32,7 +32,6 @@
32 32
33/* Have we found an MP table */ 33/* Have we found an MP table */
34int smp_found_config; 34int smp_found_config;
35unsigned int __initdata maxcpus = NR_CPUS;
36 35
37/* 36/*
38 * Various Linux-internal data structures created from the 37 * Various Linux-internal data structures created from the
@@ -649,6 +648,20 @@ static int mp_find_ioapic(int gsi)
649 return -1; 648 return -1;
650} 649}
651 650
651static u8 uniq_ioapic_id(u8 id)
652{
653 int i;
654 DECLARE_BITMAP(used, 256);
655 bitmap_zero(used, 256);
656 for (i = 0; i < nr_ioapics; i++) {
657 struct mpc_config_ioapic *ia = &mp_ioapics[i];
658 __set_bit(ia->mpc_apicid, used);
659 }
660 if (!test_bit(id, used))
661 return id;
662 return find_first_zero_bit(used, 256);
663}
664
652void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) 665void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
653{ 666{
654 int idx = 0; 667 int idx = 0;
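uniq_ioapic_id() above marks every APIC id already claimed by a registered I/O APIC in a 256-bit bitmap and, if the firmware-supplied id collides, hands back the first free one; mp_register_ioapic() also defers nr_ioapics++ until the entry is fully filled in. The same collision-avoidance idea in plain C, with a byte array standing in for the kernel's bitmap helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t uniq_id(uint8_t id, const uint8_t *taken, int ntaken)
    {
        uint8_t used[256];
        int i;

        memset(used, 0, sizeof(used));
        for (i = 0; i < ntaken; i++)
            used[taken[i]] = 1;
        if (!used[id])
            return id;               /* firmware id does not collide */
        for (i = 0; i < 256; i++)
            if (!used[i])
                return (uint8_t)i;   /* first free id */
        return id;                   /* table full; illustrative fallback */
    }

    int main(void)
    {
        uint8_t taken[] = { 2, 3 };
        printf("requested 2 -> %u\n", uniq_id(2, taken, 2));   /* remapped */
        printf("requested 5 -> %u\n", uniq_id(5, taken, 2));   /* kept */
        return 0;
    }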
@@ -656,14 +669,14 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
656 if (bad_ioapic(address)) 669 if (bad_ioapic(address))
657 return; 670 return;
658 671
659 idx = nr_ioapics++; 672 idx = nr_ioapics;
660 673
661 mp_ioapics[idx].mpc_type = MP_IOAPIC; 674 mp_ioapics[idx].mpc_type = MP_IOAPIC;
662 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; 675 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
663 mp_ioapics[idx].mpc_apicaddr = address; 676 mp_ioapics[idx].mpc_apicaddr = address;
664 677
665 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 678 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
666 mp_ioapics[idx].mpc_apicid = id; 679 mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
667 mp_ioapics[idx].mpc_apicver = 0; 680 mp_ioapics[idx].mpc_apicver = 0;
668 681
669 /* 682 /*
@@ -680,6 +693,8 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
680 mp_ioapics[idx].mpc_apicaddr, 693 mp_ioapics[idx].mpc_apicaddr,
681 mp_ioapic_routing[idx].gsi_start, 694 mp_ioapic_routing[idx].gsi_start,
682 mp_ioapic_routing[idx].gsi_end); 695 mp_ioapic_routing[idx].gsi_end);
696
697 nr_ioapics++;
683} 698}
684 699
685void __init 700void __init
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index edbbc59b7523..cb8ee9d02f86 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -384,11 +384,14 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
384 return rc; 384 return rc;
385} 385}
386 386
387static unsigned ignore_nmis;
388
387asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code) 389asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
388{ 390{
389 nmi_enter(); 391 nmi_enter();
390 add_pda(__nmi_count,1); 392 add_pda(__nmi_count,1);
391 default_do_nmi(regs); 393 if (!ignore_nmis)
394 default_do_nmi(regs);
392 nmi_exit(); 395 nmi_exit();
393} 396}
394 397
@@ -401,6 +404,18 @@ int do_nmi_callback(struct pt_regs * regs, int cpu)
401 return 0; 404 return 0;
402} 405}
403 406
407void stop_nmi(void)
408{
409 acpi_nmi_disable();
410 ignore_nmis++;
411}
412
413void restart_nmi(void)
414{
415 ignore_nmis--;
416 acpi_nmi_enable();
417}
418
404#ifdef CONFIG_SYSCTL 419#ifdef CONFIG_SYSCTL
405 420
406static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu) 421static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
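stop_nmi()/restart_nmi() implement a counted disable: the ACPI NMI sources are switched off, ignore_nmis is bumped, and do_nmi() drops events while the count is non-zero, letting callers (such as the machine-check stop/restart path added elsewhere in this series) bracket a critical section. A stripped-down illustration of the pattern, with invented names rather than the kernel's:

    #include <stdio.h>

    static unsigned ignore_events;

    static void handle_event(void) { printf("handling event\n"); }

    /* Counted disable: stop/restart calls pair up, so nesting is safe. */
    static void stop_events(void)    { ignore_events++; }
    static void restart_events(void) { ignore_events--; }

    static void event_handler(void)
    {
        if (!ignore_events)
            handle_event();
    }

    int main(void)
    {
        event_handler();     /* handled */
        stop_events();
        event_handler();     /* silently dropped */
        restart_events();
        event_handler();     /* handled again */
        return 0;
    }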
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 5bd20b542c1e..ba16c968ca3f 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Derived from arch/powerpc/kernel/iommu.c 2 * Derived from arch/powerpc/kernel/iommu.c
3 * 3 *
4 * Copyright (C) IBM Corporation, 2006 4 * Copyright IBM Corporation, 2006-2007
5 * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us> 5 * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us>
6 * 6 *
7 * Author: Jon Mason <jdmason@kudzu.us> 7 * Author: Jon Mason <jdmason@kudzu.us>
@@ -35,7 +35,7 @@
35#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <asm/proto.h> 38#include <asm/iommu.h>
39#include <asm/calgary.h> 39#include <asm/calgary.h>
40#include <asm/tce.h> 40#include <asm/tce.h>
41#include <asm/pci-direct.h> 41#include <asm/pci-direct.h>
@@ -50,13 +50,7 @@ int use_calgary __read_mostly = 0;
50#endif /* CONFIG_CALGARY_DEFAULT_ENABLED */ 50#endif /* CONFIG_CALGARY_DEFAULT_ENABLED */
51 51
52#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1 52#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
53#define PCI_VENDOR_DEVICE_ID_CALGARY \ 53#define PCI_DEVICE_ID_IBM_CALIOC2 0x0308
54 (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)
55
56/* we need these for register space address calculation */
57#define START_ADDRESS 0xfe000000
58#define CHASSIS_BASE 0
59#define ONE_BASED_CHASSIS_NUM 1
60 54
61/* register offsets inside the host bridge space */ 55/* register offsets inside the host bridge space */
62#define CALGARY_CONFIG_REG 0x0108 56#define CALGARY_CONFIG_REG 0x0108
@@ -80,6 +74,12 @@ int use_calgary __read_mostly = 0;
80#define PHB_MEM_2_SIZE_LOW 0x02E0 74#define PHB_MEM_2_SIZE_LOW 0x02E0
81#define PHB_DOSHOLE_OFFSET 0x08E0 75#define PHB_DOSHOLE_OFFSET 0x08E0
82 76
77/* CalIOC2 specific */
78#define PHB_SAVIOR_L2 0x0DB0
79#define PHB_PAGE_MIG_CTRL 0x0DA8
80#define PHB_PAGE_MIG_DEBUG 0x0DA0
81#define PHB_ROOT_COMPLEX_STATUS 0x0CB0
82
83/* PHB_CONFIG_RW */ 83/* PHB_CONFIG_RW */
84#define PHB_TCE_ENABLE 0x20000000 84#define PHB_TCE_ENABLE 0x20000000
85#define PHB_SLOT_DISABLE 0x1C000000 85#define PHB_SLOT_DISABLE 0x1C000000
@@ -92,7 +92,11 @@ int use_calgary __read_mostly = 0;
92/* CSR (Channel/DMA Status Register) */ 92/* CSR (Channel/DMA Status Register) */
93#define CSR_AGENT_MASK 0xffe0ffff 93#define CSR_AGENT_MASK 0xffe0ffff
94/* CCR (Calgary Configuration Register) */ 94/* CCR (Calgary Configuration Register) */
95#define CCR_2SEC_TIMEOUT 0x000000000000000EUL 95#define CCR_2SEC_TIMEOUT 0x000000000000000EUL
 96/* PMCR/PMDR (Page Migration Control/Debug Registers) */
97#define PMR_SOFTSTOP 0x80000000
98#define PMR_SOFTSTOPFAULT 0x40000000
99#define PMR_HARDSTOP 0x20000000
96 100
97#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ 101#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
98#define MAX_NUM_CHASSIS 8 /* max number of chassis */ 102#define MAX_NUM_CHASSIS 8 /* max number of chassis */
@@ -155,9 +159,26 @@ struct calgary_bus_info {
155 void __iomem *bbar; 159 void __iomem *bbar;
156}; 160};
157 161
158static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, }; 162static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
163static void calgary_tce_cache_blast(struct iommu_table *tbl);
164static void calgary_dump_error_regs(struct iommu_table *tbl);
165static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
166static void calioc2_tce_cache_blast(struct iommu_table *tbl);
167static void calioc2_dump_error_regs(struct iommu_table *tbl);
168
169static struct cal_chipset_ops calgary_chip_ops = {
170 .handle_quirks = calgary_handle_quirks,
171 .tce_cache_blast = calgary_tce_cache_blast,
172 .dump_error_regs = calgary_dump_error_regs
173};
159 174
160static void tce_cache_blast(struct iommu_table *tbl); 175static struct cal_chipset_ops calioc2_chip_ops = {
176 .handle_quirks = calioc2_handle_quirks,
177 .tce_cache_blast = calioc2_tce_cache_blast,
178 .dump_error_regs = calioc2_dump_error_regs
179};
180
181static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
161 182
162/* enable this to stress test the chip's TCE cache */ 183/* enable this to stress test the chip's TCE cache */
163#ifdef CONFIG_IOMMU_DEBUG 184#ifdef CONFIG_IOMMU_DEBUG
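Calgary and CalIOC2 now share one driver through a small ops table: each iommu_table carries a chip_ops pointer (set in calgary_setup_tar() further down) and the generic paths call tbl->chip_ops->tce_cache_blast() / dump_error_regs() / handle_quirks() rather than hard-coding the Calgary routines. A stripped-down version of that dispatch style:

    #include <stdio.h>

    struct tbl;

    struct chip_ops {
        void (*cache_blast)(struct tbl *t);
    };

    struct tbl {
        const char *name;
        const struct chip_ops *ops;
    };

    static void calgary_blast(struct tbl *t) { printf("%s: calgary-style flush\n", t->name); }
    static void calioc2_blast(struct tbl *t) { printf("%s: calioc2-style flush\n", t->name); }

    static const struct chip_ops calgary_ops = { .cache_blast = calgary_blast };
    static const struct chip_ops calioc2_ops = { .cache_blast = calioc2_blast };

    int main(void)
    {
        struct tbl a = { "phb0", &calgary_ops };
        struct tbl b = { "phb1", &calioc2_ops };

        a.ops->cache_blast(&a);   /* dispatches per chip type */
        b.ops->cache_blast(&b);
        return 0;
    }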
@@ -187,6 +208,7 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
187{ 208{
188 return ~0UL; 209 return ~0UL;
189} 210}
211
190#endif /* CONFIG_IOMMU_DEBUG */ 212#endif /* CONFIG_IOMMU_DEBUG */
191 213
192static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) 214static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
@@ -206,11 +228,12 @@ static inline int translate_phb(struct pci_dev* dev)
206} 228}
207 229
208static void iommu_range_reserve(struct iommu_table *tbl, 230static void iommu_range_reserve(struct iommu_table *tbl,
209 unsigned long start_addr, unsigned int npages) 231 unsigned long start_addr, unsigned int npages)
210{ 232{
211 unsigned long index; 233 unsigned long index;
212 unsigned long end; 234 unsigned long end;
213 unsigned long badbit; 235 unsigned long badbit;
236 unsigned long flags;
214 237
215 index = start_addr >> PAGE_SHIFT; 238 index = start_addr >> PAGE_SHIFT;
216 239
@@ -222,6 +245,8 @@ static void iommu_range_reserve(struct iommu_table *tbl,
222 if (end > tbl->it_size) /* don't go off the table */ 245 if (end > tbl->it_size) /* don't go off the table */
223 end = tbl->it_size; 246 end = tbl->it_size;
224 247
248 spin_lock_irqsave(&tbl->it_lock, flags);
249
225 badbit = verify_bit_range(tbl->it_map, 0, index, end); 250 badbit = verify_bit_range(tbl->it_map, 0, index, end);
226 if (badbit != ~0UL) { 251 if (badbit != ~0UL) {
227 if (printk_ratelimit()) 252 if (printk_ratelimit())
@@ -231,23 +256,29 @@ static void iommu_range_reserve(struct iommu_table *tbl,
231 } 256 }
232 257
233 set_bit_string(tbl->it_map, index, npages); 258 set_bit_string(tbl->it_map, index, npages);
259
260 spin_unlock_irqrestore(&tbl->it_lock, flags);
234} 261}
235 262
236static unsigned long iommu_range_alloc(struct iommu_table *tbl, 263static unsigned long iommu_range_alloc(struct iommu_table *tbl,
237 unsigned int npages) 264 unsigned int npages)
238{ 265{
266 unsigned long flags;
239 unsigned long offset; 267 unsigned long offset;
240 268
241 BUG_ON(npages == 0); 269 BUG_ON(npages == 0);
242 270
271 spin_lock_irqsave(&tbl->it_lock, flags);
272
243 offset = find_next_zero_string(tbl->it_map, tbl->it_hint, 273 offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
244 tbl->it_size, npages); 274 tbl->it_size, npages);
245 if (offset == ~0UL) { 275 if (offset == ~0UL) {
246 tce_cache_blast(tbl); 276 tbl->chip_ops->tce_cache_blast(tbl);
247 offset = find_next_zero_string(tbl->it_map, 0, 277 offset = find_next_zero_string(tbl->it_map, 0,
248 tbl->it_size, npages); 278 tbl->it_size, npages);
249 if (offset == ~0UL) { 279 if (offset == ~0UL) {
250 printk(KERN_WARNING "Calgary: IOMMU full.\n"); 280 printk(KERN_WARNING "Calgary: IOMMU full.\n");
281 spin_unlock_irqrestore(&tbl->it_lock, flags);
251 if (panic_on_overflow) 282 if (panic_on_overflow)
252 panic("Calgary: fix the allocator.\n"); 283 panic("Calgary: fix the allocator.\n");
253 else 284 else
@@ -259,17 +290,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
259 tbl->it_hint = offset + npages; 290 tbl->it_hint = offset + npages;
260 BUG_ON(tbl->it_hint > tbl->it_size); 291 BUG_ON(tbl->it_hint > tbl->it_size);
261 292
293 spin_unlock_irqrestore(&tbl->it_lock, flags);
294
262 return offset; 295 return offset;
263} 296}
264 297
265static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr, 298static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
266 unsigned int npages, int direction) 299 unsigned int npages, int direction)
267{ 300{
268 unsigned long entry, flags; 301 unsigned long entry;
269 dma_addr_t ret = bad_dma_address; 302 dma_addr_t ret = bad_dma_address;
270 303
271 spin_lock_irqsave(&tbl->it_lock, flags);
272
273 entry = iommu_range_alloc(tbl, npages); 304 entry = iommu_range_alloc(tbl, npages);
274 305
275 if (unlikely(entry == bad_dma_address)) 306 if (unlikely(entry == bad_dma_address))
@@ -282,23 +313,21 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
282 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, 313 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
283 direction); 314 direction);
284 315
285 spin_unlock_irqrestore(&tbl->it_lock, flags);
286
287 return ret; 316 return ret;
288 317
289error: 318error:
290 spin_unlock_irqrestore(&tbl->it_lock, flags);
291 printk(KERN_WARNING "Calgary: failed to allocate %u pages in " 319 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
292 "iommu %p\n", npages, tbl); 320 "iommu %p\n", npages, tbl);
293 return bad_dma_address; 321 return bad_dma_address;
294} 322}
295 323
296static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 324static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
297 unsigned int npages) 325 unsigned int npages)
298{ 326{
299 unsigned long entry; 327 unsigned long entry;
300 unsigned long badbit; 328 unsigned long badbit;
301 unsigned long badend; 329 unsigned long badend;
330 unsigned long flags;
302 331
303 /* were we called with bad_dma_address? */ 332 /* were we called with bad_dma_address? */
304 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 333 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
@@ -315,6 +344,8 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
315 344
316 tce_free(tbl, entry, npages); 345 tce_free(tbl, entry, npages);
317 346
347 spin_lock_irqsave(&tbl->it_lock, flags);
348
318 badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages); 349 badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
319 if (badbit != ~0UL) { 350 if (badbit != ~0UL) {
320 if (printk_ratelimit()) 351 if (printk_ratelimit())
@@ -324,23 +355,40 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
324 } 355 }
325 356
326 __clear_bit_string(tbl->it_map, entry, npages); 357 __clear_bit_string(tbl->it_map, entry, npages);
358
359 spin_unlock_irqrestore(&tbl->it_lock, flags);
327} 360}
328 361
329static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 362static inline struct iommu_table *find_iommu_table(struct device *dev)
330 unsigned int npages)
331{ 363{
332 unsigned long flags; 364 struct pci_dev *pdev;
365 struct pci_bus *pbus;
366 struct iommu_table *tbl;
333 367
334 spin_lock_irqsave(&tbl->it_lock, flags); 368 pdev = to_pci_dev(dev);
335 369
336 __iommu_free(tbl, dma_addr, npages); 370 /* is the device behind a bridge? */
371 if (unlikely(pdev->bus->parent))
372 pbus = pdev->bus->parent;
373 else
374 pbus = pdev->bus;
337 375
338 spin_unlock_irqrestore(&tbl->it_lock, flags); 376 tbl = pci_iommu(pbus);
377
378 BUG_ON(pdev->bus->parent &&
379 (tbl->it_busno != pdev->bus->parent->number));
380
381 return tbl;
339} 382}
340 383
341static void __calgary_unmap_sg(struct iommu_table *tbl, 384static void calgary_unmap_sg(struct device *dev,
342 struct scatterlist *sglist, int nelems, int direction) 385 struct scatterlist *sglist, int nelems, int direction)
343{ 386{
387 struct iommu_table *tbl = find_iommu_table(dev);
388
389 if (!translate_phb(to_pci_dev(dev)))
390 return;
391
344 while (nelems--) { 392 while (nelems--) {
345 unsigned int npages; 393 unsigned int npages;
346 dma_addr_t dma = sglist->dma_address; 394 dma_addr_t dma = sglist->dma_address;
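find_iommu_table() replaces the old to_pci_dev(dev)->bus->self->sysdata lookups: the table now hangs off the bus via pci_iommu(), and a device sitting behind a bridge resolves to its parent bus's table. A toy model of that one-level walk, with minimal stand-in structs (nothing here is the kernel's real pci_dev/pci_bus layout):

    #include <assert.h>
    #include <stdio.h>

    struct bus { struct bus *parent; void *iommu; };
    struct dev { struct bus *bus; };

    static void *find_table(struct dev *d)
    {
        /* behind a bridge? use the parent bus's table */
        struct bus *b = d->bus->parent ? d->bus->parent : d->bus;
        return b->iommu;
    }

    int main(void)
    {
        int table = 42;
        struct bus root    = { .parent = NULL,  .iommu = &table };
        struct bus child   = { .parent = &root, .iommu = NULL };
        struct dev direct  = { .bus = &root };
        struct dev bridged = { .bus = &child };

        assert(find_table(&direct) == &table);
        assert(find_table(&bridged) == &table);
        printf("both devices resolve to the same iommu table\n");
        return 0;
    }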
@@ -350,33 +398,17 @@ static void __calgary_unmap_sg(struct iommu_table *tbl,
350 break; 398 break;
351 399
352 npages = num_dma_pages(dma, dmalen); 400 npages = num_dma_pages(dma, dmalen);
353 __iommu_free(tbl, dma, npages); 401 iommu_free(tbl, dma, npages);
354 sglist++; 402 sglist++;
355 } 403 }
356} 404}
357 405
358void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
359 int nelems, int direction)
360{
361 unsigned long flags;
362 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
363
364 if (!translate_phb(to_pci_dev(dev)))
365 return;
366
367 spin_lock_irqsave(&tbl->it_lock, flags);
368
369 __calgary_unmap_sg(tbl, sglist, nelems, direction);
370
371 spin_unlock_irqrestore(&tbl->it_lock, flags);
372}
373
374static int calgary_nontranslate_map_sg(struct device* dev, 406static int calgary_nontranslate_map_sg(struct device* dev,
375 struct scatterlist *sg, int nelems, int direction) 407 struct scatterlist *sg, int nelems, int direction)
376{ 408{
377 int i; 409 int i;
378 410
379 for (i = 0; i < nelems; i++ ) { 411 for (i = 0; i < nelems; i++ ) {
380 struct scatterlist *s = &sg[i]; 412 struct scatterlist *s = &sg[i];
381 BUG_ON(!s->page); 413 BUG_ON(!s->page);
382 s->dma_address = virt_to_bus(page_address(s->page) +s->offset); 414 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
@@ -385,11 +417,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
385 return nelems; 417 return nelems;
386} 418}
387 419
388int calgary_map_sg(struct device *dev, struct scatterlist *sg, 420static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
389 int nelems, int direction) 421 int nelems, int direction)
390{ 422{
391 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 423 struct iommu_table *tbl = find_iommu_table(dev);
392 unsigned long flags;
393 unsigned long vaddr; 424 unsigned long vaddr;
394 unsigned int npages; 425 unsigned int npages;
395 unsigned long entry; 426 unsigned long entry;
@@ -398,8 +429,6 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
398 if (!translate_phb(to_pci_dev(dev))) 429 if (!translate_phb(to_pci_dev(dev)))
399 return calgary_nontranslate_map_sg(dev, sg, nelems, direction); 430 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
400 431
401 spin_lock_irqsave(&tbl->it_lock, flags);
402
403 for (i = 0; i < nelems; i++ ) { 432 for (i = 0; i < nelems; i++ ) {
404 struct scatterlist *s = &sg[i]; 433 struct scatterlist *s = &sg[i];
405 BUG_ON(!s->page); 434 BUG_ON(!s->page);
@@ -423,26 +452,23 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
423 s->dma_length = s->length; 452 s->dma_length = s->length;
424 } 453 }
425 454
426 spin_unlock_irqrestore(&tbl->it_lock, flags);
427
428 return nelems; 455 return nelems;
429error: 456error:
430 __calgary_unmap_sg(tbl, sg, nelems, direction); 457 calgary_unmap_sg(dev, sg, nelems, direction);
431 for (i = 0; i < nelems; i++) { 458 for (i = 0; i < nelems; i++) {
432 sg[i].dma_address = bad_dma_address; 459 sg[i].dma_address = bad_dma_address;
433 sg[i].dma_length = 0; 460 sg[i].dma_length = 0;
434 } 461 }
435 spin_unlock_irqrestore(&tbl->it_lock, flags);
436 return 0; 462 return 0;
437} 463}
438 464
439dma_addr_t calgary_map_single(struct device *dev, void *vaddr, 465static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
440 size_t size, int direction) 466 size_t size, int direction)
441{ 467{
442 dma_addr_t dma_handle = bad_dma_address; 468 dma_addr_t dma_handle = bad_dma_address;
443 unsigned long uaddr; 469 unsigned long uaddr;
444 unsigned int npages; 470 unsigned int npages;
445 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 471 struct iommu_table *tbl = find_iommu_table(dev);
446 472
447 uaddr = (unsigned long)vaddr; 473 uaddr = (unsigned long)vaddr;
448 npages = num_dma_pages(uaddr, size); 474 npages = num_dma_pages(uaddr, size);
@@ -455,10 +481,10 @@ dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
455 return dma_handle; 481 return dma_handle;
456} 482}
457 483
458void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, 484static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
459 size_t size, int direction) 485 size_t size, int direction)
460{ 486{
461 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 487 struct iommu_table *tbl = find_iommu_table(dev);
462 unsigned int npages; 488 unsigned int npages;
463 489
464 if (!translate_phb(to_pci_dev(dev))) 490 if (!translate_phb(to_pci_dev(dev)))
@@ -468,15 +494,13 @@ void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
468 iommu_free(tbl, dma_handle, npages); 494 iommu_free(tbl, dma_handle, npages);
469} 495}
470 496
471void* calgary_alloc_coherent(struct device *dev, size_t size, 497static void* calgary_alloc_coherent(struct device *dev, size_t size,
472 dma_addr_t *dma_handle, gfp_t flag) 498 dma_addr_t *dma_handle, gfp_t flag)
473{ 499{
474 void *ret = NULL; 500 void *ret = NULL;
475 dma_addr_t mapping; 501 dma_addr_t mapping;
476 unsigned int npages, order; 502 unsigned int npages, order;
477 struct iommu_table *tbl; 503 struct iommu_table *tbl = find_iommu_table(dev);
478
479 tbl = to_pci_dev(dev)->bus->self->sysdata;
480 504
481 size = PAGE_ALIGN(size); /* size rounded up to full pages */ 505 size = PAGE_ALIGN(size); /* size rounded up to full pages */
482 npages = size >> PAGE_SHIFT; 506 npages = size >> PAGE_SHIFT;
@@ -552,7 +576,22 @@ static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
552 return (void __iomem*)target; 576 return (void __iomem*)target;
553} 577}
554 578
555static void tce_cache_blast(struct iommu_table *tbl) 579static inline int is_calioc2(unsigned short device)
580{
581 return (device == PCI_DEVICE_ID_IBM_CALIOC2);
582}
583
584static inline int is_calgary(unsigned short device)
585{
586 return (device == PCI_DEVICE_ID_IBM_CALGARY);
587}
588
589static inline int is_cal_pci_dev(unsigned short device)
590{
591 return (is_calgary(device) || is_calioc2(device));
592}
593
594static void calgary_tce_cache_blast(struct iommu_table *tbl)
556{ 595{
557 u64 val; 596 u64 val;
558 u32 aer; 597 u32 aer;
@@ -589,6 +628,85 @@ static void tce_cache_blast(struct iommu_table *tbl)
589 (void)readl(target); /* flush */ 628 (void)readl(target); /* flush */
590} 629}
591 630
631static void calioc2_tce_cache_blast(struct iommu_table *tbl)
632{
633 void __iomem *bbar = tbl->bbar;
634 void __iomem *target;
635 u64 val64;
636 u32 val;
637 int i = 0;
638 int count = 1;
639 unsigned char bus = tbl->it_busno;
640
641begin:
642 printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
643 "sequence - count %d\n", bus, count);
644
645 /* 1. using the Page Migration Control reg set SoftStop */
646 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
647 val = be32_to_cpu(readl(target));
648 printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
649 val |= PMR_SOFTSTOP;
650 printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
651 writel(cpu_to_be32(val), target);
652
653 /* 2. poll split queues until all DMA activity is done */
654 printk(KERN_DEBUG "2a. starting to poll split queues\n");
655 target = calgary_reg(bbar, split_queue_offset(bus));
656 do {
657 val64 = readq(target);
658 i++;
659 } while ((val64 & 0xff) != 0xff && i < 100);
660 if (i == 100)
661 printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, "
662 "continuing anyway\n");
663
664 /* 3. poll Page Migration DEBUG for SoftStopFault */
665 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
666 val = be32_to_cpu(readl(target));
667 printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);
668
669 /* 4. if SoftStopFault - goto (1) */
670 if (val & PMR_SOFTSTOPFAULT) {
671 if (++count < 100)
672 goto begin;
673 else {
674 printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, "
675 "aborting TCE cache flush sequence!\n");
676 return; /* pray for the best */
677 }
678 }
679
680 /* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
681 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
682 printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
683 val = be32_to_cpu(readl(target));
684 printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
685 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
686 val = be32_to_cpu(readl(target));
687 printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);
688
689 /* 6. invalidate TCE cache */
690 printk(KERN_DEBUG "6. invalidating TCE cache\n");
691 target = calgary_reg(bbar, tar_offset(bus));
692 writeq(tbl->tar_val, target);
693
694 /* 7. Re-read PMCR */
695 printk(KERN_DEBUG "7a. Re-reading PMCR\n");
696 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
697 val = be32_to_cpu(readl(target));
698 printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);
699
700 /* 8. Remove HardStop */
701 printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
702 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
703 val = 0;
704 printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
705 writel(cpu_to_be32(val), target);
706 val = be32_to_cpu(readl(target));
707 printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
708}
709
592static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start, 710static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
593 u64 limit) 711 u64 limit)
594{ 712{
@@ -598,7 +716,7 @@ static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
598 limit++; 716 limit++;
599 717
600 numpages = ((limit - start) >> PAGE_SHIFT); 718 numpages = ((limit - start) >> PAGE_SHIFT);
601 iommu_range_reserve(dev->sysdata, start, numpages); 719 iommu_range_reserve(pci_iommu(dev->bus), start, numpages);
602} 720}
603 721
604static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev) 722static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
@@ -606,7 +724,7 @@ static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
606 void __iomem *target; 724 void __iomem *target;
607 u64 low, high, sizelow; 725 u64 low, high, sizelow;
608 u64 start, limit; 726 u64 start, limit;
609 struct iommu_table *tbl = dev->sysdata; 727 struct iommu_table *tbl = pci_iommu(dev->bus);
610 unsigned char busnum = dev->bus->number; 728 unsigned char busnum = dev->bus->number;
611 void __iomem *bbar = tbl->bbar; 729 void __iomem *bbar = tbl->bbar;
612 730
@@ -630,7 +748,7 @@ static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
630 u32 val32; 748 u32 val32;
631 u64 low, high, sizelow, sizehigh; 749 u64 low, high, sizelow, sizehigh;
632 u64 start, limit; 750 u64 start, limit;
633 struct iommu_table *tbl = dev->sysdata; 751 struct iommu_table *tbl = pci_iommu(dev->bus);
634 unsigned char busnum = dev->bus->number; 752 unsigned char busnum = dev->bus->number;
635 void __iomem *bbar = tbl->bbar; 753 void __iomem *bbar = tbl->bbar;
636 754
@@ -666,14 +784,20 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
666{ 784{
667 unsigned int npages; 785 unsigned int npages;
668 u64 start; 786 u64 start;
669 struct iommu_table *tbl = dev->sysdata; 787 struct iommu_table *tbl = pci_iommu(dev->bus);
670 788
671 /* reserve EMERGENCY_PAGES from bad_dma_address and up */ 789 /* reserve EMERGENCY_PAGES from bad_dma_address and up */
672 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); 790 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
673 791
674 /* avoid the BIOS/VGA first 640KB-1MB region */ 792 /* avoid the BIOS/VGA first 640KB-1MB region */
675 start = (640 * 1024); 793 /* for CalIOC2 - avoid the entire first MB */
676 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT; 794 if (is_calgary(dev->device)) {
795 start = (640 * 1024);
796 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
797 } else { /* calioc2 */
798 start = 0;
799 npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
800 }
677 iommu_range_reserve(tbl, start, npages); 801 iommu_range_reserve(tbl, start, npages);
678 802
679 /* reserve the two PCI peripheral memory regions in IO space */ 803 /* reserve the two PCI peripheral memory regions in IO space */
@@ -694,10 +818,17 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
694 if (ret) 818 if (ret)
695 return ret; 819 return ret;
696 820
697 tbl = dev->sysdata; 821 tbl = pci_iommu(dev->bus);
698 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space; 822 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
699 tce_free(tbl, 0, tbl->it_size); 823 tce_free(tbl, 0, tbl->it_size);
700 824
825 if (is_calgary(dev->device))
826 tbl->chip_ops = &calgary_chip_ops;
827 else if (is_calioc2(dev->device))
828 tbl->chip_ops = &calioc2_chip_ops;
829 else
830 BUG();
831
701 calgary_reserve_regions(dev); 832 calgary_reserve_regions(dev);
702 833
703 /* set TARs for each PHB */ 834 /* set TARs for each PHB */
@@ -706,15 +837,15 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
706 837
707 /* zero out all TAR bits under sw control */ 838 /* zero out all TAR bits under sw control */
708 val64 &= ~TAR_SW_BITS; 839 val64 &= ~TAR_SW_BITS;
709
710 tbl = dev->sysdata;
711 table_phys = (u64)__pa(tbl->it_base); 840 table_phys = (u64)__pa(tbl->it_base);
841
712 val64 |= table_phys; 842 val64 |= table_phys;
713 843
714 BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M); 844 BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
715 val64 |= (u64) specified_table_size; 845 val64 |= (u64) specified_table_size;
716 846
717 tbl->tar_val = cpu_to_be64(val64); 847 tbl->tar_val = cpu_to_be64(val64);
848
718 writeq(tbl->tar_val, target); 849 writeq(tbl->tar_val, target);
719 readq(target); /* flush */ 850 readq(target); /* flush */
720 851
@@ -724,7 +855,7 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
724static void __init calgary_free_bus(struct pci_dev *dev) 855static void __init calgary_free_bus(struct pci_dev *dev)
725{ 856{
726 u64 val64; 857 u64 val64;
727 struct iommu_table *tbl = dev->sysdata; 858 struct iommu_table *tbl = pci_iommu(dev->bus);
728 void __iomem *target; 859 void __iomem *target;
729 unsigned int bitmapsz; 860 unsigned int bitmapsz;
730 861
@@ -739,16 +870,81 @@ static void __init calgary_free_bus(struct pci_dev *dev)
739 tbl->it_map = NULL; 870 tbl->it_map = NULL;
740 871
741 kfree(tbl); 872 kfree(tbl);
742 dev->sysdata = NULL; 873
874 set_pci_iommu(dev->bus, NULL);
743 875
744 /* Can't free bootmem allocated memory after system is up :-( */ 876 /* Can't free bootmem allocated memory after system is up :-( */
745 bus_info[dev->bus->number].tce_space = NULL; 877 bus_info[dev->bus->number].tce_space = NULL;
746} 878}
747 879
880static void calgary_dump_error_regs(struct iommu_table *tbl)
881{
882 void __iomem *bbar = tbl->bbar;
883 void __iomem *target;
884 u32 csr, plssr;
885
886 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
887 csr = be32_to_cpu(readl(target));
888
889 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
890 plssr = be32_to_cpu(readl(target));
891
892 /* If no error, the agent ID in the CSR is not valid */
893 printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, "
894 "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr);
895}
896
897static void calioc2_dump_error_regs(struct iommu_table *tbl)
898{
899 void __iomem *bbar = tbl->bbar;
900 u32 csr, csmr, plssr, mck, rcstat;
901 void __iomem *target;
902 unsigned long phboff = phb_offset(tbl->it_busno);
903 unsigned long erroff;
904 u32 errregs[7];
905 int i;
906
907 /* dump CSR */
908 target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET);
909 csr = be32_to_cpu(readl(target));
910 /* dump PLSSR */
911 target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET);
912 plssr = be32_to_cpu(readl(target));
913 /* dump CSMR */
914 target = calgary_reg(bbar, phboff | 0x290);
915 csmr = be32_to_cpu(readl(target));
916 /* dump mck */
917 target = calgary_reg(bbar, phboff | 0x800);
918 mck = be32_to_cpu(readl(target));
919
920 printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n",
921 tbl->it_busno);
922
923 printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
924 csr, plssr, csmr, mck);
925
926 /* dump rest of error regs */
927 printk(KERN_EMERG "Calgary: ");
928 for (i = 0; i < ARRAY_SIZE(errregs); i++) {
929 /* err regs are at 0x810 - 0x870 */
930 erroff = (0x810 + (i * 0x10));
931 target = calgary_reg(bbar, phboff | erroff);
932 errregs[i] = be32_to_cpu(readl(target));
933 printk("0x%08x@0x%lx ", errregs[i], erroff);
934 }
935 printk("\n");
936
937 /* root complex status */
938 target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
939 rcstat = be32_to_cpu(readl(target));
940 printk(KERN_EMERG "Calgary: 0x%08x@0x%x\n", rcstat,
941 PHB_ROOT_COMPLEX_STATUS);
942}
943
748static void calgary_watchdog(unsigned long data) 944static void calgary_watchdog(unsigned long data)
749{ 945{
750 struct pci_dev *dev = (struct pci_dev *)data; 946 struct pci_dev *dev = (struct pci_dev *)data;
751 struct iommu_table *tbl = dev->sysdata; 947 struct iommu_table *tbl = pci_iommu(dev->bus);
752 void __iomem *bbar = tbl->bbar; 948 void __iomem *bbar = tbl->bbar;
753 u32 val32; 949 u32 val32;
754 void __iomem *target; 950 void __iomem *target;
@@ -758,13 +954,14 @@ static void calgary_watchdog(unsigned long data)
758 954
759 /* If no error, the agent ID in the CSR is not valid */ 955 /* If no error, the agent ID in the CSR is not valid */
760 if (val32 & CSR_AGENT_MASK) { 956 if (val32 & CSR_AGENT_MASK) {
761 printk(KERN_EMERG "calgary_watchdog: DMA error on PHB %#x, " 957 tbl->chip_ops->dump_error_regs(tbl);
762 "CSR = %#x\n", dev->bus->number, val32); 958
959 /* reset error */
763 writel(0, target); 960 writel(0, target);
764 961
765 /* Disable bus that caused the error */ 962 /* Disable bus that caused the error */
766 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | 963 target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
767 PHB_CONFIG_RW_OFFSET); 964 PHB_CONFIG_RW_OFFSET);
768 val32 = be32_to_cpu(readl(target)); 965 val32 = be32_to_cpu(readl(target));
769 val32 |= PHB_SLOT_DISABLE; 966 val32 |= PHB_SLOT_DISABLE;
770 writel(cpu_to_be32(val32), target); 967 writel(cpu_to_be32(val32), target);
@@ -775,8 +972,8 @@ static void calgary_watchdog(unsigned long data)
775 } 972 }
776} 973}
777 974
778static void __init calgary_increase_split_completion_timeout(void __iomem *bbar, 975static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
779 unsigned char busnum) 976 unsigned char busnum, unsigned long timeout)
780{ 977{
781 u64 val64; 978 u64 val64;
782 void __iomem *target; 979 void __iomem *target;
@@ -802,11 +999,40 @@ static void __init calgary_increase_split_completion_timeout(void __iomem *bbar,
802 /* zero out this PHB's timer bits */ 999 /* zero out this PHB's timer bits */
803 mask = ~(0xFUL << phb_shift); 1000 mask = ~(0xFUL << phb_shift);
804 val64 &= mask; 1001 val64 &= mask;
805 val64 |= (CCR_2SEC_TIMEOUT << phb_shift); 1002 val64 |= (timeout << phb_shift);
806 writeq(cpu_to_be64(val64), target); 1003 writeq(cpu_to_be64(val64), target);
807 readq(target); /* flush */ 1004 readq(target); /* flush */
808} 1005}
809 1006
1007static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1008{
1009 unsigned char busnum = dev->bus->number;
1010 void __iomem *bbar = tbl->bbar;
1011 void __iomem *target;
1012 u32 val;
1013
1014 /*
1015 * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
1016 */
1017 target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2);
1018 val = cpu_to_be32(readl(target));
1019 val |= 0x00800000;
1020 writel(cpu_to_be32(val), target);
1021}
1022
1023static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1024{
1025 unsigned char busnum = dev->bus->number;
1026
1027 /*
1028 * Give split completion a longer timeout on bus 1 for aic94xx
1029 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
1030 */
1031 if (is_calgary(dev->device) && (busnum == 1))
1032 calgary_set_split_completion_timeout(tbl->bbar, busnum,
1033 CCR_2SEC_TIMEOUT);
1034}
1035
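The quirk handlers above (and the watchdog's dump_error_regs call earlier) are reached through a per-chip operations table rather than direct calls. That table is not shown in this hunk, so the following is only a plausible sketch of its shape; the struct and field layout are assumptions:

	/* Hypothetical shape of the per-chip dispatch table used above */
	struct cal_chipset_ops {
		void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
		void (*dump_error_regs)(struct iommu_table *tbl);
	};

	static struct cal_chipset_ops calioc2_chip_ops = {
		.handle_quirks   = calioc2_handle_quirks,
		.dump_error_regs = calioc2_dump_error_regs,
	};

With such a table, calgary_init_one() below only needs tbl->chip_ops->handle_quirks(tbl, dev) regardless of whether the PHB is a Calgary or a CalIOC2.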
810static void __init calgary_enable_translation(struct pci_dev *dev) 1036static void __init calgary_enable_translation(struct pci_dev *dev)
811{ 1037{
812 u32 val32; 1038 u32 val32;
@@ -816,7 +1042,7 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
816 struct iommu_table *tbl; 1042 struct iommu_table *tbl;
817 1043
818 busnum = dev->bus->number; 1044 busnum = dev->bus->number;
819 tbl = dev->sysdata; 1045 tbl = pci_iommu(dev->bus);
820 bbar = tbl->bbar; 1046 bbar = tbl->bbar;
821 1047
822 /* enable TCE in PHB Config Register */ 1048 /* enable TCE in PHB Config Register */
@@ -824,20 +1050,15 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
824 val32 = be32_to_cpu(readl(target)); 1050 val32 = be32_to_cpu(readl(target));
825 val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE; 1051 val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
826 1052
827 printk(KERN_INFO "Calgary: enabling translation on PHB %#x\n", busnum); 1053 printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
1054 (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
1055 "Calgary" : "CalIOC2", busnum);
828 printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this " 1056 printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
829 "bus.\n"); 1057 "bus.\n");
830 1058
831 writel(cpu_to_be32(val32), target); 1059 writel(cpu_to_be32(val32), target);
832 readl(target); /* flush */ 1060 readl(target); /* flush */
833 1061
834 /*
835 * Give split completion a longer timeout on bus 1 for aic94xx
836 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
837 */
838 if (busnum == 1)
839 calgary_increase_split_completion_timeout(bbar, busnum);
840
841 init_timer(&tbl->watchdog_timer); 1062 init_timer(&tbl->watchdog_timer);
842 tbl->watchdog_timer.function = &calgary_watchdog; 1063 tbl->watchdog_timer.function = &calgary_watchdog;
843 tbl->watchdog_timer.data = (unsigned long)dev; 1064 tbl->watchdog_timer.data = (unsigned long)dev;
@@ -853,7 +1074,7 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
853 struct iommu_table *tbl; 1074 struct iommu_table *tbl;
854 1075
855 busnum = dev->bus->number; 1076 busnum = dev->bus->number;
856 tbl = dev->sysdata; 1077 tbl = pci_iommu(dev->bus);
857 bbar = tbl->bbar; 1078 bbar = tbl->bbar;
858 1079
859 /* disable TCE in PHB Config Register */ 1080 /* disable TCE in PHB Config Register */
@@ -871,13 +1092,19 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
871static void __init calgary_init_one_nontraslated(struct pci_dev *dev) 1092static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
872{ 1093{
873 pci_dev_get(dev); 1094 pci_dev_get(dev);
874 dev->sysdata = NULL; 1095 set_pci_iommu(dev->bus, NULL);
875 dev->bus->self = dev; 1096
1097 /* is the device behind a bridge? */
1098 if (dev->bus->parent)
1099 dev->bus->parent->self = dev;
1100 else
1101 dev->bus->self = dev;
876} 1102}
877 1103
878static int __init calgary_init_one(struct pci_dev *dev) 1104static int __init calgary_init_one(struct pci_dev *dev)
879{ 1105{
880 void __iomem *bbar; 1106 void __iomem *bbar;
1107 struct iommu_table *tbl;
881 int ret; 1108 int ret;
882 1109
883 BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM); 1110 BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
@@ -888,7 +1115,18 @@ static int __init calgary_init_one(struct pci_dev *dev)
888 goto done; 1115 goto done;
889 1116
890 pci_dev_get(dev); 1117 pci_dev_get(dev);
891 dev->bus->self = dev; 1118
1119 if (dev->bus->parent) {
1120 if (dev->bus->parent->self)
1121 printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
1122 "bus->parent->self!\n", dev);
1123 dev->bus->parent->self = dev;
1124 } else
1125 dev->bus->self = dev;
1126
1127 tbl = pci_iommu(dev->bus);
1128 tbl->chip_ops->handle_quirks(tbl, dev);
1129
892 calgary_enable_translation(dev); 1130 calgary_enable_translation(dev);
893 1131
894 return 0; 1132 return 0;
@@ -924,11 +1162,18 @@ static int __init calgary_locate_bbars(void)
924 target = calgary_reg(bbar, offset); 1162 target = calgary_reg(bbar, offset);
925 1163
926 val = be32_to_cpu(readl(target)); 1164 val = be32_to_cpu(readl(target));
1165
927 start_bus = (u8)((val & 0x00FF0000) >> 16); 1166 start_bus = (u8)((val & 0x00FF0000) >> 16);
928 end_bus = (u8)((val & 0x0000FF00) >> 8); 1167 end_bus = (u8)((val & 0x0000FF00) >> 8);
929 for (bus = start_bus; bus <= end_bus; bus++) { 1168
930 bus_info[bus].bbar = bbar; 1169 if (end_bus) {
931 bus_info[bus].phbid = phb; 1170 for (bus = start_bus; bus <= end_bus; bus++) {
1171 bus_info[bus].bbar = bbar;
1172 bus_info[bus].phbid = phb;
1173 }
1174 } else {
1175 bus_info[start_bus].bbar = bbar;
1176 bus_info[start_bus].phbid = phb;
932 } 1177 }
933 } 1178 }
934 } 1179 }
@@ -948,22 +1193,24 @@ static int __init calgary_init(void)
948{ 1193{
949 int ret; 1194 int ret;
950 struct pci_dev *dev = NULL; 1195 struct pci_dev *dev = NULL;
1196 void *tce_space;
951 1197
952 ret = calgary_locate_bbars(); 1198 ret = calgary_locate_bbars();
953 if (ret) 1199 if (ret)
954 return ret; 1200 return ret;
955 1201
956 do { 1202 do {
957 dev = pci_get_device(PCI_VENDOR_ID_IBM, 1203 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
958 PCI_DEVICE_ID_IBM_CALGARY,
959 dev);
960 if (!dev) 1204 if (!dev)
961 break; 1205 break;
1206 if (!is_cal_pci_dev(dev->device))
1207 continue;
962 if (!translate_phb(dev)) { 1208 if (!translate_phb(dev)) {
963 calgary_init_one_nontraslated(dev); 1209 calgary_init_one_nontraslated(dev);
964 continue; 1210 continue;
965 } 1211 }
966 if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots) 1212 tce_space = bus_info[dev->bus->number].tce_space;
1213 if (!tce_space && !translate_empty_slots)
967 continue; 1214 continue;
968 1215
969 ret = calgary_init_one(dev); 1216 ret = calgary_init_one(dev);
@@ -976,10 +1223,11 @@ static int __init calgary_init(void)
976error: 1223error:
977 do { 1224 do {
978 dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM, 1225 dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
979 PCI_DEVICE_ID_IBM_CALGARY, 1226 PCI_ANY_ID, dev);
980 dev);
981 if (!dev) 1227 if (!dev)
982 break; 1228 break;
1229 if (!is_cal_pci_dev(dev->device))
1230 continue;
983 if (!translate_phb(dev)) { 1231 if (!translate_phb(dev)) {
984 pci_dev_put(dev); 1232 pci_dev_put(dev);
985 continue; 1233 continue;
@@ -1057,9 +1305,29 @@ static int __init build_detail_arrays(void)
1057 return 0; 1305 return 0;
1058} 1306}
1059 1307
1060void __init detect_calgary(void) 1308static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
1061{ 1309{
1310 int dev;
1062 u32 val; 1311 u32 val;
1312
1313 if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
1314 /*
 1315	 * FIXME: properly scan for devices across the
1316 * PCI-to-PCI bridge on every CalIOC2 port.
1317 */
1318 return 1;
1319 }
1320
1321 for (dev = 1; dev < 8; dev++) {
1322 val = read_pci_config(bus, dev, 0, 0);
1323 if (val != 0xffffffff)
1324 break;
1325 }
1326 return (val != 0xffffffff);
1327}
1328
1329void __init detect_calgary(void)
1330{
1063 int bus; 1331 int bus;
1064 void *tbl; 1332 void *tbl;
1065 int calgary_found = 0; 1333 int calgary_found = 0;
@@ -1116,29 +1384,26 @@ void __init detect_calgary(void)
1116 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); 1384 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
1117 1385
1118 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { 1386 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1119 int dev;
1120 struct calgary_bus_info *info = &bus_info[bus]; 1387 struct calgary_bus_info *info = &bus_info[bus];
1388 unsigned short pci_device;
1389 u32 val;
1390
1391 val = read_pci_config(bus, 0, 0, 0);
1392 pci_device = (val & 0xFFFF0000) >> 16;
1121 1393
1122 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) 1394 if (!is_cal_pci_dev(pci_device))
1123 continue; 1395 continue;
1124 1396
1125 if (info->translation_disabled) 1397 if (info->translation_disabled)
1126 continue; 1398 continue;
1127 1399
1128 /* 1400 if (calgary_bus_has_devices(bus, pci_device) ||
1129 * Scan the slots of the PCI bus to see if there is a device present. 1401 translate_empty_slots) {
1130 * The parent bus will be the zero-ith device, so start at 1. 1402 tbl = alloc_tce_table();
1131 */ 1403 if (!tbl)
1132 for (dev = 1; dev < 8; dev++) { 1404 goto cleanup;
1133 val = read_pci_config(bus, dev, 0, 0); 1405 info->tce_space = tbl;
1134 if (val != 0xffffffff || translate_empty_slots) { 1406 calgary_found = 1;
1135 tbl = alloc_tce_table();
1136 if (!tbl)
1137 goto cleanup;
1138 info->tce_space = tbl;
1139 calgary_found = 1;
1140 break;
1141 }
1142 } 1407 }
1143 } 1408 }
1144 1409
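The rewritten detection loop above keys off the device ID alone: PCI config-space dword 0 carries the vendor ID in bits 15:0 and the device ID in bits 31:16, which is what the shift extracts. A small sketch of the same decode, assuming is_cal_pci_dev() simply compares against the two known IDs:

	u32 val = read_pci_config(bus, 0, 0, 0);	/* dword 0: device ID [31:16], vendor ID [15:0] */
	unsigned short vendor = val & 0xFFFF;
	unsigned short device = (val & 0xFFFF0000) >> 16;

	if (vendor == PCI_VENDOR_ID_IBM &&
	    (device == PCI_DEVICE_ID_IBM_CALGARY || device == PCI_DEVICE_ID_IBM_CALIOC2)) {
		/* a translatable PHB owns this bus */
	}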
@@ -1249,3 +1514,66 @@ static int __init calgary_parse_options(char *p)
1249 return 1; 1514 return 1;
1250} 1515}
1251__setup("calgary=", calgary_parse_options); 1516__setup("calgary=", calgary_parse_options);
1517
1518static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
1519{
1520 struct iommu_table *tbl;
1521 unsigned int npages;
1522 int i;
1523
1524 tbl = pci_iommu(dev->bus);
1525
1526 for (i = 0; i < 4; i++) {
1527 struct resource *r = &dev->resource[PCI_BRIDGE_RESOURCES + i];
1528
1529 /* Don't give out TCEs that map MEM resources */
1530 if (!(r->flags & IORESOURCE_MEM))
1531 continue;
1532
1533 /* 0-based? we reserve the whole 1st MB anyway */
1534 if (!r->start)
1535 continue;
1536
1537 /* cover the whole region */
1538 npages = (r->end - r->start) >> PAGE_SHIFT;
1539 npages++;
1540
1541 iommu_range_reserve(tbl, r->start, npages);
1542 }
1543}
1544
1545static int __init calgary_fixup_tce_spaces(void)
1546{
1547 struct pci_dev *dev = NULL;
1548 void *tce_space;
1549
1550 if (no_iommu || swiotlb || !calgary_detected)
1551 return -ENODEV;
1552
1553 printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");
1554
1555 do {
1556 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1557 if (!dev)
1558 break;
1559 if (!is_cal_pci_dev(dev->device))
1560 continue;
1561 if (!translate_phb(dev))
1562 continue;
1563
1564 tce_space = bus_info[dev->bus->number].tce_space;
1565 if (!tce_space)
1566 continue;
1567
1568 calgary_fixup_one_tce_space(dev);
1569
1570 } while (1);
1571
1572 return 0;
1573}
1574
1575/*
 1576 * We need to be called after pcibios_assign_resources (fs_initcall level)
1577 * and before device_initcall.
1578 */
1579rootfs_initcall(calgary_fixup_tce_spaces);
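The closing comment refers to initcall ordering: fs_initcall (where pcibios_assign_resources runs) links before rootfs_initcall, which in turn runs before device_initcall, so the TCE fixup sees final resource assignments but still precedes driver probing. A toy illustration of the relative ordering, with placeholder functions:

	static int __init runs_first(void)  { return 0; }	/* resources are assigned at this level */
	fs_initcall(runs_first);

	static int __init runs_second(void) { return 0; }	/* calgary_fixup_tce_spaces uses this level */
	rootfs_initcall(runs_second);

	static int __init runs_last(void)   { return 0; }	/* ordinary device_initcall/module_init */
	device_initcall(runs_last);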
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 90f6315d02d4..05d745ede561 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -8,7 +8,7 @@
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/proto.h> 11#include <asm/iommu.h>
12#include <asm/calgary.h> 12#include <asm/calgary.h>
13 13
14int iommu_merge __read_mostly = 0; 14int iommu_merge __read_mostly = 0;
@@ -321,6 +321,11 @@ static int __init pci_iommu_init(void)
321 return 0; 321 return 0;
322} 322}
323 323
324void pci_iommu_shutdown(void)
325{
326 gart_iommu_shutdown();
327}
328
324#ifdef CONFIG_PCI 329#ifdef CONFIG_PCI
325/* Many VIA bridges seem to corrupt data for DAC. Disable it here */ 330/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
326 331
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index ae091cdc1a4d..4918c575d582 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -28,6 +28,7 @@
28#include <asm/mtrr.h> 28#include <asm/mtrr.h>
29#include <asm/pgtable.h> 29#include <asm/pgtable.h>
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/iommu.h>
31#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
32#include <asm/swiotlb.h> 33#include <asm/swiotlb.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
@@ -235,7 +236,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
235} 236}
236 237
237/* Map a single area into the IOMMU */ 238/* Map a single area into the IOMMU */
238dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir) 239static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
239{ 240{
240 unsigned long phys_mem, bus; 241 unsigned long phys_mem, bus;
241 242
@@ -253,7 +254,7 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
253/* 254/*
254 * Free a DMA mapping. 255 * Free a DMA mapping.
255 */ 256 */
256void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, 257static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
257 size_t size, int direction) 258 size_t size, int direction)
258{ 259{
259 unsigned long iommu_page; 260 unsigned long iommu_page;
@@ -275,7 +276,7 @@ void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
275/* 276/*
276 * Wrapper for pci_unmap_single working with scatterlists. 277 * Wrapper for pci_unmap_single working with scatterlists.
277 */ 278 */
278void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 279static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
279{ 280{
280 int i; 281 int i;
281 282
@@ -571,6 +572,26 @@ static const struct dma_mapping_ops gart_dma_ops = {
571 .unmap_sg = gart_unmap_sg, 572 .unmap_sg = gart_unmap_sg,
572}; 573};
573 574
575void gart_iommu_shutdown(void)
576{
577 struct pci_dev *dev;
578 int i;
579
580 if (no_agp && (dma_ops != &gart_dma_ops))
581 return;
582
583 for (i = 0; i < num_k8_northbridges; i++) {
584 u32 ctl;
585
586 dev = k8_northbridges[i];
587 pci_read_config_dword(dev, 0x90, &ctl);
588
589 ctl &= ~1;
590
591 pci_write_config_dword(dev, 0x90, ctl);
592 }
593}
594
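gart_iommu_shutdown() above does a read-modify-write of config register 0x90 on every K8 northbridge, clearing bit 0; that register is the GART aperture control and bit 0 is the translation-enable bit, so translation is switched off before handing the machine over. A self-contained sketch of the same operation; the macro names below are invented for readability, only the offset and the bit come from the hunk:

	#define NB_APERTURE_CTL		0x90		/* assumed name; offset is from the code above */
	#define NB_GART_TRANSLATION_EN	(1 << 0)	/* assumed name for the bit cleared above */

	static void disable_one_gart(struct pci_dev *nb_dev)
	{
		u32 ctl;

		pci_read_config_dword(nb_dev, NB_APERTURE_CTL, &ctl);
		ctl &= ~NB_GART_TRANSLATION_EN;		/* stop GART translation on this northbridge */
		pci_write_config_dword(nb_dev, NB_APERTURE_CTL, ctl);
	}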
574void __init gart_iommu_init(void) 595void __init gart_iommu_init(void)
575{ 596{
576 struct agp_kern_info info; 597 struct agp_kern_info info;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 6dade0c867cc..2a34c6c025a9 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -6,7 +6,7 @@
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
8 8
9#include <asm/proto.h> 9#include <asm/iommu.h>
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/dma.h> 11#include <asm/dma.h>
12 12
@@ -34,7 +34,7 @@ nommu_map_single(struct device *hwdev, void *ptr, size_t size,
34 return bus; 34 return bus;
35} 35}
36 36
37void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size, 37static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
38 int direction) 38 int direction)
39{ 39{
40} 40}
@@ -54,7 +54,7 @@ void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
54 * Device ownership issues as mentioned above for pci_map_single are 54 * Device ownership issues as mentioned above for pci_map_single are
55 * the same here. 55 * the same here.
56 */ 56 */
57int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 57static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
58 int nents, int direction) 58 int nents, int direction)
59{ 59{
60 int i; 60 int i;
@@ -74,7 +74,7 @@ int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
74 * Again, cpu read rules concerning calls here are the same as for 74 * Again, cpu read rules concerning calls here are the same as for
75 * pci_unmap_single() above. 75 * pci_unmap_single() above.
76 */ 76 */
77void nommu_unmap_sg(struct device *dev, struct scatterlist *sg, 77static void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
78 int nents, int dir) 78 int nents, int dir)
79{ 79{
80} 80}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 4b4569abc60c..b2f405ea7c85 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -5,7 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7 7
8#include <asm/proto.h> 8#include <asm/iommu.h>
9#include <asm/swiotlb.h> 9#include <asm/swiotlb.h>
10#include <asm/dma.h> 10#include <asm/dma.h>
11 11
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 5909039f37aa..e7ac629d4c46 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -207,6 +207,7 @@ void cpu_idle (void)
207 if (__get_cpu_var(cpu_idle_state)) 207 if (__get_cpu_var(cpu_idle_state))
208 __get_cpu_var(cpu_idle_state) = 0; 208 __get_cpu_var(cpu_idle_state) = 0;
209 209
210 check_pgt_cache();
210 rmb(); 211 rmb();
211 idle = pm_idle; 212 idle = pm_idle;
212 if (!idle) 213 if (!idle)
@@ -278,7 +279,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
278 */ 279 */
279 if (!pm_idle) { 280 if (!pm_idle) {
280 if (!printed) { 281 if (!printed) {
281 printk("using mwait in idle threads.\n"); 282 printk(KERN_INFO "using mwait in idle threads.\n");
282 printed = 1; 283 printed = 1;
283 } 284 }
284 pm_idle = mwait_idle; 285 pm_idle = mwait_idle;
@@ -305,6 +306,7 @@ early_param("idle", idle_setup);
305void __show_regs(struct pt_regs * regs) 306void __show_regs(struct pt_regs * regs)
306{ 307{
307 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 308 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
309 unsigned long d0, d1, d2, d3, d6, d7;
308 unsigned int fsindex,gsindex; 310 unsigned int fsindex,gsindex;
309 unsigned int ds,cs,es; 311 unsigned int ds,cs,es;
310 312
@@ -340,15 +342,24 @@ void __show_regs(struct pt_regs * regs)
340 rdmsrl(MSR_GS_BASE, gs); 342 rdmsrl(MSR_GS_BASE, gs);
341 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 343 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
342 344
343 asm("movq %%cr0, %0": "=r" (cr0)); 345 cr0 = read_cr0();
344 asm("movq %%cr2, %0": "=r" (cr2)); 346 cr2 = read_cr2();
345 asm("movq %%cr3, %0": "=r" (cr3)); 347 cr3 = read_cr3();
346 asm("movq %%cr4, %0": "=r" (cr4)); 348 cr4 = read_cr4();
347 349
348 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 350 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
349 fs,fsindex,gs,gsindex,shadowgs); 351 fs,fsindex,gs,gsindex,shadowgs);
350 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 352 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
351 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 353 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
354
355 get_debugreg(d0, 0);
356 get_debugreg(d1, 1);
357 get_debugreg(d2, 2);
358 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
359 get_debugreg(d3, 3);
360 get_debugreg(d6, 6);
361 get_debugreg(d7, 7);
362 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
352} 363}
353 364
354void show_regs(struct pt_regs *regs) 365void show_regs(struct pt_regs *regs)
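This hunk, like the suspend.c and mm/fault.c hunks further down, replaces open-coded movq control-register asm with the read_cr*/write_cr* helpers, and the new dump uses get_debugreg(). A minimal sketch of the accessor style (illustrative use only):

	unsigned long cr3, dr7;

	cr3 = read_cr3();		/* was: asm("movq %%cr3, %0" : "=r" (cr3)) */
	write_cr3(cr3);			/* reloading CR3 flushes non-global TLB entries */

	get_debugreg(dr7, 7);		/* read DR7 into dr7 */
	set_debugreg(dr7, 7);		/* and write it back */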
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 7503068e788d..368db2b9c5ac 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -16,6 +16,7 @@
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/iommu.h>
19 20
20/* 21/*
21 * Power off function, if any 22 * Power off function, if any
@@ -81,6 +82,7 @@ static inline void kb_wait(void)
81void machine_shutdown(void) 82void machine_shutdown(void)
82{ 83{
83 unsigned long flags; 84 unsigned long flags;
85
84 /* Stop the cpus and apics */ 86 /* Stop the cpus and apics */
85#ifdef CONFIG_SMP 87#ifdef CONFIG_SMP
86 int reboot_cpu_id; 88 int reboot_cpu_id;
@@ -111,6 +113,8 @@ void machine_shutdown(void)
111 disable_IO_APIC(); 113 disable_IO_APIC();
112 114
113 local_irq_restore(flags); 115 local_irq_restore(flags);
116
117 pci_iommu_shutdown();
114} 118}
115 119
116void machine_emergency_restart(void) 120void machine_emergency_restart(void)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 33ef718f8cb5..af838f6b0b7f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -575,6 +575,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
575 level = cpuid_eax(1); 575 level = cpuid_eax(1);
576 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) 576 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
577 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); 577 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
578 if (c->x86 == 0x10)
579 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
578 580
579 /* Enable workaround for FXSAVE leak */ 581 /* Enable workaround for FXSAVE leak */
580 if (c->x86 >= 6) 582 if (c->x86 >= 6)
@@ -600,8 +602,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
600 if (c->extended_cpuid_level >= 0x80000008) 602 if (c->extended_cpuid_level >= 0x80000008)
601 amd_detect_cmp(c); 603 amd_detect_cmp(c);
602 604
603 /* Fix cpuid4 emulation for more */ 605 if (c->extended_cpuid_level >= 0x80000006 &&
604 num_cache_leaves = 3; 606 (cpuid_edx(0x80000006) & 0xf000))
607 num_cache_leaves = 4;
608 else
609 num_cache_leaves = 3;
610
611 if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
612 set_bit(X86_FEATURE_K8, &c->x86_capability);
605 613
606 /* RDTSC can be speculated around */ 614 /* RDTSC can be speculated around */
607 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); 615 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
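The new cache-leaf logic replaces the hard-coded 3: on AMD parts CPUID leaf 0x80000006 EDX describes the L3 cache, and a non-zero L3 associativity field in bits 15:12 means an L3 exists, so cpuid4 emulation should report four leaves instead of three. In isolation:

	unsigned int edx = cpuid_edx(0x80000006);

	if (edx & 0xf000)		/* EDX[15:12]: L3 associativity, non-zero => L3 present */
		num_cache_leaves = 4;	/* L1d, L1i, L2, L3 */
	else
		num_cache_leaves = 3;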
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 290f5d8037cd..739175b01e06 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -26,6 +26,7 @@
26#include <asm/i387.h> 26#include <asm/i387.h>
27#include <asm/proto.h> 27#include <asm/proto.h>
28#include <asm/ia32_unistd.h> 28#include <asm/ia32_unistd.h>
29#include <asm/mce.h>
29 30
30/* #define DEBUG_SIG 1 */ 31/* #define DEBUG_SIG 1 */
31 32
@@ -472,6 +473,12 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
472 clear_thread_flag(TIF_SINGLESTEP); 473 clear_thread_flag(TIF_SINGLESTEP);
473 } 474 }
474 475
476#ifdef CONFIG_X86_MCE
477 /* notify userspace of pending MCEs */
478 if (thread_info_flags & _TIF_MCE_NOTIFY)
479 mce_notify_user();
480#endif /* CONFIG_X86_MCE */
481
475 /* deal with pending signal delivery */ 482 /* deal with pending signal delivery */
476 if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK)) 483 if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
477 do_signal(regs); 484 do_signal(regs);
@@ -480,7 +487,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
480void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 487void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
481{ 488{
482 struct task_struct *me = current; 489 struct task_struct *me = current;
483 if (exception_trace) 490 if (show_unhandled_signals && printk_ratelimit())
484 printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n", 491 printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
485 me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 492 me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax);
486 493
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 0694940b2e73..673a300b5944 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
241 } 241 }
242 if (!cpus_empty(cpu_mask)) 242 if (!cpus_empty(cpu_mask))
243 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 243 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
244 244 check_pgt_cache();
245 preempt_enable(); 245 preempt_enable();
246} 246}
247EXPORT_SYMBOL(flush_tlb_mm); 247EXPORT_SYMBOL(flush_tlb_mm);
@@ -386,9 +386,9 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
386 return 0; 386 return 0;
387 } 387 }
388 388
389 spin_lock_bh(&call_lock); 389 spin_lock(&call_lock);
390 __smp_call_function_single(cpu, func, info, nonatomic, wait); 390 __smp_call_function_single(cpu, func, info, nonatomic, wait);
391 spin_unlock_bh(&call_lock); 391 spin_unlock(&call_lock);
392 put_cpu(); 392 put_cpu();
393 return 0; 393 return 0;
394} 394}
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index 6a5a98f2a75c..ea83a9f91965 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -55,11 +55,11 @@ void __save_processor_state(struct saved_context *ctxt)
55 * control registers 55 * control registers
56 */ 56 */
57 rdmsrl(MSR_EFER, ctxt->efer); 57 rdmsrl(MSR_EFER, ctxt->efer);
58 asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0)); 58 ctxt->cr0 = read_cr0();
59 asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2)); 59 ctxt->cr2 = read_cr2();
60 asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3)); 60 ctxt->cr3 = read_cr3();
61 asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4)); 61 ctxt->cr4 = read_cr4();
62 asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8)); 62 ctxt->cr8 = read_cr8();
63} 63}
64 64
65void save_processor_state(void) 65void save_processor_state(void)
@@ -81,11 +81,11 @@ void __restore_processor_state(struct saved_context *ctxt)
81 * control registers 81 * control registers
82 */ 82 */
83 wrmsrl(MSR_EFER, ctxt->efer); 83 wrmsrl(MSR_EFER, ctxt->efer);
84 asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8)); 84 write_cr8(ctxt->cr8);
85 asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4)); 85 write_cr4(ctxt->cr4);
86 asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3)); 86 write_cr3(ctxt->cr3);
87 asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2)); 87 write_cr2(ctxt->cr2);
88 asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0)); 88 write_cr0(ctxt->cr0);
89 89
90 /* 90 /*
91 * now restore the descriptor tables to their proper values 91 * now restore the descriptor tables to their proper values
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index f61fb8e4f129..3aeae2fa2e24 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -136,9 +136,9 @@ int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
136 struct iommu_table *tbl; 136 struct iommu_table *tbl;
137 int ret; 137 int ret;
138 138
139 if (dev->sysdata) { 139 if (pci_iommu(dev->bus)) {
140 printk(KERN_ERR "Calgary: dev %p has sysdata %p\n", 140 printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
141 dev, dev->sysdata); 141 dev, pci_iommu(dev->bus));
142 BUG(); 142 BUG();
143 } 143 }
144 144
@@ -155,11 +155,7 @@ int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
155 155
156 tbl->bbar = bbar; 156 tbl->bbar = bbar;
157 157
158 /* 158 set_pci_iommu(dev->bus, tbl);
159 * NUMA is already using the bus's sysdata pointer, so we use
160 * the bus's pci_dev's sysdata instead.
161 */
162 dev->sysdata = tbl;
163 159
164 return 0; 160 return 0;
165 161
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 4a0895bacf51..6d48a4e826d9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -33,6 +33,7 @@
33#include <acpi/acpi_bus.h> 33#include <acpi/acpi_bus.h>
34#endif 34#endif
35#include <asm/8253pit.h> 35#include <asm/8253pit.h>
36#include <asm/i8253.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/vsyscall.h> 38#include <asm/vsyscall.h>
38#include <asm/timex.h> 39#include <asm/timex.h>
@@ -44,12 +45,14 @@
44#include <asm/hpet.h> 45#include <asm/hpet.h>
45#include <asm/mpspec.h> 46#include <asm/mpspec.h>
46#include <asm/nmi.h> 47#include <asm/nmi.h>
48#include <asm/vgtod.h>
47 49
48static char *timename = NULL; 50static char *timename = NULL;
49 51
50DEFINE_SPINLOCK(rtc_lock); 52DEFINE_SPINLOCK(rtc_lock);
51EXPORT_SYMBOL(rtc_lock); 53EXPORT_SYMBOL(rtc_lock);
52DEFINE_SPINLOCK(i8253_lock); 54DEFINE_SPINLOCK(i8253_lock);
55EXPORT_SYMBOL(i8253_lock);
53 56
54volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; 57volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
55 58
@@ -79,8 +82,9 @@ EXPORT_SYMBOL(profile_pc);
79 * sheet for details. 82 * sheet for details.
80 */ 83 */
81 84
82static void set_rtc_mmss(unsigned long nowtime) 85static int set_rtc_mmss(unsigned long nowtime)
83{ 86{
87 int retval = 0;
84 int real_seconds, real_minutes, cmos_minutes; 88 int real_seconds, real_minutes, cmos_minutes;
85 unsigned char control, freq_select; 89 unsigned char control, freq_select;
86 90
@@ -120,6 +124,7 @@ static void set_rtc_mmss(unsigned long nowtime)
120 if (abs(real_minutes - cmos_minutes) >= 30) { 124 if (abs(real_minutes - cmos_minutes) >= 30) {
121 printk(KERN_WARNING "time.c: can't update CMOS clock " 125 printk(KERN_WARNING "time.c: can't update CMOS clock "
122 "from %d to %d\n", cmos_minutes, real_minutes); 126 "from %d to %d\n", cmos_minutes, real_minutes);
127 retval = -1;
123 } else { 128 } else {
124 BIN_TO_BCD(real_seconds); 129 BIN_TO_BCD(real_seconds);
125 BIN_TO_BCD(real_minutes); 130 BIN_TO_BCD(real_minutes);
@@ -139,12 +144,17 @@ static void set_rtc_mmss(unsigned long nowtime)
139 CMOS_WRITE(freq_select, RTC_FREQ_SELECT); 144 CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
140 145
141 spin_unlock(&rtc_lock); 146 spin_unlock(&rtc_lock);
147
148 return retval;
142} 149}
143 150
151int update_persistent_clock(struct timespec now)
152{
153 return set_rtc_mmss(now.tv_sec);
154}
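With set_rtc_mmss() now returning a status and wrapped by update_persistent_clock(), the periodic CMOS sync moves out of main_timer_handler() (removed below) and into the generic timekeeping/NTP code, which drives these two hooks. Their roles, sketched as prototypes:

	/* Seeds the system time at boot (and after resume) from the CMOS RTC. */
	unsigned long read_persistent_clock(void);

	/* Called by the NTP-synced timekeeping code roughly every 11 minutes;
	 * a non-zero return tells the core that the RTC write failed. */
	int update_persistent_clock(struct timespec now);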
144 155
145void main_timer_handler(void) 156void main_timer_handler(void)
146{ 157{
147 static unsigned long rtc_update = 0;
148/* 158/*
149 * Here we are in the timer irq handler. We have irqs locally disabled (so we 159 * Here we are in the timer irq handler. We have irqs locally disabled (so we
150 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running 160 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
@@ -172,20 +182,6 @@ void main_timer_handler(void)
172 if (!using_apic_timer) 182 if (!using_apic_timer)
173 smp_local_timer_interrupt(); 183 smp_local_timer_interrupt();
174 184
175/*
176 * If we have an externally synchronized Linux clock, then update CMOS clock
177 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
178 * closest to exactly 500 ms before the next second. If the update fails, we
179 * don't care, as it'll be updated on the next turn, and the problem (time way
180 * off) isn't likely to go away much sooner anyway.
181 */
182
183 if (ntp_synced() && xtime.tv_sec > rtc_update &&
184 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
185 set_rtc_mmss(xtime.tv_sec);
186 rtc_update = xtime.tv_sec + 660;
187 }
188
189 write_sequnlock(&xtime_lock); 185 write_sequnlock(&xtime_lock);
190} 186}
191 187
@@ -199,7 +195,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
199 return IRQ_HANDLED; 195 return IRQ_HANDLED;
200} 196}
201 197
202static unsigned long get_cmos_time(void) 198unsigned long read_persistent_clock(void)
203{ 199{
204 unsigned int year, mon, day, hour, min, sec; 200 unsigned int year, mon, day, hour, min, sec;
205 unsigned long flags; 201 unsigned long flags;
@@ -226,7 +222,7 @@ static unsigned long get_cmos_time(void)
226 /* 222 /*
227 * We know that x86-64 always uses BCD format, no need to check the 223 * We know that x86-64 always uses BCD format, no need to check the
228 * config register. 224 * config register.
229 */ 225 */
230 226
231 BCD_TO_BIN(sec); 227 BCD_TO_BIN(sec);
232 BCD_TO_BIN(min); 228 BCD_TO_BIN(min);
@@ -239,11 +235,11 @@ static unsigned long get_cmos_time(void)
239 BCD_TO_BIN(century); 235 BCD_TO_BIN(century);
240 year += century * 100; 236 year += century * 100;
241 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); 237 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
242 } else { 238 } else {
243 /* 239 /*
244 * x86-64 systems only exists since 2002. 240 * x86-64 systems only exists since 2002.
245 * This will work up to Dec 31, 2100 241 * This will work up to Dec 31, 2100
246 */ 242 */
247 year += 2000; 243 year += 2000;
248 } 244 }
249 245
@@ -255,45 +251,45 @@ static unsigned long get_cmos_time(void)
255#define TICK_COUNT 100000000 251#define TICK_COUNT 100000000
256static unsigned int __init tsc_calibrate_cpu_khz(void) 252static unsigned int __init tsc_calibrate_cpu_khz(void)
257{ 253{
258 int tsc_start, tsc_now; 254 int tsc_start, tsc_now;
259 int i, no_ctr_free; 255 int i, no_ctr_free;
260 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0; 256 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
261 unsigned long flags; 257 unsigned long flags;
262 258
263 for (i = 0; i < 4; i++) 259 for (i = 0; i < 4; i++)
264 if (avail_to_resrv_perfctr_nmi_bit(i)) 260 if (avail_to_resrv_perfctr_nmi_bit(i))
265 break; 261 break;
266 no_ctr_free = (i == 4); 262 no_ctr_free = (i == 4);
267 if (no_ctr_free) { 263 if (no_ctr_free) {
268 i = 3; 264 i = 3;
269 rdmsrl(MSR_K7_EVNTSEL3, evntsel3); 265 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
270 wrmsrl(MSR_K7_EVNTSEL3, 0); 266 wrmsrl(MSR_K7_EVNTSEL3, 0);
271 rdmsrl(MSR_K7_PERFCTR3, pmc3); 267 rdmsrl(MSR_K7_PERFCTR3, pmc3);
272 } else { 268 } else {
273 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i); 269 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
274 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i); 270 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
275 } 271 }
276 local_irq_save(flags); 272 local_irq_save(flags);
277 /* start meauring cycles, incrementing from 0 */ 273 /* start meauring cycles, incrementing from 0 */
278 wrmsrl(MSR_K7_PERFCTR0 + i, 0); 274 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
279 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76); 275 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
280 rdtscl(tsc_start); 276 rdtscl(tsc_start);
281 do { 277 do {
282 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now); 278 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
283 tsc_now = get_cycles_sync(); 279 tsc_now = get_cycles_sync();
284 } while ((tsc_now - tsc_start) < TICK_COUNT); 280 } while ((tsc_now - tsc_start) < TICK_COUNT);
285 281
286 local_irq_restore(flags); 282 local_irq_restore(flags);
287 if (no_ctr_free) { 283 if (no_ctr_free) {
288 wrmsrl(MSR_K7_EVNTSEL3, 0); 284 wrmsrl(MSR_K7_EVNTSEL3, 0);
289 wrmsrl(MSR_K7_PERFCTR3, pmc3); 285 wrmsrl(MSR_K7_PERFCTR3, pmc3);
290 wrmsrl(MSR_K7_EVNTSEL3, evntsel3); 286 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
291 } else { 287 } else {
292 release_perfctr_nmi(MSR_K7_PERFCTR0 + i); 288 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
293 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); 289 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
294 } 290 }
295 291
296 return pmc_now * tsc_khz / (tsc_now - tsc_start); 292 return pmc_now * tsc_khz / (tsc_now - tsc_start);
297} 293}
298 294
299/* 295/*
@@ -321,7 +317,7 @@ static unsigned int __init pit_calibrate_tsc(void)
321 end = get_cycles_sync(); 317 end = get_cycles_sync();
322 318
323 spin_unlock_irqrestore(&i8253_lock, flags); 319 spin_unlock_irqrestore(&i8253_lock, flags);
324 320
325 return (end - start) / 50; 321 return (end - start) / 50;
326} 322}
327 323
@@ -366,25 +362,20 @@ static struct irqaction irq0 = {
366 .handler = timer_interrupt, 362 .handler = timer_interrupt,
367 .flags = IRQF_DISABLED | IRQF_IRQPOLL, 363 .flags = IRQF_DISABLED | IRQF_IRQPOLL,
368 .mask = CPU_MASK_NONE, 364 .mask = CPU_MASK_NONE,
369 .name = "timer" 365 .name = "timer"
370}; 366};
371 367
372void __init time_init(void) 368void __init time_init(void)
373{ 369{
374 if (nohpet) 370 if (nohpet)
375 hpet_address = 0; 371 hpet_address = 0;
376 xtime.tv_sec = get_cmos_time();
377 xtime.tv_nsec = 0;
378
379 set_normalized_timespec(&wall_to_monotonic,
380 -xtime.tv_sec, -xtime.tv_nsec);
381 372
382 if (hpet_arch_init()) 373 if (hpet_arch_init())
383 hpet_address = 0; 374 hpet_address = 0;
384 375
385 if (hpet_use_timer) { 376 if (hpet_use_timer) {
386 /* set tick_nsec to use the proper rate for HPET */ 377 /* set tick_nsec to use the proper rate for HPET */
387 tick_nsec = TICK_NSEC_HPET; 378 tick_nsec = TICK_NSEC_HPET;
388 tsc_khz = hpet_calibrate_tsc(); 379 tsc_khz = hpet_calibrate_tsc();
389 timename = "HPET"; 380 timename = "HPET";
390 } else { 381 } else {
@@ -415,54 +406,21 @@ void __init time_init(void)
415 setup_irq(0, &irq0); 406 setup_irq(0, &irq0);
416} 407}
417 408
418
419static long clock_cmos_diff;
420static unsigned long sleep_start;
421
422/* 409/*
423 * sysfs support for the timer. 410 * sysfs support for the timer.
424 */ 411 */
425 412
426static int timer_suspend(struct sys_device *dev, pm_message_t state) 413static int timer_suspend(struct sys_device *dev, pm_message_t state)
427{ 414{
428 /*
429 * Estimate time zone so that set_time can update the clock
430 */
431 long cmos_time = get_cmos_time();
432
433 clock_cmos_diff = -cmos_time;
434 clock_cmos_diff += get_seconds();
435 sleep_start = cmos_time;
436 return 0; 415 return 0;
437} 416}
438 417
439static int timer_resume(struct sys_device *dev) 418static int timer_resume(struct sys_device *dev)
440{ 419{
441 unsigned long flags;
442 unsigned long sec;
443 unsigned long ctime = get_cmos_time();
444 long sleep_length = (ctime - sleep_start) * HZ;
445
446 if (sleep_length < 0) {
447 printk(KERN_WARNING "Time skew detected in timer resume!\n");
448 /* The time after the resume must not be earlier than the time
449 * before the suspend or some nasty things will happen
450 */
451 sleep_length = 0;
452 ctime = sleep_start;
453 }
454 if (hpet_address) 420 if (hpet_address)
455 hpet_reenable(); 421 hpet_reenable();
456 else 422 else
457 i8254_timer_resume(); 423 i8254_timer_resume();
458
459 sec = ctime + clock_cmos_diff;
460 write_seqlock_irqsave(&xtime_lock,flags);
461 xtime.tv_sec = sec;
462 xtime.tv_nsec = 0;
463 jiffies += sleep_length;
464 write_sequnlock_irqrestore(&xtime_lock,flags);
465 touch_softlockup_watchdog();
466 return 0; 424 return 0;
467} 425}
468 426
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8713ad4a4db1..03888420775d 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -584,7 +584,8 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
584 tsk->thread.error_code = error_code; 584 tsk->thread.error_code = error_code;
585 tsk->thread.trap_no = trapnr; 585 tsk->thread.trap_no = trapnr;
586 586
587 if (exception_trace && unhandled_signal(tsk, signr)) 587 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
588 printk_ratelimit())
588 printk(KERN_INFO 589 printk(KERN_INFO
589 "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n", 590 "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
590 tsk->comm, tsk->pid, str, 591 tsk->comm, tsk->pid, str,
@@ -688,7 +689,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
688 tsk->thread.error_code = error_code; 689 tsk->thread.error_code = error_code;
689 tsk->thread.trap_no = 13; 690 tsk->thread.trap_no = 13;
690 691
691 if (exception_trace && unhandled_signal(tsk, SIGSEGV)) 692 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
693 printk_ratelimit())
692 printk(KERN_INFO 694 printk(KERN_INFO
693 "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n", 695 "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
694 tsk->comm, tsk->pid, 696 tsk->comm, tsk->pid,
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index e850aa01e1b3..9b76b03d0600 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -61,25 +61,9 @@ inline int check_tsc_unstable(void)
61 * first tick after the change will be slightly wrong. 61 * first tick after the change will be slightly wrong.
62 */ 62 */
63 63
64#include <linux/workqueue.h> 64static unsigned int ref_freq;
65 65static unsigned long loops_per_jiffy_ref;
66static unsigned int cpufreq_delayed_issched = 0; 66static unsigned long tsc_khz_ref;
67static unsigned int cpufreq_init = 0;
68static struct work_struct cpufreq_delayed_get_work;
69
70static void handle_cpufreq_delayed_get(struct work_struct *v)
71{
72 unsigned int cpu;
73 for_each_online_cpu(cpu) {
74 cpufreq_get(cpu);
75 }
76 cpufreq_delayed_issched = 0;
77}
78
79static unsigned int ref_freq = 0;
80static unsigned long loops_per_jiffy_ref = 0;
81
82static unsigned long tsc_khz_ref = 0;
83 67
84static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 68static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
85 void *data) 69 void *data)
@@ -125,10 +109,8 @@ static struct notifier_block time_cpufreq_notifier_block = {
125 109
126static int __init cpufreq_tsc(void) 110static int __init cpufreq_tsc(void)
127{ 111{
128 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); 112 cpufreq_register_notifier(&time_cpufreq_notifier_block,
129 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, 113 CPUFREQ_TRANSITION_NOTIFIER);
130 CPUFREQ_TRANSITION_NOTIFIER))
131 cpufreq_init = 1;
132 return 0; 114 return 0;
133} 115}
134 116
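The slimmed-down registration above uses the stock cpufreq transition-notifier interface directly, dropping the delayed-work indirection. A compact sketch of that interface with placeholder names (sketch_* is not from this patch):

	static int sketch_freq_notifier(struct notifier_block *nb, unsigned long val, void *data)
	{
		struct cpufreq_freqs *freq = data;	/* carries cpu, old and new frequency */

		if (val == CPUFREQ_POSTCHANGE && freq->old != freq->new)
			;	/* rescale tsc_khz and loops_per_jiffy from freq->old/freq->new here */
		return 0;
	}

	static struct notifier_block sketch_notifier = {
		.notifier_call = sketch_freq_notifier,
	};

	static int __init sketch_register(void)
	{
		return cpufreq_register_notifier(&sketch_notifier, CPUFREQ_TRANSITION_NOTIFIER);
	}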
@@ -153,17 +135,18 @@ __cpuinit int unsynchronized_tsc(void)
153#endif 135#endif
154 /* Most intel systems have synchronized TSCs except for 136 /* Most intel systems have synchronized TSCs except for
155 multi node systems */ 137 multi node systems */
156 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 138 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
157#ifdef CONFIG_ACPI 139#ifdef CONFIG_ACPI
158 /* But TSC doesn't tick in C3 so don't use it there */ 140 /* But TSC doesn't tick in C3 so don't use it there */
159 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000) 141 if (acpi_gbl_FADT.header.length > 0 &&
142 acpi_gbl_FADT.C3latency < 1000)
160 return 1; 143 return 1;
161#endif 144#endif
162 return 0; 145 return 0;
163 } 146 }
164 147
165 /* Assume multi socket systems are not synchronized */ 148 /* Assume multi socket systems are not synchronized */
166 return num_present_cpus() > 1; 149 return num_present_cpus() > 1;
167} 150}
168 151
169int __init notsc_setup(char *s) 152int __init notsc_setup(char *s)
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 5c57ea4591c1..ba8ea97abd21 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -28,7 +28,7 @@ SECTIONS
28 _text = .; /* Text and read-only data */ 28 _text = .; /* Text and read-only data */
29 .text : AT(ADDR(.text) - LOAD_OFFSET) { 29 .text : AT(ADDR(.text) - LOAD_OFFSET) {
30 /* First the code that has to be first for bootstrapping */ 30 /* First the code that has to be first for bootstrapping */
31 *(.bootstrap.text) 31 *(.text.head)
32 _stext = .; 32 _stext = .;
33 /* Then the rest */ 33 /* Then the rest */
34 TEXT_TEXT 34 TEXT_TEXT
@@ -54,6 +54,13 @@ SECTIONS
54 54
55 RODATA 55 RODATA
56 56
57 . = ALIGN(4);
58 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
59 __tracedata_start = .;
60 *(.tracedata)
61 __tracedata_end = .;
62 }
63
57 . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ 64 . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
58 /* Data */ 65 /* Data */
59 .data : AT(ADDR(.data) - LOAD_OFFSET) { 66 .data : AT(ADDR(.data) - LOAD_OFFSET) {
@@ -93,6 +100,9 @@ SECTIONS
93 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) 100 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
94 { *(.vsyscall_gtod_data) } 101 { *(.vsyscall_gtod_data) }
95 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data); 102 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
103 .vsyscall_clock : AT(VLOAD(.vsyscall_clock))
104 { *(.vsyscall_clock) }
105 vsyscall_clock = VVIRT(.vsyscall_clock);
96 106
97 107
98 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) 108 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
@@ -133,20 +143,11 @@ SECTIONS
133 /* might get freed after init */ 143 /* might get freed after init */
134 . = ALIGN(4096); 144 . = ALIGN(4096);
135 __smp_alt_begin = .; 145 __smp_alt_begin = .;
136 __smp_alt_instructions = .;
137 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
138 *(.smp_altinstructions)
139 }
140 __smp_alt_instructions_end = .;
141 . = ALIGN(8);
142 __smp_locks = .; 146 __smp_locks = .;
143 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { 147 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
144 *(.smp_locks) 148 *(.smp_locks)
145 } 149 }
146 __smp_locks_end = .; 150 __smp_locks_end = .;
147 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
148 *(.smp_altinstr_replacement)
149 }
150 . = ALIGN(4096); 151 . = ALIGN(4096);
151 __smp_alt_end = .; 152 __smp_alt_end = .;
152 153
@@ -189,6 +190,12 @@ SECTIONS
189 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } 190 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
190 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } 191 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
191 192
193/* vdso blob that is mapped into user space */
194 vdso_start = . ;
195 .vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
196 . = ALIGN(4096);
197 vdso_end = .;
198
192#ifdef CONFIG_BLK_DEV_INITRD 199#ifdef CONFIG_BLK_DEV_INITRD
193 . = ALIGN(4096); 200 . = ALIGN(4096);
194 __initramfs_start = .; 201 __initramfs_start = .;
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 57660d58d500..06c34949bfdc 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -42,6 +42,7 @@
42#include <asm/segment.h> 42#include <asm/segment.h>
43#include <asm/desc.h> 43#include <asm/desc.h>
44#include <asm/topology.h> 44#include <asm/topology.h>
45#include <asm/vgtod.h>
45 46
46#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) 47#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
47#define __syscall_clobber "r11","rcx","memory" 48#define __syscall_clobber "r11","rcx","memory"
@@ -57,26 +58,9 @@
57 * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64) 58 * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
58 * Try to keep this structure as small as possible to avoid cache line ping pongs 59 * Try to keep this structure as small as possible to avoid cache line ping pongs
59 */ 60 */
60struct vsyscall_gtod_data_t {
61 seqlock_t lock;
62
63 /* open coded 'struct timespec' */
64 time_t wall_time_sec;
65 u32 wall_time_nsec;
66
67 int sysctl_enabled;
68 struct timezone sys_tz;
69 struct { /* extract of a clocksource struct */
70 cycle_t (*vread)(void);
71 cycle_t cycle_last;
72 cycle_t mask;
73 u32 mult;
74 u32 shift;
75 } clock;
76};
77int __vgetcpu_mode __section_vgetcpu_mode; 61int __vgetcpu_mode __section_vgetcpu_mode;
78 62
79struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data = 63struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
80{ 64{
81 .lock = SEQLOCK_UNLOCKED, 65 .lock = SEQLOCK_UNLOCKED,
82 .sysctl_enabled = 1, 66 .sysctl_enabled = 1,
@@ -96,6 +80,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
96 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 80 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
97 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 81 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
98 vsyscall_gtod_data.sys_tz = sys_tz; 82 vsyscall_gtod_data.sys_tz = sys_tz;
83 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
84 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
99 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 85 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
100} 86}
101 87
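struct vsyscall_gtod_data now comes from asm/vgtod.h (hence the new include), so the vsyscall/vDSO readers and this writer share one definition. The writer holds the seqlock as above; a reader follows the usual retry loop, roughly:

	unsigned seq;
	unsigned long sec, nsec;

	do {
		seq  = read_seqbegin(&__vsyscall_gtod_data.lock);
		sec  = __vsyscall_gtod_data.wall_time_sec;
		nsec = __vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));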
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 84f11728fc76..327c9f2fa626 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -159,7 +159,7 @@ void dump_pagetable(unsigned long address)
159 pmd_t *pmd; 159 pmd_t *pmd;
160 pte_t *pte; 160 pte_t *pte;
161 161
162 asm("movq %%cr3,%0" : "=r" (pgd)); 162 pgd = (pgd_t *)read_cr3();
163 163
164 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); 164 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
165 pgd += pgd_index(address); 165 pgd += pgd_index(address);
@@ -221,16 +221,6 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
221 return 0; 221 return 0;
222} 222}
223 223
224int unhandled_signal(struct task_struct *tsk, int sig)
225{
226 if (is_init(tsk))
227 return 1;
228 if (tsk->ptrace & PT_PTRACED)
229 return 0;
230 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
231 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
232}
233
234static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs, 224static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
235 unsigned long error_code) 225 unsigned long error_code)
236{ 226{
@@ -301,8 +291,8 @@ static int vmalloc_fault(unsigned long address)
301 return 0; 291 return 0;
302} 292}
303 293
304int page_fault_trace = 0; 294static int page_fault_trace;
305int exception_trace = 1; 295int show_unhandled_signals = 1;
306 296
307/* 297/*
308 * This routine handles page faults. It determines the address, 298 * This routine handles page faults. It determines the address,
@@ -326,7 +316,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
326 prefetchw(&mm->mmap_sem); 316 prefetchw(&mm->mmap_sem);
327 317
328 /* get the address */ 318 /* get the address */
329 __asm__("movq %%cr2,%0":"=r" (address)); 319 address = read_cr2();
330 320
331 info.si_code = SEGV_MAPERR; 321 info.si_code = SEGV_MAPERR;
332 322
@@ -494,7 +484,8 @@ bad_area_nosemaphore:
494 (address >> 32)) 484 (address >> 32))
495 return; 485 return;
496 486
497 if (exception_trace && unhandled_signal(tsk, SIGSEGV)) { 487 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
488 printk_ratelimit()) {
498 printk( 489 printk(
499 "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", 490 "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
500 tsk->pid > 1 ? KERN_INFO : KERN_EMERG, 491 tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
@@ -568,7 +559,7 @@ out_of_memory:
568 } 559 }
569 printk("VM: killing process %s\n", tsk->comm); 560 printk("VM: killing process %s\n", tsk->comm);
570 if (error_code & 4) 561 if (error_code & 4)
571 do_exit(SIGKILL); 562 do_group_exit(SIGKILL);
572 goto no_context; 563 goto no_context;
573 564
574do_sigbus: 565do_sigbus:
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 9a0e98accf04..38f5d6368006 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -383,7 +383,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
383 } 383 }
384 384
385 if (!after_bootmem) 385 if (!after_bootmem)
386 asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features)); 386 mmu_cr4_features = read_cr4();
387 __flush_tlb_all(); 387 __flush_tlb_all();
388} 388}
389 389
@@ -600,16 +600,6 @@ void mark_rodata_ro(void)
600{ 600{
601 unsigned long start = (unsigned long)_stext, end; 601 unsigned long start = (unsigned long)_stext, end;
602 602
603#ifdef CONFIG_HOTPLUG_CPU
604 /* It must still be possible to apply SMP alternatives. */
605 if (num_possible_cpus() > 1)
606 start = (unsigned long)_etext;
607#endif
608
609#ifdef CONFIG_KPROBES
610 start = (unsigned long)__start_rodata;
611#endif
612
613 end = (unsigned long)__end_rodata; 603 end = (unsigned long)__end_rodata;
614 start = (start + PAGE_SIZE - 1) & PAGE_MASK; 604 start = (start + PAGE_SIZE - 1) & PAGE_MASK;
615 end &= PAGE_MASK; 605 end &= PAGE_MASK;
@@ -697,41 +687,6 @@ int kern_addr_valid(unsigned long addr)
697 return pfn_valid(pte_pfn(*pte)); 687 return pfn_valid(pte_pfn(*pte));
698} 688}
699 689
700#ifdef CONFIG_SYSCTL
701#include <linux/sysctl.h>
702
703extern int exception_trace, page_fault_trace;
704
705static ctl_table debug_table2[] = {
706 {
707 .ctl_name = 99,
708 .procname = "exception-trace",
709 .data = &exception_trace,
710 .maxlen = sizeof(int),
711 .mode = 0644,
712 .proc_handler = proc_dointvec
713 },
714 {}
715};
716
717static ctl_table debug_root_table2[] = {
718 {
719 .ctl_name = CTL_DEBUG,
720 .procname = "debug",
721 .mode = 0555,
722 .child = debug_table2
723 },
724 {}
725};
726
727static __init int x8664_sysctl_init(void)
728{
729 register_sysctl_table(debug_root_table2);
730 return 0;
731}
732__initcall(x8664_sysctl_init);
733#endif
734
735/* A pseudo VMA to allow ptrace access for the vsyscall page. This only 690/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
736 covers the 64bit vsyscall page now. 32bit has a real VMA now and does 691 covers the 64bit vsyscall page now. 32bit has a real VMA now and does
737 not need special handling anymore. */ 692 not need special handling anymore. */
@@ -769,8 +724,17 @@ int in_gate_area_no_task(unsigned long addr)
769 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); 724 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
770} 725}
771 726
772void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) 727void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
773{ 728{
774 return __alloc_bootmem_core(pgdat->bdata, size, 729 return __alloc_bootmem_core(pgdat->bdata, size,
775 SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); 730 SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
776} 731}
732
733const char *arch_vma_name(struct vm_area_struct *vma)
734{
735 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
736 return "[vdso]";
737 if (vma == &gate_vma)
738 return "[vsyscall]";
739 return NULL;
740}
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index f983c75825d0..a96006f7ae0c 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -44,12 +44,12 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
44{ 44{
45 unsigned long prevbase; 45 unsigned long prevbase;
46 struct bootnode nodes[8]; 46 struct bootnode nodes[8];
47 int nodeid, i, nb; 47 int nodeid, i, j, nb;
48 unsigned char nodeids[8]; 48 unsigned char nodeids[8];
49 int found = 0; 49 int found = 0;
50 u32 reg; 50 u32 reg;
51 unsigned numnodes; 51 unsigned numnodes;
52 unsigned dualcore = 0; 52 unsigned num_cores;
53 53
54 if (!early_pci_allowed()) 54 if (!early_pci_allowed())
55 return -1; 55 return -1;
@@ -60,6 +60,9 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
60 60
61 printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb); 61 printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb);
62 62
63 num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
64 printk(KERN_INFO "CPU has %d num_cores\n", num_cores);
65
63 reg = read_pci_config(0, nb, 0, 0x60); 66 reg = read_pci_config(0, nb, 0, 0x60);
64 numnodes = ((reg >> 4) & 0xF) + 1; 67 numnodes = ((reg >> 4) & 0xF) + 1;
65 if (numnodes <= 1) 68 if (numnodes <= 1)
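The core count above is taken from CPUID leaf 0x80000008: ECX bits 7:0 hold NC, the number of cores per package minus one, so adding one gives the count later used to fan APIC IDs out per node. In isolation:

	unsigned int ecx = cpuid_ecx(0x80000008);
	unsigned int cores_per_node = (ecx & 0xff) + 1;	/* ECX[7:0] = NC = cores - 1 on AMD K8/K10 */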
@@ -73,8 +76,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
73 unsigned long base,limit; 76 unsigned long base,limit;
74 u32 nodeid; 77 u32 nodeid;
75 78
76 /* Undefined before E stepping, but hopefully 0 */
77 dualcore |= ((read_pci_config(0, nb, 3, 0xe8) >> 12) & 3) == 1;
78 base = read_pci_config(0, nb, 1, 0x40 + i*8); 79 base = read_pci_config(0, nb, 1, 0x40 + i*8);
79 limit = read_pci_config(0, nb, 1, 0x44 + i*8); 80 limit = read_pci_config(0, nb, 1, 0x44 + i*8);
80 81
@@ -170,8 +171,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
170 for (i = 0; i < 8; i++) { 171 for (i = 0; i < 8; i++) {
171 if (nodes[i].start != nodes[i].end) { 172 if (nodes[i].start != nodes[i].end) {
172 nodeid = nodeids[i]; 173 nodeid = nodeids[i];
173 apicid_to_node[nodeid << dualcore] = i; 174 for (j = 0; j < num_cores; j++)
174 apicid_to_node[(nodeid << dualcore) + dualcore] = i; 175 apicid_to_node[(nodeid * num_cores) + j] = i;
175 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 176 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
176 } 177 }
177 } 178 }
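
The patch replaces the old dual-core flag with a core count read from CPUID leaf 0x80000008 (ECX[7:0] holds cores-per-package minus one) and then maps num_cores consecutive APIC IDs to each node. A rough user-space sketch of the same CPUID computation, assuming GCC's <cpuid.h> helper is available; illustrative only, not part of the patch:

#include <stdio.h>
#include <cpuid.h>		/* __get_cpuid(), GCC wrapper around CPUID */

int main(void)
{
	unsigned int eax, ebx, ecx, edx, num_cores;

	/* Same field the patch reads via cpuid_ecx(0x80000008). */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;
	num_cores = (ecx & 0xff) + 1;

	/* With num_cores cores per node, node `nodeid` owns APIC IDs
	   nodeid * num_cores .. nodeid * num_cores + num_cores - 1,
	   which is exactly what the new apicid_to_node loop fills in. */
	printf("cores per package: %u\n", num_cores);
	return 0;
}
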
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 51548947ad3b..6da235522269 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -273,9 +273,6 @@ void __init numa_init_array(void)
273 273
274#ifdef CONFIG_NUMA_EMU 274#ifdef CONFIG_NUMA_EMU
275/* Numa emulation */ 275/* Numa emulation */
276#define E820_ADDR_HOLE_SIZE(start, end) \
277 (e820_hole_size((start) >> PAGE_SHIFT, (end) >> PAGE_SHIFT) << \
278 PAGE_SHIFT)
279char *cmdline __initdata; 276char *cmdline __initdata;
280 277
281/* 278/*
@@ -319,7 +316,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
319 return -1; 316 return -1;
320 if (num_nodes > MAX_NUMNODES) 317 if (num_nodes > MAX_NUMNODES)
321 num_nodes = MAX_NUMNODES; 318 num_nodes = MAX_NUMNODES;
322 size = (max_addr - *addr - E820_ADDR_HOLE_SIZE(*addr, max_addr)) / 319 size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
323 num_nodes; 320 num_nodes;
324 /* 321 /*
325 * Calculate the number of big nodes that can be allocated as a result 322 * Calculate the number of big nodes that can be allocated as a result
@@ -347,7 +344,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
347 if (i == num_nodes + node_start - 1) 344 if (i == num_nodes + node_start - 1)
348 end = max_addr; 345 end = max_addr;
349 else 346 else
350 while (end - *addr - E820_ADDR_HOLE_SIZE(*addr, end) < 347 while (end - *addr - e820_hole_size(*addr, end) <
351 size) { 348 size) {
352 end += FAKE_NODE_MIN_SIZE; 349 end += FAKE_NODE_MIN_SIZE;
353 if (end > max_addr) { 350 if (end > max_addr) {
@@ -476,18 +473,22 @@ out:
476 473
477 /* 474 /*
478 * We need to vacate all active ranges that may have been registered by 475 * We need to vacate all active ranges that may have been registered by
479 * SRAT. 476 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
477 * true. NUMA emulation has succeeded so we will not scan ACPI nodes.
480 */ 478 */
481 remove_all_active_ranges(); 479 remove_all_active_ranges();
480#ifdef CONFIG_ACPI_NUMA
481 acpi_numa = -1;
482#endif
482 for_each_node_mask(i, node_possible_map) { 483 for_each_node_mask(i, node_possible_map) {
483 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, 484 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
484 nodes[i].end >> PAGE_SHIFT); 485 nodes[i].end >> PAGE_SHIFT);
485 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 486 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
486 } 487 }
488 acpi_fake_nodes(nodes, num_nodes);
487 numa_init_array(); 489 numa_init_array();
488 return 0; 490 return 0;
489} 491}
490#undef E820_ADDR_HOLE_SIZE
491#endif /* CONFIG_NUMA_EMU */ 492#endif /* CONFIG_NUMA_EMU */
492 493
493void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) 494void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
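
After the macro removal, e820_hole_size() takes byte addresses directly and the per-node size in NUMA emulation is simply the usable (non-hole) memory divided by the node count. A toy model of that arithmetic with the hole lookup stubbed out (assumption: no holes); illustrative only, not kernel code:

#include <stdio.h>

typedef unsigned long long u64;

/* Stand-in for e820_hole_size(); the real one walks the e820 map. */
static u64 hole_size(u64 start, u64 end)
{
	(void)start; (void)end;
	return 0;
}

int main(void)
{
	u64 addr = 0, max_addr = 4ULL << 30;	/* pretend 4 GB of RAM */
	int num_nodes = 4;
	u64 size = (max_addr - addr - hole_size(addr, max_addr)) / num_nodes;

	printf("each emulated node gets %llu MB\n", size >> 20);
	return 0;
}
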
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 9148f4a4cec6..7e161c698af4 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -13,7 +13,7 @@
13#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
14#include <asm/io.h> 14#include <asm/io.h>
15 15
16static inline pte_t *lookup_address(unsigned long address) 16pte_t *lookup_address(unsigned long address)
17{ 17{
18 pgd_t *pgd = pgd_offset_k(address); 18 pgd_t *pgd = pgd_offset_k(address);
19 pud_t *pud; 19 pud_t *pud;
@@ -74,14 +74,12 @@ static void flush_kernel_map(void *arg)
74 struct page *pg; 74 struct page *pg;
75 75
76 /* When clflush is available always use it because it is 76 /* When clflush is available always use it because it is
77 much cheaper than WBINVD. Disable clflush for now because 77 much cheaper than WBINVD. */
78 the high level code is not ready yet */ 78 if (!cpu_has_clflush)
79 if (1 || !cpu_has_clflush)
80 asm volatile("wbinvd" ::: "memory"); 79 asm volatile("wbinvd" ::: "memory");
81 else list_for_each_entry(pg, l, lru) { 80 else list_for_each_entry(pg, l, lru) {
82 void *adr = page_address(pg); 81 void *adr = page_address(pg);
83 if (cpu_has_clflush) 82 cache_flush_page(adr);
84 cache_flush_page(adr);
85 } 83 }
86 __flush_tlb_all(); 84 __flush_tlb_all();
87} 85}
@@ -95,7 +93,8 @@ static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
95 93
96static inline void save_page(struct page *fpage) 94static inline void save_page(struct page *fpage)
97{ 95{
98 list_add(&fpage->lru, &deferred_pages); 96 if (!test_and_set_bit(PG_arch_1, &fpage->flags))
97 list_add(&fpage->lru, &deferred_pages);
99} 98}
100 99
101/* 100/*
@@ -129,9 +128,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
129 pte_t *kpte; 128 pte_t *kpte;
130 struct page *kpte_page; 129 struct page *kpte_page;
131 pgprot_t ref_prot2; 130 pgprot_t ref_prot2;
131
132 kpte = lookup_address(address); 132 kpte = lookup_address(address);
133 if (!kpte) return 0; 133 if (!kpte) return 0;
134 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); 134 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
135 BUG_ON(PageLRU(kpte_page));
136 BUG_ON(PageCompound(kpte_page));
135 if (pgprot_val(prot) != pgprot_val(ref_prot)) { 137 if (pgprot_val(prot) != pgprot_val(ref_prot)) {
136 if (!pte_huge(*kpte)) { 138 if (!pte_huge(*kpte)) {
137 set_pte(kpte, pfn_pte(pfn, prot)); 139 set_pte(kpte, pfn_pte(pfn, prot));
@@ -159,10 +161,9 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
159 /* on x86-64 the direct mapping set at boot is not using 4k pages */ 161 /* on x86-64 the direct mapping set at boot is not using 4k pages */
160 BUG_ON(PageReserved(kpte_page)); 162 BUG_ON(PageReserved(kpte_page));
161 163
162 if (page_private(kpte_page) == 0) { 164 save_page(kpte_page);
163 save_page(kpte_page); 165 if (page_private(kpte_page) == 0)
164 revert_page(address, ref_prot); 166 revert_page(address, ref_prot);
165 }
166 return 0; 167 return 0;
167} 168}
168 169
@@ -234,6 +235,10 @@ void global_flush_tlb(void)
234 flush_map(&l); 235 flush_map(&l);
235 236
236 list_for_each_entry_safe(pg, next, &l, lru) { 237 list_for_each_entry_safe(pg, next, &l, lru) {
238 list_del(&pg->lru);
239 clear_bit(PG_arch_1, &pg->flags);
240 if (page_private(pg) != 0)
241 continue;
237 ClearPagePrivate(pg); 242 ClearPagePrivate(pg);
238 __free_page(pg); 243 __free_page(pg);
239 } 244 }
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1e76bb0a7277..acdf03e19146 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -106,9 +106,9 @@ static __init int slit_valid(struct acpi_table_slit *slit)
106 for (j = 0; j < d; j++) { 106 for (j = 0; j < d; j++) {
107 u8 val = slit->entry[d*i + j]; 107 u8 val = slit->entry[d*i + j];
108 if (i == j) { 108 if (i == j) {
109 if (val != 10) 109 if (val != LOCAL_DISTANCE)
110 return 0; 110 return 0;
111 } else if (val <= 10) 111 } else if (val <= LOCAL_DISTANCE)
112 return 0; 112 return 0;
113 } 113 }
114 } 114 }
@@ -350,7 +350,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
350 350
351/* Sanity check to catch more bad SRATs (they are amazingly common). 351/* Sanity check to catch more bad SRATs (they are amazingly common).
352 Make sure the PXMs cover all memory. */ 352 Make sure the PXMs cover all memory. */
353static int nodes_cover_memory(void) 353static int __init nodes_cover_memory(const struct bootnode *nodes)
354{ 354{
355 int i; 355 int i;
356 unsigned long pxmram, e820ram; 356 unsigned long pxmram, e820ram;
@@ -394,6 +394,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
394{ 394{
395 int i; 395 int i;
396 396
397 if (acpi_numa <= 0)
398 return -1;
399
397 /* First clean up the node list */ 400 /* First clean up the node list */
398 for (i = 0; i < MAX_NUMNODES; i++) { 401 for (i = 0; i < MAX_NUMNODES; i++) {
399 cutoff_node(i, start, end); 402 cutoff_node(i, start, end);
@@ -403,10 +406,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
403 } 406 }
404 } 407 }
405 408
406 if (acpi_numa <= 0) 409 if (!nodes_cover_memory(nodes)) {
407 return -1;
408
409 if (!nodes_cover_memory()) {
410 bad_srat(); 410 bad_srat();
411 return -1; 411 return -1;
412 } 412 }
@@ -440,6 +440,86 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
440 return 0; 440 return 0;
441} 441}
442 442
443#ifdef CONFIG_NUMA_EMU
444static int __init find_node_by_addr(unsigned long addr)
445{
446 int ret = NUMA_NO_NODE;
447 int i;
448
449 for_each_node_mask(i, nodes_parsed) {
450 /*
 451 * Find the real node that this emulated node appears on. For
 452 * the sake of simplicity, we only use the emulated node's starting
 453 * address to determine which real node it appears on.
454 */
455 if (addr >= nodes[i].start && addr < nodes[i].end) {
456 ret = i;
457 break;
458 }
459 }
 460 return ret;
461}
462
463/*
 464 * In NUMA emulation, we need to set up proximity domain (_PXM) to node ID
465 * mappings that respect the real ACPI topology but reflect our emulated
466 * environment. For each emulated node, we find which real node it appears on
467 * and create PXM to NID mappings for those fake nodes which mirror that
468 * locality. SLIT will now represent the correct distances between emulated
469 * nodes as a result of the real topology.
470 */
471void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
472{
473 int i, j;
474 int fake_node_to_pxm_map[MAX_NUMNODES] = {
475 [0 ... MAX_NUMNODES-1] = PXM_INVAL
476 };
477 unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
478 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
479 };
480
481 printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
482 "topology.\n");
483 for (i = 0; i < num_nodes; i++) {
484 int nid, pxm;
485
486 nid = find_node_by_addr(fake_nodes[i].start);
487 if (nid == NUMA_NO_NODE)
488 continue;
489 pxm = node_to_pxm(nid);
490 if (pxm == PXM_INVAL)
491 continue;
492 fake_node_to_pxm_map[i] = pxm;
493 /*
494 * For each apicid_to_node mapping that exists for this real
495 * node, it must now point to the fake node ID.
496 */
497 for (j = 0; j < MAX_LOCAL_APIC; j++)
498 if (apicid_to_node[j] == nid)
499 fake_apicid_to_node[j] = i;
500 }
501 for (i = 0; i < num_nodes; i++)
502 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
503 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
504
505 nodes_clear(nodes_parsed);
506 for (i = 0; i < num_nodes; i++)
507 if (fake_nodes[i].start != fake_nodes[i].end)
508 node_set(i, nodes_parsed);
509 WARN_ON(!nodes_cover_memory(fake_nodes));
510}
511
512static int null_slit_node_compare(int a, int b)
513{
514 return node_to_pxm(a) == node_to_pxm(b);
515}
516#else
517static int null_slit_node_compare(int a, int b)
518{
519 return a == b;
520}
521#endif /* CONFIG_NUMA_EMU */
522
443void __init srat_reserve_add_area(int nodeid) 523void __init srat_reserve_add_area(int nodeid)
444{ 524{
445 if (found_add_area && nodes_add[nodeid].end) { 525 if (found_add_area && nodes_add[nodeid].end) {
@@ -464,7 +544,8 @@ int __node_distance(int a, int b)
464 int index; 544 int index;
465 545
466 if (!acpi_slit) 546 if (!acpi_slit)
467 return a == b ? 10 : 20; 547 return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
548 REMOTE_DISTANCE;
468 index = acpi_slit->locality_count * node_to_pxm(a); 549 index = acpi_slit->locality_count * node_to_pxm(a);
469 return acpi_slit->entry[index + node_to_pxm(b)]; 550 return acpi_slit->entry[index + node_to_pxm(b)];
470} 551}
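
With a SLIT present, __node_distance() is just a row-major lookup into a locality_count x locality_count byte matrix indexed by proximity domain, with LOCAL_DISTANCE (10) on the diagonal; without one, the patch falls back to comparing PXMs. A minimal sketch of that lookup (not kernel code; names and the 2x2 table are illustrative):

#include <stdint.h>
#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

static int slit_distance(const uint8_t *entry, int locality_count,
			 int pxm_a, int pxm_b)
{
	if (!entry)	/* no SLIT: flat topology, as in the fallback path */
		return pxm_a == pxm_b ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return entry[locality_count * pxm_a + pxm_b];
}

int main(void)
{
	static const uint8_t slit[4] = { 10, 20, 20, 10 };	/* 2x2 example */

	printf("distance 0->1: %d\n", slit_distance(slit, 2, 0, 1));
	return 0;
}
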
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2a0..9cc813e29706 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -59,6 +59,8 @@ fill_mp_bus_to_cpumask(void)
59 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus); 59 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
60 j++) { 60 j++) {
61 struct pci_bus *bus; 61 struct pci_bus *bus;
62 struct pci_sysdata *sd;
63
62 long node = NODE_ID(nid); 64 long node = NODE_ID(nid);
63 /* Algorithm a bit dumb, but 65 /* Algorithm a bit dumb, but
64 it shouldn't matter here */ 66 it shouldn't matter here */
@@ -67,7 +69,9 @@ fill_mp_bus_to_cpumask(void)
67 continue; 69 continue;
68 if (!node_online(node)) 70 if (!node_online(node))
69 node = 0; 71 node = 0;
70 bus->sysdata = (void *)node; 72
73 sd = bus->sysdata;
74 sd->node = node;
71 } 75 }
72 } 76 }
73 } 77 }
diff --git a/arch/x86_64/vdso/Makefile b/arch/x86_64/vdso/Makefile
new file mode 100644
index 000000000000..faaa72fb250c
--- /dev/null
+++ b/arch/x86_64/vdso/Makefile
@@ -0,0 +1,49 @@
1#
2# x86-64 vDSO.
3#
4
5# files to link into the vdso
6# vdso-start.o has to be first
7vobjs-y := vdso-start.o vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
8
9# files to link into kernel
10obj-y := vma.o vdso.o vdso-syms.o
11
12vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
13
14$(obj)/vdso.o: $(obj)/vdso.so
15
16targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o
17
18# The DSO images are built using a special linker script.
19quiet_cmd_syscall = SYSCALL $@
20 cmd_syscall = $(CC) -m elf_x86_64 -nostdlib $(SYSCFLAGS_$(@F)) \
21 -Wl,-T,$(filter-out FORCE,$^) -o $@
22
23export CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
24
25vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
27 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
28SYSCFLAGS_vdso.so = $(vdso-flags)
29
30$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
31
32$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
33 $(call if_changed,syscall)
34
35CF := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
36
37$(obj)/vclock_gettime.o: CFLAGS = $(CF)
38$(obj)/vgetcpu.o: CFLAGS = $(CF)
39
40# We also create a special relocatable object that should mirror the symbol
41# table and layout of the linked DSO. With ld -R we can then refer to
42# these symbols in the kernel code rather than hand-coded addresses.
43extra-y += vdso-syms.o
44$(obj)/built-in.o: $(obj)/vdso-syms.o
45$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
46
47SYSCFLAGS_vdso-syms.o = -r -d
48$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
49 $(call if_changed,syscall)
diff --git a/arch/x86_64/vdso/vclock_gettime.c b/arch/x86_64/vdso/vclock_gettime.c
new file mode 100644
index 000000000000..17f6a00de712
--- /dev/null
+++ b/arch/x86_64/vdso/vclock_gettime.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Fast user context implementation of clock_gettime and gettimeofday.
6 *
7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing.
9 * Also alternative() doesn't work.
10 */
11
12#include <linux/kernel.h>
13#include <linux/posix-timers.h>
14#include <linux/time.h>
15#include <linux/string.h>
16#include <asm/vsyscall.h>
17#include <asm/vgtod.h>
18#include <asm/timex.h>
19#include <asm/hpet.h>
20#include <asm/unistd.h>
21#include <asm/io.h>
22#include <asm/vgtod.h>
23#include "vextern.h"
24
25#define gtod vdso_vsyscall_gtod_data
26
27static long vdso_fallback_gettime(long clock, struct timespec *ts)
28{
29 long ret;
30 asm("syscall" : "=a" (ret) :
31 "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
32 return ret;
33}
34
35static inline long vgetns(void)
36{
37 cycles_t (*vread)(void);
38 vread = gtod->clock.vread;
39 return ((vread() - gtod->clock.cycle_last) * gtod->clock.mult) >>
40 gtod->clock.shift;
41}
42
43static noinline int do_realtime(struct timespec *ts)
44{
45 unsigned long seq, ns;
46 do {
47 seq = read_seqbegin(&gtod->lock);
48 ts->tv_sec = gtod->wall_time_sec;
49 ts->tv_nsec = gtod->wall_time_nsec;
50 ns = vgetns();
51 } while (unlikely(read_seqretry(&gtod->lock, seq)));
52 timespec_add_ns(ts, ns);
53 return 0;
54}
55
56/* Copy of the version in kernel/time.c which we cannot directly access */
57static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
58{
59 while (nsec >= NSEC_PER_SEC) {
60 nsec -= NSEC_PER_SEC;
61 ++sec;
62 }
63 while (nsec < 0) {
64 nsec += NSEC_PER_SEC;
65 --sec;
66 }
67 ts->tv_sec = sec;
68 ts->tv_nsec = nsec;
69}
70
71static noinline int do_monotonic(struct timespec *ts)
72{
73 unsigned long seq, ns, secs;
74 do {
75 seq = read_seqbegin(&gtod->lock);
76 secs = gtod->wall_time_sec;
77 ns = gtod->wall_time_nsec + vgetns();
78 secs += gtod->wall_to_monotonic.tv_sec;
79 ns += gtod->wall_to_monotonic.tv_nsec;
80 } while (unlikely(read_seqretry(&gtod->lock, seq)));
81 vset_normalized_timespec(ts, secs, ns);
82 return 0;
83}
84
85int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
86{
87 if (likely(gtod->sysctl_enabled && gtod->clock.vread))
88 switch (clock) {
89 case CLOCK_REALTIME:
90 return do_realtime(ts);
91 case CLOCK_MONOTONIC:
92 return do_monotonic(ts);
93 }
94 return vdso_fallback_gettime(clock, ts);
95}
96int clock_gettime(clockid_t, struct timespec *)
97 __attribute__((weak, alias("__vdso_clock_gettime")));
98
99int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
100{
101 long ret;
102 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
103 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
104 offsetof(struct timespec, tv_nsec) ||
105 sizeof(*tv) != sizeof(struct timespec));
106 do_realtime((struct timespec *)tv);
107 tv->tv_usec /= 1000;
108 if (unlikely(tz != NULL)) {
109 /* This relies on gcc inlining the memcpy. We'll notice
110 if it ever fails to do so. */
111 memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
112 }
113 return 0;
114 }
115 asm("syscall" : "=a" (ret) :
116 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
117 return ret;
118}
119int gettimeofday(struct timeval *, struct timezone *)
120 __attribute__((weak, alias("__vdso_gettimeofday")));
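
Once the vDSO is mapped, ordinary clock_gettime()/gettimeofday() calls can resolve to the weak aliases above instead of entering the kernel; whether they actually do depends on how the libc binds them (and older glibc needs -lrt for clock_gettime), so the following is only a usage sketch:

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* may hit __vdso_clock_gettime */
	gettimeofday(&tv, NULL);		/* may hit __vdso_gettimeofday */

	printf("monotonic: %ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);
	printf("realtime:  %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}
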
diff --git a/arch/x86_64/vdso/vdso-note.S b/arch/x86_64/vdso/vdso-note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/x86_64/vdso/vdso-note.S
@@ -0,0 +1,12 @@
1/*
2 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
3 * Here we can supply some information useful to userland.
4 */
5
6#include <linux/uts.h>
7#include <linux/version.h>
8#include <linux/elfnote.h>
9
10ELFNOTE_START(Linux, 0, "a")
11 .long LINUX_VERSION_CODE
12ELFNOTE_END
diff --git a/arch/x86_64/vdso/vdso-start.S b/arch/x86_64/vdso/vdso-start.S
new file mode 100644
index 000000000000..2dc2cdb84d67
--- /dev/null
+++ b/arch/x86_64/vdso/vdso-start.S
@@ -0,0 +1,2 @@
1 .globl vdso_kernel_start
2vdso_kernel_start:
diff --git a/arch/x86_64/vdso/vdso.S b/arch/x86_64/vdso/vdso.S
new file mode 100644
index 000000000000..92e80c1972a7
--- /dev/null
+++ b/arch/x86_64/vdso/vdso.S
@@ -0,0 +1,2 @@
1 .section ".vdso","a"
2 .incbin "arch/x86_64/vdso/vdso.so"
diff --git a/arch/x86_64/vdso/vdso.lds.S b/arch/x86_64/vdso/vdso.lds.S
new file mode 100644
index 000000000000..b9a60e665d08
--- /dev/null
+++ b/arch/x86_64/vdso/vdso.lds.S
@@ -0,0 +1,77 @@
1/*
2 * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
3 * object prelinked to its virtual address, and with only one read-only
4 * segment (that fits in one page). This script controls its layout.
5 */
6#include <asm/asm-offsets.h>
7#include "voffset.h"
8
9#define VDSO_PRELINK 0xffffffffff700000
10
11SECTIONS
12{
13 . = VDSO_PRELINK + SIZEOF_HEADERS;
14
15 .hash : { *(.hash) } :text
16 .gnu.hash : { *(.gnu.hash) }
17 .dynsym : { *(.dynsym) }
18 .dynstr : { *(.dynstr) }
19 .gnu.version : { *(.gnu.version) }
20 .gnu.version_d : { *(.gnu.version_d) }
21 .gnu.version_r : { *(.gnu.version_r) }
22
23 /* This linker script is used both with -r and with -shared.
24 For the layouts to match, we need to skip more than enough
25 space for the dynamic symbol table et al. If this amount
26 is insufficient, ld -shared will barf. Just increase it here. */
27 . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
28
29 .text : { *(.text) } :text
30 .text.ptr : { *(.text.ptr) } :text
31 . = VDSO_PRELINK + 0x900;
32 .data : { *(.data) } :text
33 .bss : { *(.bss) } :text
34
35 .altinstructions : { *(.altinstructions) } :text
36 .altinstr_replacement : { *(.altinstr_replacement) } :text
37
38 .note : { *(.note.*) } :text :note
39 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
40 .eh_frame : { KEEP (*(.eh_frame)) } :text
41 .dynamic : { *(.dynamic) } :text :dynamic
42 .useless : {
43 *(.got.plt) *(.got)
44 *(.gnu.linkonce.d.*)
45 *(.dynbss)
46 *(.gnu.linkonce.b.*)
47 } :text
48}
49
50/*
51 * We must supply the ELF program headers explicitly to get just one
52 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
53 */
54PHDRS
55{
56 text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
57 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
58 note PT_NOTE FLAGS(4); /* PF_R */
59 eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
60}
61
62/*
63 * This controls what symbols we export from the DSO.
64 */
65VERSION
66{
67 LINUX_2.6 {
68 global:
69 clock_gettime;
70 __vdso_clock_gettime;
71 gettimeofday;
72 __vdso_gettimeofday;
73 getcpu;
74 __vdso_getcpu;
75 local: *;
76 };
77}
diff --git a/arch/x86_64/vdso/vextern.h b/arch/x86_64/vdso/vextern.h
new file mode 100644
index 000000000000..1683ba2ae3e8
--- /dev/null
+++ b/arch/x86_64/vdso/vextern.h
@@ -0,0 +1,16 @@
1#ifndef VEXTERN
2#include <asm/vsyscall.h>
3#define VEXTERN(x) \
4 extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden")));
5#endif
6
7#define VMAGIC 0xfeedbabeabcdefabUL
8
9/* Any kernel variable used in the vDSO must be exported in the main
10 kernel (vmlinux.lds.S/vsyscall.h, with the proper __section), listed
11 here in vextern.h, and referenced as a pointer with the vdso_ prefix.
12 The main kernel fills in the values later. */
13
14VEXTERN(jiffies)
15VEXTERN(vgetcpu_mode)
16VEXTERN(vsyscall_gtod_data)
diff --git a/arch/x86_64/vdso/vgetcpu.c b/arch/x86_64/vdso/vgetcpu.c
new file mode 100644
index 000000000000..91f6e85d0fc2
--- /dev/null
+++ b/arch/x86_64/vdso/vgetcpu.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Fast user context implementation of getcpu()
6 */
7
8#include <linux/kernel.h>
9#include <linux/getcpu.h>
10#include <linux/jiffies.h>
11#include <linux/time.h>
12#include <asm/vsyscall.h>
13#include <asm/vgtod.h>
14#include "vextern.h"
15
16long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
17{
18 unsigned int dummy, p;
19 unsigned long j = 0;
20
 21 /* Fast cache - only recompute the value once per jiffy and avoid
 22 the relatively costly rdtscp/cpuid otherwise.
 23 This works because the scheduler usually keeps the process
 24 on the same CPU and this syscall doesn't guarantee its
 25 results anyway.
 26 We do this here because otherwise user space would do it on
 27 its own in a likely inferior way (no access to jiffies).
 28 If you don't like it, pass NULL. */
29 if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
30 p = tcache->blob[1];
31 } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
32 /* Load per CPU data from RDTSCP */
33 rdtscp(dummy, dummy, p);
34 } else {
35 /* Load per CPU data from GDT */
36 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
37 }
38 if (tcache) {
39 tcache->blob[0] = j;
40 tcache->blob[1] = p;
41 }
42 if (cpu)
43 *cpu = p & 0xfff;
44 if (node)
45 *node = p >> 12;
46 return 0;
47}
48
49long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
50 __attribute__((weak, alias("__vdso_getcpu")));
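
A matching usage sketch for the getcpu() path: glibc's sched_getcpu() is the usual entry point, and whether it reaches the vDSO alias above or the real syscall depends on the libc/kernel combination; illustrative only, not part of the patch:

#define _GNU_SOURCE
#include <sched.h>	/* sched_getcpu() */
#include <stdio.h>

int main(void)
{
	int cpu = sched_getcpu();

	if (cpu < 0) {
		perror("sched_getcpu");
		return 1;
	}
	printf("running on CPU %d\n", cpu);
	return 0;
}
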
diff --git a/arch/x86_64/vdso/vma.c b/arch/x86_64/vdso/vma.c
new file mode 100644
index 000000000000..d4cb83a6c066
--- /dev/null
+++ b/arch/x86_64/vdso/vma.c
@@ -0,0 +1,139 @@
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/init.h>
9#include <linux/random.h>
10#include <asm/vsyscall.h>
11#include <asm/vgtod.h>
12#include <asm/proto.h>
13#include "voffset.h"
14
15int vdso_enabled = 1;
16
17#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
18#include "vextern.h"
19#undef VEXTERN
20
21extern char vdso_kernel_start[], vdso_start[], vdso_end[];
22extern unsigned short vdso_sync_cpuid;
23
24struct page **vdso_pages;
25
26static inline void *var_ref(void *vbase, char *var, char *name)
27{
28 unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
29 void *p = vbase + offset;
30 if (*(void **)p != (void *)VMAGIC) {
31 printk("VDSO: variable %s broken\n", name);
32 vdso_enabled = 0;
33 }
34 return p;
35}
36
37static int __init init_vdso_vars(void)
38{
39 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
40 int i;
41 char *vbase;
42
43 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
44 if (!vdso_pages)
45 goto oom;
46 for (i = 0; i < npages; i++) {
47 struct page *p;
48 p = alloc_page(GFP_KERNEL);
49 if (!p)
50 goto oom;
51 vdso_pages[i] = p;
52 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
53 }
54
55 vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
56 if (!vbase)
57 goto oom;
58
59 if (memcmp(vbase, "\177ELF", 4)) {
60 printk("VDSO: I'm broken; not ELF\n");
61 vdso_enabled = 0;
62 }
63
64#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
65#define VEXTERN(x) \
66 V(vdso_ ## x) = &__ ## x;
67#include "vextern.h"
68#undef VEXTERN
69 return 0;
70
71 oom:
72 printk("Cannot allocate vdso\n");
73 vdso_enabled = 0;
74 return -ENOMEM;
75}
76__initcall(init_vdso_vars);
77
78struct linux_binprm;
79
80/* Put the vdso above the (randomized) stack with another randomized offset.
81 This way there is no hole in the middle of address space.
82 To save memory make sure it is still in the same PTE as the stack top.
 83 This doesn't give that many random bits. */
84static unsigned long vdso_addr(unsigned long start, unsigned len)
85{
86 unsigned long addr, end;
87 unsigned offset;
88 end = (start + PMD_SIZE - 1) & PMD_MASK;
89 if (end >= TASK_SIZE64)
90 end = TASK_SIZE64;
91 end -= len;
92 /* This loses some more bits than a modulo, but is cheaper */
93 offset = get_random_int() & (PTRS_PER_PTE - 1);
94 addr = start + (offset << PAGE_SHIFT);
95 if (addr >= end)
96 addr = end;
97 return addr;
98}
99
100/* Setup a VMA at program startup for the vsyscall page.
101 Not called for compat tasks */
102int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
103{
104 struct mm_struct *mm = current->mm;
105 unsigned long addr;
106 int ret;
107 unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
108
109 if (!vdso_enabled)
110 return 0;
111
112 down_write(&mm->mmap_sem);
113 addr = vdso_addr(mm->start_stack, len);
114 addr = get_unmapped_area(NULL, addr, len, 0, 0);
115 if (IS_ERR_VALUE(addr)) {
116 ret = addr;
117 goto up_fail;
118 }
119
120 ret = install_special_mapping(mm, addr, len,
121 VM_READ|VM_EXEC|
122 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
123 VM_ALWAYSDUMP,
124 vdso_pages);
125 if (ret)
126 goto up_fail;
127
128 current->mm->context.vdso = (void *)addr;
129up_fail:
130 up_write(&mm->mmap_sem);
131 return ret;
132}
133
134static __init int vdso_setup(char *s)
135{
136 vdso_enabled = simple_strtoul(s, NULL, 0);
137 return 0;
138}
139__setup("vdso=", vdso_setup);
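
vdso_addr() above keeps the vDSO just past the stack's 2 MB (PMD) region and jitters it by a page-granular random offset within a single page table. A toy model of that placement with the kernel constants hard-coded as assumptions and without the TASK_SIZE64 clamp; illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PTRS_PER_PTE	512

static unsigned long pick_vdso_addr(unsigned long stack_top, unsigned long len)
{
	unsigned long end = ((stack_top + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - len;
	unsigned long offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
	unsigned long addr = stack_top + (offset << PAGE_SHIFT);

	return addr >= end ? end : addr;	/* clamp into the same PMD */
}

int main(void)
{
	printf("%#lx\n", pick_vdso_addr(0x7fff12345000UL, 2 * 4096));
	return 0;
}
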
diff --git a/arch/x86_64/vdso/voffset.h b/arch/x86_64/vdso/voffset.h
new file mode 100644
index 000000000000..5304204911f2
--- /dev/null
+++ b/arch/x86_64/vdso/voffset.h
@@ -0,0 +1 @@
#define VDSO_TEXT_OFFSET 0x500
diff --git a/arch/x86_64/vdso/vvar.c b/arch/x86_64/vdso/vvar.c
new file mode 100644
index 000000000000..6fc22219a472
--- /dev/null
+++ b/arch/x86_64/vdso/vvar.c
@@ -0,0 +1,12 @@
1/* Define pointers to external vDSO variables.
2 These are part of the vDSO. The kernel fills in the real addresses
3 at boot time; this is needed because when the vDSO is linked the
4 kernel is not yet, so the final addresses are unknown. */
5#include <linux/kernel.h>
6#include <linux/time.h>
7#include <asm/vsyscall.h>
8#include <asm/timex.h>
9#include <asm/vgtod.h>
10
11#define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC;
12#include "vextern.h"