Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/unistd.h | 6
-rw-r--r--  arch/alpha/kernel/systbls.S | 12
-rw-r--r--  arch/alpha/kernel/time.c | 3
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 35
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.in | 1
-rw-r--r--  arch/arm/common/vic.c | 69
-rw-r--r--  arch/arm/include/asm/mach/time.h | 1
-rw-r--r--  arch/arm/include/asm/system.h | 2
-rw-r--r--  arch/arm/kernel/leds.c | 28
-rw-r--r--  arch/arm/kernel/signal.c | 90
-rw-r--r--  arch/arm/kernel/time.c | 35
-rw-r--r--  arch/arm/mach-davinci/cpufreq.c | 4
-rw-r--r--  arch/arm/mach-exynos4/pm.c | 45
-rw-r--r--  arch/arm/mach-integrator/integrator_ap.c | 26
-rw-r--r--  arch/arm/mach-omap1/pm_bus.c | 69
-rw-r--r--  arch/arm/mach-omap2/Makefile | 6
-rw-r--r--  arch/arm/mach-omap2/clkt34xx_dpll3m2.c | 1
-rw-r--r--  arch/arm/mach-omap2/pm_bus.c | 85
-rw-r--r--  arch/arm/mach-pxa/balloon3.c | 1
-rw-r--r--  arch/arm/mach-pxa/clock-pxa2xx.c | 18
-rw-r--r--  arch/arm/mach-pxa/clock-pxa3xx.c | 17
-rw-r--r--  arch/arm/mach-pxa/clock.h | 7
-rw-r--r--  arch/arm/mach-pxa/cm-x270.c | 1
-rw-r--r--  arch/arm/mach-pxa/cm-x2xx.c | 23
-rw-r--r--  arch/arm/mach-pxa/colibri-evalboard.c | 1
-rw-r--r--  arch/arm/mach-pxa/colibri-pxa270-income.c | 1
-rw-r--r--  arch/arm/mach-pxa/colibri-pxa270.c | 1
-rw-r--r--  arch/arm/mach-pxa/generic.h | 8
-rw-r--r--  arch/arm/mach-pxa/irq.c | 17
-rw-r--r--  arch/arm/mach-pxa/lpd270.c | 20
-rw-r--r--  arch/arm/mach-pxa/lubbock.c | 21
-rw-r--r--  arch/arm/mach-pxa/mainstone.c | 22
-rw-r--r--  arch/arm/mach-pxa/mfp-pxa2xx.c | 12
-rw-r--r--  arch/arm/mach-pxa/mfp-pxa3xx.c | 21
-rw-r--r--  arch/arm/mach-pxa/mioa701.c | 43
-rw-r--r--  arch/arm/mach-pxa/palmld.c | 1
-rw-r--r--  arch/arm/mach-pxa/palmtreo.c | 1
-rw-r--r--  arch/arm/mach-pxa/palmz72.c | 24
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 25
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 25
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 25
-rw-r--r--  arch/arm/mach-pxa/pxa95x.c | 20
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 1
-rw-r--r--  arch/arm/mach-pxa/smemc.c | 29
-rw-r--r--  arch/arm/mach-pxa/trizeps4.c | 1
-rw-r--r--  arch/arm/mach-pxa/viper.c | 12
-rw-r--r--  arch/arm/mach-pxa/vpac270.c | 1
-rw-r--r--  arch/arm/mach-realview/include/mach/barriers.h | 2
-rw-r--r--  arch/arm/mach-s3c2410/irq.c | 30
-rw-r--r--  arch/arm/mach-s3c2410/mach-bast.c | 17
-rw-r--r--  arch/arm/mach-s3c2410/pm.c | 13
-rw-r--r--  arch/arm/mach-s3c2410/s3c2410.c | 5
-rw-r--r--  arch/arm/mach-s3c2412/irq.c | 2
-rw-r--r--  arch/arm/mach-s3c2412/mach-jive.c | 19
-rw-r--r--  arch/arm/mach-s3c2412/pm.c | 27
-rw-r--r--  arch/arm/mach-s3c2412/s3c2412.c | 4
-rw-r--r--  arch/arm/mach-s3c2416/irq.c | 2
-rw-r--r--  arch/arm/mach-s3c2416/pm.c | 27
-rw-r--r--  arch/arm/mach-s3c2416/s3c2416.c | 5
-rw-r--r--  arch/arm/mach-s3c2440/mach-osiris.c | 18
-rw-r--r--  arch/arm/mach-s3c2440/s3c2440.c | 8
-rw-r--r--  arch/arm/mach-s3c2440/s3c2442.c | 6
-rw-r--r--  arch/arm/mach-s3c2440/s3c244x-irq.c | 4
-rw-r--r--  arch/arm/mach-s3c2440/s3c244x.c | 62
-rw-r--r--  arch/arm/mach-s3c64xx/irq-pm.c | 18
-rw-r--r--  arch/arm/mach-s5pv210/pm.c | 25
-rw-r--r--  arch/arm/mach-sa1100/irq.c | 19
-rw-r--r--  arch/arm/mach-shmobile/pm_runtime.c | 145
-rw-r--r--  arch/arm/mach-tegra/include/mach/barriers.h | 2
-rw-r--r--  arch/arm/mm/init.c | 16
-rw-r--r--  arch/arm/plat-omap/gpio.c | 35
-rw-r--r--  arch/arm/plat-omap/iommu.c | 2
-rw-r--r--  arch/arm/plat-omap/omap_device.c | 23
-rw-r--r--  arch/arm/plat-pxa/gpio.c | 17
-rw-r--r--  arch/arm/plat-pxa/mfp.c | 1
-rw-r--r--  arch/arm/plat-s3c24xx/dma.c | 68
-rw-r--r--  arch/arm/plat-s3c24xx/irq-pm.c | 7
-rw-r--r--  arch/arm/plat-s5p/irq-pm.c | 7
-rw-r--r--  arch/arm/plat-samsung/include/plat/cpu.h | 6
-rw-r--r--  arch/arm/plat-samsung/include/plat/pm.h | 6
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 19
-rw-r--r--  arch/avr32/mach-at32ap/intc.c | 38
-rw-r--r--  arch/blackfin/kernel/nmi.c | 30
-rw-r--r--  arch/blackfin/mach-common/dpmc.c | 3
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 44
-rw-r--r--  arch/m68k/atari/atakeyb.c | 9
-rw-r--r--  arch/m68k/atari/stdma.c | 2
-rw-r--r--  arch/m68k/include/asm/atarikb.h | 2
-rw-r--r--  arch/m68k/include/asm/bitops_mm.h | 87
-rw-r--r--  arch/m68k/include/asm/unistd.h | 46
-rw-r--r--  arch/m68k/kernel/Makefile_mm | 2
-rw-r--r--  arch/m68k/kernel/entry_mm.S | 348
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 195
-rw-r--r--  arch/mips/Kbuild.platforms | 1
-rw-r--r--  arch/mips/Kconfig | 66
-rw-r--r--  arch/mips/Makefile | 12
-rw-r--r--  arch/mips/alchemy/common/dbdma.c | 123
-rw-r--r--  arch/mips/alchemy/common/dma.c | 46
-rw-r--r--  arch/mips/alchemy/common/irq.c | 345
-rw-r--r--  arch/mips/alchemy/common/platform.c | 250
-rw-r--r--  arch/mips/alchemy/common/setup.c | 4
-rw-r--r--  arch/mips/alchemy/devboards/db1200/setup.c | 7
-rw-r--r--  arch/mips/alchemy/devboards/db1x00/board_setup.c | 61
-rw-r--r--  arch/mips/alchemy/devboards/pb1000/board_setup.c | 2
-rw-r--r--  arch/mips/alchemy/devboards/pb1500/board_setup.c | 2
-rw-r--r--  arch/mips/alchemy/devboards/prom.c | 2
-rw-r--r--  arch/mips/alchemy/gpr/board_setup.c | 14
-rw-r--r--  arch/mips/alchemy/gpr/init.c | 2
-rw-r--r--  arch/mips/alchemy/mtx-1/board_setup.c | 2
-rw-r--r--  arch/mips/alchemy/mtx-1/init.c | 2
-rw-r--r--  arch/mips/alchemy/mtx-1/platform.c | 4
-rw-r--r--  arch/mips/alchemy/xxs1500/board_setup.c | 11
-rw-r--r--  arch/mips/alchemy/xxs1500/init.c | 7
-rw-r--r--  arch/mips/ar7/gpio.c | 4
-rw-r--r--  arch/mips/bcm47xx/nvram.c | 3
-rw-r--r--  arch/mips/bcm47xx/setup.c | 130
-rw-r--r--  arch/mips/bcm63xx/boards/board_bcm963xx.c | 16
-rw-r--r--  arch/mips/boot/compressed/calc_vmlinuz_load_addr.c | 2
-rw-r--r--  arch/mips/boot/compressed/uart-alchemy.c | 2
-rw-r--r--  arch/mips/cavium-octeon/Kconfig | 15
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 7
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 15
-rw-r--r--  arch/mips/configs/lemote2f_defconfig | 6
-rw-r--r--  arch/mips/configs/malta_defconfig | 2
-rw-r--r--  arch/mips/configs/mtx1_defconfig | 4
-rw-r--r--  arch/mips/configs/nlm_xlr_defconfig | 574
-rw-r--r--  arch/mips/include/asm/cache.h | 2
-rw-r--r--  arch/mips/include/asm/cevt-r4k.h | 3
-rw-r--r--  arch/mips/include/asm/cpu.h | 27
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/mips/include/asm/hugetlb.h | 1
-rw-r--r--  arch/mips/include/asm/jump_label.h | 22
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h | 334
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000_dma.h | 4
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h | 8
-rw-r--r--  arch/mips/include/asm/mach-au1x00/gpio-au1000.h | 122
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/nvram.h | 12
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h | 2
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | 5
-rw-r--r--  arch/mips/include/asm/mach-lantiq/lantiq.h | 63
-rw-r--r--  arch/mips/include/asm/mach-lantiq/lantiq_platform.h | 53
-rw-r--r--  arch/mips/include/asm/mach-lantiq/war.h | 24
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/irq.h | 18
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h | 66
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h | 141
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/xway_dma.h | 60
-rw-r--r--  arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h | 47
-rw-r--r--  arch/mips/include/asm/mach-netlogic/irq.h | 14
-rw-r--r--  arch/mips/include/asm/mach-netlogic/war.h | 26
-rw-r--r--  arch/mips/include/asm/module.h | 2
-rw-r--r--  arch/mips/include/asm/netlogic/interrupt.h | 45
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h | 76
-rw-r--r--  arch/mips/include/asm/netlogic/psb-bootinfo.h | 109
-rw-r--r--  arch/mips/include/asm/netlogic/xlr/gpio.h | 73
-rw-r--r--  arch/mips/include/asm/netlogic/xlr/iomap.h | 131
-rw-r--r--  arch/mips/include/asm/netlogic/xlr/pic.h | 231
-rw-r--r--  arch/mips/include/asm/netlogic/xlr/xlr.h | 75
-rw-r--r--  arch/mips/include/asm/ptrace.h | 3
-rw-r--r--  arch/mips/include/asm/thread_info.h | 3
-rw-r--r--  arch/mips/jazz/jazzdma.c | 5
-rw-r--r--  arch/mips/jz4740/dma.c | 4
-rw-r--r--  arch/mips/jz4740/setup.c | 32
-rw-r--r--  arch/mips/jz4740/time.c | 2
-rw-r--r--  arch/mips/jz4740/timer.c | 2
-rw-r--r--  arch/mips/kernel/Makefile | 1
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 83
-rw-r--r--  arch/mips/kernel/entry.S | 7
-rw-r--r--  arch/mips/kernel/ftrace.c | 5
-rw-r--r--  arch/mips/kernel/ptrace.c | 43
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 5
-rw-r--r--  arch/mips/kernel/scall64-64.S | 5
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 5
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 5
-rw-r--r--  arch/mips/kernel/syscall.c | 120
-rw-r--r--  arch/mips/kernel/traps.c | 6
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mips/lantiq/Kconfig | 23
-rw-r--r--  arch/mips/lantiq/Makefile | 11
-rw-r--r--  arch/mips/lantiq/Platform | 8
-rw-r--r--  arch/mips/lantiq/clk.c | 140
-rw-r--r--  arch/mips/lantiq/clk.h | 18
-rw-r--r--  arch/mips/lantiq/devices.c | 122
-rw-r--r--  arch/mips/lantiq/devices.h | 23
-rw-r--r--  arch/mips/lantiq/early_printk.c | 33
-rw-r--r--  arch/mips/lantiq/irq.c | 326
-rw-r--r--  arch/mips/lantiq/machtypes.h | 20
-rw-r--r--  arch/mips/lantiq/prom.c | 71
-rw-r--r--  arch/mips/lantiq/prom.h | 25
-rw-r--r--  arch/mips/lantiq/setup.c | 66
-rw-r--r--  arch/mips/lantiq/xway/Kconfig | 23
-rw-r--r--  arch/mips/lantiq/xway/Makefile | 7
-rw-r--r--  arch/mips/lantiq/xway/clk-ase.c | 48
-rw-r--r--  arch/mips/lantiq/xway/clk-xway.c | 223
-rw-r--r--  arch/mips/lantiq/xway/devices.c | 121
-rw-r--r--  arch/mips/lantiq/xway/devices.h | 20
-rw-r--r--  arch/mips/lantiq/xway/dma.c | 253
-rw-r--r--  arch/mips/lantiq/xway/ebu.c | 53
-rw-r--r--  arch/mips/lantiq/xway/gpio.c | 195
-rw-r--r--  arch/mips/lantiq/xway/gpio_ebu.c | 126
-rw-r--r--  arch/mips/lantiq/xway/gpio_stp.c | 157
-rw-r--r--  arch/mips/lantiq/xway/mach-easy50601.c | 57
-rw-r--r--  arch/mips/lantiq/xway/mach-easy50712.c | 74
-rw-r--r--  arch/mips/lantiq/xway/pmu.c | 70
-rw-r--r--  arch/mips/lantiq/xway/prom-ase.c | 39
-rw-r--r--  arch/mips/lantiq/xway/prom-xway.c | 54
-rw-r--r--  arch/mips/lantiq/xway/reset.c | 91
-rw-r--r--  arch/mips/lantiq/xway/setup-ase.c | 19
-rw-r--r--  arch/mips/lantiq/xway/setup-xway.c | 20
-rw-r--r--  arch/mips/lib/Makefile | 1
-rw-r--r--  arch/mips/loongson/common/env.c | 5
-rw-r--r--  arch/mips/mm/Makefile | 4
-rw-r--r--  arch/mips/mm/c-r4k.c | 3
-rw-r--r--  arch/mips/mm/mmap.c | 122
-rw-r--r--  arch/mips/mm/tlbex.c | 5
-rw-r--r--  arch/mips/mti-malta/malta-init.c | 14
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 3
-rw-r--r--  arch/mips/netlogic/Kconfig | 5
-rw-r--r--  arch/mips/netlogic/xlr/Makefile | 5
-rw-r--r--  arch/mips/netlogic/xlr/irq.c | 300
-rw-r--r--  arch/mips/netlogic/xlr/platform.c | 98
-rw-r--r--  arch/mips/netlogic/xlr/setup.c | 188
-rw-r--r--  arch/mips/netlogic/xlr/smp.c | 225
-rw-r--r--  arch/mips/netlogic/xlr/smpboot.S | 94
-rw-r--r--  arch/mips/netlogic/xlr/time.c | 51
-rw-r--r--  arch/mips/netlogic/xlr/xlr_console.c | 46
-rw-r--r--  arch/mips/pci/Makefile | 2
-rw-r--r--  arch/mips/pci/ops-lantiq.c | 116
-rw-r--r--  arch/mips/pci/pci-lantiq.c | 297
-rw-r--r--  arch/mips/pci/pci-lantiq.h | 18
-rw-r--r--  arch/mips/pci/pci-xlr.c | 214
-rw-r--r--  arch/mips/pmc-sierra/msp71xx/msp_irq_per.c | 2
-rw-r--r--  arch/mips/power/hibernate.S | 2
-rw-r--r--  arch/mips/rb532/gpio.c | 2
-rw-r--r--  arch/mips/sgi-ip22/ip22-platform.c | 4
-rw-r--r--  arch/mips/sgi-ip22/ip22-time.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-hubio.c | 3
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c | 3
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 13
-rw-r--r--  arch/mips/sni/time.c | 4
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 3
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 7
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 28
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 42
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 7
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 36
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 48
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/cacheflush.h | 1
-rw-r--r--  arch/s390/include/asm/diag.h | 17
-rw-r--r--  arch/s390/include/asm/ftrace.h | 4
-rw-r--r--  arch/s390/include/asm/jump_label.h | 37
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 2
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/diag.c | 21
-rw-r--r--  arch/s390/kernel/dis.c | 1
-rw-r--r--  arch/s390/kernel/entry.S | 2
-rw-r--r--  arch/s390/kernel/entry64.S | 2
-rw-r--r--  arch/s390/kernel/jump_label.c | 59
-rw-r--r--  arch/s390/mm/cmm.c | 2
-rw-r--r--  arch/s390/mm/pageattr.c | 5
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 14
-rw-r--r--  arch/s390/oprofile/hwsampler.h | 4
-rw-r--r--  arch/s390/oprofile/init.c | 8
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/configs/apsh4ad0a_defconfig | 1
-rw-r--r--  arch/sh/configs/sdk7786_defconfig | 1
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c | 33
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 25
-rw-r--r--  arch/sparc/kernel/apc.c | 2
-rw-r--r--  arch/sparc/kernel/pci_sabre.c | 5
-rw-r--r--  arch/sparc/kernel/pci_schizo.c | 8
-rw-r--r--  arch/sparc/kernel/pmc.c | 2
-rw-r--r--  arch/sparc/kernel/smp_32.c | 10
-rw-r--r--  arch/sparc/kernel/time_32.c | 2
-rw-r--r--  arch/sparc/lib/checksum_32.S | 12
-rw-r--r--  arch/um/os-Linux/util.c | 23
-rw-r--r--  arch/unicore32/kernel/irq.c | 23
-rw-r--r--  arch/x86/Kconfig | 4
-rw-r--r--  arch/x86/include/asm/alternative-asm.h | 9
-rw-r--r--  arch/x86/include/asm/alternative.h | 3
-rw-r--r--  arch/x86/include/asm/amd_iommu_proto.h | 13
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 28
-rw-r--r--  arch/x86/include/asm/apicdef.h | 1
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/include/asm/ftrace.h | 7
-rw-r--r--  arch/x86/include/asm/jump_label.h | 27
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 1
-rw-r--r--  arch/x86/include/asm/setup.h | 2
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 3
-rw-r--r--  arch/x86/include/asm/uaccess.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 17
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 16
-rw-r--r--  arch/x86/include/asm/x86_init.h | 12
-rw-r--r--  arch/x86/include/asm/xen/page.h | 5
-rw-r--r--  arch/x86/include/asm/xen/pci.h | 16
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 5
-rw-r--r--  arch/x86/kernel/alternative.c | 11
-rw-r--r--  arch/x86/kernel/amd_gart_64.c (renamed from arch/x86/kernel/pci-gart_64.c) | 0
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 526
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 48
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 48
-rw-r--r--  arch/x86/kernel/apm_32.c | 4
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 4
-rw-r--r--  arch/x86/kernel/cpu/common.c | 3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig | 266
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Makefile | 21
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 776
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 446
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 367
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 309
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | 517
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 1029
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.h | 353
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c | 327
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/mperf.c | 51
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/mperf.h | 9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 331
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | 624
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 261
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 752
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.h | 43
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 1607
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 224
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 194
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 636
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 452
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 481
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.h | 49
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | 467
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 19
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 28
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 37
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 7
-rw-r--r--  arch/x86/kernel/dumpstack.c | 16
-rw-r--r--  arch/x86/kernel/kprobes.c | 5
-rw-r--r--  arch/x86/kernel/module.c | 1
-rw-r--r--  arch/x86/kernel/pci-iommu_table.c | 18
-rw-r--r--  arch/x86/kernel/stacktrace.c | 13
-rw-r--r--  arch/x86/kernel/x86_init.c | 4
-rw-r--r--  arch/x86/lguest/boot.c | 2
-rw-r--r--  arch/x86/lib/clear_page_64.S | 33
-rw-r--r--  arch/x86/lib/copy_user_64.S | 69
-rw-r--r--  arch/x86/lib/memcpy_64.S | 45
-rw-r--r--  arch/x86/lib/memmove_64.S | 29
-rw-r--r--  arch/x86/lib/memset_64.S | 54
-rw-r--r--  arch/x86/mm/init.c | 24
-rw-r--r--  arch/x86/oprofile/backtrace.c | 13
-rw-r--r--  arch/x86/pci/xen.c | 96
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 92
-rw-r--r--  arch/x86/xen/enlighten.c | 20
-rw-r--r--  arch/x86/xen/irq.c | 2
-rw-r--r--  arch/x86/xen/mmu.c | 174
-rw-r--r--  arch/x86/xen/p2m.c | 43
-rw-r--r--  arch/x86/xen/setup.c | 10
-rw-r--r--  arch/x86/xen/smp.c | 8
-rw-r--r--  arch/x86/xen/time.c | 8
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
364 files changed, 9442 insertions, 14031 deletions
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 058937bf5a77..b1834166922d 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -452,10 +452,14 @@
 #define __NR_fanotify_init		494
 #define __NR_fanotify_mark		495
 #define __NR_prlimit64			496
+#define __NR_name_to_handle_at		497
+#define __NR_open_by_handle_at		498
+#define __NR_clock_adjtime		499
+#define __NR_syncfs			500
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS			497
+#define NR_SYSCALLS			501
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index a6a1de9db16f..15f999d41c75 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -498,23 +498,27 @@ sys_call_table:
 	.quad sys_ni_syscall			/* sys_timerfd */
 	.quad sys_eventfd
 	.quad sys_recvmmsg
 	.quad sys_fallocate			/* 480 */
 	.quad sys_timerfd_create
 	.quad sys_timerfd_settime
 	.quad sys_timerfd_gettime
 	.quad sys_signalfd4
 	.quad sys_eventfd2			/* 485 */
 	.quad sys_epoll_create1
 	.quad sys_dup3
 	.quad sys_pipe2
 	.quad sys_inotify_init1
 	.quad sys_preadv			/* 490 */
 	.quad sys_pwritev
 	.quad sys_rt_tgsigqueueinfo
 	.quad sys_perf_event_open
 	.quad sys_fanotify_init
 	.quad sys_fanotify_mark			/* 495 */
 	.quad sys_prlimit64
+	.quad sys_name_to_handle_at
+	.quad sys_open_by_handle_at
+	.quad sys_clock_adjtime
+	.quad sys_syncfs			/* 500 */
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 918e8e0b72ff..818e74ed45dc 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {
 
 static inline void register_rpcc_clocksource(long cycle_freq)
 {
-	clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
-	clocksource_register(&clocksource_rpcc);
+	clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 }
 #else /* !CONFIG_SMP */
 static inline void register_rpcc_clocksource(long cycle_freq)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 8ebbb511c783..0c6852d93506 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT)
 ZBSSADDR	:= $(CONFIG_ZBOOT_ROM_BSS)
 else
 ZTEXTADDR	:= 0
-ZBSSADDR	:= ALIGN(4)
+ZBSSADDR	:= ALIGN(8)
 endif
 
 SEDFLAGS	= s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583cd0c35..49f5b2eaaa87 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -179,15 +179,14 @@ not_angel:
 		bl	cache_on
 
 restart:	adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
-		ldr	sp, [r0, #32]
+		ldmia	r0, {r1, r2, r3, r6, r9, r11, r12}
+		ldr	sp, [r0, #28]
 
 		/*
 		 * We might be running at a different address.  We need
 		 * to fix up various pointers.
 		 */
 		sub	r0, r0, r1		@ calculate the delta offset
-		add	r5, r5, r0		@ _start
 		add	r6, r6, r0		@ _edata
 
 #ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart:	adr	r0, LC0
 /*
  * Check to see if we will overwrite ourselves.
  * r4  = final kernel address
- * r5  = start of this image
  * r9  = size of decompressed image
  * r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
- *   r4 >= r10 -> OK
- *   r4 + image length <= r5 -> OK
+ *   r4 - 16k page directory >= r10 -> OK
+ *   r4 + image length <= current position (pc) -> OK
  */
+		add	r10, r10, #16384
 		cmp	r4, r10
 		bhs	wont_overwrite
 		add	r10, r4, r9
-		cmp	r10, r5
+   ARM(		cmp	r10, pc		)
+ THUMB(		mov	lr, pc		)
+ THUMB(		cmp	r10, lr		)
 		bls	wont_overwrite
 
 /*
  * Relocate ourselves past the end of the decompressed kernel.
- * r5  = start of this image
  * r6  = _edata
 * r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
-		/* Round up to next 256-byte boundary. */
-		add	r10, r10, #256
+		/*
+		 * Bump to the next 256-byte boundary with the size of
+		 * the relocation code added. This avoids overwriting
+		 * ourself when the offset is small.
+		 */
+		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
 		bic	r10, r10, #255
 
+		/* Get start of code we want to copy and align it down. */
+		adr	r5, restart
+		bic	r5, r5, #31
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
@@ -245,6 +253,11 @@ restart:	adr	r0, LC0
 		/* Preserve offset to relocated code. */
 		sub	r6, r9, r6
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* cache_clean_flush may use the stack, so relocate it */
+		add	sp, sp, r6
+#endif
+
 		bl	cache_clean_flush
 
 		adr	r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated:	mov	r0, #0
 LC0:		.word	LC0			@ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
-		.word	_start			@ r5
 		.word	_edata			@ r6
 		.word	_image_size		@ r9
 		.word	_got_start		@ r11
@@ -1062,6 +1074,7 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
+reloc_code_end:
 
 		.align
 		.section ".stack", "aw", %nobits
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 5309909d7282..ea80abe78844 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -54,6 +54,7 @@ SECTIONS
   .bss			: { *(.bss) }
   _end = .;
 
+  . = ALIGN(8);		/* the stack must be 64-bit aligned */
   .stack		: { *(.stack) }
 
   .stab 0		: { *(.stab) }
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 113085a77123..7aa4262ada7a 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -22,17 +22,16 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/device.h>
 #include <linux/amba/bus.h>
 
 #include <asm/mach/irq.h>
 #include <asm/hardware/vic.h>
 
-#if defined(CONFIG_PM)
+#ifdef CONFIG_PM
 /**
  * struct vic_device - VIC PM device
- * @sysdev: The system device which is registered.
  * @irq: The IRQ number for the base of the VIC.
  * @base: The register base for the VIC.
  * @resume_sources: A bitmask of interrupts for resume.
@@ -43,8 +42,6 @@
  * @protect: Save for VIC_PROTECT.
  */
 struct vic_device {
-	struct sys_device sysdev;
-
 	void __iomem	*base;
 	int		irq;
 	u32		resume_sources;
@@ -59,11 +56,6 @@ struct vic_device {
 static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
 
 static int vic_id;
-
-static inline struct vic_device *to_vic(struct sys_device *sys)
-{
-	return container_of(sys, struct vic_device, sysdev);
-}
 #endif /* CONFIG_PM */
 
 /**
@@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base)
 	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
 }
 
-#if defined(CONFIG_PM)
-static int vic_class_resume(struct sys_device *dev)
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
 {
-	struct vic_device *vic = to_vic(dev);
 	void __iomem *base = vic->base;
 
 	printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
@@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev)
 
 	writel(vic->soft_int, base + VIC_INT_SOFT);
 	writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
 
-	return 0;
+static void vic_resume(void)
+{
+	int id;
+
+	for (id = vic_id - 1; id >= 0; id--)
+		resume_one_vic(vic_devices + id);
 }
 
-static int vic_class_suspend(struct sys_device *dev, pm_message_t state)
+static void suspend_one_vic(struct vic_device *vic)
 {
-	struct vic_device *vic = to_vic(dev);
 	void __iomem *base = vic->base;
 
 	printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
@@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state)
 
 	writel(vic->resume_irqs, base + VIC_INT_ENABLE);
 	writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+	int id;
+
+	for (id = 0; id < vic_id; id++)
+		suspend_one_vic(vic_devices + id);
 
 	return 0;
 }
 
-struct sysdev_class vic_class = {
-	.name		= "vic",
-	.suspend	= vic_class_suspend,
-	.resume		= vic_class_resume,
+struct syscore_ops vic_syscore_ops = {
+	.suspend	= vic_suspend,
+	.resume		= vic_resume,
 };
 
 /**
@@ -147,30 +150,8 @@ struct sysdev_class vic_class = {
 */
 static int __init vic_pm_init(void)
 {
-	struct vic_device *dev = vic_devices;
-	int err;
-	int id;
-
-	if (vic_id == 0)
-		return 0;
-
-	err = sysdev_class_register(&vic_class);
-	if (err) {
-		printk(KERN_ERR "%s: cannot register class\n", __func__);
-		return err;
-	}
-
-	for (id = 0; id < vic_id; id++, dev++) {
-		dev->sysdev.id = id;
-		dev->sysdev.cls = &vic_class;
-
-		err = sysdev_register(&dev->sysdev);
-		if (err) {
-			printk(KERN_ERR "%s: failed to register device\n",
-			       __func__);
-			return err;
-		}
-	}
+	if (vic_id > 0)
+		register_syscore_ops(&vic_syscore_ops);
 
 	return 0;
 }
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 883f6be5117a..d5adaae5ee2c 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -34,7 +34,6 @@
  * timer interrupt which may be pending.
  */
 struct sys_timer {
-	struct sys_device	dev;
 	void			(*init)(void);
 	void			(*suspend)(void);
 	void			(*resume)(void);
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 885be097769d..832888d0c20c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -159,7 +159,7 @@ extern unsigned int user_debug;
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()		do { dsb(); outer_sync(); } while (0)
-#define rmb()		dmb()
+#define rmb()		dsb()
 #define wmb()		mb()
 #else
 #include <asm/memory.h>
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 31a316c1777b..0f107dcb0347 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/leds.h>
 
@@ -69,36 +70,37 @@ static ssize_t leds_store(struct sys_device *dev,
 
 static SYSDEV_ATTR(event, 0200, NULL, leds_store);
 
-static int leds_suspend(struct sys_device *dev, pm_message_t state)
+static struct sysdev_class leds_sysclass = {
+	.name		= "leds",
+};
+
+static struct sys_device leds_device = {
+	.id		= 0,
+	.cls		= &leds_sysclass,
+};
+
+static int leds_suspend(void)
 {
 	leds_event(led_stop);
 	return 0;
 }
 
-static int leds_resume(struct sys_device *dev)
+static void leds_resume(void)
 {
 	leds_event(led_start);
-	return 0;
 }
 
-static int leds_shutdown(struct sys_device *dev)
+static void leds_shutdown(void)
 {
 	leds_event(led_halted);
-	return 0;
 }
 
-static struct sysdev_class leds_sysclass = {
-	.name		= "leds",
+static struct syscore_ops leds_syscore_ops = {
 	.shutdown	= leds_shutdown,
 	.suspend	= leds_suspend,
 	.resume		= leds_resume,
 };
 
-static struct sys_device leds_device = {
-	.id		= 0,
-	.cls		= &leds_sysclass,
-};
-
 static int __init leds_init(void)
 {
 	int ret;
@@ -107,6 +109,8 @@ static int __init leds_init(void)
 	ret = sysdev_register(&leds_device);
 	if (ret == 0)
 		ret = sysdev_create_file(&leds_device, &attr_event);
+	if (ret == 0)
+		register_syscore_ops(&leds_syscore_ops);
 	return ret;
 }
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index cb8398317644..0340224cf73c 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	return err;
 }
 
-static inline void setup_syscall_restart(struct pt_regs *regs)
-{
-	regs->ARM_r0 = regs->ARM_ORIG_r0;
-	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
-}
-
 /*
  * OK, we're invoking a handler
  */
 static int
 handle_signal(unsigned long sig, struct k_sigaction *ka,
 	      siginfo_t *info, sigset_t *oldset,
-	      struct pt_regs * regs, int syscall)
+	      struct pt_regs * regs)
 {
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = current;
@@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	int ret;
 
 	/*
-	 * If we were from a system call, check for system call restarting...
-	 */
-	if (syscall) {
-		switch (regs->ARM_r0) {
-		case -ERESTART_RESTARTBLOCK:
-		case -ERESTARTNOHAND:
-			regs->ARM_r0 = -EINTR;
-			break;
-		case -ERESTARTSYS:
-			if (!(ka->sa.sa_flags & SA_RESTART)) {
-				regs->ARM_r0 = -EINTR;
-				break;
-			}
-			/* fallthrough */
-		case -ERESTARTNOINTR:
-			setup_syscall_restart(regs);
-		}
-	}
-
-	/*
 	 * translate the signal
 	 */
 	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
  */
 static void do_signal(struct pt_regs *regs, int syscall)
 {
+	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
 	struct k_sigaction ka;
 	siginfo_t info;
 	int signr;
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	if (!user_mode(regs))
 		return;
 
+	/*
+	 * If we were from a system call, check for system call restarting...
+	 */
+	if (syscall) {
+		continue_addr = regs->ARM_pc;
+		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
+		retval = regs->ARM_r0;
+
+		/*
+		 * Prepare for system call restart.  We do this here so that a
+		 * debugger will see the already changed PSW.
+		 */
+		switch (retval) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->ARM_r0 = regs->ARM_ORIG_r0;
+			regs->ARM_pc = restart_addr;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->ARM_r0 = -EINTR;
+			break;
+		}
+	}
+
 	if (try_to_freeze())
 		goto no_signal;
 
+	/*
+	 * Get the signal to deliver.  When running under ptrace, at this
+	 * point the debugger may change all our registers ...
+	 */
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		sigset_t *oldset;
 
+		/*
+		 * Depending on the signal settings we may need to revert the
+		 * decision to restart the system call.  But skip this if a
+		 * debugger has chosen to restart at a different PC.
+		 */
+		if (regs->ARM_pc == restart_addr) {
+			if (retval == -ERESTARTNOHAND
+			    || (retval == -ERESTARTSYS
+				&& !(ka.sa.sa_flags & SA_RESTART))) {
+				regs->ARM_r0 = -EINTR;
+				regs->ARM_pc = continue_addr;
+			}
+		}
+
 		if (test_thread_flag(TIF_RESTORE_SIGMASK))
 			oldset = &current->saved_sigmask;
 		else
 			oldset = &current->blocked;
-		if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
 			/*
 			 * A signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	}
 
  no_signal:
-	/*
-	 * No signal to deliver to the process - restart the syscall.
-	 */
 	if (syscall) {
-		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+		/*
+		 * Handle restarting a different system call.  As above,
+		 * if a debugger has chosen to restart at a different PC,
+		 * ignore the restart.
+		 */
+		if (retval == -ERESTART_RESTARTBLOCK
+		    && regs->ARM_pc == continue_addr) {
 			if (thumb_mode(regs)) {
 				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
 				regs->ARM_pc -= 2;
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 #endif
 			}
 		}
-		if (regs->ARM_r0 == -ERESTARTNOHAND ||
-		    regs->ARM_r0 == -ERESTARTSYS ||
-		    regs->ARM_r0 == -ERESTARTNOINTR) {
-			setup_syscall_restart(regs);
-		}
 
 	/* If there's no signal to deliver, we just put the saved sigmask
 	 * back.
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 1ff46cabc7ef..cb634c3e28e9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -21,7 +21,7 @@
 #include <linux/timex.h>
 #include <linux/errno.h>
 #include <linux/profile.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
 
@@ -115,48 +115,37 @@ void timer_tick(void)
 #endif
 
 #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
-static int timer_suspend(struct sys_device *dev, pm_message_t state)
+static int timer_suspend(void)
 {
-	struct sys_timer *timer = container_of(dev, struct sys_timer, dev);
-
-	if (timer->suspend != NULL)
-		timer->suspend();
+	if (system_timer->suspend)
+		system_timer->suspend();
 
 	return 0;
 }
 
-static int timer_resume(struct sys_device *dev)
+static void timer_resume(void)
 {
-	struct sys_timer *timer = container_of(dev, struct sys_timer, dev);
-
-	if (timer->resume != NULL)
-		timer->resume();
-
-	return 0;
+	if (system_timer->resume)
+		system_timer->resume();
 }
 #else
 #define timer_suspend NULL
 #define timer_resume NULL
 #endif
 
-static struct sysdev_class timer_sysclass = {
-	.name		= "timer",
+static struct syscore_ops timer_syscore_ops = {
 	.suspend	= timer_suspend,
 	.resume		= timer_resume,
 };
 
-static int __init timer_init_sysfs(void)
+static int __init timer_init_syscore_ops(void)
 {
-	int ret = sysdev_class_register(&timer_sysclass);
-	if (ret == 0) {
-		system_timer->dev.cls = &timer_sysclass;
-		ret = sysdev_register(&system_timer->dev);
-	}
+	register_syscore_ops(&timer_syscore_ops);
 
-	return ret;
+	return 0;
 }
 
-device_initcall(timer_init_sysfs);
+device_initcall(timer_init_syscore_ops);
 
 void __init time_init(void)
 {
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 0a95be1512bb..41669ecc1f91 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -94,9 +94,7 @@ static int davinci_target(struct cpufreq_policy *policy,
 	if (freqs.old == freqs.new)
 		return ret;
 
-	cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER,
-			dev_driver_string(cpufreq.dev),
-			"transition: %u --> %u\n", freqs.old, freqs.new);
+	dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
 
 	ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
 					     freqs.new, relation, &idx);
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c
index 10d917d9e3ad..8755ca8dd48d 100644
--- a/arch/arm/mach-exynos4/pm.c
+++ b/arch/arm/mach-exynos4/pm.c
@@ -16,6 +16,7 @@
 
 #include <linux/init.h>
 #include <linux/suspend.h>
+#include <linux/syscore_ops.h>
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
@@ -372,7 +373,27 @@ void exynos4_scu_enable(void __iomem *scu_base)
 	flush_cache_all();
 }
 
-static int exynos4_pm_resume(struct sys_device *dev)
+static struct sysdev_driver exynos4_pm_driver = {
+	.add		= exynos4_pm_add,
+};
+
+static __init int exynos4_pm_drvinit(void)
+{
+	unsigned int tmp;
+
+	s3c_pm_init();
+
+	/* All wakeup disable */
+
+	tmp = __raw_readl(S5P_WAKEUP_MASK);
+	tmp |= ((0xFF << 8) | (0x1F << 1));
+	__raw_writel(tmp, S5P_WAKEUP_MASK);
+
+	return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver);
+}
+arch_initcall(exynos4_pm_drvinit);
+
+static void exynos4_pm_resume(void)
 {
 	/* For release retention */
 
@@ -394,27 +415,15 @@ static int exynos4_pm_resume(struct sys_device *dev)
 	/* enable L2X0*/
 	writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL);
 #endif
-
-	return 0;
 }
 
-static struct sysdev_driver exynos4_pm_driver = {
-	.add		= exynos4_pm_add,
+static struct syscore_ops exynos4_pm_syscore_ops = {
 	.resume		= exynos4_pm_resume,
 };
 
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos4_pm_syscore_init(void)
 {
-	unsigned int tmp;
-
-	s3c_pm_init();
-
-	/* All wakeup disable */
-
-	tmp = __raw_readl(S5P_WAKEUP_MASK);
-	tmp |= ((0xFF << 8) | (0x1F << 1));
-	__raw_writel(tmp, S5P_WAKEUP_MASK);
-
-	return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver);
+	register_syscore_ops(&exynos4_pm_syscore_ops);
+	return 0;
 }
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos4_pm_syscore_init);
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 980803ff348c..d3e96451529c 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -24,7 +24,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/kmi.h>
 #include <linux/clocksource.h>
@@ -180,13 +180,13 @@ static void __init ap_init_irq(void)
 #ifdef CONFIG_PM
 static unsigned long ic_irq_enable;
 
-static int irq_suspend(struct sys_device *dev, pm_message_t state)
+static int irq_suspend(void)
 {
 	ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE);
 	return 0;
 }
 
-static int irq_resume(struct sys_device *dev)
+static void irq_resume(void)
 {
 	/* disable all irq sources */
 	writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR);
@@ -194,33 +194,25 @@ static int irq_resume(struct sys_device *dev)
 	writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
 
 	writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET);
-	return 0;
 }
 #else
 #define irq_suspend NULL
 #define irq_resume NULL
 #endif
 
-static struct sysdev_class irq_class = {
-	.name		= "irq",
+static struct syscore_ops irq_syscore_ops = {
 	.suspend	= irq_suspend,
 	.resume		= irq_resume,
 };
 
-static struct sys_device irq_device = {
-	.id	= 0,
-	.cls	= &irq_class,
-};
-
-static int __init irq_init_sysfs(void)
+static int __init irq_syscore_init(void)
 {
-	int ret = sysdev_class_register(&irq_class);
-	if (ret == 0)
-		ret = sysdev_register(&irq_device);
-	return ret;
+	register_syscore_ops(&irq_syscore_ops);
+
+	return 0;
 }
 
-device_initcall(irq_init_sysfs);
+device_initcall(irq_syscore_init);
 
 /*
  * Flash handling.
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 6588c22b8a64..fe31d933f0ed 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -24,75 +24,50 @@
 #ifdef CONFIG_PM_RUNTIME
 static int omap1_pm_runtime_suspend(struct device *dev)
 {
-	struct clk *iclk, *fclk;
-	int ret = 0;
+	int ret;
 
 	dev_dbg(dev, "%s\n", __func__);
 
 	ret = pm_generic_runtime_suspend(dev);
+	if (ret)
+		return ret;
 
-	fclk = clk_get(dev, "fck");
-	if (!IS_ERR(fclk)) {
-		clk_disable(fclk);
-		clk_put(fclk);
-	}
-
-	iclk = clk_get(dev, "ick");
-	if (!IS_ERR(iclk)) {
-		clk_disable(iclk);
-		clk_put(iclk);
+	ret = pm_runtime_clk_suspend(dev);
+	if (ret) {
+		pm_generic_runtime_resume(dev);
+		return ret;
 	}
 
 	return 0;
-};
+}
 
 static int omap1_pm_runtime_resume(struct device *dev)
 {
-	struct clk *iclk, *fclk;
-
 	dev_dbg(dev, "%s\n", __func__);
 
-	iclk = clk_get(dev, "ick");
-	if (!IS_ERR(iclk)) {
-		clk_enable(iclk);
-		clk_put(iclk);
-	}
+	pm_runtime_clk_resume(dev);
+	return pm_generic_runtime_resume(dev);
+}
 
-	fclk = clk_get(dev, "fck");
-	if (!IS_ERR(fclk)) {
-		clk_enable(fclk);
-		clk_put(fclk);
-	}
+static struct dev_power_domain default_power_domain = {
+	.ops = {
+		.runtime_suspend = omap1_pm_runtime_suspend,
+		.runtime_resume = omap1_pm_runtime_resume,
+		USE_PLATFORM_PM_SLEEP_OPS
+	},
+};
 
-	return pm_generic_runtime_resume(dev);
+static struct pm_clk_notifier_block platform_bus_notifier = {
+	.pwr_domain = &default_power_domain,
+	.con_ids = { "ick", "fck", NULL, },
 };
 
 static int __init omap1_pm_runtime_init(void)
 {
-	const struct dev_pm_ops *pm;
-	struct dev_pm_ops *omap_pm;
-
 	if (!cpu_class_is_omap1())
 		return -ENODEV;
 
-	pm = platform_bus_get_pm_ops();
-	if (!pm) {
-		pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
-	if (!omap_pm) {
-		pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
-			__func__);
-		return -ENOMEM;
-	}
-
-	omap_pm->runtime_suspend = omap1_pm_runtime_suspend;
-	omap_pm->runtime_resume = omap1_pm_runtime_resume;
-
-	platform_bus_set_pm_ops(omap_pm);
+	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 512b15204450..66dfbccacd25 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -59,10 +59,10 @@ endif
 # Power Management
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_ARCH_OMAP2)		+= pm24xx.o
-obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o \
-					   cpuidle34xx.o pm_bus.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o pm_bus.o
+					   cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)		+= sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
index b2b1e37bb6bb..d6e34dd9e7e7 100644
--- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
+++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
@@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
 			  sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
 			  sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
 			  0, 0, 0, 0);
+	clk->rate = rate;
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
deleted file mode 100644
index 5acd2ab298b1..000000000000
--- a/arch/arm/mach-omap2/pm_bus.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Runtime PM support code for OMAP
- *
- * Author: Kevin Hilman, Deep Root Systems, LLC
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-
-#include <plat/omap_device.h>
-#include <plat/omap-pm.h>
-
-#ifdef CONFIG_PM_RUNTIME
-static int omap_pm_runtime_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	int r, ret = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	ret = pm_generic_runtime_suspend(dev);
-
-	if (!ret && dev->parent == &omap_device_parent) {
-		r = omap_device_idle(pdev);
-		WARN_ON(r);
-	}
-
-	return ret;
-};
-
-static int omap_pm_runtime_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	int r;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (dev->parent == &omap_device_parent) {
-		r = omap_device_enable(pdev);
-		WARN_ON(r);
-	}
-
-	return pm_generic_runtime_resume(dev);
-};
-#else
-#define omap_pm_runtime_suspend NULL
-#define omap_pm_runtime_resume NULL
-#endif /* CONFIG_PM_RUNTIME */
-
-static int __init omap_pm_runtime_init(void)
-{
-	const struct dev_pm_ops *pm;
-	struct dev_pm_ops *omap_pm;
-
-	pm = platform_bus_get_pm_ops();
-	if (!pm) {
-		pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
-	if (!omap_pm) {
-		pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
-			__func__);
-		return -ENOMEM;
-	}
-
-	omap_pm->runtime_suspend = omap_pm_runtime_suspend;
-	omap_pm->runtime_resume = omap_pm_runtime_resume;
-
-	platform_bus_set_pm_ops(omap_pm);
-
-	return 0;
-}
-core_initcall(omap_pm_runtime_init);
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index bfbecec6d05f..810a982a66f8 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -15,7 +15,6 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/bitops.h>
diff --git a/arch/arm/mach-pxa/clock-pxa2xx.c b/arch/arm/mach-pxa/clock-pxa2xx.c
index 1ce090448493..1d5859d9a0e3 100644
--- a/arch/arm/mach-pxa/clock-pxa2xx.c
+++ b/arch/arm/mach-pxa/clock-pxa2xx.c
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 
 #include <mach/pxa2xx-regs.h>
 
@@ -33,32 +33,22 @@ const struct clkops clk_pxa2xx_cken_ops = {
 #ifdef CONFIG_PM
 static uint32_t saved_cken;
 
-static int pxa2xx_clock_suspend(struct sys_device *d, pm_message_t state)
+static int pxa2xx_clock_suspend(void)
 {
 	saved_cken = CKEN;
 	return 0;
 }
 
-static int pxa2xx_clock_resume(struct sys_device *d)
+static void pxa2xx_clock_resume(void)
 {
 	CKEN = saved_cken;
-	return 0;
 }
 #else
 #define pxa2xx_clock_suspend	NULL
 #define pxa2xx_clock_resume	NULL
 #endif
 
-struct sysdev_class pxa2xx_clock_sysclass = {
-	.name		= "pxa2xx-clock",
+struct syscore_ops pxa2xx_clock_syscore_ops = {
 	.suspend	= pxa2xx_clock_suspend,
 	.resume		= pxa2xx_clock_resume,
 };
-
-static int __init pxa2xx_clock_init(void)
-{
-	if (cpu_is_pxa2xx())
-		return sysdev_class_register(&pxa2xx_clock_sysclass);
-	return 0;
-}
-postcore_initcall(pxa2xx_clock_init);
diff --git a/arch/arm/mach-pxa/clock-pxa3xx.c b/arch/arm/mach-pxa/clock-pxa3xx.c
index 3f864cd0bd28..2a37a9a8f621 100644
--- a/arch/arm/mach-pxa/clock-pxa3xx.c
+++ b/arch/arm/mach-pxa/clock-pxa3xx.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/syscore_ops.h>
 
 #include <mach/smemc.h>
 #include <mach/pxa3xx-regs.h>
@@ -182,7 +183,7 @@ const struct clkops clk_pxa3xx_pout_ops = {
 static uint32_t cken[2];
 static uint32_t accr;
 
-static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state)
+static int pxa3xx_clock_suspend(void)
 {
 	cken[0] = CKENA;
 	cken[1] = CKENB;
@@ -190,28 +191,18 @@ static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state)
 	return 0;
 }
 
-static int pxa3xx_clock_resume(struct sys_device *d)
+static void pxa3xx_clock_resume(void)
 {
 	ACCR = accr;
 	CKENA = cken[0];
 	CKENB = cken[1];
-	return 0;
 }
 #else
 #define pxa3xx_clock_suspend	NULL
 #define pxa3xx_clock_resume	NULL
 #endif
 
-struct sysdev_class pxa3xx_clock_sysclass = {
-	.name		= "pxa3xx-clock",
+struct syscore_ops pxa3xx_clock_syscore_ops = {
 	.suspend	= pxa3xx_clock_suspend,
 	.resume		= pxa3xx_clock_resume,
 };
-
-static int __init pxa3xx_clock_init(void)
-{
-	if (cpu_is_pxa3xx() || cpu_is_pxa95x())
-		return sysdev_class_register(&pxa3xx_clock_sysclass);
-	return 0;
-}
-postcore_initcall(pxa3xx_clock_init);
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h
index f9f349a21b54..1f2fb9c43f06 100644
--- a/arch/arm/mach-pxa/clock.h
+++ b/arch/arm/mach-pxa/clock.h
@@ -1,5 +1,5 @@
1#include <linux/clkdev.h> 1#include <linux/clkdev.h>
2#include <linux/sysdev.h> 2#include <linux/syscore_ops.h>
3 3
4struct clkops { 4struct clkops {
5 void (*enable)(struct clk *); 5 void (*enable)(struct clk *);
@@ -54,7 +54,7 @@ extern const struct clkops clk_pxa2xx_cken_ops;
54void clk_pxa2xx_cken_enable(struct clk *clk); 54void clk_pxa2xx_cken_enable(struct clk *clk);
55void clk_pxa2xx_cken_disable(struct clk *clk); 55void clk_pxa2xx_cken_disable(struct clk *clk);
56 56
57extern struct sysdev_class pxa2xx_clock_sysclass; 57extern struct syscore_ops pxa2xx_clock_syscore_ops;
58 58
59#if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) 59#if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x)
60#define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ 60#define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \
@@ -74,5 +74,6 @@ extern const struct clkops clk_pxa3xx_smemc_ops;
74extern void clk_pxa3xx_cken_enable(struct clk *); 74extern void clk_pxa3xx_cken_enable(struct clk *);
75extern void clk_pxa3xx_cken_disable(struct clk *); 75extern void clk_pxa3xx_cken_disable(struct clk *);
76 76
77extern struct sysdev_class pxa3xx_clock_sysclass; 77extern struct syscore_ops pxa3xx_clock_syscore_ops;
78
78#endif 79#endif
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index b88d601a8090..13518a705399 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/sysdev.h>
14#include <linux/irq.h> 13#include <linux/irq.h>
15#include <linux/gpio.h> 14#include <linux/gpio.h>
16#include <linux/delay.h> 15#include <linux/delay.h>
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index 8225e2e58c6e..a10996782476 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/sysdev.h> 13#include <linux/syscore_ops.h>
14#include <linux/irq.h> 14#include <linux/irq.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16 16
@@ -388,7 +388,7 @@ static inline void cmx2xx_init_display(void) {}
388#ifdef CONFIG_PM 388#ifdef CONFIG_PM
389static unsigned long sleep_save_msc[10]; 389static unsigned long sleep_save_msc[10];
390 390
391static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) 391static int cmx2xx_suspend(void)
392{ 392{
393 cmx2xx_pci_suspend(); 393 cmx2xx_pci_suspend();
394 394
@@ -412,7 +412,7 @@ static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state)
412 return 0; 412 return 0;
413} 413}
414 414
415static int cmx2xx_resume(struct sys_device *dev) 415static void cmx2xx_resume(void)
416{ 416{
417 cmx2xx_pci_resume(); 417 cmx2xx_pci_resume();
418 418
@@ -420,27 +420,18 @@ static int cmx2xx_resume(struct sys_device *dev)
420 __raw_writel(sleep_save_msc[0], MSC0); 420 __raw_writel(sleep_save_msc[0], MSC0);
421 __raw_writel(sleep_save_msc[1], MSC1); 421 __raw_writel(sleep_save_msc[1], MSC1);
422 __raw_writel(sleep_save_msc[2], MSC2); 422 __raw_writel(sleep_save_msc[2], MSC2);
423
424 return 0;
425} 423}
426 424
427static struct sysdev_class cmx2xx_pm_sysclass = { 425static struct syscore_ops cmx2xx_pm_syscore_ops = {
428 .name = "pm",
429 .resume = cmx2xx_resume, 426 .resume = cmx2xx_resume,
430 .suspend = cmx2xx_suspend, 427 .suspend = cmx2xx_suspend,
431}; 428};
432 429
433static struct sys_device cmx2xx_pm_device = {
434 .cls = &cmx2xx_pm_sysclass,
435};
436
437static int __init cmx2xx_pm_init(void) 430static int __init cmx2xx_pm_init(void)
438{ 431{
439 int error; 432 register_syscore_ops(&cmx2xx_pm_syscore_ops);
440 error = sysdev_class_register(&cmx2xx_pm_sysclass); 433
441 if (error == 0) 434 return 0;
442 error = sysdev_register(&cmx2xx_pm_device);
443 return error;
444} 435}
445#else 436#else
446static int __init cmx2xx_pm_init(void) { return 0; } 437static int __init cmx2xx_pm_init(void) { return 0; }
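The cm-x2xx.c init above also shows why the error handling shrinks in these conversions: sysdev needed a class registration followed by a device registration, either of which could fail, while register_syscore_ops() simply links the ops onto a list and returns void. A sketch of the resulting init, with a hypothetical bar_pm prefix:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static struct syscore_ops bar_pm_syscore_ops;	/* hypothetical; callbacks as in the previous sketch */

static int __init bar_pm_init(void)
{
	/*
	 * The old sequence, sysdev_class_register() followed by
	 * sysdev_register(), could fail at either step and had to
	 * propagate an error; register_syscore_ops() cannot fail.
	 */
	register_syscore_ops(&bar_pm_syscore_ops);
	return 0;
}
device_initcall(bar_pm_init);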
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 81c3c433e2d6..d28e802e2448 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -13,7 +13,6 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/sysdev.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
18#include <linux/gpio.h> 17#include <linux/gpio.h>
19#include <asm/mach-types.h> 18#include <asm/mach-types.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 44c1b77ece67..80538b8806ed 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -22,7 +22,6 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/pwm_backlight.h> 23#include <linux/pwm_backlight.h>
24#include <linux/i2c/pxa-i2c.h> 24#include <linux/i2c/pxa-i2c.h>
25#include <linux/sysdev.h>
26 25
27#include <asm/irq.h> 26#include <asm/irq.h>
28#include <asm/mach-types.h> 27#include <asm/mach-types.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 6fc5d328ba7f..7545a48ed88b 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -17,7 +17,6 @@
17#include <linux/mtd/partitions.h> 17#include <linux/mtd/partitions.h>
18#include <linux/mtd/physmap.h> 18#include <linux/mtd/physmap.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sysdev.h>
21#include <linux/ucb1400.h> 20#include <linux/ucb1400.h>
22 21
23#include <asm/mach/arch.h> 22#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index a079d8baa45a..e6c9344a95ae 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -61,10 +61,10 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int);
61#define pxa3xx_get_clk_frequency_khz(x) (0) 61#define pxa3xx_get_clk_frequency_khz(x) (0)
62#endif 62#endif
63 63
64extern struct sysdev_class pxa_irq_sysclass; 64extern struct syscore_ops pxa_irq_syscore_ops;
65extern struct sysdev_class pxa_gpio_sysclass; 65extern struct syscore_ops pxa_gpio_syscore_ops;
66extern struct sysdev_class pxa2xx_mfp_sysclass; 66extern struct syscore_ops pxa2xx_mfp_syscore_ops;
67extern struct sysdev_class pxa3xx_mfp_sysclass; 67extern struct syscore_ops pxa3xx_mfp_syscore_ops;
68 68
69void __init pxa_set_ffuart_info(void *info); 69void __init pxa_set_ffuart_info(void *info);
70void __init pxa_set_btuart_info(void *info); 70void __init pxa_set_btuart_info(void *info);
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 6251e3f5c62c..32ed551bf9c5 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -15,7 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/sysdev.h> 18#include <linux/syscore_ops.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21 21
@@ -183,7 +183,7 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn)
183static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; 183static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
184static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; 184static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
185 185
186static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) 186static int pxa_irq_suspend(void)
187{ 187{
188 int i; 188 int i;
189 189
@@ -202,7 +202,7 @@ static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state)
202 return 0; 202 return 0;
203} 203}
204 204
205static int pxa_irq_resume(struct sys_device *dev) 205static void pxa_irq_resume(void)
206{ 206{
207 int i; 207 int i;
208 208
@@ -218,22 +218,13 @@ static int pxa_irq_resume(struct sys_device *dev)
218 __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); 218 __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i));
219 219
220 __raw_writel(1, IRQ_BASE + ICCR); 220 __raw_writel(1, IRQ_BASE + ICCR);
221 return 0;
222} 221}
223#else 222#else
224#define pxa_irq_suspend NULL 223#define pxa_irq_suspend NULL
225#define pxa_irq_resume NULL 224#define pxa_irq_resume NULL
226#endif 225#endif
227 226
228struct sysdev_class pxa_irq_sysclass = { 227struct syscore_ops pxa_irq_syscore_ops = {
229 .name = "irq",
230 .suspend = pxa_irq_suspend, 228 .suspend = pxa_irq_suspend,
231 .resume = pxa_irq_resume, 229 .resume = pxa_irq_resume,
232}; 230};
233
234static int __init pxa_irq_init(void)
235{
236 return sysdev_class_register(&pxa_irq_sysclass);
237}
238
239core_initcall(pxa_irq_init);
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index f5de541725b1..6cf8180bf5bd 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sysdev.h> 18#include <linux/syscore_ops.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
@@ -159,30 +159,22 @@ static void __init lpd270_init_irq(void)
159 159
160 160
161#ifdef CONFIG_PM 161#ifdef CONFIG_PM
162static int lpd270_irq_resume(struct sys_device *dev) 162static void lpd270_irq_resume(void)
163{ 163{
164 __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); 164 __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
165 return 0;
166} 165}
167 166
168static struct sysdev_class lpd270_irq_sysclass = { 167static struct syscore_ops lpd270_irq_syscore_ops = {
169 .name = "cpld_irq",
170 .resume = lpd270_irq_resume, 168 .resume = lpd270_irq_resume,
171}; 169};
172 170
173static struct sys_device lpd270_irq_device = {
174 .cls = &lpd270_irq_sysclass,
175};
176
177static int __init lpd270_irq_device_init(void) 171static int __init lpd270_irq_device_init(void)
178{ 172{
179 int ret = -ENODEV;
180 if (machine_is_logicpd_pxa270()) { 173 if (machine_is_logicpd_pxa270()) {
181 ret = sysdev_class_register(&lpd270_irq_sysclass); 174 register_syscore_ops(&lpd270_irq_syscore_ops);
182 if (ret == 0) 175 return 0;
183 ret = sysdev_register(&lpd270_irq_device);
184 } 176 }
185 return ret; 177 return -ENODEV;
186} 178}
187 179
188device_initcall(lpd270_irq_device_init); 180device_initcall(lpd270_irq_device_init);
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 3ede978c83d9..e10ddb827147 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -15,7 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sysdev.h> 18#include <linux/syscore_ops.h>
19#include <linux/major.h> 19#include <linux/major.h>
20#include <linux/fb.h> 20#include <linux/fb.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
@@ -176,31 +176,22 @@ static void __init lubbock_init_irq(void)
176 176
177#ifdef CONFIG_PM 177#ifdef CONFIG_PM
178 178
179static int lubbock_irq_resume(struct sys_device *dev) 179static void lubbock_irq_resume(void)
180{ 180{
181 LUB_IRQ_MASK_EN = lubbock_irq_enabled; 181 LUB_IRQ_MASK_EN = lubbock_irq_enabled;
182 return 0;
183} 182}
184 183
185static struct sysdev_class lubbock_irq_sysclass = { 184static struct syscore_ops lubbock_irq_syscore_ops = {
186 .name = "cpld_irq",
187 .resume = lubbock_irq_resume, 185 .resume = lubbock_irq_resume,
188}; 186};
189 187
190static struct sys_device lubbock_irq_device = {
191 .cls = &lubbock_irq_sysclass,
192};
193
194static int __init lubbock_irq_device_init(void) 188static int __init lubbock_irq_device_init(void)
195{ 189{
196 int ret = -ENODEV;
197
198 if (machine_is_lubbock()) { 190 if (machine_is_lubbock()) {
199 ret = sysdev_class_register(&lubbock_irq_sysclass); 191 register_syscore_ops(&lubbock_irq_syscore_ops);
200 if (ret == 0) 192 return 0;
201 ret = sysdev_register(&lubbock_irq_device);
202 } 193 }
203 return ret; 194 return -ENODEV;
204} 195}
205 196
206device_initcall(lubbock_irq_device_init); 197device_initcall(lubbock_irq_device_init);
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 95163baca29e..3479e2b3b511 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sysdev.h> 18#include <linux/syscore_ops.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
@@ -185,31 +185,21 @@ static void __init mainstone_init_irq(void)
185 185
186#ifdef CONFIG_PM 186#ifdef CONFIG_PM
187 187
188static int mainstone_irq_resume(struct sys_device *dev) 188static void mainstone_irq_resume(void)
189{ 189{
190 MST_INTMSKENA = mainstone_irq_enabled; 190 MST_INTMSKENA = mainstone_irq_enabled;
191 return 0;
192} 191}
193 192
194static struct sysdev_class mainstone_irq_sysclass = { 193static struct syscore_ops mainstone_irq_syscore_ops = {
195 .name = "cpld_irq",
196 .resume = mainstone_irq_resume, 194 .resume = mainstone_irq_resume,
197}; 195};
198 196
199static struct sys_device mainstone_irq_device = {
200 .cls = &mainstone_irq_sysclass,
201};
202
203static int __init mainstone_irq_device_init(void) 197static int __init mainstone_irq_device_init(void)
204{ 198{
205 int ret = -ENODEV; 199 if (machine_is_mainstone())
200 register_syscore_ops(&mainstone_irq_syscore_ops);
206 201
207 if (machine_is_mainstone()) { 202 return 0;
208 ret = sysdev_class_register(&mainstone_irq_sysclass);
209 if (ret == 0)
210 ret = sysdev_register(&mainstone_irq_device);
211 }
212 return ret;
213} 203}
214 204
215device_initcall(mainstone_irq_device_init); 205device_initcall(mainstone_irq_device_init);
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 1d1419b73457..87ae3129f4f7 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -16,7 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/sysdev.h> 19#include <linux/syscore_ops.h>
20 20
21#include <mach/gpio.h> 21#include <mach/gpio.h>
22#include <mach/pxa2xx-regs.h> 22#include <mach/pxa2xx-regs.h>
@@ -338,7 +338,7 @@ static unsigned long saved_gafr[2][4];
338static unsigned long saved_gpdr[4]; 338static unsigned long saved_gpdr[4];
339static unsigned long saved_pgsr[4]; 339static unsigned long saved_pgsr[4];
340 340
341static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) 341static int pxa2xx_mfp_suspend(void)
342{ 342{
343 int i; 343 int i;
344 344
@@ -365,7 +365,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state)
365 return 0; 365 return 0;
366} 366}
367 367
368static int pxa2xx_mfp_resume(struct sys_device *d) 368static void pxa2xx_mfp_resume(void)
369{ 369{
370 int i; 370 int i;
371 371
@@ -376,15 +376,13 @@ static int pxa2xx_mfp_resume(struct sys_device *d)
376 PGSR(i) = saved_pgsr[i]; 376 PGSR(i) = saved_pgsr[i];
377 } 377 }
378 PSSR = PSSR_RDH | PSSR_PH; 378 PSSR = PSSR_RDH | PSSR_PH;
379 return 0;
380} 379}
381#else 380#else
382#define pxa2xx_mfp_suspend NULL 381#define pxa2xx_mfp_suspend NULL
383#define pxa2xx_mfp_resume NULL 382#define pxa2xx_mfp_resume NULL
384#endif 383#endif
385 384
386struct sysdev_class pxa2xx_mfp_sysclass = { 385struct syscore_ops pxa2xx_mfp_syscore_ops = {
387 .name = "mfp",
388 .suspend = pxa2xx_mfp_suspend, 386 .suspend = pxa2xx_mfp_suspend,
389 .resume = pxa2xx_mfp_resume, 387 .resume = pxa2xx_mfp_resume,
390}; 388};
@@ -409,6 +407,6 @@ static int __init pxa2xx_mfp_init(void)
409 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) 407 for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++)
410 gpdr_lpm[i] = GPDR(i * 32); 408 gpdr_lpm[i] = GPDR(i * 32);
411 409
412 return sysdev_class_register(&pxa2xx_mfp_sysclass); 410 return 0;
413} 411}
414postcore_initcall(pxa2xx_mfp_init); 412postcore_initcall(pxa2xx_mfp_init);
diff --git a/arch/arm/mach-pxa/mfp-pxa3xx.c b/arch/arm/mach-pxa/mfp-pxa3xx.c
index 7a270eecd480..89863a01ecd7 100644
--- a/arch/arm/mach-pxa/mfp-pxa3xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa3xx.c
@@ -17,7 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/sysdev.h> 20#include <linux/syscore_ops.h>
21 21
22#include <mach/hardware.h> 22#include <mach/hardware.h>
23#include <mach/mfp-pxa3xx.h> 23#include <mach/mfp-pxa3xx.h>
@@ -31,13 +31,13 @@
31 * a pull-down mode if they're an active low chip select, and we're 31 * a pull-down mode if they're an active low chip select, and we're
32 * just entering standby. 32 * just entering standby.
33 */ 33 */
34static int pxa3xx_mfp_suspend(struct sys_device *d, pm_message_t state) 34static int pxa3xx_mfp_suspend(void)
35{ 35{
36 mfp_config_lpm(); 36 mfp_config_lpm();
37 return 0; 37 return 0;
38} 38}
39 39
40static int pxa3xx_mfp_resume(struct sys_device *d) 40static void pxa3xx_mfp_resume(void)
41{ 41{
42 mfp_config_run(); 42 mfp_config_run();
43 43
@@ -47,24 +47,13 @@ static int pxa3xx_mfp_resume(struct sys_device *d)
47 * preserve them here in case they will be referenced later 47 * preserve them here in case they will be referenced later
48 */ 48 */
49 ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); 49 ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
50 return 0;
51} 50}
52#else 51#else
53#define pxa3xx_mfp_suspend NULL 52#define pxa3xx_mfp_suspend NULL
54#define pxa3xx_mfp_resume NULL 53#define pxa3xx_mfp_resume NULL
55#endif 54#endif
56 55
57struct sysdev_class pxa3xx_mfp_sysclass = { 56struct syscore_ops pxa3xx_mfp_syscore_ops = {
58 .name = "mfp",
59 .suspend = pxa3xx_mfp_suspend, 57 .suspend = pxa3xx_mfp_suspend,
60 .resume = pxa3xx_mfp_resume, 58 .resume = pxa3xx_mfp_resume,
61}; 59};
62
63static int __init mfp_init_devicefs(void)
64{
65 if (cpu_is_pxa3xx())
66 return sysdev_class_register(&pxa3xx_mfp_sysclass);
67
68 return 0;
69}
70postcore_initcall(mfp_init_devicefs);
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 23925db8ff74..e3470137c934 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -22,7 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/sysdev.h> 25#include <linux/syscore_ops.h>
26#include <linux/input.h> 26#include <linux/input.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/gpio_keys.h> 28#include <linux/gpio_keys.h>
@@ -488,7 +488,7 @@ static void install_bootstrap(void)
488} 488}
489 489
490 490
491static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) 491static int mioa701_sys_suspend(void)
492{ 492{
493 int i = 0, is_bt_on; 493 int i = 0, is_bt_on;
494 u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); 494 u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR);
@@ -514,7 +514,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state)
514 return 0; 514 return 0;
515} 515}
516 516
517static int mioa701_sys_resume(struct sys_device *sysdev) 517static void mioa701_sys_resume(void)
518{ 518{
519 int i = 0; 519 int i = 0;
520 u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); 520 u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR);
@@ -527,43 +527,18 @@ static int mioa701_sys_resume(struct sys_device *sysdev)
527 *mem_resume_enabler = save_buffer[i++]; 527 *mem_resume_enabler = save_buffer[i++];
528 *mem_resume_bt = save_buffer[i++]; 528 *mem_resume_bt = save_buffer[i++];
529 *mem_resume_unknown = save_buffer[i++]; 529 *mem_resume_unknown = save_buffer[i++];
530
531 return 0;
532} 530}
533 531
534static struct sysdev_class mioa701_sysclass = { 532static struct syscore_ops mioa701_syscore_ops = {
535 .name = "mioa701", 533 .suspend = mioa701_sys_suspend,
536}; 534 .resume = mioa701_sys_resume,
537
538static struct sys_device sysdev_bootstrap = {
539 .cls = &mioa701_sysclass,
540};
541
542static struct sysdev_driver driver_bootstrap = {
543 .suspend = &mioa701_sys_suspend,
544 .resume = &mioa701_sys_resume,
545}; 535};
546 536
547static int __init bootstrap_init(void) 537static int __init bootstrap_init(void)
548{ 538{
549 int rc;
550 int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); 539 int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3);
551 540
552 rc = sysdev_class_register(&mioa701_sysclass); 541 register_syscore_ops(&mioa701_syscore_ops);
553 if (rc) {
554 printk(KERN_ERR "Failed registering mioa701 sys class\n");
555 return -ENODEV;
556 }
557 rc = sysdev_register(&sysdev_bootstrap);
558 if (rc) {
559 printk(KERN_ERR "Failed registering mioa701 sys device\n");
560 return -ENODEV;
561 }
562 rc = sysdev_driver_register(&mioa701_sysclass, &driver_bootstrap);
563 if (rc) {
564 printk(KERN_ERR "Failed registering PMU sys driver\n");
565 return -ENODEV;
566 }
567 542
568 save_buffer = kmalloc(save_size, GFP_KERNEL); 543 save_buffer = kmalloc(save_size, GFP_KERNEL);
569 if (!save_buffer) 544 if (!save_buffer)
@@ -576,9 +551,7 @@ static int __init bootstrap_init(void)
576static void bootstrap_exit(void) 551static void bootstrap_exit(void)
577{ 552{
578 kfree(save_buffer); 553 kfree(save_buffer);
579 sysdev_driver_unregister(&mioa701_sysclass, &driver_bootstrap); 554 unregister_syscore_ops(&mioa701_syscore_ops);
580 sysdev_unregister(&sysdev_bootstrap);
581 sysdev_class_unregister(&mioa701_sysclass);
582 555
583 printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" 556 printk(KERN_CRIT "Unregistering mioa701 suspend will hang next"
584 "resume !!!\n"); 557 "resume !!!\n");
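mioa701.c is the one conversion above with a teardown path, and it shows the matching cleanup call: the three-step sysdev unwind (driver, device, then class) collapses into a single unregister_syscore_ops(). A sketch with a hypothetical baz_* module:

#include <linux/module.h>
#include <linux/syscore_ops.h>

static struct syscore_ops baz_syscore_ops;	/* hypothetical; suspend/resume as in the earlier sketch */

static int __init baz_init(void)
{
	register_syscore_ops(&baz_syscore_ops);
	return 0;
}

static void __exit baz_exit(void)
{
	/* one call undoes the registration; no driver/device/class unwind */
	unregister_syscore_ops(&baz_syscore_ops);
}

module_init(baz_init);
module_exit(baz_exit);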
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index a6f898cbfac9..4061ecddee70 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -24,7 +24,6 @@
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/wm97xx.h> 25#include <linux/wm97xx.h>
26#include <linux/power_supply.h> 26#include <linux/power_supply.h>
27#include <linux/sysdev.h>
28#include <linux/mtd/mtd.h> 27#include <linux/mtd/mtd.h>
29#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
30#include <linux/mtd/physmap.h> 29#include <linux/mtd/physmap.h>
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 8aadad55fbe4..20d1b18b1733 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -25,7 +25,6 @@
25#include <linux/pwm_backlight.h> 25#include <linux/pwm_backlight.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/power_supply.h> 27#include <linux/power_supply.h>
28#include <linux/sysdev.h>
29#include <linux/w1-gpio.h> 28#include <linux/w1-gpio.h>
30 29
31#include <asm/mach-types.h> 30#include <asm/mach-types.h>
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 3b8a4f37dbbe..65f24f0b77e8 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -19,7 +19,7 @@
19 */ 19 */
20 20
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/sysdev.h> 22#include <linux/syscore_ops.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/gpio_keys.h> 25#include <linux/gpio_keys.h>
@@ -233,9 +233,9 @@ static struct palmz72_resume_info palmz72_resume_info = {
233 233
234static unsigned long store_ptr; 234static unsigned long store_ptr;
235 235
236/* sys_device for Palm Zire 72 PM */ 236/* syscore_ops for Palm Zire 72 PM */
237 237
238static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) 238static int palmz72_pm_suspend(void)
239{ 239{
240 /* setup the resume_info struct for the original bootloader */ 240 /* setup the resume_info struct for the original bootloader */
241 palmz72_resume_info.resume_addr = (u32) cpu_resume; 241 palmz72_resume_info.resume_addr = (u32) cpu_resume;
@@ -249,31 +249,23 @@ static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg)
249 return 0; 249 return 0;
250} 250}
251 251
252static int palmz72_pm_resume(struct sys_device *dev) 252static void palmz72_pm_resume(void)
253{ 253{
254 *PALMZ72_SAVE_DWORD = store_ptr; 254 *PALMZ72_SAVE_DWORD = store_ptr;
255 return 0;
256} 255}
257 256
258static struct sysdev_class palmz72_pm_sysclass = { 257static struct syscore_ops palmz72_pm_syscore_ops = {
259 .name = "palmz72_pm",
260 .suspend = palmz72_pm_suspend, 258 .suspend = palmz72_pm_suspend,
261 .resume = palmz72_pm_resume, 259 .resume = palmz72_pm_resume,
262}; 260};
263 261
264static struct sys_device palmz72_pm_device = {
265 .cls = &palmz72_pm_sysclass,
266};
267
268static int __init palmz72_pm_init(void) 262static int __init palmz72_pm_init(void)
269{ 263{
270 int ret = -ENODEV;
271 if (machine_is_palmz72()) { 264 if (machine_is_palmz72()) {
272 ret = sysdev_class_register(&palmz72_pm_sysclass); 265 register_syscore_ops(&palmz72_pm_syscore_ops);
273 if (ret == 0) 266 return 0;
274 ret = sysdev_register(&palmz72_pm_device);
275 } 267 }
276 return ret; 268 return -ENODEV;
277} 269}
278 270
279device_initcall(palmz72_pm_init); 271device_initcall(palmz72_pm_init);
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index a4af8c52d7ee..fed363cec9c6 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -21,7 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/suspend.h> 23#include <linux/suspend.h>
24#include <linux/sysdev.h> 24#include <linux/syscore_ops.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26 26
27#include <asm/mach/map.h> 27#include <asm/mach/map.h>
@@ -350,21 +350,9 @@ static struct platform_device *pxa25x_devices[] __initdata = {
350 &pxa_device_asoc_platform, 350 &pxa_device_asoc_platform,
351}; 351};
352 352
353static struct sys_device pxa25x_sysdev[] = {
354 {
355 .cls = &pxa_irq_sysclass,
356 }, {
357 .cls = &pxa2xx_mfp_sysclass,
358 }, {
359 .cls = &pxa_gpio_sysclass,
360 }, {
361 .cls = &pxa2xx_clock_sysclass,
362 }
363};
364
365static int __init pxa25x_init(void) 353static int __init pxa25x_init(void)
366{ 354{
367 int i, ret = 0; 355 int ret = 0;
368 356
369 if (cpu_is_pxa25x()) { 357 if (cpu_is_pxa25x()) {
370 358
@@ -377,11 +365,10 @@ static int __init pxa25x_init(void)
377 365
378 pxa25x_init_pm(); 366 pxa25x_init_pm();
379 367
380 for (i = 0; i < ARRAY_SIZE(pxa25x_sysdev); i++) { 368 register_syscore_ops(&pxa_irq_syscore_ops);
381 ret = sysdev_register(&pxa25x_sysdev[i]); 369 register_syscore_ops(&pxa2xx_mfp_syscore_ops);
382 if (ret) 370 register_syscore_ops(&pxa_gpio_syscore_ops);
383 pr_err("failed to register sysdev[%d]\n", i); 371 register_syscore_ops(&pxa2xx_clock_syscore_ops);
384 }
385 372
386 ret = platform_add_devices(pxa25x_devices, 373 ret = platform_add_devices(pxa25x_devices,
387 ARRAY_SIZE(pxa25x_devices)); 374 ARRAY_SIZE(pxa25x_devices));
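As in the pxa25x init above, the SoC-level files stop carrying an array of anonymous struct sys_device entries (one per sysdev_class) plus a registration loop, and instead register each subsystem's syscore_ops directly, in the same order. A sketch of that shape with hypothetical qux_* ops defined locally for illustration (in the real files they live in the irq, mfp/gpio and clock code):

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* hypothetical stand-ins; the real ops are defined in their own files */
static struct syscore_ops qux_irq_syscore_ops;
static struct syscore_ops qux_mfp_syscore_ops;
static struct syscore_ops qux_gpio_syscore_ops;
static struct syscore_ops qux_clock_syscore_ops;

static int __init qux_soc_init(void)
{
	/* same order as the old sysdev array: irq, mfp, gpio, clock */
	register_syscore_ops(&qux_irq_syscore_ops);
	register_syscore_ops(&qux_mfp_syscore_ops);
	register_syscore_ops(&qux_gpio_syscore_ops);
	register_syscore_ops(&qux_clock_syscore_ops);

	return 0;
}
postcore_initcall(qux_soc_init);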
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 909756eaf4b7..2fecbec58d88 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -16,7 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/sysdev.h> 19#include <linux/syscore_ops.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/i2c/pxa-i2c.h> 22#include <linux/i2c/pxa-i2c.h>
@@ -428,21 +428,9 @@ static struct platform_device *devices[] __initdata = {
428 &pxa27x_device_pwm1, 428 &pxa27x_device_pwm1,
429}; 429};
430 430
431static struct sys_device pxa27x_sysdev[] = {
432 {
433 .cls = &pxa_irq_sysclass,
434 }, {
435 .cls = &pxa2xx_mfp_sysclass,
436 }, {
437 .cls = &pxa_gpio_sysclass,
438 }, {
439 .cls = &pxa2xx_clock_sysclass,
440 }
441};
442
443static int __init pxa27x_init(void) 431static int __init pxa27x_init(void)
444{ 432{
445 int i, ret = 0; 433 int ret = 0;
446 434
447 if (cpu_is_pxa27x()) { 435 if (cpu_is_pxa27x()) {
448 436
@@ -455,11 +443,10 @@ static int __init pxa27x_init(void)
455 443
456 pxa27x_init_pm(); 444 pxa27x_init_pm();
457 445
458 for (i = 0; i < ARRAY_SIZE(pxa27x_sysdev); i++) { 446 register_syscore_ops(&pxa_irq_syscore_ops);
459 ret = sysdev_register(&pxa27x_sysdev[i]); 447 register_syscore_ops(&pxa2xx_mfp_syscore_ops);
460 if (ret) 448 register_syscore_ops(&pxa_gpio_syscore_ops);
461 pr_err("failed to register sysdev[%d]\n", i); 449 register_syscore_ops(&pxa2xx_clock_syscore_ops);
462 }
463 450
464 ret = platform_add_devices(devices, ARRAY_SIZE(devices)); 451 ret = platform_add_devices(devices, ARRAY_SIZE(devices));
465 } 452 }
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 8dd107391157..8521d7d6f1da 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -20,7 +20,7 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/sysdev.h> 23#include <linux/syscore_ops.h>
24#include <linux/i2c/pxa-i2c.h> 24#include <linux/i2c/pxa-i2c.h>
25 25
26#include <asm/mach/map.h> 26#include <asm/mach/map.h>
@@ -427,21 +427,9 @@ static struct platform_device *devices[] __initdata = {
427 &pxa27x_device_pwm1, 427 &pxa27x_device_pwm1,
428}; 428};
429 429
430static struct sys_device pxa3xx_sysdev[] = {
431 {
432 .cls = &pxa_irq_sysclass,
433 }, {
434 .cls = &pxa3xx_mfp_sysclass,
435 }, {
436 .cls = &pxa_gpio_sysclass,
437 }, {
438 .cls = &pxa3xx_clock_sysclass,
439 }
440};
441
442static int __init pxa3xx_init(void) 430static int __init pxa3xx_init(void)
443{ 431{
444 int i, ret = 0; 432 int ret = 0;
445 433
446 if (cpu_is_pxa3xx()) { 434 if (cpu_is_pxa3xx()) {
447 435
@@ -462,11 +450,10 @@ static int __init pxa3xx_init(void)
462 450
463 pxa3xx_init_pm(); 451 pxa3xx_init_pm();
464 452
465 for (i = 0; i < ARRAY_SIZE(pxa3xx_sysdev); i++) { 453 register_syscore_ops(&pxa_irq_syscore_ops);
466 ret = sysdev_register(&pxa3xx_sysdev[i]); 454 register_syscore_ops(&pxa3xx_mfp_syscore_ops);
467 if (ret) 455 register_syscore_ops(&pxa_gpio_syscore_ops);
468 pr_err("failed to register sysdev[%d]\n", i); 456 register_syscore_ops(&pxa3xx_clock_syscore_ops);
469 }
470 457
471 ret = platform_add_devices(devices, ARRAY_SIZE(devices)); 458 ret = platform_add_devices(devices, ARRAY_SIZE(devices));
472 } 459 }
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c
index 23b229bd06e9..ecc82a330fad 100644
--- a/arch/arm/mach-pxa/pxa95x.c
+++ b/arch/arm/mach-pxa/pxa95x.c
@@ -18,7 +18,7 @@
18#include <linux/i2c/pxa-i2c.h> 18#include <linux/i2c/pxa-i2c.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/sysdev.h> 21#include <linux/syscore_ops.h>
22 22
23#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/gpio.h> 24#include <mach/gpio.h>
@@ -260,16 +260,6 @@ static struct platform_device *devices[] __initdata = {
260 &pxa27x_device_pwm1, 260 &pxa27x_device_pwm1,
261}; 261};
262 262
263static struct sys_device pxa95x_sysdev[] = {
264 {
265 .cls = &pxa_irq_sysclass,
266 }, {
267 .cls = &pxa_gpio_sysclass,
268 }, {
269 .cls = &pxa3xx_clock_sysclass,
270 }
271};
272
273static int __init pxa95x_init(void) 263static int __init pxa95x_init(void)
274{ 264{
275 int ret = 0, i; 265 int ret = 0, i;
@@ -293,11 +283,9 @@ static int __init pxa95x_init(void)
293 if ((ret = pxa_init_dma(IRQ_DMA, 32))) 283 if ((ret = pxa_init_dma(IRQ_DMA, 32)))
294 return ret; 284 return ret;
295 285
296 for (i = 0; i < ARRAY_SIZE(pxa95x_sysdev); i++) { 286 register_syscore_ops(&pxa_irq_syscore_ops);
297 ret = sysdev_register(&pxa95x_sysdev[i]); 287 register_syscore_ops(&pxa_gpio_syscore_ops);
298 if (ret) 288 register_syscore_ops(&pxa3xx_clock_syscore_ops);
299 pr_err("failed to register sysdev[%d]\n", i);
300 }
301 289
302 ret = platform_add_devices(devices, ARRAY_SIZE(devices)); 290 ret = platform_add_devices(devices, ARRAY_SIZE(devices));
303 } 291 }
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index cd1861351f75..d130f77b6d11 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -18,7 +18,6 @@
18 18
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/sysdev.h>
22#include <linux/platform_device.h> 21#include <linux/platform_device.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/gpio.h> 23#include <linux/gpio.h>
diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
index 232b7316ec08..79923058d10f 100644
--- a/arch/arm/mach-pxa/smemc.c
+++ b/arch/arm/mach-pxa/smemc.c
@@ -6,7 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/io.h> 8#include <linux/io.h>
9#include <linux/sysdev.h> 9#include <linux/syscore_ops.h>
10 10
11#include <mach/hardware.h> 11#include <mach/hardware.h>
12#include <mach/smemc.h> 12#include <mach/smemc.h>
@@ -16,7 +16,7 @@ static unsigned long msc[2];
16static unsigned long sxcnfg, memclkcfg; 16static unsigned long sxcnfg, memclkcfg;
17static unsigned long csadrcfg[4]; 17static unsigned long csadrcfg[4];
18 18
19static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) 19static int pxa3xx_smemc_suspend(void)
20{ 20{
21 msc[0] = __raw_readl(MSC0); 21 msc[0] = __raw_readl(MSC0);
22 msc[1] = __raw_readl(MSC1); 22 msc[1] = __raw_readl(MSC1);
@@ -30,7 +30,7 @@ static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state)
30 return 0; 30 return 0;
31} 31}
32 32
33static int pxa3xx_smemc_resume(struct sys_device *dev) 33static void pxa3xx_smemc_resume(void)
34{ 34{
35 __raw_writel(msc[0], MSC0); 35 __raw_writel(msc[0], MSC0);
36 __raw_writel(msc[1], MSC1); 36 __raw_writel(msc[1], MSC1);
@@ -40,34 +40,19 @@ static int pxa3xx_smemc_resume(struct sys_device *dev)
40 __raw_writel(csadrcfg[1], CSADRCFG1); 40 __raw_writel(csadrcfg[1], CSADRCFG1);
41 __raw_writel(csadrcfg[2], CSADRCFG2); 41 __raw_writel(csadrcfg[2], CSADRCFG2);
42 __raw_writel(csadrcfg[3], CSADRCFG3); 42 __raw_writel(csadrcfg[3], CSADRCFG3);
43
44 return 0;
45} 43}
46 44
47static struct sysdev_class smemc_sysclass = { 45static struct syscore_ops smemc_syscore_ops = {
48 .name = "smemc",
49 .suspend = pxa3xx_smemc_suspend, 46 .suspend = pxa3xx_smemc_suspend,
50 .resume = pxa3xx_smemc_resume, 47 .resume = pxa3xx_smemc_resume,
51}; 48};
52 49
53static struct sys_device smemc_sysdev = {
54 .id = 0,
55 .cls = &smemc_sysclass,
56};
57
58static int __init smemc_init(void) 50static int __init smemc_init(void)
59{ 51{
60 int ret = 0; 52 if (cpu_is_pxa3xx())
53 register_syscore_ops(&smemc_syscore_ops);
61 54
62 if (cpu_is_pxa3xx()) { 55 return 0;
63 ret = sysdev_class_register(&smemc_sysclass);
64 if (ret)
65 return ret;
66
67 ret = sysdev_register(&smemc_sysdev);
68 }
69
70 return ret;
71} 56}
72subsys_initcall(smemc_init); 57subsys_initcall(smemc_init);
73#endif 58#endif
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index b9cfbebdfe9c..687417a93698 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -15,7 +15,6 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sysdev.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <linux/sched.h> 19#include <linux/sched.h>
21#include <linux/bitops.h> 20#include <linux/bitops.h>
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index b523f119e0f0..903218eab56d 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -44,6 +44,7 @@
44#include <linux/mtd/mtd.h> 44#include <linux/mtd/mtd.h>
45#include <linux/mtd/partitions.h> 45#include <linux/mtd/partitions.h>
46#include <linux/mtd/physmap.h> 46#include <linux/mtd/physmap.h>
47#include <linux/syscore_ops.h>
47 48
48#include <mach/pxa25x.h> 49#include <mach/pxa25x.h>
49#include <mach/audio.h> 50#include <mach/audio.h>
@@ -130,20 +131,19 @@ static u8 viper_hw_version(void)
130 return v1; 131 return v1;
131} 132}
132 133
133/* CPU sysdev */ 134/* CPU system core operations. */
134static int viper_cpu_suspend(struct sys_device *sysdev, pm_message_t state) 135static int viper_cpu_suspend(void)
135{ 136{
136 viper_icr_set_bit(VIPER_ICR_R_DIS); 137 viper_icr_set_bit(VIPER_ICR_R_DIS);
137 return 0; 138 return 0;
138} 139}
139 140
140static int viper_cpu_resume(struct sys_device *sysdev) 141static void viper_cpu_resume(void)
141{ 142{
142 viper_icr_clear_bit(VIPER_ICR_R_DIS); 143 viper_icr_clear_bit(VIPER_ICR_R_DIS);
143 return 0;
144} 144}
145 145
146static struct sysdev_driver viper_cpu_sysdev_driver = { 146static struct syscore_ops viper_cpu_syscore_ops = {
147 .suspend = viper_cpu_suspend, 147 .suspend = viper_cpu_suspend,
148 .resume = viper_cpu_resume, 148 .resume = viper_cpu_resume,
149}; 149};
@@ -945,7 +945,7 @@ static void __init viper_init(void)
945 viper_init_vcore_gpios(); 945 viper_init_vcore_gpios();
946 viper_init_cpufreq(); 946 viper_init_cpufreq();
947 947
948 sysdev_driver_register(&cpu_sysdev_class, &viper_cpu_sysdev_driver); 948 register_syscore_ops(&viper_cpu_syscore_ops);
949 949
950 if (version) { 950 if (version) {
951 pr_info("viper: hardware v%di%d detected. " 951 pr_info("viper: hardware v%di%d detected. "
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index f71d377c8640..67bd41488bf8 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -16,7 +16,6 @@
16#include <linux/gpio_keys.h> 16#include <linux/gpio_keys.h>
17#include <linux/input.h> 17#include <linux/input.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/sysdev.h>
20#include <linux/usb/gpio_vbus.h> 19#include <linux/usb/gpio_vbus.h>
21#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
22#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
index 0c5d749d7b5f..9a732195aa1c 100644
--- a/arch/arm/mach-realview/include/mach/barriers.h
+++ b/arch/arm/mach-realview/include/mach/barriers.h
@@ -4,5 +4,5 @@
4 * operation to deadlock the system. 4 * operation to deadlock the system.
5 */ 5 */
6#define mb() dsb() 6#define mb() dsb()
7#define rmb() dmb() 7#define rmb() dsb()
8#define wmb() mb() 8#define wmb() mb()
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c
index 5e2f35332056..2854129f8cc7 100644
--- a/arch/arm/mach-s3c2410/irq.c
+++ b/arch/arm/mach-s3c2410/irq.c
@@ -23,38 +23,12 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/sysdev.h> 26#include <linux/syscore_ops.h>
27 27
28#include <plat/cpu.h> 28#include <plat/cpu.h>
29#include <plat/pm.h> 29#include <plat/pm.h>
30 30
31static int s3c2410_irq_add(struct sys_device *sysdev) 31struct syscore_ops s3c24xx_irq_syscore_ops = {
32{
33 return 0;
34}
35
36static struct sysdev_driver s3c2410_irq_driver = {
37 .add = s3c2410_irq_add,
38 .suspend = s3c24xx_irq_suspend, 32 .suspend = s3c24xx_irq_suspend,
39 .resume = s3c24xx_irq_resume, 33 .resume = s3c24xx_irq_resume,
40}; 34};
41
42static int __init s3c2410_irq_init(void)
43{
44 return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_irq_driver);
45}
46
47arch_initcall(s3c2410_irq_init);
48
49static struct sysdev_driver s3c2410a_irq_driver = {
50 .add = s3c2410_irq_add,
51 .suspend = s3c24xx_irq_suspend,
52 .resume = s3c24xx_irq_resume,
53};
54
55static int __init s3c2410a_irq_init(void)
56{
57 return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_irq_driver);
58}
59
60arch_initcall(s3c2410a_irq_init);
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 2970ea9f7c2b..1e2d536adda9 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -17,7 +17,7 @@
17#include <linux/timer.h> 17#include <linux/timer.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/sysdev.h> 20#include <linux/syscore_ops.h>
21#include <linux/serial_core.h> 21#include <linux/serial_core.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/dm9000.h> 23#include <linux/dm9000.h>
@@ -214,17 +214,16 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
214/* NAND Flash on BAST board */ 214/* NAND Flash on BAST board */
215 215
216#ifdef CONFIG_PM 216#ifdef CONFIG_PM
217static int bast_pm_suspend(struct sys_device *sd, pm_message_t state) 217static int bast_pm_suspend(void)
218{ 218{
219 /* ensure that an nRESET is not generated on resume. */ 219 /* ensure that an nRESET is not generated on resume. */
220 gpio_direction_output(S3C2410_GPA(21), 1); 220 gpio_direction_output(S3C2410_GPA(21), 1);
221 return 0; 221 return 0;
222} 222}
223 223
224static int bast_pm_resume(struct sys_device *sd) 224static void bast_pm_resume(void)
225{ 225{
226 s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); 226 s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
227 return 0;
228} 227}
229 228
230#else 229#else
@@ -232,16 +231,11 @@ static int bast_pm_resume(struct sys_device *sd)
232#define bast_pm_resume NULL 231#define bast_pm_resume NULL
233#endif 232#endif
234 233
235static struct sysdev_class bast_pm_sysclass = { 234static struct syscore_ops bast_pm_syscore_ops = {
236 .name = "mach-bast",
237 .suspend = bast_pm_suspend, 235 .suspend = bast_pm_suspend,
238 .resume = bast_pm_resume, 236 .resume = bast_pm_resume,
239}; 237};
240 238
241static struct sys_device bast_pm_sysdev = {
242 .cls = &bast_pm_sysclass,
243};
244
245static int smartmedia_map[] = { 0 }; 239static int smartmedia_map[] = { 0 };
246static int chip0_map[] = { 1 }; 240static int chip0_map[] = { 1 };
247static int chip1_map[] = { 2 }; 241static int chip1_map[] = { 2 };
@@ -642,8 +636,7 @@ static void __init bast_map_io(void)
642 636
643static void __init bast_init(void) 637static void __init bast_init(void)
644{ 638{
645 sysdev_class_register(&bast_pm_sysclass); 639 register_syscore_ops(&bast_pm_syscore_ops);
646 sysdev_register(&bast_pm_sysdev);
647 640
648 s3c_i2c0_set_platdata(&bast_i2c_info); 641 s3c_i2c0_set_platdata(&bast_i2c_info);
649 s3c_nand_set_platdata(&bast_nand_info); 642 s3c_nand_set_platdata(&bast_nand_info);
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c
index 725636fc4dc3..4728f9aa7df1 100644
--- a/arch/arm/mach-s3c2410/pm.c
+++ b/arch/arm/mach-s3c2410/pm.c
@@ -25,6 +25,7 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/time.h> 26#include <linux/time.h>
27#include <linux/sysdev.h> 27#include <linux/sysdev.h>
28#include <linux/syscore_ops.h>
28#include <linux/gpio.h> 29#include <linux/gpio.h>
29#include <linux/io.h> 30#include <linux/io.h>
30 31
@@ -92,7 +93,7 @@ static void s3c2410_pm_prepare(void)
92 } 93 }
93} 94}
94 95
95static int s3c2410_pm_resume(struct sys_device *dev) 96static void s3c2410_pm_resume(void)
96{ 97{
97 unsigned long tmp; 98 unsigned long tmp;
98 99
@@ -104,10 +105,12 @@ static int s3c2410_pm_resume(struct sys_device *dev)
104 105
105 if ( machine_is_aml_m5900() ) 106 if ( machine_is_aml_m5900() )
106 s3c2410_gpio_setpin(S3C2410_GPF(2), 0); 107 s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
107
108 return 0;
109} 108}
110 109
110struct syscore_ops s3c2410_pm_syscore_ops = {
111 .resume = s3c2410_pm_resume,
112};
113
111static int s3c2410_pm_add(struct sys_device *dev) 114static int s3c2410_pm_add(struct sys_device *dev)
112{ 115{
113 pm_cpu_prep = s3c2410_pm_prepare; 116 pm_cpu_prep = s3c2410_pm_prepare;
@@ -119,7 +122,6 @@ static int s3c2410_pm_add(struct sys_device *dev)
119#if defined(CONFIG_CPU_S3C2410) 122#if defined(CONFIG_CPU_S3C2410)
120static struct sysdev_driver s3c2410_pm_driver = { 123static struct sysdev_driver s3c2410_pm_driver = {
121 .add = s3c2410_pm_add, 124 .add = s3c2410_pm_add,
122 .resume = s3c2410_pm_resume,
123}; 125};
124 126
125/* register ourselves */ 127/* register ourselves */
@@ -133,7 +135,6 @@ arch_initcall(s3c2410_pm_drvinit);
133 135
134static struct sysdev_driver s3c2410a_pm_driver = { 136static struct sysdev_driver s3c2410a_pm_driver = {
135 .add = s3c2410_pm_add, 137 .add = s3c2410_pm_add,
136 .resume = s3c2410_pm_resume,
137}; 138};
138 139
139static int __init s3c2410a_pm_drvinit(void) 140static int __init s3c2410a_pm_drvinit(void)
@@ -147,7 +148,6 @@ arch_initcall(s3c2410a_pm_drvinit);
147#if defined(CONFIG_CPU_S3C2440) 148#if defined(CONFIG_CPU_S3C2440)
148static struct sysdev_driver s3c2440_pm_driver = { 149static struct sysdev_driver s3c2440_pm_driver = {
149 .add = s3c2410_pm_add, 150 .add = s3c2410_pm_add,
150 .resume = s3c2410_pm_resume,
151}; 151};
152 152
153static int __init s3c2440_pm_drvinit(void) 153static int __init s3c2440_pm_drvinit(void)
@@ -161,7 +161,6 @@ arch_initcall(s3c2440_pm_drvinit);
161#if defined(CONFIG_CPU_S3C2442) 161#if defined(CONFIG_CPU_S3C2442)
162static struct sysdev_driver s3c2442_pm_driver = { 162static struct sysdev_driver s3c2442_pm_driver = {
163 .add = s3c2410_pm_add, 163 .add = s3c2410_pm_add,
164 .resume = s3c2410_pm_resume,
165}; 164};
166 165
167static int __init s3c2442_pm_drvinit(void) 166static int __init s3c2442_pm_drvinit(void)
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index adc90a3c5890..f1d3bd8f6f17 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/syscore_ops.h>
22#include <linux/serial_core.h> 23#include <linux/serial_core.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/io.h> 25#include <linux/io.h>
@@ -40,6 +41,7 @@
40#include <plat/devs.h> 41#include <plat/devs.h>
41#include <plat/clock.h> 42#include <plat/clock.h>
42#include <plat/pll.h> 43#include <plat/pll.h>
44#include <plat/pm.h>
43 45
44#include <plat/gpio-core.h> 46#include <plat/gpio-core.h>
45#include <plat/gpio-cfg.h> 47#include <plat/gpio-cfg.h>
@@ -168,6 +170,9 @@ int __init s3c2410_init(void)
168{ 170{
169 printk("S3C2410: Initialising architecture\n"); 171 printk("S3C2410: Initialising architecture\n");
170 172
173 register_syscore_ops(&s3c2410_pm_syscore_ops);
174 register_syscore_ops(&s3c24xx_irq_syscore_ops);
175
171 return sysdev_register(&s3c2410_sysdev); 176 return sysdev_register(&s3c2410_sysdev);
172} 177}
173 178
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index f3355d2ec634..1a1aa220972b 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -202,8 +202,6 @@ static int s3c2412_irq_add(struct sys_device *sysdev)
202 202
203static struct sysdev_driver s3c2412_irq_driver = { 203static struct sysdev_driver s3c2412_irq_driver = {
204 .add = s3c2412_irq_add, 204 .add = s3c2412_irq_add,
205 .suspend = s3c24xx_irq_suspend,
206 .resume = s3c24xx_irq_resume,
207}; 205};
208 206
209static int s3c2412_irq_init(void) 207static int s3c2412_irq_init(void)
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 923e01bdf017..85dcaeb9e62f 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -17,7 +17,7 @@
17#include <linux/timer.h> 17#include <linux/timer.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/sysdev.h> 20#include <linux/syscore_ops.h>
21#include <linux/serial_core.h> 21#include <linux/serial_core.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
@@ -486,7 +486,7 @@ static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = {
486/* Jive power management device */ 486/* Jive power management device */
487 487
488#ifdef CONFIG_PM 488#ifdef CONFIG_PM
489static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) 489static int jive_pm_suspend(void)
490{ 490{
491 /* Write the magic value u-boot uses to check for resume into 491 /* Write the magic value u-boot uses to check for resume into
492 * the INFORM0 register, and ensure INFORM1 is set to the 492 * the INFORM0 register, and ensure INFORM1 is set to the
@@ -498,10 +498,9 @@ static int jive_pm_suspend(struct sys_device *sd, pm_message_t state)
498 return 0; 498 return 0;
499} 499}
500 500
501static int jive_pm_resume(struct sys_device *sd) 501static void jive_pm_resume(void)
502{ 502{
503 __raw_writel(0x0, S3C2412_INFORM0); 503 __raw_writel(0x0, S3C2412_INFORM0);
504 return 0;
505} 504}
506 505
507#else 506#else
@@ -509,16 +508,11 @@ static int jive_pm_resume(struct sys_device *sd)
509#define jive_pm_resume NULL 508#define jive_pm_resume NULL
510#endif 509#endif
511 510
512static struct sysdev_class jive_pm_sysclass = { 511static struct syscore_ops jive_pm_syscore_ops = {
513 .name = "jive-pm",
514 .suspend = jive_pm_suspend, 512 .suspend = jive_pm_suspend,
515 .resume = jive_pm_resume, 513 .resume = jive_pm_resume,
516}; 514};
517 515
518static struct sys_device jive_pm_sysdev = {
519 .cls = &jive_pm_sysclass,
520};
521
522static void __init jive_map_io(void) 516static void __init jive_map_io(void)
523{ 517{
524 s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); 518 s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc));
@@ -536,10 +530,9 @@ static void jive_power_off(void)
536 530
537static void __init jive_machine_init(void) 531static void __init jive_machine_init(void)
538{ 532{
539 /* register system devices for managing low level suspend */ 533 /* register system core operations for managing low level suspend */
540 534
541 sysdev_class_register(&jive_pm_sysclass); 535 register_syscore_ops(&jive_pm_syscore_ops);
542 sysdev_register(&jive_pm_sysdev);
543 536
544 /* write our sleep configurations for the IO. Pull down all unused 537 /* write our sleep configurations for the IO. Pull down all unused
545 * IO, ensure that we have turned off all peripherals we do not 538 * IO, ensure that we have turned off all peripherals we do not
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index a7417c479ffe..752b13a7b3db 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -17,6 +17,7 @@
17#include <linux/timer.h> 17#include <linux/timer.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/sysdev.h> 19#include <linux/sysdev.h>
20#include <linux/syscore_ops.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/io.h> 22#include <linux/io.h>
22 23
@@ -86,13 +87,24 @@ static struct sleep_save s3c2412_sleep[] = {
86 SAVE_ITEM(S3C2413_GPJSLPCON), 87 SAVE_ITEM(S3C2413_GPJSLPCON),
87}; 88};
88 89
89static int s3c2412_pm_suspend(struct sys_device *dev, pm_message_t state) 90static struct sysdev_driver s3c2412_pm_driver = {
91 .add = s3c2412_pm_add,
92};
93
94static __init int s3c2412_pm_init(void)
95{
96 return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver);
97}
98
99arch_initcall(s3c2412_pm_init);
100
101static int s3c2412_pm_suspend(void)
90{ 102{
91 s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); 103 s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep));
92 return 0; 104 return 0;
93} 105}
94 106
95static int s3c2412_pm_resume(struct sys_device *dev) 107static void s3c2412_pm_resume(void)
96{ 108{
97 unsigned long tmp; 109 unsigned long tmp;
98 110
@@ -102,18 +114,9 @@ static int s3c2412_pm_resume(struct sys_device *dev)
102 __raw_writel(tmp, S3C2412_PWRCFG); 114 __raw_writel(tmp, S3C2412_PWRCFG);
103 115
104 s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); 116 s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep));
105 return 0;
106} 117}
107 118
108static struct sysdev_driver s3c2412_pm_driver = { 119struct syscore_ops s3c2412_pm_syscore_ops = {
109 .add = s3c2412_pm_add,
110 .suspend = s3c2412_pm_suspend, 120 .suspend = s3c2412_pm_suspend,
111 .resume = s3c2412_pm_resume, 121 .resume = s3c2412_pm_resume,
112}; 122};
113
114static __init int s3c2412_pm_init(void)
115{
116 return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver);
117}
118
119arch_initcall(s3c2412_pm_init);
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index 4c6df51ddf33..ef0958d3e5c6 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/syscore_ops.h>
22#include <linux/serial_core.h> 23#include <linux/serial_core.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/io.h> 25#include <linux/io.h>
@@ -244,5 +245,8 @@ int __init s3c2412_init(void)
244{ 245{
245 printk("S3C2412: Initialising architecture\n"); 246 printk("S3C2412: Initialising architecture\n");
246 247
248 register_syscore_ops(&s3c2412_pm_syscore_ops);
249 register_syscore_ops(&s3c24xx_irq_syscore_ops);
250
247 return sysdev_register(&s3c2412_sysdev); 251 return sysdev_register(&s3c2412_sysdev);
248} 252}
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 77b38f2381c1..28ad20d42445 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -236,8 +236,6 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev)
236 236
237static struct sysdev_driver s3c2416_irq_driver = { 237static struct sysdev_driver s3c2416_irq_driver = {
238 .add = s3c2416_irq_add, 238 .add = s3c2416_irq_add,
239 .suspend = s3c24xx_irq_suspend,
240 .resume = s3c24xx_irq_resume,
241}; 239};
242 240
243static int __init s3c2416_irq_init(void) 241static int __init s3c2416_irq_init(void)
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 4a04205b04d5..41db2b21e213 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -11,6 +11,7 @@
11*/ 11*/
12 12
13#include <linux/sysdev.h> 13#include <linux/sysdev.h>
14#include <linux/syscore_ops.h>
14#include <linux/io.h> 15#include <linux/io.h>
15 16
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
@@ -55,30 +56,26 @@ static int s3c2416_pm_add(struct sys_device *sysdev)
55 return 0; 56 return 0;
56} 57}
57 58
58static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state) 59static struct sysdev_driver s3c2416_pm_driver = {
60 .add = s3c2416_pm_add,
61};
62
63static __init int s3c2416_pm_init(void)
59{ 64{
60 return 0; 65 return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver);
61} 66}
62 67
63static int s3c2416_pm_resume(struct sys_device *dev) 68arch_initcall(s3c2416_pm_init);
69
70
71static void s3c2416_pm_resume(void)
64{ 72{
65	/* unset the return-from-sleep and inform flags */ 73	/* unset the return-from-sleep and inform flags */
66 __raw_writel(0x0, S3C2443_PWRMODE); 74 __raw_writel(0x0, S3C2443_PWRMODE);
67 __raw_writel(0x0, S3C2412_INFORM0); 75 __raw_writel(0x0, S3C2412_INFORM0);
68 __raw_writel(0x0, S3C2412_INFORM1); 76 __raw_writel(0x0, S3C2412_INFORM1);
69
70 return 0;
71} 77}
72 78
73static struct sysdev_driver s3c2416_pm_driver = { 79struct syscore_ops s3c2416_pm_syscore_ops = {
74 .add = s3c2416_pm_add,
75 .suspend = s3c2416_pm_suspend,
76 .resume = s3c2416_pm_resume, 80 .resume = s3c2416_pm_resume,
77}; 81};
78
79static __init int s3c2416_pm_init(void)
80{
81 return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver);
82}
83
84arch_initcall(s3c2416_pm_init);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index ba7fd8737434..494ce913dc95 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -32,6 +32,7 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/serial_core.h> 33#include <linux/serial_core.h>
34#include <linux/sysdev.h> 34#include <linux/sysdev.h>
35#include <linux/syscore_ops.h>
35#include <linux/clk.h> 36#include <linux/clk.h>
36#include <linux/io.h> 37#include <linux/io.h>
37 38
@@ -54,6 +55,7 @@
54#include <plat/devs.h> 55#include <plat/devs.h>
55#include <plat/cpu.h> 56#include <plat/cpu.h>
56#include <plat/sdhci.h> 57#include <plat/sdhci.h>
58#include <plat/pm.h>
57 59
58#include <plat/iic-core.h> 60#include <plat/iic-core.h>
59#include <plat/fb-core.h> 61#include <plat/fb-core.h>
@@ -95,6 +97,9 @@ int __init s3c2416_init(void)
95 97
96 s3c_fb_setname("s3c2443-fb"); 98 s3c_fb_setname("s3c2443-fb");
97 99
100 register_syscore_ops(&s3c2416_pm_syscore_ops);
101 register_syscore_ops(&s3c24xx_irq_syscore_ops);
102
98 return sysdev_register(&s3c2416_sysdev); 103 return sysdev_register(&s3c2416_sysdev);
99} 104}
100 105
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index 14dc67897757..d88536393310 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -17,7 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/sysdev.h> 20#include <linux/syscore_ops.h>
21#include <linux/serial_core.h> 21#include <linux/serial_core.h>
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
@@ -284,7 +284,7 @@ static struct platform_device osiris_pcmcia = {
284#ifdef CONFIG_PM 284#ifdef CONFIG_PM
285static unsigned char pm_osiris_ctrl0; 285static unsigned char pm_osiris_ctrl0;
286 286
287static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) 287static int osiris_pm_suspend(void)
288{ 288{
289 unsigned int tmp; 289 unsigned int tmp;
290 290
@@ -304,7 +304,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state)
304 return 0; 304 return 0;
305} 305}
306 306
307static int osiris_pm_resume(struct sys_device *sd) 307static void osiris_pm_resume(void)
308{ 308{
309 if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) 309 if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8)
310 __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); 310 __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1);
@@ -312,8 +312,6 @@ static int osiris_pm_resume(struct sys_device *sd)
312 __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); 312 __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0);
313 313
314 s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); 314 s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
315
316 return 0;
317} 315}
318 316
319#else 317#else
@@ -321,16 +319,11 @@ static int osiris_pm_resume(struct sys_device *sd)
321#define osiris_pm_resume NULL 319#define osiris_pm_resume NULL
322#endif 320#endif
323 321
324static struct sysdev_class osiris_pm_sysclass = { 322static struct syscore_ops osiris_pm_syscore_ops = {
325 .name = "mach-osiris",
326 .suspend = osiris_pm_suspend, 323 .suspend = osiris_pm_suspend,
327 .resume = osiris_pm_resume, 324 .resume = osiris_pm_resume,
328}; 325};
329 326
330static struct sys_device osiris_pm_sysdev = {
331 .cls = &osiris_pm_sysclass,
332};
333
334/* Link for DVS driver to TPS65011 */ 327/* Link for DVS driver to TPS65011 */
335 328
336static void osiris_tps_release(struct device *dev) 329static void osiris_tps_release(struct device *dev)
@@ -439,8 +432,7 @@ static void __init osiris_map_io(void)
439 432
440static void __init osiris_init(void) 433static void __init osiris_init(void)
441{ 434{
442 sysdev_class_register(&osiris_pm_sysclass); 435 register_syscore_ops(&osiris_pm_syscore_ops);
443 sysdev_register(&osiris_pm_sysdev);
444 436
445 s3c_i2c0_set_platdata(NULL); 437 s3c_i2c0_set_platdata(NULL);
446 s3c_nand_set_platdata(&osiris_nand_info); 438 s3c_nand_set_platdata(&osiris_nand_info);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index f7663f731ea0..ce99ff72838d 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/serial_core.h> 20#include <linux/serial_core.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/syscore_ops.h>
22#include <linux/gpio.h> 23#include <linux/gpio.h>
23#include <linux/clk.h> 24#include <linux/clk.h>
24#include <linux/io.h> 25#include <linux/io.h>
@@ -33,6 +34,7 @@
33#include <plat/devs.h> 34#include <plat/devs.h>
34#include <plat/cpu.h> 35#include <plat/cpu.h>
35#include <plat/s3c244x.h> 36#include <plat/s3c244x.h>
37#include <plat/pm.h>
36 38
37#include <plat/gpio-core.h> 39#include <plat/gpio-core.h>
38#include <plat/gpio-cfg.h> 40#include <plat/gpio-cfg.h>
@@ -51,6 +53,12 @@ int __init s3c2440_init(void)
51 s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; 53 s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT;
52 s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; 54 s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT;
53 55
56 /* register suspend/resume handlers */
57
58 register_syscore_ops(&s3c2410_pm_syscore_ops);
59 register_syscore_ops(&s3c244x_pm_syscore_ops);
60 register_syscore_ops(&s3c24xx_irq_syscore_ops);
61
54 /* register our system device for everything else */ 62 /* register our system device for everything else */
55 63
56 return sysdev_register(&s3c2440_sysdev); 64 return sysdev_register(&s3c2440_sysdev);
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index ecf813546554..6224bad4d604 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -29,6 +29,7 @@
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/sysdev.h> 31#include <linux/sysdev.h>
32#include <linux/syscore_ops.h>
32#include <linux/interrupt.h> 33#include <linux/interrupt.h>
33#include <linux/ioport.h> 34#include <linux/ioport.h>
34#include <linux/mutex.h> 35#include <linux/mutex.h>
@@ -45,6 +46,7 @@
45#include <plat/clock.h> 46#include <plat/clock.h>
46#include <plat/cpu.h> 47#include <plat/cpu.h>
47#include <plat/s3c244x.h> 48#include <plat/s3c244x.h>
49#include <plat/pm.h>
48 50
49#include <plat/gpio-core.h> 51#include <plat/gpio-core.h>
50#include <plat/gpio-cfg.h> 52#include <plat/gpio-cfg.h>
@@ -167,6 +169,10 @@ int __init s3c2442_init(void)
167{ 169{
168 printk("S3C2442: Initialising architecture\n"); 170 printk("S3C2442: Initialising architecture\n");
169 171
172 register_syscore_ops(&s3c2410_pm_syscore_ops);
173 register_syscore_ops(&s3c244x_pm_syscore_ops);
174 register_syscore_ops(&s3c24xx_irq_syscore_ops);
175
170 return sysdev_register(&s3c2442_sysdev); 176 return sysdev_register(&s3c2442_sysdev);
171} 177}
172 178
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index de07c2feaa32..c63e8f26d901 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -116,8 +116,6 @@ static int s3c244x_irq_add(struct sys_device *sysdev)
116 116
117static struct sysdev_driver s3c2440_irq_driver = { 117static struct sysdev_driver s3c2440_irq_driver = {
118 .add = s3c244x_irq_add, 118 .add = s3c244x_irq_add,
119 .suspend = s3c24xx_irq_suspend,
120 .resume = s3c24xx_irq_resume,
121}; 119};
122 120
123static int s3c2440_irq_init(void) 121static int s3c2440_irq_init(void)
@@ -129,8 +127,6 @@ arch_initcall(s3c2440_irq_init);
129 127
130static struct sysdev_driver s3c2442_irq_driver = { 128static struct sysdev_driver s3c2442_irq_driver = {
131 .add = s3c244x_irq_add, 129 .add = s3c244x_irq_add,
132 .suspend = s3c24xx_irq_suspend,
133 .resume = s3c24xx_irq_resume,
134}; 130};
135 131
136 132
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 90c1707b9c95..7e8a23d2098a 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -19,6 +19,7 @@
19#include <linux/serial_core.h> 19#include <linux/serial_core.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/syscore_ops.h>
22#include <linux/clk.h> 23#include <linux/clk.h>
23#include <linux/io.h> 24#include <linux/io.h>
24 25
@@ -134,45 +135,14 @@ void __init s3c244x_init_clocks(int xtal)
134 s3c2410_baseclk_add(); 135 s3c2410_baseclk_add();
135} 136}
136 137
137#ifdef CONFIG_PM
138
139static struct sleep_save s3c244x_sleep[] = {
140 SAVE_ITEM(S3C2440_DSC0),
141 SAVE_ITEM(S3C2440_DSC1),
142 SAVE_ITEM(S3C2440_GPJDAT),
143 SAVE_ITEM(S3C2440_GPJCON),
144 SAVE_ITEM(S3C2440_GPJUP)
145};
146
147static int s3c244x_suspend(struct sys_device *dev, pm_message_t state)
148{
149 s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep));
150 return 0;
151}
152
153static int s3c244x_resume(struct sys_device *dev)
154{
155 s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep));
156 return 0;
157}
158
159#else
160#define s3c244x_suspend NULL
161#define s3c244x_resume NULL
162#endif
163
164/* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ 138/* Since the S3C2442 and S3C2440 share items, put both sysclasses here */
165 139
166struct sysdev_class s3c2440_sysclass = { 140struct sysdev_class s3c2440_sysclass = {
167 .name = "s3c2440-core", 141 .name = "s3c2440-core",
168 .suspend = s3c244x_suspend,
169 .resume = s3c244x_resume
170}; 142};
171 143
172struct sysdev_class s3c2442_sysclass = { 144struct sysdev_class s3c2442_sysclass = {
173 .name = "s3c2442-core", 145 .name = "s3c2442-core",
174 .suspend = s3c244x_suspend,
175 .resume = s3c244x_resume
176}; 146};
177 147
178/* need to register class before we actually register the device, and 148/* need to register class before we actually register the device, and
@@ -194,3 +164,33 @@ static int __init s3c2442_core_init(void)
194} 164}
195 165
196core_initcall(s3c2442_core_init); 166core_initcall(s3c2442_core_init);
167
168
169#ifdef CONFIG_PM
170static struct sleep_save s3c244x_sleep[] = {
171 SAVE_ITEM(S3C2440_DSC0),
172 SAVE_ITEM(S3C2440_DSC1),
173 SAVE_ITEM(S3C2440_GPJDAT),
174 SAVE_ITEM(S3C2440_GPJCON),
175 SAVE_ITEM(S3C2440_GPJUP)
176};
177
178static int s3c244x_suspend(void)
179{
180 s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep));
181 return 0;
182}
183
184static void s3c244x_resume(void)
185{
186 s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep));
187}
188#else
189#define s3c244x_suspend NULL
190#define s3c244x_resume NULL
191#endif
192
193struct syscore_ops s3c244x_pm_syscore_ops = {
194 .suspend = s3c244x_suspend,
195 .resume = s3c244x_resume,
196};
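The relocated s3c244x block above relies on the Samsung sleep_save helpers that appear throughout this series. As a reference, a hedged sketch of that idiom, reusing the two DSC registers from the table above; SAVE_ITEM(), s3c_pm_do_save() and s3c_pm_do_restore() are declared in <plat/pm.h>, while the register macros come from the SoC's <mach/regs-*.h> headers (the "bar" name is hypothetical):

#include <linux/kernel.h>
#include <plat/pm.h>

/* Illustrative table: SAVE_ITEM() records only the register address;
 * the helpers fill in and write back sleep_save.val around suspend. */
static struct sleep_save bar_sleep[] = {
	SAVE_ITEM(S3C2440_DSC0),
	SAVE_ITEM(S3C2440_DSC1),
};

static int bar_suspend(void)
{
	s3c_pm_do_save(bar_sleep, ARRAY_SIZE(bar_sleep));
	return 0;
}

static void bar_resume(void)
{
	s3c_pm_do_restore(bar_sleep, ARRAY_SIZE(bar_sleep));
}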
diff --git a/arch/arm/mach-s3c64xx/irq-pm.c b/arch/arm/mach-s3c64xx/irq-pm.c
index da1bec64b9da..8bec61e242c7 100644
--- a/arch/arm/mach-s3c64xx/irq-pm.c
+++ b/arch/arm/mach-s3c64xx/irq-pm.c
@@ -13,7 +13,7 @@
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/sysdev.h> 16#include <linux/syscore_ops.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/serial_core.h> 18#include <linux/serial_core.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
@@ -54,7 +54,7 @@ static struct irq_grp_save {
54 54
55static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; 55static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS];
56 56
57static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) 57static int s3c64xx_irq_pm_suspend(void)
58{ 58{
59 struct irq_grp_save *grp = eint_grp_save; 59 struct irq_grp_save *grp = eint_grp_save;
60 int i; 60 int i;
@@ -75,7 +75,7 @@ static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state)
75 return 0; 75 return 0;
76} 76}
77 77
78static int s3c64xx_irq_pm_resume(struct sys_device *dev) 78static void s3c64xx_irq_pm_resume(void)
79{ 79{
80 struct irq_grp_save *grp = eint_grp_save; 80 struct irq_grp_save *grp = eint_grp_save;
81 int i; 81 int i;
@@ -94,18 +94,18 @@ static int s3c64xx_irq_pm_resume(struct sys_device *dev)
 	}
 
 	S3C_PMDBG("%s: IRQ configuration restored\n", __func__);
-	return 0;
 }
 
-static struct sysdev_driver s3c64xx_irq_driver = {
+struct syscore_ops s3c64xx_irq_syscore_ops = {
 	.suspend = s3c64xx_irq_pm_suspend,
 	.resume	 = s3c64xx_irq_pm_resume,
 };
 
-static int __init s3c64xx_irq_pm_init(void)
+static __init int s3c64xx_syscore_init(void)
 {
-	return sysdev_driver_register(&s3c64xx_sysclass, &s3c64xx_irq_driver);
-}
+	register_syscore_ops(&s3c64xx_irq_syscore_ops);
 
-arch_initcall(s3c64xx_irq_pm_init);
+	return 0;
+}
 
+core_initcall(s3c64xx_syscore_init);
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 549d7924fd4c..24febae3d4c0 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/suspend.h> 18#include <linux/suspend.h>
19#include <linux/syscore_ops.h>
19#include <linux/io.h> 20#include <linux/io.h>
20 21
21#include <plat/cpu.h> 22#include <plat/cpu.h>
@@ -140,7 +141,17 @@ static int s5pv210_pm_add(struct sys_device *sysdev)
 	return 0;
 }
 
-static int s5pv210_pm_resume(struct sys_device *dev)
+static struct sysdev_driver s5pv210_pm_driver = {
+	.add		= s5pv210_pm_add,
+};
+
+static __init int s5pv210_pm_drvinit(void)
+{
+	return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver);
+}
+arch_initcall(s5pv210_pm_drvinit);
+
+static void s5pv210_pm_resume(void)
 {
 	u32 tmp;
 
@@ -150,17 +161,15 @@ static int s5pv210_pm_resume(struct sys_device *dev)
 	__raw_writel(tmp , S5P_OTHERS);
 
 	s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
-
-	return 0;
 }
 
-static struct sysdev_driver s5pv210_pm_driver = {
-	.add		= s5pv210_pm_add,
+static struct syscore_ops s5pv210_pm_syscore_ops = {
 	.resume		= s5pv210_pm_resume,
 };
 
-static __init int s5pv210_pm_drvinit(void)
+static __init int s5pv210_pm_syscore_init(void)
 {
-	return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver);
+	register_syscore_ops(&s5pv210_pm_syscore_ops);
+	return 0;
 }
-arch_initcall(s5pv210_pm_drvinit);
+arch_initcall(s5pv210_pm_syscore_init);
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 423ddb3d65e9..dfbf824a69fa 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -14,7 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/ioport.h> 16#include <linux/ioport.h>
17#include <linux/sysdev.h> 17#include <linux/syscore_ops.h>
18 18
19#include <mach/hardware.h> 19#include <mach/hardware.h>
20#include <asm/mach/irq.h> 20#include <asm/mach/irq.h>
@@ -234,7 +234,7 @@ static struct sa1100irq_state {
234 unsigned int iccr; 234 unsigned int iccr;
235} sa1100irq_state; 235} sa1100irq_state;
236 236
237static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) 237static int sa1100irq_suspend(void)
238{ 238{
239 struct sa1100irq_state *st = &sa1100irq_state; 239 struct sa1100irq_state *st = &sa1100irq_state;
240 240
@@ -264,7 +264,7 @@ static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state)
264 return 0; 264 return 0;
265} 265}
266 266
267static int sa1100irq_resume(struct sys_device *dev) 267static void sa1100irq_resume(void)
268{ 268{
269 struct sa1100irq_state *st = &sa1100irq_state; 269 struct sa1100irq_state *st = &sa1100irq_state;
270 270
@@ -277,24 +277,17 @@ static int sa1100irq_resume(struct sys_device *dev)
 
 		ICMR = st->icmr;
 	}
-	return 0;
 }
 
-static struct sysdev_class sa1100irq_sysclass = {
-	.name		= "sa11x0-irq",
+static struct syscore_ops sa1100irq_syscore_ops = {
 	.suspend	= sa1100irq_suspend,
 	.resume		= sa1100irq_resume,
 };
 
-static struct sys_device sa1100irq_device = {
-	.id		= 0,
-	.cls		= &sa1100irq_sysclass,
-};
-
 static int __init sa1100irq_init_devicefs(void)
 {
-	sysdev_class_register(&sa1100irq_sysclass);
-	return sysdev_register(&sa1100irq_device);
+	register_syscore_ops(&sa1100irq_syscore_ops);
+	return 0;
 }
 
 device_initcall(sa1100irq_init_devicefs);
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 94912d3944d3..2d1b67a59e4a 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -18,152 +18,41 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/sh_clk.h> 19#include <linux/sh_clk.h>
20#include <linux/bitmap.h> 20#include <linux/bitmap.h>
21#include <linux/slab.h>
21 22
22#ifdef CONFIG_PM_RUNTIME 23#ifdef CONFIG_PM_RUNTIME
23#define BIT_ONCE 0
24#define BIT_ACTIVE 1
25#define BIT_CLK_ENABLED 2
26 24
27struct pm_runtime_data { 25static int default_platform_runtime_idle(struct device *dev)
28 unsigned long flags;
29 struct clk *clk;
30};
31
32static void __devres_release(struct device *dev, void *res)
33{
34 struct pm_runtime_data *prd = res;
35
36 dev_dbg(dev, "__devres_release()\n");
37
38 if (test_bit(BIT_CLK_ENABLED, &prd->flags))
39 clk_disable(prd->clk);
40
41 if (test_bit(BIT_ACTIVE, &prd->flags))
42 clk_put(prd->clk);
43}
44
45static struct pm_runtime_data *__to_prd(struct device *dev)
46{
47 return devres_find(dev, __devres_release, NULL, NULL);
48}
49
50static void platform_pm_runtime_init(struct device *dev,
51 struct pm_runtime_data *prd)
52{
53 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
54 prd->clk = clk_get(dev, NULL);
55 if (!IS_ERR(prd->clk)) {
56 set_bit(BIT_ACTIVE, &prd->flags);
57 dev_info(dev, "clocks managed by runtime pm\n");
58 }
59 }
60}
61
62static void platform_pm_runtime_bug(struct device *dev,
63 struct pm_runtime_data *prd)
64{
65 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
66 dev_err(dev, "runtime pm suspend before resume\n");
67}
68
69int platform_pm_runtime_suspend(struct device *dev)
70{
71 struct pm_runtime_data *prd = __to_prd(dev);
72
73 dev_dbg(dev, "platform_pm_runtime_suspend()\n");
74
75 platform_pm_runtime_bug(dev, prd);
76
77 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
78 clk_disable(prd->clk);
79 clear_bit(BIT_CLK_ENABLED, &prd->flags);
80 }
81
82 return 0;
83}
84
85int platform_pm_runtime_resume(struct device *dev)
86{
87 struct pm_runtime_data *prd = __to_prd(dev);
88
89 dev_dbg(dev, "platform_pm_runtime_resume()\n");
90
91 platform_pm_runtime_init(dev, prd);
92
93 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
94 clk_enable(prd->clk);
95 set_bit(BIT_CLK_ENABLED, &prd->flags);
96 }
97
98 return 0;
99}
100
101int platform_pm_runtime_idle(struct device *dev)
102{ 26{
103 /* suspend synchronously to disable clocks immediately */ 27 /* suspend synchronously to disable clocks immediately */
104 return pm_runtime_suspend(dev); 28 return pm_runtime_suspend(dev);
105} 29}
106 30
107static int platform_bus_notify(struct notifier_block *nb, 31static struct dev_power_domain default_power_domain = {
108 unsigned long action, void *data) 32 .ops = {
109{ 33 .runtime_suspend = pm_runtime_clk_suspend,
110 struct device *dev = data; 34 .runtime_resume = pm_runtime_clk_resume,
111 struct pm_runtime_data *prd; 35 .runtime_idle = default_platform_runtime_idle,
112 36 USE_PLATFORM_PM_SLEEP_OPS
113 dev_dbg(dev, "platform_bus_notify() %ld !\n", action); 37 },
114 38};
115 if (action == BUS_NOTIFY_BIND_DRIVER) {
116 prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
117 if (prd)
118 devres_add(dev, prd);
119 else
120 dev_err(dev, "unable to alloc memory for runtime pm\n");
121 }
122
123 return 0;
124}
125
126#else /* CONFIG_PM_RUNTIME */
127
128static int platform_bus_notify(struct notifier_block *nb,
129 unsigned long action, void *data)
130{
131 struct device *dev = data;
132 struct clk *clk;
133 39
134 dev_dbg(dev, "platform_bus_notify() %ld !\n", action); 40#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain)
135 41
136 switch (action) { 42#else
137 case BUS_NOTIFY_BIND_DRIVER:
138 clk = clk_get(dev, NULL);
139 if (!IS_ERR(clk)) {
140 clk_enable(clk);
141 clk_put(clk);
142 dev_info(dev, "runtime pm disabled, clock forced on\n");
143 }
144 break;
145 case BUS_NOTIFY_UNBOUND_DRIVER:
146 clk = clk_get(dev, NULL);
147 if (!IS_ERR(clk)) {
148 clk_disable(clk);
149 clk_put(clk);
150 dev_info(dev, "runtime pm disabled, clock forced off\n");
151 }
152 break;
153 }
154 43
155 return 0; 44#define DEFAULT_PWR_DOMAIN_PTR NULL
156}
157 45
158#endif /* CONFIG_PM_RUNTIME */ 46#endif /* CONFIG_PM_RUNTIME */
159 47
160static struct notifier_block platform_bus_notifier = { 48static struct pm_clk_notifier_block platform_bus_notifier = {
161 .notifier_call = platform_bus_notify 49 .pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
50 .con_ids = { NULL, },
162}; 51};
163 52
164static int __init sh_pm_runtime_init(void) 53static int __init sh_pm_runtime_init(void)
165{ 54{
166 bus_register_notifier(&platform_bus_type, &platform_bus_notifier); 55 pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
167 return 0; 56 return 0;
168} 57}
169core_initcall(sh_pm_runtime_init); 58core_initcall(sh_pm_runtime_init);
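With the notifier above in place, clock handling moves out of the individual drivers entirely; runtime PM does it through the power-domain ops shown in the hunk. A hedged sketch of what a consumer then looks like (the driver name is hypothetical and not taken from this patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* clock is switched on behind the scenes via pm_runtime_clk_resume() */
	pm_runtime_get_sync(&pdev->dev);

	/* ... talk to the hardware ... */

	/* and off again via pm_runtime_clk_suspend() once the device idles */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}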
diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h
index cc115174899b..425b42e91ef6 100644
--- a/arch/arm/mach-tegra/include/mach/barriers.h
+++ b/arch/arm/mach-tegra/include/mach/barriers.h
@@ -23,7 +23,7 @@
23 23
24#include <asm/outercache.h> 24#include <asm/outercache.h>
25 25
26#define rmb() dmb() 26#define rmb() dsb()
27#define wmb() do { dsb(); outer_sync(); } while (0) 27#define wmb() do { dsb(); outer_sync(); } while (0)
28#define mb() wmb() 28#define mb() wmb()
29 29
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc428348..e591513bb53e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
392 * Convert start_pfn/end_pfn to a struct page pointer. 392 * Convert start_pfn/end_pfn to a struct page pointer.
393 */ 393 */
394 start_pg = pfn_to_page(start_pfn - 1) + 1; 394 start_pg = pfn_to_page(start_pfn - 1) + 1;
395 end_pg = pfn_to_page(end_pfn); 395 end_pg = pfn_to_page(end_pfn - 1) + 1;
396 396
397 /* 397 /*
398 * Convert to physical addresses, and 398 * Convert to physical addresses, and
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
426 426
427 bank_start = bank_pfn_start(bank); 427 bank_start = bank_pfn_start(bank);
428 428
429#ifdef CONFIG_SPARSEMEM
430 /*
431 * Take care not to free memmap entries that don't exist
432 * due to SPARSEMEM sections which aren't present.
433 */
434 bank_start = min(bank_start,
435 ALIGN(prev_bank_end, PAGES_PER_SECTION));
436#endif
429 /* 437 /*
430 * If we had a previous bank, and there is a space 438 * If we had a previous bank, and there is a space
431 * between the current bank and the previous, free it. 439 * between the current bank and the previous, free it.
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
440 */ 448 */
441 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 449 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
442 } 450 }
451
452#ifdef CONFIG_SPARSEMEM
453 if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
454 free_memmap(prev_bank_end,
455 ALIGN(prev_bank_end, PAGES_PER_SECTION));
456#endif
443} 457}
444 458
445static void __init free_highpages(void) 459static void __init free_highpages(void)
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index d2adcdda23cf..bd9e32187eab 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -17,7 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sysdev.h> 20#include <linux/syscore_ops.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/io.h> 23#include <linux/io.h>
@@ -1372,9 +1372,7 @@ static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
1372 .resume_noirq = omap_mpuio_resume_noirq, 1372 .resume_noirq = omap_mpuio_resume_noirq,
1373}; 1373};
1374 1374
1375/* use platform_driver for this, now that there's no longer any 1375/* use platform_driver for this. */
1376 * point to sys_device (other than not disturbing old code).
1377 */
1378static struct platform_driver omap_mpuio_driver = { 1376static struct platform_driver omap_mpuio_driver = {
1379 .driver = { 1377 .driver = {
1380 .name = "mpuio", 1378 .name = "mpuio",
@@ -1745,7 +1743,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
1745} 1743}
1746 1744
1747#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) 1745#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
1748static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) 1746static int omap_gpio_suspend(void)
1749{ 1747{
1750 int i; 1748 int i;
1751 1749
@@ -1795,12 +1793,12 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg)
1795 return 0; 1793 return 0;
1796} 1794}
1797 1795
1798static int omap_gpio_resume(struct sys_device *dev) 1796static void omap_gpio_resume(void)
1799{ 1797{
1800 int i; 1798 int i;
1801 1799
1802 if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) 1800 if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
1803 return 0; 1801 return;
1804 1802
1805 for (i = 0; i < gpio_bank_count; i++) { 1803 for (i = 0; i < gpio_bank_count; i++) {
1806 struct gpio_bank *bank = &gpio_bank[i]; 1804 struct gpio_bank *bank = &gpio_bank[i];
@@ -1836,21 +1834,13 @@ static int omap_gpio_resume(struct sys_device *dev)
1836 __raw_writel(bank->saved_wakeup, wake_set); 1834 __raw_writel(bank->saved_wakeup, wake_set);
1837 spin_unlock_irqrestore(&bank->lock, flags); 1835 spin_unlock_irqrestore(&bank->lock, flags);
1838 } 1836 }
1839
1840 return 0;
1841} 1837}
1842 1838
1843static struct sysdev_class omap_gpio_sysclass = { 1839static struct syscore_ops omap_gpio_syscore_ops = {
1844 .name = "gpio",
1845 .suspend = omap_gpio_suspend, 1840 .suspend = omap_gpio_suspend,
1846 .resume = omap_gpio_resume, 1841 .resume = omap_gpio_resume,
1847}; 1842};
1848 1843
1849static struct sys_device omap_gpio_device = {
1850 .id = 0,
1851 .cls = &omap_gpio_sysclass,
1852};
1853
1854#endif 1844#endif
1855 1845
1856#ifdef CONFIG_ARCH_OMAP2PLUS 1846#ifdef CONFIG_ARCH_OMAP2PLUS
@@ -2108,21 +2098,14 @@ postcore_initcall(omap_gpio_drv_reg);
2108 2098
2109static int __init omap_gpio_sysinit(void) 2099static int __init omap_gpio_sysinit(void)
2110{ 2100{
2111 int ret = 0;
2112
2113 mpuio_init(); 2101 mpuio_init();
2114 2102
2115#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) 2103#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
2116 if (cpu_is_omap16xx() || cpu_class_is_omap2()) { 2104 if (cpu_is_omap16xx() || cpu_class_is_omap2())
2117 if (ret == 0) { 2105 register_syscore_ops(&omap_gpio_syscore_ops);
2118 ret = sysdev_class_register(&omap_gpio_sysclass);
2119 if (ret == 0)
2120 ret = sysdev_register(&omap_gpio_device);
2121 }
2122 }
2123#endif 2106#endif
2124 2107
2125 return ret; 2108 return 0;
2126} 2109}
2127 2110
2128arch_initcall(omap_gpio_sysinit); 2111arch_initcall(omap_gpio_sysinit);
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 8a51fd58f656..34fc31ee9081 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
793 clk_enable(obj->clk); 793 clk_enable(obj->clk);
794 errs = iommu_report_fault(obj, &da); 794 errs = iommu_report_fault(obj, &da);
795 clk_disable(obj->clk); 795 clk_disable(obj->clk);
796 if (errs == 0)
797 return IRQ_HANDLED;
796 798
797 /* Fault callback or TLB/PTE Dynamic loading */ 799 /* Fault callback or TLB/PTE Dynamic loading */
798 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) 800 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 9bbda9acb73b..a37b8eb65b76 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od)
536 return 0; 536 return 0;
537} 537}
538 538
539static int _od_runtime_suspend(struct device *dev)
540{
541 struct platform_device *pdev = to_platform_device(dev);
542
543 return omap_device_idle(pdev);
544}
545
546static int _od_runtime_resume(struct device *dev)
547{
548 struct platform_device *pdev = to_platform_device(dev);
549
550 return omap_device_enable(pdev);
551}
552
553static struct dev_power_domain omap_device_power_domain = {
554 .ops = {
555 .runtime_suspend = _od_runtime_suspend,
556 .runtime_resume = _od_runtime_resume,
557 USE_PLATFORM_PM_SLEEP_OPS
558 }
559};
560
539/** 561/**
540 * omap_device_register - register an omap_device with one omap_hwmod 562 * omap_device_register - register an omap_device with one omap_hwmod
541 * @od: struct omap_device * to register 563 * @od: struct omap_device * to register
@@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od)
549 pr_debug("omap_device: %s: registering\n", od->pdev.name); 571 pr_debug("omap_device: %s: registering\n", od->pdev.name);
550 572
551 od->pdev.dev.parent = &omap_device_parent; 573 od->pdev.dev.parent = &omap_device_parent;
574 od->pdev.dev.pwr_domain = &omap_device_power_domain;
552 return platform_device_register(&od->pdev); 575 return platform_device_register(&od->pdev);
553} 576}
554 577
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index dce088f45678..48ebb9479b61 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -15,7 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/sysdev.h> 18#include <linux/syscore_ops.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21#include <mach/gpio.h> 21#include <mach/gpio.h>
@@ -295,7 +295,7 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
295} 295}
296 296
297#ifdef CONFIG_PM 297#ifdef CONFIG_PM
298static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) 298static int pxa_gpio_suspend(void)
299{ 299{
300 struct pxa_gpio_chip *c; 300 struct pxa_gpio_chip *c;
301 int gpio; 301 int gpio;
@@ -312,7 +312,7 @@ static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state)
312 return 0; 312 return 0;
313} 313}
314 314
315static int pxa_gpio_resume(struct sys_device *dev) 315static void pxa_gpio_resume(void)
316{ 316{
317 struct pxa_gpio_chip *c; 317 struct pxa_gpio_chip *c;
318 int gpio; 318 int gpio;
@@ -326,22 +326,13 @@ static int pxa_gpio_resume(struct sys_device *dev)
326 __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); 326 __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
327 __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); 327 __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
328 } 328 }
329 return 0;
330} 329}
331#else 330#else
332#define pxa_gpio_suspend NULL 331#define pxa_gpio_suspend NULL
333#define pxa_gpio_resume NULL 332#define pxa_gpio_resume NULL
334#endif 333#endif
335 334
336struct sysdev_class pxa_gpio_sysclass = { 335struct syscore_ops pxa_gpio_syscore_ops = {
337 .name = "gpio",
338 .suspend = pxa_gpio_suspend, 336 .suspend = pxa_gpio_suspend,
339 .resume = pxa_gpio_resume, 337 .resume = pxa_gpio_resume,
340}; 338};
341
342static int __init pxa_gpio_init(void)
343{
344 return sysdev_class_register(&pxa_gpio_sysclass);
345}
346
347core_initcall(pxa_gpio_init);
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index a9aa5ad3f4eb..be12eadcce20 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -17,7 +17,6 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/sysdev.h>
21 20
22#include <plat/mfp.h> 21#include <plat/mfp.h>
23 22
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 27ea852e3370..c10d10c56e2e 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -22,7 +22,7 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/sysdev.h> 25#include <linux/syscore_ops.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/io.h> 28#include <linux/io.h>
@@ -1195,19 +1195,12 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d
1195 1195
1196EXPORT_SYMBOL(s3c2410_dma_getposition); 1196EXPORT_SYMBOL(s3c2410_dma_getposition);
1197 1197
1198static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) 1198/* system core operations */
1199{
1200 return container_of(dev, struct s3c2410_dma_chan, dev);
1201}
1202
1203/* system device class */
1204 1199
1205#ifdef CONFIG_PM 1200#ifdef CONFIG_PM
1206 1201
1207static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) 1202static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp)
1208{ 1203{
1209 struct s3c2410_dma_chan *cp = to_dma_chan(dev);
1210
1211 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); 1204 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1212 1205
1213 if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { 1206 if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
@@ -1222,13 +1215,21 @@ static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state)
1222 1215
1223 s3c2410_dma_dostop(cp); 1216 s3c2410_dma_dostop(cp);
1224 } 1217 }
1218}
1219
1220static int s3c2410_dma_suspend(void)
1221{
1222 struct s3c2410_dma_chan *cp = s3c2410_chans;
1223 int channel;
1224
1225 for (channel = 0; channel < dma_channels; cp++, channel++)
1226 s3c2410_dma_suspend_chan(cp);
1225 1227
1226 return 0; 1228 return 0;
1227} 1229}
1228 1230
1229static int s3c2410_dma_resume(struct sys_device *dev) 1231static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1230{ 1232{
1231 struct s3c2410_dma_chan *cp = to_dma_chan(dev);
1232 unsigned int no = cp->number | DMACH_LOW_LEVEL; 1233 unsigned int no = cp->number | DMACH_LOW_LEVEL;
1233 1234
1234 /* restore channel's hardware configuration */ 1235 /* restore channel's hardware configuration */
@@ -1249,13 +1250,21 @@ static int s3c2410_dma_resume(struct sys_device *dev)
1249 return 0; 1250 return 0;
1250} 1251}
1251 1252
1253static void s3c2410_dma_resume(void)
1254{
1255 struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
1256 int channel;
1257
1258 for (channel = dma_channels - 1; channel >= 0; cp++, channel--)
1259 s3c2410_dma_resume_chan(cp);
1260}
1261
1252#else 1262#else
1253#define s3c2410_dma_suspend NULL 1263#define s3c2410_dma_suspend NULL
1254#define s3c2410_dma_resume NULL 1264#define s3c2410_dma_resume NULL
1255#endif /* CONFIG_PM */ 1265#endif /* CONFIG_PM */
1256 1266
1257struct sysdev_class dma_sysclass = { 1267struct syscore_ops dma_syscore_ops = {
1258 .name = "s3c24xx-dma",
1259 .suspend = s3c2410_dma_suspend, 1268 .suspend = s3c2410_dma_suspend,
1260 .resume = s3c2410_dma_resume, 1269 .resume = s3c2410_dma_resume,
1261}; 1270};
@@ -1269,39 +1278,14 @@ static void s3c2410_dma_cache_ctor(void *p)
1269 1278
1270/* initialisation code */ 1279/* initialisation code */
1271 1280
1272static int __init s3c24xx_dma_sysclass_init(void) 1281static int __init s3c24xx_dma_syscore_init(void)
1273{ 1282{
1274 int ret = sysdev_class_register(&dma_sysclass); 1283 register_syscore_ops(&dma_syscore_ops);
1275
1276 if (ret != 0)
1277 printk(KERN_ERR "dma sysclass registration failed\n");
1278
1279 return ret;
1280}
1281
1282core_initcall(s3c24xx_dma_sysclass_init);
1283
1284static int __init s3c24xx_dma_sysdev_register(void)
1285{
1286 struct s3c2410_dma_chan *cp = s3c2410_chans;
1287 int channel, ret;
1288
1289 for (channel = 0; channel < dma_channels; cp++, channel++) {
1290 cp->dev.cls = &dma_sysclass;
1291 cp->dev.id = channel;
1292 ret = sysdev_register(&cp->dev);
1293
1294 if (ret) {
1295 printk(KERN_ERR "error registering dev for dma %d\n",
1296 channel);
1297 return ret;
1298 }
1299 }
1300 1284
1301 return 0; 1285 return 0;
1302} 1286}
1303 1287
1304late_initcall(s3c24xx_dma_sysdev_register); 1288late_initcall(s3c24xx_dma_syscore_init);
1305 1289
1306int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, 1290int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq,
1307 unsigned int stride) 1291 unsigned int stride)
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c
index c3624d898630..0efb2e2848c8 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/plat-s3c24xx/irq-pm.c
@@ -14,7 +14,6 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/sysdev.h>
18#include <linux/irq.h> 17#include <linux/irq.h>
19 18
20#include <plat/cpu.h> 19#include <plat/cpu.h>
@@ -65,7 +64,7 @@ static unsigned long save_extint[3];
65static unsigned long save_eintflt[4]; 64static unsigned long save_eintflt[4];
66static unsigned long save_eintmask; 65static unsigned long save_eintmask;
67 66
68int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) 67int s3c24xx_irq_suspend(void)
69{ 68{
70 unsigned int i; 69 unsigned int i;
71 70
@@ -81,7 +80,7 @@ int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state)
81 return 0; 80 return 0;
82} 81}
83 82
84int s3c24xx_irq_resume(struct sys_device *dev) 83void s3c24xx_irq_resume(void)
85{ 84{
86 unsigned int i; 85 unsigned int i;
87 86
@@ -93,6 +92,4 @@ int s3c24xx_irq_resume(struct sys_device *dev)
93 92
94 s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); 93 s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
95 __raw_writel(save_eintmask, S3C24XX_EINTMASK); 94 __raw_writel(save_eintmask, S3C24XX_EINTMASK);
96
97 return 0;
98} 95}
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c
index 5259ad458bc8..327acb3a4464 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-s5p/irq-pm.c
@@ -16,7 +16,6 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/sysdev.h>
20 19
21#include <plat/cpu.h> 20#include <plat/cpu.h>
22#include <plat/irqs.h> 21#include <plat/irqs.h>
@@ -77,17 +76,15 @@ static struct sleep_save eint_save[] = {
77 SAVE_ITEM(S5P_EINT_MASK(3)), 76 SAVE_ITEM(S5P_EINT_MASK(3)),
78}; 77};
79 78
80int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) 79int s3c24xx_irq_suspend(void)
81{ 80{
82 s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); 81 s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save));
83 82
84 return 0; 83 return 0;
85} 84}
86 85
87int s3c24xx_irq_resume(struct sys_device *dev) 86void s3c24xx_irq_resume(void)
88{ 87{
89 s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); 88 s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save));
90
91 return 0;
92} 89}
93 90
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index cedfff51c82b..3aedac0034ba 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -68,6 +68,12 @@ extern void s3c24xx_init_uartdevs(char *name,
68struct sys_timer; 68struct sys_timer;
69extern struct sys_timer s3c24xx_timer; 69extern struct sys_timer s3c24xx_timer;
70 70
71extern struct syscore_ops s3c2410_pm_syscore_ops;
72extern struct syscore_ops s3c2412_pm_syscore_ops;
73extern struct syscore_ops s3c2416_pm_syscore_ops;
74extern struct syscore_ops s3c244x_pm_syscore_ops;
75extern struct syscore_ops s3c64xx_irq_syscore_ops;
76
71/* system device classes */ 77/* system device classes */
72 78
73extern struct sysdev_class s3c2410_sysclass; 79extern struct sysdev_class s3c2410_sysclass;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 937cc2ace517..7fb6f6be8c81 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -103,14 +103,16 @@ extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
103 103
104#ifdef CONFIG_PM 104#ifdef CONFIG_PM
105extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); 105extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
106extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state); 106extern int s3c24xx_irq_suspend(void);
107extern int s3c24xx_irq_resume(struct sys_device *dev); 107extern void s3c24xx_irq_resume(void);
108#else 108#else
109#define s3c_irqext_wake NULL 109#define s3c_irqext_wake NULL
110#define s3c24xx_irq_suspend NULL 110#define s3c24xx_irq_suspend NULL
111#define s3c24xx_irq_resume NULL 111#define s3c24xx_irq_resume NULL
112#endif 112#endif
113 113
114extern struct syscore_ops s3c24xx_irq_syscore_ops;
115
114/* PM debug functions */ 116/* PM debug functions */
115 117
116#ifdef CONFIG_SAMSUNG_PM_DEBUG 118#ifdef CONFIG_SAMSUNG_PM_DEBUG
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f74695075e64..f25e7ec89416 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -398,9 +398,9 @@ static void vfp_enable(void *unused)
398} 398}
399 399
400#ifdef CONFIG_PM 400#ifdef CONFIG_PM
401#include <linux/sysdev.h> 401#include <linux/syscore_ops.h>
402 402
403static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) 403static int vfp_pm_suspend(void)
404{ 404{
405 struct thread_info *ti = current_thread_info(); 405 struct thread_info *ti = current_thread_info();
406 u32 fpexc = fmrx(FPEXC); 406 u32 fpexc = fmrx(FPEXC);
@@ -420,34 +420,25 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
 	return 0;
 }
 
-static int vfp_pm_resume(struct sys_device *dev)
+static void vfp_pm_resume(void)
 {
 	/* ensure we have access to the vfp */
 	vfp_enable(NULL);
 
 	/* and disable it to ensure the next usage restores the state */
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-
-	return 0;
 }
 
-static struct sysdev_class vfp_pm_sysclass = {
-	.name		= "vfp",
+static struct syscore_ops vfp_pm_syscore_ops = {
 	.suspend	= vfp_pm_suspend,
 	.resume		= vfp_pm_resume,
 };
 
-static struct sys_device vfp_pm_sysdev = {
-	.cls	= &vfp_pm_sysclass,
-};
-
 static void vfp_pm_init(void)
 {
-	sysdev_class_register(&vfp_pm_sysclass);
-	sysdev_register(&vfp_pm_sysdev);
+	register_syscore_ops(&vfp_pm_syscore_ops);
 }
 
-
 #else
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index 21ce35f33aa5..3e3646186c9f 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -12,7 +12,7 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/sysdev.h> 15#include <linux/syscore_ops.h>
16 16
17#include <asm/io.h> 17#include <asm/io.h>
18 18
@@ -21,7 +21,6 @@
21struct intc { 21struct intc {
22 void __iomem *regs; 22 void __iomem *regs;
23 struct irq_chip chip; 23 struct irq_chip chip;
24 struct sys_device sysdev;
25#ifdef CONFIG_PM 24#ifdef CONFIG_PM
26 unsigned long suspend_ipr; 25 unsigned long suspend_ipr;
27 unsigned long saved_ipr[64]; 26 unsigned long saved_ipr[64];
@@ -146,9 +145,8 @@ void intc_set_suspend_handler(unsigned long offset)
146 intc0.suspend_ipr = offset; 145 intc0.suspend_ipr = offset;
147} 146}
148 147
149static int intc_suspend(struct sys_device *sdev, pm_message_t state) 148static int intc_suspend(void)
150{ 149{
151 struct intc *intc = container_of(sdev, struct intc, sysdev);
152 int i; 150 int i;
153 151
154 if (unlikely(!irqs_disabled())) { 152 if (unlikely(!irqs_disabled())) {
@@ -156,28 +154,25 @@ static int intc_suspend(struct sys_device *sdev, pm_message_t state)
156 return -EINVAL; 154 return -EINVAL;
157 } 155 }
158 156
159 if (unlikely(!intc->suspend_ipr)) { 157 if (unlikely(!intc0.suspend_ipr)) {
160 pr_err("intc_suspend: suspend_ipr not initialized\n"); 158 pr_err("intc_suspend: suspend_ipr not initialized\n");
161 return -EINVAL; 159 return -EINVAL;
162 } 160 }
163 161
164 for (i = 0; i < 64; i++) { 162 for (i = 0; i < 64; i++) {
165 intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i); 163 intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i);
166 intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr); 164 intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr);
167 } 165 }
168 166
169 return 0; 167 return 0;
170} 168}
171 169
172static int intc_resume(struct sys_device *sdev) 170static int intc_resume(void)
173{ 171{
174 struct intc *intc = container_of(sdev, struct intc, sysdev);
175 int i; 172 int i;
176 173
177 WARN_ON(!irqs_disabled());
178
179 for (i = 0; i < 64; i++) 174 for (i = 0; i < 64; i++)
180 intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]); 175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
181 176
182 return 0; 177 return 0;
183} 178}
@@ -186,27 +181,18 @@ static int intc_resume(struct sys_device *sdev)
186#define intc_resume NULL 181#define intc_resume NULL
187#endif 182#endif
188 183
189static struct sysdev_class intc_class = { 184static struct syscore_ops intc_syscore_ops = {
190 .name = "intc",
191 .suspend = intc_suspend, 185 .suspend = intc_suspend,
192 .resume = intc_resume, 186 .resume = intc_resume,
193}; 187};
194 188
195static int __init intc_init_sysdev(void) 189static int __init intc_init_syscore(void)
196{ 190{
197 int ret; 191 register_syscore_ops(&intc_syscore_ops);
198
199 ret = sysdev_class_register(&intc_class);
200 if (ret)
201 return ret;
202 192
203 intc0.sysdev.id = 0; 193 return 0;
204 intc0.sysdev.cls = &intc_class;
205 ret = sysdev_register(&intc0.sysdev);
206
207 return ret;
208} 194}
209device_initcall(intc_init_sysdev); 195device_initcall(intc_init_syscore);
210 196
211unsigned long intc_get_pending(unsigned int group) 197unsigned long intc_get_pending(unsigned int group)
212{ 198{
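One detail worth keeping in mind for conversions like the AVR32 one above: syscore callbacks take no arguments, so code that used container_of() on the sys_device to find its state has to reach that state statically instead, exactly as intc_suspend()/intc_resume() now do with intc0. A small sketch of the idea with hypothetical names and register offset:

#include <linux/io.h>
#include <linux/syscore_ops.h>

#define FOO_CTRL	0x00	/* hypothetical register offset */

struct foo_controller {
	void __iomem	*regs;
	u32		saved_ctrl;
};

/* single instance at file scope -- there is no device pointer to
 * recover it from inside a syscore callback */
static struct foo_controller foo0;

static int foo_suspend(void)
{
	foo0.saved_ctrl = readl(foo0.regs + FOO_CTRL);
	return 0;
}

static void foo_resume(void)
{
	writel(foo0.saved_ctrl, foo0.regs + FOO_CTRL);
}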
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 0b5f72f17fd0..401eb1d8e3b4 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/hardirq.h> 14#include <linux/hardirq.h>
15#include <linux/sysdev.h> 15#include <linux/syscore_ops.h>
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/nmi.h> 17#include <linux/nmi.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
@@ -196,43 +196,31 @@ void touch_nmi_watchdog(void)
196 196
197/* Suspend/resume support */ 197/* Suspend/resume support */
198#ifdef CONFIG_PM 198#ifdef CONFIG_PM
199static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) 199static int nmi_wdt_suspend(void)
200{ 200{
201 nmi_wdt_stop(); 201 nmi_wdt_stop();
202 return 0; 202 return 0;
203} 203}
204 204
205static int nmi_wdt_resume(struct sys_device *dev) 205static void nmi_wdt_resume(void)
206{ 206{
207 if (nmi_active) 207 if (nmi_active)
208 nmi_wdt_start(); 208 nmi_wdt_start();
209 return 0;
210} 209}
211 210
212static struct sysdev_class nmi_sysclass = { 211static struct syscore_ops nmi_syscore_ops = {
213 .name = DRV_NAME,
214 .resume = nmi_wdt_resume, 212 .resume = nmi_wdt_resume,
215 .suspend = nmi_wdt_suspend, 213 .suspend = nmi_wdt_suspend,
216}; 214};
217 215
218static struct sys_device device_nmi_wdt = { 216static int __init init_nmi_wdt_syscore(void)
219 .id = 0,
220 .cls = &nmi_sysclass,
221};
222
223static int __init init_nmi_wdt_sysfs(void)
224{ 217{
225 int error; 218 if (nmi_active)
226 219 register_syscore_ops(&nmi_syscore_ops);
227 if (!nmi_active)
228 return 0;
229 220
230 error = sysdev_class_register(&nmi_sysclass); 221 return 0;
231 if (!error)
232 error = sysdev_register(&device_nmi_wdt);
233 return error;
234} 222}
235late_initcall(init_nmi_wdt_sysfs); 223late_initcall(init_nmi_wdt_syscore);
236 224
237#endif /* CONFIG_PM */ 225#endif /* CONFIG_PM */
238 226
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index 382099fd5561..5e4112e518a9 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -19,9 +19,6 @@
19 19
20#define DRIVER_NAME "bfin dpmc" 20#define DRIVER_NAME "bfin dpmc"
21 21
22#define dprintk(msg...) \
23 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg)
24
25struct bfin_dpmc_platform_data *pdata; 22struct bfin_dpmc_platform_data *pdata;
26 23
27/** 24/**
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 22f61526a8e1..f09b174244d5 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -23,8 +23,6 @@
23#include <linux/acpi.h> 23#include <linux/acpi.h>
24#include <acpi/processor.h> 24#include <acpi/processor.h>
25 25
26#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
27
28MODULE_AUTHOR("Venkatesh Pallipadi"); 26MODULE_AUTHOR("Venkatesh Pallipadi");
29MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 27MODULE_DESCRIPTION("ACPI Processor P-States Driver");
30MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
@@ -47,12 +45,12 @@ processor_set_pstate (
47{ 45{
48 s64 retval; 46 s64 retval;
49 47
50 dprintk("processor_set_pstate\n"); 48 pr_debug("processor_set_pstate\n");
51 49
52 retval = ia64_pal_set_pstate((u64)value); 50 retval = ia64_pal_set_pstate((u64)value);
53 51
54 if (retval) { 52 if (retval) {
55 dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", 53 pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
56 value, retval); 54 value, retval);
57 return -ENODEV; 55 return -ENODEV;
58 } 56 }
@@ -67,14 +65,14 @@ processor_get_pstate (
67 u64 pstate_index = 0; 65 u64 pstate_index = 0;
68 s64 retval; 66 s64 retval;
69 67
70 dprintk("processor_get_pstate\n"); 68 pr_debug("processor_get_pstate\n");
71 69
72 retval = ia64_pal_get_pstate(&pstate_index, 70 retval = ia64_pal_get_pstate(&pstate_index,
73 PAL_GET_PSTATE_TYPE_INSTANT); 71 PAL_GET_PSTATE_TYPE_INSTANT);
74 *value = (u32) pstate_index; 72 *value = (u32) pstate_index;
75 73
76 if (retval) 74 if (retval)
77 dprintk("Failed to get current freq with " 75 pr_debug("Failed to get current freq with "
78 "error 0x%lx, idx 0x%x\n", retval, *value); 76 "error 0x%lx, idx 0x%x\n", retval, *value);
79 77
80 return (int)retval; 78 return (int)retval;
@@ -90,7 +88,7 @@ extract_clock (
90{ 88{
91 unsigned long i; 89 unsigned long i;
92 90
93 dprintk("extract_clock\n"); 91 pr_debug("extract_clock\n");
94 92
95 for (i = 0; i < data->acpi_data.state_count; i++) { 93 for (i = 0; i < data->acpi_data.state_count; i++) {
96 if (value == data->acpi_data.states[i].status) 94 if (value == data->acpi_data.states[i].status)
@@ -110,7 +108,7 @@ processor_get_freq (
110 cpumask_t saved_mask; 108 cpumask_t saved_mask;
111 unsigned long clock_freq; 109 unsigned long clock_freq;
112 110
113 dprintk("processor_get_freq\n"); 111 pr_debug("processor_get_freq\n");
114 112
115 saved_mask = current->cpus_allowed; 113 saved_mask = current->cpus_allowed;
116 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 114 set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -148,7 +146,7 @@ processor_set_freq (
148 cpumask_t saved_mask; 146 cpumask_t saved_mask;
149 int retval; 147 int retval;
150 148
151 dprintk("processor_set_freq\n"); 149 pr_debug("processor_set_freq\n");
152 150
153 saved_mask = current->cpus_allowed; 151 saved_mask = current->cpus_allowed;
154 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 152 set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -159,16 +157,16 @@ processor_set_freq (
159 157
160 if (state == data->acpi_data.state) { 158 if (state == data->acpi_data.state) {
161 if (unlikely(data->resume)) { 159 if (unlikely(data->resume)) {
162 dprintk("Called after resume, resetting to P%d\n", state); 160 pr_debug("Called after resume, resetting to P%d\n", state);
163 data->resume = 0; 161 data->resume = 0;
164 } else { 162 } else {
165 dprintk("Already at target state (P%d)\n", state); 163 pr_debug("Already at target state (P%d)\n", state);
166 retval = 0; 164 retval = 0;
167 goto migrate_end; 165 goto migrate_end;
168 } 166 }
169 } 167 }
170 168
171 dprintk("Transitioning from P%d to P%d\n", 169 pr_debug("Transitioning from P%d to P%d\n",
172 data->acpi_data.state, state); 170 data->acpi_data.state, state);
173 171
174 /* cpufreq frequency struct */ 172 /* cpufreq frequency struct */
@@ -186,7 +184,7 @@ processor_set_freq (
186 184
187 value = (u32) data->acpi_data.states[state].control; 185 value = (u32) data->acpi_data.states[state].control;
188 186
189 dprintk("Transitioning to state: 0x%08x\n", value); 187 pr_debug("Transitioning to state: 0x%08x\n", value);
190 188
191 ret = processor_set_pstate(value); 189 ret = processor_set_pstate(value);
192 if (ret) { 190 if (ret) {
@@ -219,7 +217,7 @@ acpi_cpufreq_get (
219{ 217{
220 struct cpufreq_acpi_io *data = acpi_io_data[cpu]; 218 struct cpufreq_acpi_io *data = acpi_io_data[cpu];
221 219
222 dprintk("acpi_cpufreq_get\n"); 220 pr_debug("acpi_cpufreq_get\n");
223 221
224 return processor_get_freq(data, cpu); 222 return processor_get_freq(data, cpu);
225} 223}
@@ -235,7 +233,7 @@ acpi_cpufreq_target (
235 unsigned int next_state = 0; 233 unsigned int next_state = 0;
236 unsigned int result = 0; 234 unsigned int result = 0;
237 235
238 dprintk("acpi_cpufreq_setpolicy\n"); 236 pr_debug("acpi_cpufreq_setpolicy\n");
239 237
240 result = cpufreq_frequency_table_target(policy, 238 result = cpufreq_frequency_table_target(policy,
241 data->freq_table, target_freq, relation, &next_state); 239 data->freq_table, target_freq, relation, &next_state);
@@ -255,7 +253,7 @@ acpi_cpufreq_verify (
255 unsigned int result = 0; 253 unsigned int result = 0;
256 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 254 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
257 255
258 dprintk("acpi_cpufreq_verify\n"); 256 pr_debug("acpi_cpufreq_verify\n");
259 257
260 result = cpufreq_frequency_table_verify(policy, 258 result = cpufreq_frequency_table_verify(policy,
261 data->freq_table); 259 data->freq_table);
@@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init (
273 struct cpufreq_acpi_io *data; 271 struct cpufreq_acpi_io *data;
274 unsigned int result = 0; 272 unsigned int result = 0;
275 273
276 dprintk("acpi_cpufreq_cpu_init\n"); 274 pr_debug("acpi_cpufreq_cpu_init\n");
277 275
278 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 276 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
279 if (!data) 277 if (!data)
@@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init (
288 286
289 /* capability check */ 287 /* capability check */
290 if (data->acpi_data.state_count <= 1) { 288 if (data->acpi_data.state_count <= 1) {
291 dprintk("No P-States\n"); 289 pr_debug("No P-States\n");
292 result = -ENODEV; 290 result = -ENODEV;
293 goto err_unreg; 291 goto err_unreg;
294 } 292 }
@@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init (
297 ACPI_ADR_SPACE_FIXED_HARDWARE) || 295 ACPI_ADR_SPACE_FIXED_HARDWARE) ||
298 (data->acpi_data.status_register.space_id != 296 (data->acpi_data.status_register.space_id !=
299 ACPI_ADR_SPACE_FIXED_HARDWARE)) { 297 ACPI_ADR_SPACE_FIXED_HARDWARE)) {
300 dprintk("Unsupported address space [%d, %d]\n", 298 pr_debug("Unsupported address space [%d, %d]\n",
301 (u32) (data->acpi_data.control_register.space_id), 299 (u32) (data->acpi_data.control_register.space_id),
302 (u32) (data->acpi_data.status_register.space_id)); 300 (u32) (data->acpi_data.status_register.space_id));
303 result = -ENODEV; 301 result = -ENODEV;
@@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init (
348 "activated.\n", cpu); 346 "activated.\n", cpu);
349 347
350 for (i = 0; i < data->acpi_data.state_count; i++) 348 for (i = 0; i < data->acpi_data.state_count; i++)
351 dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", 349 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
352 (i == data->acpi_data.state?'*':' '), i, 350 (i == data->acpi_data.state?'*':' '), i,
353 (u32) data->acpi_data.states[i].core_frequency, 351 (u32) data->acpi_data.states[i].core_frequency,
354 (u32) data->acpi_data.states[i].power, 352 (u32) data->acpi_data.states[i].power,
@@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit (
383{ 381{
384 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 382 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
385 383
386 dprintk("acpi_cpufreq_cpu_exit\n"); 384 pr_debug("acpi_cpufreq_cpu_exit\n");
387 385
388 if (data) { 386 if (data) {
389 cpufreq_frequency_table_put_attr(policy->cpu); 387 cpufreq_frequency_table_put_attr(policy->cpu);
@@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
418static int __init 416static int __init
419acpi_cpufreq_init (void) 417acpi_cpufreq_init (void)
420{ 418{
421 dprintk("acpi_cpufreq_init\n"); 419 pr_debug("acpi_cpufreq_init\n");
422 420
423 return cpufreq_register_driver(&acpi_cpufreq_driver); 421 return cpufreq_register_driver(&acpi_cpufreq_driver);
424} 422}
@@ -427,7 +425,7 @@ acpi_cpufreq_init (void)
427static void __exit 425static void __exit
428acpi_cpufreq_exit (void) 426acpi_cpufreq_exit (void)
429{ 427{
430 dprintk("acpi_cpufreq_exit\n"); 428 pr_debug("acpi_cpufreq_exit\n");
431 429
432 cpufreq_unregister_driver(&acpi_cpufreq_driver); 430 cpufreq_unregister_driver(&acpi_cpufreq_driver);
433 return; 431 return;
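
The acpi-cpufreq hunks above are a mechanical conversion from the driver-private dprintk() macro to the generic pr_debug(). A minimal sketch of the resulting pattern, assuming the driver keeps a message prefix via pr_fmt (the prefix below is illustrative, not taken from this patch):

#define pr_fmt(fmt) "acpi-cpufreq: " fmt        /* assumed prefix, must precede the includes */

#include <linux/kernel.h>
#include <linux/printk.h>

static int example_transition(unsigned int state)
{
        /* Formerly dprintk(); pr_debug() compiles away unless DEBUG or
         * CONFIG_DYNAMIC_DEBUG enables it, so no driver-specific debug
         * switch is needed any more. */
        pr_debug("Transitioning to P%u\n", state);
        return 0;
}
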
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index b995513d527f..95022b04b62d 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -36,13 +36,10 @@
36 36
37/* Hook for MIDI serial driver */ 37/* Hook for MIDI serial driver */
38void (*atari_MIDI_interrupt_hook) (void); 38void (*atari_MIDI_interrupt_hook) (void);
39/* Hook for mouse driver */
40void (*atari_mouse_interrupt_hook) (char *);
41/* Hook for keyboard inputdev driver */ 39/* Hook for keyboard inputdev driver */
42void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); 40void (*atari_input_keyboard_interrupt_hook) (unsigned char, char);
43/* Hook for mouse inputdev driver */ 41/* Hook for mouse inputdev driver */
44void (*atari_input_mouse_interrupt_hook) (char *); 42void (*atari_input_mouse_interrupt_hook) (char *);
45EXPORT_SYMBOL(atari_mouse_interrupt_hook);
46EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); 43EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook);
47EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); 44EXPORT_SYMBOL(atari_input_mouse_interrupt_hook);
48 45
@@ -263,8 +260,8 @@ repeat:
263 kb_state.buf[kb_state.len++] = scancode; 260 kb_state.buf[kb_state.len++] = scancode;
264 if (kb_state.len == 3) { 261 if (kb_state.len == 3) {
265 kb_state.state = KEYBOARD; 262 kb_state.state = KEYBOARD;
266 if (atari_mouse_interrupt_hook) 263 if (atari_input_mouse_interrupt_hook)
267 atari_mouse_interrupt_hook(kb_state.buf); 264 atari_input_mouse_interrupt_hook(kb_state.buf);
268 } 265 }
269 break; 266 break;
270 267
@@ -575,7 +572,7 @@ int atari_keyb_init(void)
575 kb_state.len = 0; 572 kb_state.len = 0;
576 573
577 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 574 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
578 IRQ_TYPE_SLOW, "keyboard/mouse/MIDI", 575 IRQ_TYPE_SLOW, "keyboard,mouse,MIDI",
579 atari_keyboard_interrupt); 576 atari_keyboard_interrupt);
580 if (error) 577 if (error)
581 return error; 578 return error;
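
The atakeyb.c hunk above drops the legacy atari_mouse_interrupt_hook and routes mouse packets through the single atari_input_mouse_interrupt_hook that the input driver already uses. A hedged sketch of the surviving single-hook pattern (the hook name mirrors the Atari code, the calling helper is hypothetical):

#include <linux/export.h>

/* One exported hook pointer; the input driver assigns it at probe time. */
void (*atari_input_mouse_interrupt_hook)(char *);
EXPORT_SYMBOL(atari_input_mouse_interrupt_hook);

static void example_deliver_packet(char *buf)
{
        /* Interrupt-side callers now test only this one pointer. */
        if (atari_input_mouse_interrupt_hook)
                atari_input_mouse_interrupt_hook(buf);
}
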
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index 604329fafbb8..ddbf43ca8858 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -180,7 +180,7 @@ void __init stdma_init(void)
180{ 180{
181 stdma_isr = NULL; 181 stdma_isr = NULL;
182 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, 182 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
183 "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int)) 183 "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
184 pr_err("Couldn't register ST-DMA interrupt\n"); 184 pr_err("Couldn't register ST-DMA interrupt\n");
185} 185}
186 186
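
Both Atari hunks also rename the requested interrupts to "keyboard,mouse,MIDI" and "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", dropping the '/' separators; the likely motivation is that the irqaction name becomes a directory entry under /proc/irq/<nr>/, where a slash is not allowed. A small hedged sketch of the same call shape (IRQ number, flags and cookie are placeholders):

#include <linux/init.h>
#include <linux/interrupt.h>

static int example_cookie;

static irqreturn_t example_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int __init example_irq_init(void)
{
        /* Slash-free name, as in the two hunks above. */
        return request_irq(42, example_isr, IRQF_SHARED,
                           "example floppy,ACSI,IDE", &example_cookie);
}
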
diff --git a/arch/m68k/include/asm/atarikb.h b/arch/m68k/include/asm/atarikb.h
index 546e7da5804f..68f3622bf591 100644
--- a/arch/m68k/include/asm/atarikb.h
+++ b/arch/m68k/include/asm/atarikb.h
@@ -34,8 +34,6 @@ void ikbd_joystick_disable(void);
34 34
35/* Hook for MIDI serial driver */ 35/* Hook for MIDI serial driver */
36extern void (*atari_MIDI_interrupt_hook) (void); 36extern void (*atari_MIDI_interrupt_hook) (void);
37/* Hook for mouse driver */
38extern void (*atari_mouse_interrupt_hook) (char *);
39/* Hook for keyboard inputdev driver */ 37/* Hook for keyboard inputdev driver */
40extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); 38extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char);
41/* Hook for mouse inputdev driver */ 39/* Hook for mouse inputdev driver */
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
index 9d69f6e62365..e9020f88a748 100644
--- a/arch/m68k/include/asm/bitops_mm.h
+++ b/arch/m68k/include/asm/bitops_mm.h
@@ -181,14 +181,15 @@ static inline int find_first_zero_bit(const unsigned long *vaddr,
181{ 181{
182 const unsigned long *p = vaddr; 182 const unsigned long *p = vaddr;
183 int res = 32; 183 int res = 32;
184 unsigned int words;
184 unsigned long num; 185 unsigned long num;
185 186
186 if (!size) 187 if (!size)
187 return 0; 188 return 0;
188 189
189 size = (size + 31) >> 5; 190 words = (size + 31) >> 5;
190 while (!(num = ~*p++)) { 191 while (!(num = ~*p++)) {
191 if (!--size) 192 if (!--words)
192 goto out; 193 goto out;
193 } 194 }
194 195
@@ -196,7 +197,8 @@ static inline int find_first_zero_bit(const unsigned long *vaddr,
196 : "=d" (res) : "d" (num & -num)); 197 : "=d" (res) : "d" (num & -num));
197 res ^= 31; 198 res ^= 31;
198out: 199out:
199 return ((long)p - (long)vaddr - 4) * 8 + res; 200 res += ((long)p - (long)vaddr - 4) * 8;
201 return res < size ? res : size;
200} 202}
201 203
202static inline int find_next_zero_bit(const unsigned long *vaddr, int size, 204static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
@@ -215,27 +217,32 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
215 /* Look for zero in first longword */ 217 /* Look for zero in first longword */
216 __asm__ __volatile__ ("bfffo %1{#0,#0},%0" 218 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
217 : "=d" (res) : "d" (num & -num)); 219 : "=d" (res) : "d" (num & -num));
218 if (res < 32) 220 if (res < 32) {
219 return offset + (res ^ 31); 221 offset += res ^ 31;
222 return offset < size ? offset : size;
223 }
220 offset += 32; 224 offset += 32;
225
226 if (offset >= size)
227 return size;
221 } 228 }
222 /* No zero yet, search remaining full bytes for a zero */ 229 /* No zero yet, search remaining full bytes for a zero */
223 res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); 230 return offset + find_first_zero_bit(p, size - offset);
224 return offset + res;
225} 231}
226 232
227static inline int find_first_bit(const unsigned long *vaddr, unsigned size) 233static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
228{ 234{
229 const unsigned long *p = vaddr; 235 const unsigned long *p = vaddr;
230 int res = 32; 236 int res = 32;
237 unsigned int words;
231 unsigned long num; 238 unsigned long num;
232 239
233 if (!size) 240 if (!size)
234 return 0; 241 return 0;
235 242
236 size = (size + 31) >> 5; 243 words = (size + 31) >> 5;
237 while (!(num = *p++)) { 244 while (!(num = *p++)) {
238 if (!--size) 245 if (!--words)
239 goto out; 246 goto out;
240 } 247 }
241 248
@@ -243,7 +250,8 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
243 : "=d" (res) : "d" (num & -num)); 250 : "=d" (res) : "d" (num & -num));
244 res ^= 31; 251 res ^= 31;
245out: 252out:
246 return ((long)p - (long)vaddr - 4) * 8 + res; 253 res += ((long)p - (long)vaddr - 4) * 8;
254 return res < size ? res : size;
247} 255}
248 256
249static inline int find_next_bit(const unsigned long *vaddr, int size, 257static inline int find_next_bit(const unsigned long *vaddr, int size,
@@ -262,13 +270,17 @@ static inline int find_next_bit(const unsigned long *vaddr, int size,
262 /* Look for one in first longword */ 270 /* Look for one in first longword */
263 __asm__ __volatile__ ("bfffo %1{#0,#0},%0" 271 __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
264 : "=d" (res) : "d" (num & -num)); 272 : "=d" (res) : "d" (num & -num));
265 if (res < 32) 273 if (res < 32) {
266 return offset + (res ^ 31); 274 offset += res ^ 31;
275 return offset < size ? offset : size;
276 }
267 offset += 32; 277 offset += 32;
278
279 if (offset >= size)
280 return size;
268 } 281 }
269 /* No one yet, search remaining full bytes for a one */ 282 /* No one yet, search remaining full bytes for a one */
270 res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); 283 return offset + find_first_bit(p, size - offset);
271 return offset + res;
272} 284}
273 285
274/* 286/*
@@ -366,23 +378,25 @@ static inline int test_bit_le(int nr, const void *vaddr)
366static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) 378static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
367{ 379{
368 const unsigned long *p = vaddr, *addr = vaddr; 380 const unsigned long *p = vaddr, *addr = vaddr;
369 int res; 381 int res = 0;
382 unsigned int words;
370 383
371 if (!size) 384 if (!size)
372 return 0; 385 return 0;
373 386
374 size = (size >> 5) + ((size & 31) > 0); 387 words = (size >> 5) + ((size & 31) > 0);
375 while (*p++ == ~0UL) 388 while (*p++ == ~0UL) {
376 { 389 if (--words == 0)
377 if (--size == 0) 390 goto out;
378 return (p - addr) << 5;
379 } 391 }
380 392
381 --p; 393 --p;
382 for (res = 0; res < 32; res++) 394 for (res = 0; res < 32; res++)
383 if (!test_bit_le(res, p)) 395 if (!test_bit_le(res, p))
384 break; 396 break;
385 return (p - addr) * 32 + res; 397out:
398 res += (p - addr) * 32;
399 return res < size ? res : size;
386} 400}
387 401
388static inline unsigned long find_next_zero_bit_le(const void *addr, 402static inline unsigned long find_next_zero_bit_le(const void *addr,
@@ -400,10 +414,15 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
400 offset -= bit; 414 offset -= bit;
401 /* Look for zero in first longword */ 415 /* Look for zero in first longword */
402 for (res = bit; res < 32; res++) 416 for (res = bit; res < 32; res++)
403 if (!test_bit_le(res, p)) 417 if (!test_bit_le(res, p)) {
404 return offset + res; 418 offset += res;
419 return offset < size ? offset : size;
420 }
405 p++; 421 p++;
406 offset += 32; 422 offset += 32;
423
424 if (offset >= size)
425 return size;
407 } 426 }
408 /* No zero yet, search remaining full bytes for a zero */ 427 /* No zero yet, search remaining full bytes for a zero */
409 return offset + find_first_zero_bit_le(p, size - offset); 428 return offset + find_first_zero_bit_le(p, size - offset);
@@ -412,22 +431,25 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
412static inline int find_first_bit_le(const void *vaddr, unsigned size) 431static inline int find_first_bit_le(const void *vaddr, unsigned size)
413{ 432{
414 const unsigned long *p = vaddr, *addr = vaddr; 433 const unsigned long *p = vaddr, *addr = vaddr;
415 int res; 434 int res = 0;
435 unsigned int words;
416 436
417 if (!size) 437 if (!size)
418 return 0; 438 return 0;
419 439
420 size = (size >> 5) + ((size & 31) > 0); 440 words = (size >> 5) + ((size & 31) > 0);
421 while (*p++ == 0UL) { 441 while (*p++ == 0UL) {
422 if (--size == 0) 442 if (--words == 0)
423 return (p - addr) << 5; 443 goto out;
424 } 444 }
425 445
426 --p; 446 --p;
427 for (res = 0; res < 32; res++) 447 for (res = 0; res < 32; res++)
428 if (test_bit_le(res, p)) 448 if (test_bit_le(res, p))
429 break; 449 break;
430 return (p - addr) * 32 + res; 450out:
451 res += (p - addr) * 32;
452 return res < size ? res : size;
431} 453}
432 454
433static inline unsigned long find_next_bit_le(const void *addr, 455static inline unsigned long find_next_bit_le(const void *addr,
@@ -445,10 +467,15 @@ static inline unsigned long find_next_bit_le(const void *addr,
445 offset -= bit; 467 offset -= bit;
446 /* Look for one in first longword */ 468 /* Look for one in first longword */
447 for (res = bit; res < 32; res++) 469 for (res = bit; res < 32; res++)
448 if (test_bit_le(res, p)) 470 if (test_bit_le(res, p)) {
449 return offset + res; 471 offset += res;
472 return offset < size ? offset : size;
473 }
450 p++; 474 p++;
451 offset += 32; 475 offset += 32;
476
477 if (offset >= size)
478 return size;
452 } 479 }
453 /* No set bit yet, search remaining full bytes for a set bit */ 480 /* No set bit yet, search remaining full bytes for a set bit */
454 return offset + find_first_bit_le(p, size - offset); 481 return offset + find_first_bit_le(p, size - offset);
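
The bitops_mm.h hunks above fix two related problems: the word count moves into a separate 'words' variable so 'size' survives to the end of the function, and every return path is clamped so the search helpers never report a bit index beyond 'size' (a bit sitting in the unused tail of the last 32-bit word used to leak out). A plain-C sketch of the clamping idea, using generic little-endian bit numbering rather than the m68k bfffo variant, purely illustrative:

#include <stdio.h>

/* Hypothetical helper, not the kernel implementation. */
static int example_find_first_zero_bit(const unsigned long *bitmap, int size)
{
        int words = (size + 31) / 32;
        int i, bit;

        for (i = 0; i < words; i++) {
                unsigned long inverted = ~bitmap[i] & 0xffffffffUL;

                if (!inverted)
                        continue;               /* word is all ones, keep scanning */
                for (bit = 0; bit < 32; bit++)
                        if (inverted & (1UL << bit))
                                break;
                bit += i * 32;
                return bit < size ? bit : size; /* the clamp added by this patch */
        }
        return size;                            /* no zero bit within size */
}

int main(void)
{
        unsigned long map[1] = { 0x0000ffffUL };        /* bits 0..15 set */

        /* With size = 10 every inspected bit is set, so the clamped helper
         * returns 10 instead of 16 (the first zero bit of the whole word). */
        printf("%d\n", example_find_first_zero_bit(map, 10));
        return 0;
}
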
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 29e17907d9f2..f3b649de2a1b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -22,7 +22,7 @@
22#define __NR_mknod 14 22#define __NR_mknod 14
23#define __NR_chmod 15 23#define __NR_chmod 15
24#define __NR_chown 16 24#define __NR_chown 16
25#define __NR_break 17 25/*#define __NR_break 17*/
26#define __NR_oldstat 18 26#define __NR_oldstat 18
27#define __NR_lseek 19 27#define __NR_lseek 19
28#define __NR_getpid 20 28#define __NR_getpid 20
@@ -36,11 +36,11 @@
36#define __NR_oldfstat 28 36#define __NR_oldfstat 28
37#define __NR_pause 29 37#define __NR_pause 29
38#define __NR_utime 30 38#define __NR_utime 30
39#define __NR_stty 31 39/*#define __NR_stty 31*/
40#define __NR_gtty 32 40/*#define __NR_gtty 32*/
41#define __NR_access 33 41#define __NR_access 33
42#define __NR_nice 34 42#define __NR_nice 34
43#define __NR_ftime 35 43/*#define __NR_ftime 35*/
44#define __NR_sync 36 44#define __NR_sync 36
45#define __NR_kill 37 45#define __NR_kill 37
46#define __NR_rename 38 46#define __NR_rename 38
@@ -49,7 +49,7 @@
49#define __NR_dup 41 49#define __NR_dup 41
50#define __NR_pipe 42 50#define __NR_pipe 42
51#define __NR_times 43 51#define __NR_times 43
52#define __NR_prof 44 52/*#define __NR_prof 44*/
53#define __NR_brk 45 53#define __NR_brk 45
54#define __NR_setgid 46 54#define __NR_setgid 46
55#define __NR_getgid 47 55#define __NR_getgid 47
@@ -58,13 +58,13 @@
58#define __NR_getegid 50 58#define __NR_getegid 50
59#define __NR_acct 51 59#define __NR_acct 51
60#define __NR_umount2 52 60#define __NR_umount2 52
61#define __NR_lock 53 61/*#define __NR_lock 53*/
62#define __NR_ioctl 54 62#define __NR_ioctl 54
63#define __NR_fcntl 55 63#define __NR_fcntl 55
64#define __NR_mpx 56 64/*#define __NR_mpx 56*/
65#define __NR_setpgid 57 65#define __NR_setpgid 57
66#define __NR_ulimit 58 66/*#define __NR_ulimit 58*/
67#define __NR_oldolduname 59 67/*#define __NR_oldolduname 59*/
68#define __NR_umask 60 68#define __NR_umask 60
69#define __NR_chroot 61 69#define __NR_chroot 61
70#define __NR_ustat 62 70#define __NR_ustat 62
@@ -103,10 +103,10 @@
103#define __NR_fchown 95 103#define __NR_fchown 95
104#define __NR_getpriority 96 104#define __NR_getpriority 96
105#define __NR_setpriority 97 105#define __NR_setpriority 97
106#define __NR_profil 98 106/*#define __NR_profil 98*/
107#define __NR_statfs 99 107#define __NR_statfs 99
108#define __NR_fstatfs 100 108#define __NR_fstatfs 100
109#define __NR_ioperm 101 109/*#define __NR_ioperm 101*/
110#define __NR_socketcall 102 110#define __NR_socketcall 102
111#define __NR_syslog 103 111#define __NR_syslog 103
112#define __NR_setitimer 104 112#define __NR_setitimer 104
@@ -114,11 +114,11 @@
114#define __NR_stat 106 114#define __NR_stat 106
115#define __NR_lstat 107 115#define __NR_lstat 107
116#define __NR_fstat 108 116#define __NR_fstat 108
117#define __NR_olduname 109 117/*#define __NR_olduname 109*/
118#define __NR_iopl /* 110 */ not supported 118/*#define __NR_iopl 110*/ /* not supported */
119#define __NR_vhangup 111 119#define __NR_vhangup 111
120#define __NR_idle /* 112 */ Obsolete 120/*#define __NR_idle 112*/ /* Obsolete */
121#define __NR_vm86 /* 113 */ not supported 121/*#define __NR_vm86 113*/ /* not supported */
122#define __NR_wait4 114 122#define __NR_wait4 114
123#define __NR_swapoff 115 123#define __NR_swapoff 115
124#define __NR_sysinfo 116 124#define __NR_sysinfo 116
@@ -132,17 +132,17 @@
132#define __NR_adjtimex 124 132#define __NR_adjtimex 124
133#define __NR_mprotect 125 133#define __NR_mprotect 125
134#define __NR_sigprocmask 126 134#define __NR_sigprocmask 126
135#define __NR_create_module 127 135/*#define __NR_create_module 127*/
136#define __NR_init_module 128 136#define __NR_init_module 128
137#define __NR_delete_module 129 137#define __NR_delete_module 129
138#define __NR_get_kernel_syms 130 138/*#define __NR_get_kernel_syms 130*/
139#define __NR_quotactl 131 139#define __NR_quotactl 131
140#define __NR_getpgid 132 140#define __NR_getpgid 132
141#define __NR_fchdir 133 141#define __NR_fchdir 133
142#define __NR_bdflush 134 142#define __NR_bdflush 134
143#define __NR_sysfs 135 143#define __NR_sysfs 135
144#define __NR_personality 136 144#define __NR_personality 136
145#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 145/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
146#define __NR_setfsuid 138 146#define __NR_setfsuid 138
147#define __NR_setfsgid 139 147#define __NR_setfsgid 139
148#define __NR__llseek 140 148#define __NR__llseek 140
@@ -172,7 +172,7 @@
172#define __NR_setresuid 164 172#define __NR_setresuid 164
173#define __NR_getresuid 165 173#define __NR_getresuid 165
174#define __NR_getpagesize 166 174#define __NR_getpagesize 166
175#define __NR_query_module 167 175/*#define __NR_query_module 167*/
176#define __NR_poll 168 176#define __NR_poll 168
177#define __NR_nfsservctl 169 177#define __NR_nfsservctl 169
178#define __NR_setresgid 170 178#define __NR_setresgid 170
@@ -193,8 +193,8 @@
193#define __NR_capset 185 193#define __NR_capset 185
194#define __NR_sigaltstack 186 194#define __NR_sigaltstack 186
195#define __NR_sendfile 187 195#define __NR_sendfile 187
196#define __NR_getpmsg 188 /* some people actually want streams */ 196/*#define __NR_getpmsg 188*/ /* some people actually want streams */
197#define __NR_putpmsg 189 /* some people actually want streams */ 197/*#define __NR_putpmsg 189*/ /* some people actually want streams */
198#define __NR_vfork 190 198#define __NR_vfork 190
199#define __NR_ugetrlimit 191 199#define __NR_ugetrlimit 191
200#define __NR_mmap2 192 200#define __NR_mmap2 192
@@ -223,6 +223,8 @@
223#define __NR_setfsuid32 215 223#define __NR_setfsuid32 215
224#define __NR_setfsgid32 216 224#define __NR_setfsgid32 216
225#define __NR_pivot_root 217 225#define __NR_pivot_root 217
226/* 218*/
227/* 219*/
226#define __NR_getdents64 220 228#define __NR_getdents64 220
227#define __NR_gettid 221 229#define __NR_gettid 221
228#define __NR_tkill 222 230#define __NR_tkill 222
@@ -281,7 +283,7 @@
281#define __NR_mq_notify 275 283#define __NR_mq_notify 275
282#define __NR_mq_getsetattr 276 284#define __NR_mq_getsetattr 276
283#define __NR_waitid 277 285#define __NR_waitid 277
284#define __NR_vserver 278 286/*#define __NR_vserver 278*/
285#define __NR_add_key 279 287#define __NR_add_key 279
286#define __NR_request_key 280 288#define __NR_request_key 280
287#define __NR_keyctl 281 289#define __NR_keyctl 281
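
The unistd.h hunks above comment out the __NR_* numbers of calls m68k never implemented (break, stty, gtty, ftime, prof, lock, mpx, ulimit, iopl, vserver and friends). The visible effect is on userspace that feature-tests syscall numbers at build time; a hedged userspace sketch, not part of the patch itself:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
#ifdef __NR_vserver
        /* With a bogus define present this branch is compiled in and can
         * only ever fail with ENOSYS at run time. */
        printf("vserver syscall number: %d\n", __NR_vserver);
#else
        puts("no vserver syscall on this architecture");
#endif
        return 0;
}
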
diff --git a/arch/m68k/kernel/Makefile_mm b/arch/m68k/kernel/Makefile_mm
index 55d5d6b680a2..aced67804579 100644
--- a/arch/m68k/kernel/Makefile_mm
+++ b/arch/m68k/kernel/Makefile_mm
@@ -10,7 +10,7 @@ endif
10extra-y += vmlinux.lds 10extra-y += vmlinux.lds
11 11
12obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ 12obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
13 sys_m68k.o time.o setup.o m68k_ksyms.o devres.o 13 sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o
14 14
15devres-y = ../../../kernel/irq/devres.o 15devres-y = ../../../kernel/irq/devres.o
16 16
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index 1359ee659574..bd0ec05263b2 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -407,351 +407,3 @@ resume:
407 407
408 rts 408 rts
409 409
410.data
411ALIGN
412sys_call_table:
413 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
414 .long sys_exit
415 .long sys_fork
416 .long sys_read
417 .long sys_write
418 .long sys_open /* 5 */
419 .long sys_close
420 .long sys_waitpid
421 .long sys_creat
422 .long sys_link
423 .long sys_unlink /* 10 */
424 .long sys_execve
425 .long sys_chdir
426 .long sys_time
427 .long sys_mknod
428 .long sys_chmod /* 15 */
429 .long sys_chown16
430 .long sys_ni_syscall /* old break syscall holder */
431 .long sys_stat
432 .long sys_lseek
433 .long sys_getpid /* 20 */
434 .long sys_mount
435 .long sys_oldumount
436 .long sys_setuid16
437 .long sys_getuid16
438 .long sys_stime /* 25 */
439 .long sys_ptrace
440 .long sys_alarm
441 .long sys_fstat
442 .long sys_pause
443 .long sys_utime /* 30 */
444 .long sys_ni_syscall /* old stty syscall holder */
445 .long sys_ni_syscall /* old gtty syscall holder */
446 .long sys_access
447 .long sys_nice
448 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
449 .long sys_sync
450 .long sys_kill
451 .long sys_rename
452 .long sys_mkdir
453 .long sys_rmdir /* 40 */
454 .long sys_dup
455 .long sys_pipe
456 .long sys_times
457 .long sys_ni_syscall /* old prof syscall holder */
458 .long sys_brk /* 45 */
459 .long sys_setgid16
460 .long sys_getgid16
461 .long sys_signal
462 .long sys_geteuid16
463 .long sys_getegid16 /* 50 */
464 .long sys_acct
465 .long sys_umount /* recycled never used phys() */
466 .long sys_ni_syscall /* old lock syscall holder */
467 .long sys_ioctl
468 .long sys_fcntl /* 55 */
469 .long sys_ni_syscall /* old mpx syscall holder */
470 .long sys_setpgid
471 .long sys_ni_syscall /* old ulimit syscall holder */
472 .long sys_ni_syscall
473 .long sys_umask /* 60 */
474 .long sys_chroot
475 .long sys_ustat
476 .long sys_dup2
477 .long sys_getppid
478 .long sys_getpgrp /* 65 */
479 .long sys_setsid
480 .long sys_sigaction
481 .long sys_sgetmask
482 .long sys_ssetmask
483 .long sys_setreuid16 /* 70 */
484 .long sys_setregid16
485 .long sys_sigsuspend
486 .long sys_sigpending
487 .long sys_sethostname
488 .long sys_setrlimit /* 75 */
489 .long sys_old_getrlimit
490 .long sys_getrusage
491 .long sys_gettimeofday
492 .long sys_settimeofday
493 .long sys_getgroups16 /* 80 */
494 .long sys_setgroups16
495 .long sys_old_select
496 .long sys_symlink
497 .long sys_lstat
498 .long sys_readlink /* 85 */
499 .long sys_uselib
500 .long sys_swapon
501 .long sys_reboot
502 .long sys_old_readdir
503 .long sys_old_mmap /* 90 */
504 .long sys_munmap
505 .long sys_truncate
506 .long sys_ftruncate
507 .long sys_fchmod
508 .long sys_fchown16 /* 95 */
509 .long sys_getpriority
510 .long sys_setpriority
511 .long sys_ni_syscall /* old profil syscall holder */
512 .long sys_statfs
513 .long sys_fstatfs /* 100 */
514 .long sys_ni_syscall /* ioperm for i386 */
515 .long sys_socketcall
516 .long sys_syslog
517 .long sys_setitimer
518 .long sys_getitimer /* 105 */
519 .long sys_newstat
520 .long sys_newlstat
521 .long sys_newfstat
522 .long sys_ni_syscall
523 .long sys_ni_syscall /* 110 */ /* iopl for i386 */
524 .long sys_vhangup
525 .long sys_ni_syscall /* obsolete idle() syscall */
526 .long sys_ni_syscall /* vm86old for i386 */
527 .long sys_wait4
528 .long sys_swapoff /* 115 */
529 .long sys_sysinfo
530 .long sys_ipc
531 .long sys_fsync
532 .long sys_sigreturn
533 .long sys_clone /* 120 */
534 .long sys_setdomainname
535 .long sys_newuname
536 .long sys_cacheflush /* modify_ldt for i386 */
537 .long sys_adjtimex
538 .long sys_mprotect /* 125 */
539 .long sys_sigprocmask
540 .long sys_ni_syscall /* old "create_module" */
541 .long sys_init_module
542 .long sys_delete_module
543 .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
544 .long sys_quotactl
545 .long sys_getpgid
546 .long sys_fchdir
547 .long sys_bdflush
548 .long sys_sysfs /* 135 */
549 .long sys_personality
550 .long sys_ni_syscall /* for afs_syscall */
551 .long sys_setfsuid16
552 .long sys_setfsgid16
553 .long sys_llseek /* 140 */
554 .long sys_getdents
555 .long sys_select
556 .long sys_flock
557 .long sys_msync
558 .long sys_readv /* 145 */
559 .long sys_writev
560 .long sys_getsid
561 .long sys_fdatasync
562 .long sys_sysctl
563 .long sys_mlock /* 150 */
564 .long sys_munlock
565 .long sys_mlockall
566 .long sys_munlockall
567 .long sys_sched_setparam
568 .long sys_sched_getparam /* 155 */
569 .long sys_sched_setscheduler
570 .long sys_sched_getscheduler
571 .long sys_sched_yield
572 .long sys_sched_get_priority_max
573 .long sys_sched_get_priority_min /* 160 */
574 .long sys_sched_rr_get_interval
575 .long sys_nanosleep
576 .long sys_mremap
577 .long sys_setresuid16
578 .long sys_getresuid16 /* 165 */
579 .long sys_getpagesize
580 .long sys_ni_syscall /* old sys_query_module */
581 .long sys_poll
582 .long sys_nfsservctl
583 .long sys_setresgid16 /* 170 */
584 .long sys_getresgid16
585 .long sys_prctl
586 .long sys_rt_sigreturn
587 .long sys_rt_sigaction
588 .long sys_rt_sigprocmask /* 175 */
589 .long sys_rt_sigpending
590 .long sys_rt_sigtimedwait
591 .long sys_rt_sigqueueinfo
592 .long sys_rt_sigsuspend
593 .long sys_pread64 /* 180 */
594 .long sys_pwrite64
595 .long sys_lchown16;
596 .long sys_getcwd
597 .long sys_capget
598 .long sys_capset /* 185 */
599 .long sys_sigaltstack
600 .long sys_sendfile
601 .long sys_ni_syscall /* streams1 */
602 .long sys_ni_syscall /* streams2 */
603 .long sys_vfork /* 190 */
604 .long sys_getrlimit
605 .long sys_mmap2
606 .long sys_truncate64
607 .long sys_ftruncate64
608 .long sys_stat64 /* 195 */
609 .long sys_lstat64
610 .long sys_fstat64
611 .long sys_chown
612 .long sys_getuid
613 .long sys_getgid /* 200 */
614 .long sys_geteuid
615 .long sys_getegid
616 .long sys_setreuid
617 .long sys_setregid
618 .long sys_getgroups /* 205 */
619 .long sys_setgroups
620 .long sys_fchown
621 .long sys_setresuid
622 .long sys_getresuid
623 .long sys_setresgid /* 210 */
624 .long sys_getresgid
625 .long sys_lchown
626 .long sys_setuid
627 .long sys_setgid
628 .long sys_setfsuid /* 215 */
629 .long sys_setfsgid
630 .long sys_pivot_root
631 .long sys_ni_syscall
632 .long sys_ni_syscall
633 .long sys_getdents64 /* 220 */
634 .long sys_gettid
635 .long sys_tkill
636 .long sys_setxattr
637 .long sys_lsetxattr
638 .long sys_fsetxattr /* 225 */
639 .long sys_getxattr
640 .long sys_lgetxattr
641 .long sys_fgetxattr
642 .long sys_listxattr
643 .long sys_llistxattr /* 230 */
644 .long sys_flistxattr
645 .long sys_removexattr
646 .long sys_lremovexattr
647 .long sys_fremovexattr
648 .long sys_futex /* 235 */
649 .long sys_sendfile64
650 .long sys_mincore
651 .long sys_madvise
652 .long sys_fcntl64
653 .long sys_readahead /* 240 */
654 .long sys_io_setup
655 .long sys_io_destroy
656 .long sys_io_getevents
657 .long sys_io_submit
658 .long sys_io_cancel /* 245 */
659 .long sys_fadvise64
660 .long sys_exit_group
661 .long sys_lookup_dcookie
662 .long sys_epoll_create
663 .long sys_epoll_ctl /* 250 */
664 .long sys_epoll_wait
665 .long sys_remap_file_pages
666 .long sys_set_tid_address
667 .long sys_timer_create
668 .long sys_timer_settime /* 255 */
669 .long sys_timer_gettime
670 .long sys_timer_getoverrun
671 .long sys_timer_delete
672 .long sys_clock_settime
673 .long sys_clock_gettime /* 260 */
674 .long sys_clock_getres
675 .long sys_clock_nanosleep
676 .long sys_statfs64
677 .long sys_fstatfs64
678 .long sys_tgkill /* 265 */
679 .long sys_utimes
680 .long sys_fadvise64_64
681 .long sys_mbind
682 .long sys_get_mempolicy
683 .long sys_set_mempolicy /* 270 */
684 .long sys_mq_open
685 .long sys_mq_unlink
686 .long sys_mq_timedsend
687 .long sys_mq_timedreceive
688 .long sys_mq_notify /* 275 */
689 .long sys_mq_getsetattr
690 .long sys_waitid
691 .long sys_ni_syscall /* for sys_vserver */
692 .long sys_add_key
693 .long sys_request_key /* 280 */
694 .long sys_keyctl
695 .long sys_ioprio_set
696 .long sys_ioprio_get
697 .long sys_inotify_init
698 .long sys_inotify_add_watch /* 285 */
699 .long sys_inotify_rm_watch
700 .long sys_migrate_pages
701 .long sys_openat
702 .long sys_mkdirat
703 .long sys_mknodat /* 290 */
704 .long sys_fchownat
705 .long sys_futimesat
706 .long sys_fstatat64
707 .long sys_unlinkat
708 .long sys_renameat /* 295 */
709 .long sys_linkat
710 .long sys_symlinkat
711 .long sys_readlinkat
712 .long sys_fchmodat
713 .long sys_faccessat /* 300 */
714 .long sys_ni_syscall /* Reserved for pselect6 */
715 .long sys_ni_syscall /* Reserved for ppoll */
716 .long sys_unshare
717 .long sys_set_robust_list
718 .long sys_get_robust_list /* 305 */
719 .long sys_splice
720 .long sys_sync_file_range
721 .long sys_tee
722 .long sys_vmsplice
723 .long sys_move_pages /* 310 */
724 .long sys_sched_setaffinity
725 .long sys_sched_getaffinity
726 .long sys_kexec_load
727 .long sys_getcpu
728 .long sys_epoll_pwait /* 315 */
729 .long sys_utimensat
730 .long sys_signalfd
731 .long sys_timerfd_create
732 .long sys_eventfd
733 .long sys_fallocate /* 320 */
734 .long sys_timerfd_settime
735 .long sys_timerfd_gettime
736 .long sys_signalfd4
737 .long sys_eventfd2
738 .long sys_epoll_create1 /* 325 */
739 .long sys_dup3
740 .long sys_pipe2
741 .long sys_inotify_init1
742 .long sys_preadv
743 .long sys_pwritev /* 330 */
744 .long sys_rt_tgsigqueueinfo
745 .long sys_perf_event_open
746 .long sys_get_thread_area
747 .long sys_set_thread_area
748 .long sys_atomic_cmpxchg_32 /* 335 */
749 .long sys_atomic_barrier
750 .long sys_fanotify_init
751 .long sys_fanotify_mark
752 .long sys_prlimit64
753 .long sys_name_to_handle_at /* 340 */
754 .long sys_open_by_handle_at
755 .long sys_clock_adjtime
756 .long sys_syncfs
757
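
The entry_mm.S hunk above removes the MMU kernel's private copy of sys_call_table; with the Makefile_mm change earlier, MMU and non-MMU m68k builds now link the one table in arch/m68k/kernel/syscalltable.S. A plain-C sketch of the dispatch idea such a table encodes, entirely hypothetical, with a ni-syscall stand-in for unimplemented slots:

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long demo_ni_syscall(void) { return -38; }       /* -ENOSYS */
static long demo_getpid(void)     { return 1234; }

static syscall_fn demo_table[] = {
        [0]  = demo_ni_syscall,         /* old "setup()" slot */
        [20] = demo_getpid,             /* __NR_getpid */
};

static long demo_dispatch(unsigned int nr)
{
        size_t entries = sizeof(demo_table) / sizeof(demo_table[0]);

        if (nr >= entries || !demo_table[nr])
                return demo_ni_syscall();       /* hole or out of range */
        return demo_table[nr]();
}

int main(void)
{
        printf("nr 20 -> %ld, nr 17 -> %ld\n",
               demo_dispatch(20), demo_dispatch(17));
        return 0;
}
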
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 9b8393d8adb8..5909e392cb1e 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/arch/m68knommu/kernel/syscalltable.S
3 *
4 * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) 2 * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com)
5 * 3 *
6 * Based on older entry.S files, the following copyrights apply: 4 * Based on older entry.S files, the following copyrights apply:
@@ -9,171 +7,176 @@
9 * Kenneth Albanowski <kjahds@kjahds.com>, 7 * Kenneth Albanowski <kjahds@kjahds.com>,
10 * Copyright (C) 2000 Lineo Inc. (www.lineo.com) 8 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
11 * Copyright (C) 1991, 1992 Linus Torvalds 9 * Copyright (C) 1991, 1992 Linus Torvalds
10 *
11 * Linux/m68k support by Hamish Macdonald
12 */ 12 */
13 13
14#include <linux/sys.h> 14#include <linux/sys.h>
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/unistd.h>
17 16
18.text 17#ifndef CONFIG_MMU
18#define sys_mmap2 sys_mmap_pgoff
19#endif
20
21.section .rodata
19ALIGN 22ALIGN
20ENTRY(sys_call_table) 23ENTRY(sys_call_table)
21 .long sys_restart_syscall /* 0 - old "setup()" system call */ 24 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
22 .long sys_exit 25 .long sys_exit
23 .long sys_fork 26 .long sys_fork
24 .long sys_read 27 .long sys_read
25 .long sys_write 28 .long sys_write
26 .long sys_open /* 5 */ 29 .long sys_open /* 5 */
27 .long sys_close 30 .long sys_close
28 .long sys_waitpid 31 .long sys_waitpid
29 .long sys_creat 32 .long sys_creat
30 .long sys_link 33 .long sys_link
31 .long sys_unlink /* 10 */ 34 .long sys_unlink /* 10 */
32 .long sys_execve 35 .long sys_execve
33 .long sys_chdir 36 .long sys_chdir
34 .long sys_time 37 .long sys_time
35 .long sys_mknod 38 .long sys_mknod
36 .long sys_chmod /* 15 */ 39 .long sys_chmod /* 15 */
37 .long sys_chown16 40 .long sys_chown16
38 .long sys_ni_syscall /* old break syscall holder */ 41 .long sys_ni_syscall /* old break syscall holder */
39 .long sys_stat 42 .long sys_stat
40 .long sys_lseek 43 .long sys_lseek
41 .long sys_getpid /* 20 */ 44 .long sys_getpid /* 20 */
42 .long sys_mount 45 .long sys_mount
43 .long sys_oldumount 46 .long sys_oldumount
44 .long sys_setuid16 47 .long sys_setuid16
45 .long sys_getuid16 48 .long sys_getuid16
46 .long sys_stime /* 25 */ 49 .long sys_stime /* 25 */
47 .long sys_ptrace 50 .long sys_ptrace
48 .long sys_alarm 51 .long sys_alarm
49 .long sys_fstat 52 .long sys_fstat
50 .long sys_pause 53 .long sys_pause
51 .long sys_utime /* 30 */ 54 .long sys_utime /* 30 */
52 .long sys_ni_syscall /* old stty syscall holder */ 55 .long sys_ni_syscall /* old stty syscall holder */
53 .long sys_ni_syscall /* old gtty syscall holder */ 56 .long sys_ni_syscall /* old gtty syscall holder */
54 .long sys_access 57 .long sys_access
55 .long sys_nice 58 .long sys_nice
56 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ 59 .long sys_ni_syscall /* 35 - old ftime syscall holder */
57 .long sys_sync 60 .long sys_sync
58 .long sys_kill 61 .long sys_kill
59 .long sys_rename 62 .long sys_rename
60 .long sys_mkdir 63 .long sys_mkdir
61 .long sys_rmdir /* 40 */ 64 .long sys_rmdir /* 40 */
62 .long sys_dup 65 .long sys_dup
63 .long sys_pipe 66 .long sys_pipe
64 .long sys_times 67 .long sys_times
65 .long sys_ni_syscall /* old prof syscall holder */ 68 .long sys_ni_syscall /* old prof syscall holder */
66 .long sys_brk /* 45 */ 69 .long sys_brk /* 45 */
67 .long sys_setgid16 70 .long sys_setgid16
68 .long sys_getgid16 71 .long sys_getgid16
69 .long sys_signal 72 .long sys_signal
70 .long sys_geteuid16 73 .long sys_geteuid16
71 .long sys_getegid16 /* 50 */ 74 .long sys_getegid16 /* 50 */
72 .long sys_acct 75 .long sys_acct
73 .long sys_umount /* recycled never used phys() */ 76 .long sys_umount /* recycled never used phys() */
74 .long sys_ni_syscall /* old lock syscall holder */ 77 .long sys_ni_syscall /* old lock syscall holder */
75 .long sys_ioctl 78 .long sys_ioctl
76 .long sys_fcntl /* 55 */ 79 .long sys_fcntl /* 55 */
77 .long sys_ni_syscall /* old mpx syscall holder */ 80 .long sys_ni_syscall /* old mpx syscall holder */
78 .long sys_setpgid 81 .long sys_setpgid
79 .long sys_ni_syscall /* old ulimit syscall holder */ 82 .long sys_ni_syscall /* old ulimit syscall holder */
80 .long sys_ni_syscall 83 .long sys_ni_syscall
81 .long sys_umask /* 60 */ 84 .long sys_umask /* 60 */
82 .long sys_chroot 85 .long sys_chroot
83 .long sys_ustat 86 .long sys_ustat
84 .long sys_dup2 87 .long sys_dup2
85 .long sys_getppid 88 .long sys_getppid
86 .long sys_getpgrp /* 65 */ 89 .long sys_getpgrp /* 65 */
87 .long sys_setsid 90 .long sys_setsid
88 .long sys_sigaction 91 .long sys_sigaction
89 .long sys_sgetmask 92 .long sys_sgetmask
90 .long sys_ssetmask 93 .long sys_ssetmask
91 .long sys_setreuid16 /* 70 */ 94 .long sys_setreuid16 /* 70 */
92 .long sys_setregid16 95 .long sys_setregid16
93 .long sys_sigsuspend 96 .long sys_sigsuspend
94 .long sys_sigpending 97 .long sys_sigpending
95 .long sys_sethostname 98 .long sys_sethostname
96 .long sys_setrlimit /* 75 */ 99 .long sys_setrlimit /* 75 */
97 .long sys_old_getrlimit 100 .long sys_old_getrlimit
98 .long sys_getrusage 101 .long sys_getrusage
99 .long sys_gettimeofday 102 .long sys_gettimeofday
100 .long sys_settimeofday 103 .long sys_settimeofday
101 .long sys_getgroups16 /* 80 */ 104 .long sys_getgroups16 /* 80 */
102 .long sys_setgroups16 105 .long sys_setgroups16
103 .long sys_old_select 106 .long sys_old_select
104 .long sys_symlink 107 .long sys_symlink
105 .long sys_lstat 108 .long sys_lstat
106 .long sys_readlink /* 85 */ 109 .long sys_readlink /* 85 */
107 .long sys_uselib 110 .long sys_uselib
108 .long sys_ni_syscall /* sys_swapon */ 111 .long sys_swapon
109 .long sys_reboot 112 .long sys_reboot
110 .long sys_old_readdir 113 .long sys_old_readdir
111 .long sys_old_mmap /* 90 */ 114 .long sys_old_mmap /* 90 */
112 .long sys_munmap 115 .long sys_munmap
113 .long sys_truncate 116 .long sys_truncate
114 .long sys_ftruncate 117 .long sys_ftruncate
115 .long sys_fchmod 118 .long sys_fchmod
116 .long sys_fchown16 /* 95 */ 119 .long sys_fchown16 /* 95 */
117 .long sys_getpriority 120 .long sys_getpriority
118 .long sys_setpriority 121 .long sys_setpriority
119 .long sys_ni_syscall /* old profil syscall holder */ 122 .long sys_ni_syscall /* old profil syscall holder */
120 .long sys_statfs 123 .long sys_statfs
121 .long sys_fstatfs /* 100 */ 124 .long sys_fstatfs /* 100 */
122 .long sys_ni_syscall /* ioperm for i386 */ 125 .long sys_ni_syscall /* ioperm for i386 */
123 .long sys_socketcall 126 .long sys_socketcall
124 .long sys_syslog 127 .long sys_syslog
125 .long sys_setitimer 128 .long sys_setitimer
126 .long sys_getitimer /* 105 */ 129 .long sys_getitimer /* 105 */
127 .long sys_newstat 130 .long sys_newstat
128 .long sys_newlstat 131 .long sys_newlstat
129 .long sys_newfstat 132 .long sys_newfstat
130 .long sys_ni_syscall 133 .long sys_ni_syscall
131 .long sys_ni_syscall /* iopl for i386 */ /* 110 */ 134 .long sys_ni_syscall /* 110 - iopl for i386 */
132 .long sys_vhangup 135 .long sys_vhangup
133 .long sys_ni_syscall /* obsolete idle() syscall */ 136 .long sys_ni_syscall /* obsolete idle() syscall */
134 .long sys_ni_syscall /* vm86old for i386 */ 137 .long sys_ni_syscall /* vm86old for i386 */
135 .long sys_wait4 138 .long sys_wait4
136 .long sys_ni_syscall /* 115 */ /* sys_swapoff */ 139 .long sys_swapoff /* 115 */
137 .long sys_sysinfo 140 .long sys_sysinfo
138 .long sys_ipc 141 .long sys_ipc
139 .long sys_fsync 142 .long sys_fsync
140 .long sys_sigreturn 143 .long sys_sigreturn
141 .long sys_clone /* 120 */ 144 .long sys_clone /* 120 */
142 .long sys_setdomainname 145 .long sys_setdomainname
143 .long sys_newuname 146 .long sys_newuname
144 .long sys_cacheflush /* modify_ldt for i386 */ 147 .long sys_cacheflush /* modify_ldt for i386 */
145 .long sys_adjtimex 148 .long sys_adjtimex
146 .long sys_ni_syscall /* 125 */ /* sys_mprotect */ 149 .long sys_mprotect /* 125 */
147 .long sys_sigprocmask 150 .long sys_sigprocmask
148 .long sys_ni_syscall /* old "creat_module" */ 151 .long sys_ni_syscall /* old "create_module" */
149 .long sys_init_module 152 .long sys_init_module
150 .long sys_delete_module 153 .long sys_delete_module
151 .long sys_ni_syscall /* 130: old "get_kernel_syms" */ 154 .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
152 .long sys_quotactl 155 .long sys_quotactl
153 .long sys_getpgid 156 .long sys_getpgid
154 .long sys_fchdir 157 .long sys_fchdir
155 .long sys_bdflush 158 .long sys_bdflush
156 .long sys_sysfs /* 135 */ 159 .long sys_sysfs /* 135 */
157 .long sys_personality 160 .long sys_personality
158 .long sys_ni_syscall /* for afs_syscall */ 161 .long sys_ni_syscall /* for afs_syscall */
159 .long sys_setfsuid16 162 .long sys_setfsuid16
160 .long sys_setfsgid16 163 .long sys_setfsgid16
161 .long sys_llseek /* 140 */ 164 .long sys_llseek /* 140 */
162 .long sys_getdents 165 .long sys_getdents
163 .long sys_select 166 .long sys_select
164 .long sys_flock 167 .long sys_flock
165 .long sys_ni_syscall /* sys_msync */ 168 .long sys_msync
166 .long sys_readv /* 145 */ 169 .long sys_readv /* 145 */
167 .long sys_writev 170 .long sys_writev
168 .long sys_getsid 171 .long sys_getsid
169 .long sys_fdatasync 172 .long sys_fdatasync
170 .long sys_sysctl 173 .long sys_sysctl
171 .long sys_ni_syscall /* 150 */ /* sys_mlock */ 174 .long sys_mlock /* 150 */
172 .long sys_ni_syscall /* sys_munlock */ 175 .long sys_munlock
173 .long sys_ni_syscall /* sys_mlockall */ 176 .long sys_mlockall
174 .long sys_ni_syscall /* sys_munlockall */ 177 .long sys_munlockall
175 .long sys_sched_setparam 178 .long sys_sched_setparam
176 .long sys_sched_getparam /* 155 */ 179 .long sys_sched_getparam /* 155 */
177 .long sys_sched_setscheduler 180 .long sys_sched_setscheduler
178 .long sys_sched_getscheduler 181 .long sys_sched_getscheduler
179 .long sys_sched_yield 182 .long sys_sched_yield
@@ -181,124 +184,124 @@ ENTRY(sys_call_table)
181 .long sys_sched_get_priority_min /* 160 */ 184 .long sys_sched_get_priority_min /* 160 */
182 .long sys_sched_rr_get_interval 185 .long sys_sched_rr_get_interval
183 .long sys_nanosleep 186 .long sys_nanosleep
184 .long sys_ni_syscall /* sys_mremap */ 187 .long sys_mremap
185 .long sys_setresuid16 188 .long sys_setresuid16
186 .long sys_getresuid16 /* 165 */ 189 .long sys_getresuid16 /* 165 */
187 .long sys_getpagesize /* sys_getpagesize */ 190 .long sys_getpagesize
188 .long sys_ni_syscall /* old "query_module" */ 191 .long sys_ni_syscall /* old "query_module" */
189 .long sys_poll 192 .long sys_poll
190 .long sys_ni_syscall /* sys_nfsservctl */ 193 .long sys_nfsservctl
191 .long sys_setresgid16 /* 170 */ 194 .long sys_setresgid16 /* 170 */
192 .long sys_getresgid16 195 .long sys_getresgid16
193 .long sys_prctl 196 .long sys_prctl
194 .long sys_rt_sigreturn 197 .long sys_rt_sigreturn
195 .long sys_rt_sigaction 198 .long sys_rt_sigaction
196 .long sys_rt_sigprocmask /* 175 */ 199 .long sys_rt_sigprocmask /* 175 */
197 .long sys_rt_sigpending 200 .long sys_rt_sigpending
198 .long sys_rt_sigtimedwait 201 .long sys_rt_sigtimedwait
199 .long sys_rt_sigqueueinfo 202 .long sys_rt_sigqueueinfo
200 .long sys_rt_sigsuspend 203 .long sys_rt_sigsuspend
201 .long sys_pread64 /* 180 */ 204 .long sys_pread64 /* 180 */
202 .long sys_pwrite64 205 .long sys_pwrite64
203 .long sys_lchown16 206 .long sys_lchown16
204 .long sys_getcwd 207 .long sys_getcwd
205 .long sys_capget 208 .long sys_capget
206 .long sys_capset /* 185 */ 209 .long sys_capset /* 185 */
207 .long sys_sigaltstack 210 .long sys_sigaltstack
208 .long sys_sendfile 211 .long sys_sendfile
209 .long sys_ni_syscall /* streams1 */ 212 .long sys_ni_syscall /* streams1 */
210 .long sys_ni_syscall /* streams2 */ 213 .long sys_ni_syscall /* streams2 */
211 .long sys_vfork /* 190 */ 214 .long sys_vfork /* 190 */
212 .long sys_getrlimit 215 .long sys_getrlimit
213 .long sys_mmap_pgoff 216 .long sys_mmap2
214 .long sys_truncate64 217 .long sys_truncate64
215 .long sys_ftruncate64 218 .long sys_ftruncate64
216 .long sys_stat64 /* 195 */ 219 .long sys_stat64 /* 195 */
217 .long sys_lstat64 220 .long sys_lstat64
218 .long sys_fstat64 221 .long sys_fstat64
219 .long sys_chown 222 .long sys_chown
220 .long sys_getuid 223 .long sys_getuid
221 .long sys_getgid /* 200 */ 224 .long sys_getgid /* 200 */
222 .long sys_geteuid 225 .long sys_geteuid
223 .long sys_getegid 226 .long sys_getegid
224 .long sys_setreuid 227 .long sys_setreuid
225 .long sys_setregid 228 .long sys_setregid
226 .long sys_getgroups /* 205 */ 229 .long sys_getgroups /* 205 */
227 .long sys_setgroups 230 .long sys_setgroups
228 .long sys_fchown 231 .long sys_fchown
229 .long sys_setresuid 232 .long sys_setresuid
230 .long sys_getresuid 233 .long sys_getresuid
231 .long sys_setresgid /* 210 */ 234 .long sys_setresgid /* 210 */
232 .long sys_getresgid 235 .long sys_getresgid
233 .long sys_lchown 236 .long sys_lchown
234 .long sys_setuid 237 .long sys_setuid
235 .long sys_setgid 238 .long sys_setgid
236 .long sys_setfsuid /* 215 */ 239 .long sys_setfsuid /* 215 */
237 .long sys_setfsgid 240 .long sys_setfsgid
238 .long sys_pivot_root 241 .long sys_pivot_root
239 .long sys_ni_syscall 242 .long sys_ni_syscall
240 .long sys_ni_syscall 243 .long sys_ni_syscall
241 .long sys_getdents64 /* 220 */ 244 .long sys_getdents64 /* 220 */
242 .long sys_gettid 245 .long sys_gettid
243 .long sys_tkill 246 .long sys_tkill
244 .long sys_setxattr 247 .long sys_setxattr
245 .long sys_lsetxattr 248 .long sys_lsetxattr
246 .long sys_fsetxattr /* 225 */ 249 .long sys_fsetxattr /* 225 */
247 .long sys_getxattr 250 .long sys_getxattr
248 .long sys_lgetxattr 251 .long sys_lgetxattr
249 .long sys_fgetxattr 252 .long sys_fgetxattr
250 .long sys_listxattr 253 .long sys_listxattr
251 .long sys_llistxattr /* 230 */ 254 .long sys_llistxattr /* 230 */
252 .long sys_flistxattr 255 .long sys_flistxattr
253 .long sys_removexattr 256 .long sys_removexattr
254 .long sys_lremovexattr 257 .long sys_lremovexattr
255 .long sys_fremovexattr 258 .long sys_fremovexattr
256 .long sys_futex /* 235 */ 259 .long sys_futex /* 235 */
257 .long sys_sendfile64 260 .long sys_sendfile64
258 .long sys_ni_syscall /* sys_mincore */ 261 .long sys_mincore
259 .long sys_ni_syscall /* sys_madvise */ 262 .long sys_madvise
260 .long sys_fcntl64 263 .long sys_fcntl64
261 .long sys_readahead /* 240 */ 264 .long sys_readahead /* 240 */
262 .long sys_io_setup 265 .long sys_io_setup
263 .long sys_io_destroy 266 .long sys_io_destroy
264 .long sys_io_getevents 267 .long sys_io_getevents
265 .long sys_io_submit 268 .long sys_io_submit
266 .long sys_io_cancel /* 245 */ 269 .long sys_io_cancel /* 245 */
267 .long sys_fadvise64 270 .long sys_fadvise64
268 .long sys_exit_group 271 .long sys_exit_group
269 .long sys_lookup_dcookie 272 .long sys_lookup_dcookie
270 .long sys_epoll_create 273 .long sys_epoll_create
271 .long sys_epoll_ctl /* 250 */ 274 .long sys_epoll_ctl /* 250 */
272 .long sys_epoll_wait 275 .long sys_epoll_wait
273 .long sys_ni_syscall /* sys_remap_file_pages */ 276 .long sys_remap_file_pages
274 .long sys_set_tid_address 277 .long sys_set_tid_address
275 .long sys_timer_create 278 .long sys_timer_create
276 .long sys_timer_settime /* 255 */ 279 .long sys_timer_settime /* 255 */
277 .long sys_timer_gettime 280 .long sys_timer_gettime
278 .long sys_timer_getoverrun 281 .long sys_timer_getoverrun
279 .long sys_timer_delete 282 .long sys_timer_delete
280 .long sys_clock_settime 283 .long sys_clock_settime
281 .long sys_clock_gettime /* 260 */ 284 .long sys_clock_gettime /* 260 */
282 .long sys_clock_getres 285 .long sys_clock_getres
283 .long sys_clock_nanosleep 286 .long sys_clock_nanosleep
284 .long sys_statfs64 287 .long sys_statfs64
285 .long sys_fstatfs64 288 .long sys_fstatfs64
286 .long sys_tgkill /* 265 */ 289 .long sys_tgkill /* 265 */
287 .long sys_utimes 290 .long sys_utimes
288 .long sys_fadvise64_64 291 .long sys_fadvise64_64
289 .long sys_mbind 292 .long sys_mbind
290 .long sys_get_mempolicy 293 .long sys_get_mempolicy
291 .long sys_set_mempolicy /* 270 */ 294 .long sys_set_mempolicy /* 270 */
292 .long sys_mq_open 295 .long sys_mq_open
293 .long sys_mq_unlink 296 .long sys_mq_unlink
294 .long sys_mq_timedsend 297 .long sys_mq_timedsend
295 .long sys_mq_timedreceive 298 .long sys_mq_timedreceive
296 .long sys_mq_notify /* 275 */ 299 .long sys_mq_notify /* 275 */
297 .long sys_mq_getsetattr 300 .long sys_mq_getsetattr
298 .long sys_waitid 301 .long sys_waitid
299 .long sys_ni_syscall /* for sys_vserver */ 302 .long sys_ni_syscall /* for sys_vserver */
300 .long sys_add_key 303 .long sys_add_key
301 .long sys_request_key /* 280 */ 304 .long sys_request_key /* 280 */
302 .long sys_keyctl 305 .long sys_keyctl
303 .long sys_ioprio_set 306 .long sys_ioprio_set
304 .long sys_ioprio_get 307 .long sys_ioprio_get
@@ -319,8 +322,8 @@ ENTRY(sys_call_table)
319 .long sys_readlinkat 322 .long sys_readlinkat
320 .long sys_fchmodat 323 .long sys_fchmodat
321 .long sys_faccessat /* 300 */ 324 .long sys_faccessat /* 300 */
322 .long sys_ni_syscall /* Reserved for pselect6 */ 325 .long sys_pselect6
323 .long sys_ni_syscall /* Reserved for ppoll */ 326 .long sys_ppoll
324 .long sys_unshare 327 .long sys_unshare
325 .long sys_set_robust_list 328 .long sys_set_robust_list
326 .long sys_get_robust_list /* 305 */ 329 .long sys_get_robust_list /* 305 */
@@ -363,7 +366,3 @@ ENTRY(sys_call_table)
363 .long sys_clock_adjtime 366 .long sys_clock_adjtime
364 .long sys_syncfs 367 .long sys_syncfs
365 368
366 .rept NR_syscalls-(.-sys_call_table)/4
367 .long sys_ni_syscall
368 .endr
369
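
The shared syscalltable.S now sits in .rodata, drops the trailing sys_ni_syscall fill, and gains a small #ifndef CONFIG_MMU block that redirects sys_mmap2 to sys_mmap_pgoff, so one table source serves both configurations. A hedged C-preprocessor sketch of that redirection, demo names only:

#include <stdio.h>

/* #define DEMO_NOMMU */        /* stands in for CONFIG_MMU being unset */

long demo_mmap_pgoff(void) { return 1; }
long demo_mmap2(void)      { return 2; }

#ifdef DEMO_NOMMU
#define demo_mmap2 demo_mmap_pgoff      /* same trick as "#define sys_mmap2 sys_mmap_pgoff" */
#endif

static long (*demo_table[])(void) = {
        demo_mmap2,     /* resolves to demo_mmap_pgoff when DEMO_NOMMU is defined */
};

int main(void)
{
        printf("slot 0 -> %ld\n", demo_table[0]());
        return 0;
}
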
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 7ff9b5492041..aef6c917b45a 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -11,6 +11,7 @@ platforms += dec
11platforms += emma 11platforms += emma
12platforms += jazz 12platforms += jazz
13platforms += jz4740 13platforms += jz4740
14platforms += lantiq
14platforms += lasat 15platforms += lasat
15platforms += loongson 16platforms += loongson
16platforms += mipssim 17platforms += mipssim
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8e256cc5dcd9..2d1cf9740953 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -212,6 +212,24 @@ config MACH_JZ4740
212 select HAVE_PWM 212 select HAVE_PWM
213 select HAVE_CLK 213 select HAVE_CLK
214 214
215config LANTIQ
216 bool "Lantiq based platforms"
217 select DMA_NONCOHERENT
218 select IRQ_CPU
219 select CEVT_R4K
220 select CSRC_R4K
221 select SYS_HAS_CPU_MIPS32_R1
222 select SYS_HAS_CPU_MIPS32_R2
223 select SYS_SUPPORTS_BIG_ENDIAN
224 select SYS_SUPPORTS_32BIT_KERNEL
225 select SYS_SUPPORTS_MULTITHREADING
226 select SYS_HAS_EARLY_PRINTK
227 select ARCH_REQUIRE_GPIOLIB
228 select SWAP_IO_SPACE
229 select BOOT_RAW
230 select HAVE_CLK
231 select MIPS_MACHINE
232
215config LASAT 233config LASAT
216 bool "LASAT Networks platforms" 234 bool "LASAT Networks platforms"
217 select CEVT_R4K 235 select CEVT_R4K
@@ -736,6 +754,33 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
736 Hikari 754 Hikari
737 Say Y here for most Octeon reference boards. 755 Say Y here for most Octeon reference boards.
738 756
757config NLM_XLR_BOARD
758 bool "Netlogic XLR/XLS based systems"
759 depends on EXPERIMENTAL
760 select BOOT_ELF32
761 select NLM_COMMON
762 select NLM_XLR
763 select SYS_HAS_CPU_XLR
764 select SYS_SUPPORTS_SMP
765 select HW_HAS_PCI
766 select SWAP_IO_SPACE
767 select SYS_SUPPORTS_32BIT_KERNEL
768 select SYS_SUPPORTS_64BIT_KERNEL
769 select 64BIT_PHYS_ADDR
770 select SYS_SUPPORTS_BIG_ENDIAN
771 select SYS_SUPPORTS_HIGHMEM
772 select DMA_COHERENT
773 select NR_CPUS_DEFAULT_32
774 select CEVT_R4K
775 select CSRC_R4K
776 select IRQ_CPU
777 select ZONE_DMA if 64BIT
778 select SYNC_R4K
779 select SYS_HAS_EARLY_PRINTK
780 help
781 Support for systems based on Netlogic XLR and XLS processors.
782 Say Y here if you have a XLR or XLS based board.
783
739endchoice 784endchoice
740 785
741source "arch/mips/alchemy/Kconfig" 786source "arch/mips/alchemy/Kconfig"
@@ -743,6 +788,7 @@ source "arch/mips/ath79/Kconfig"
743source "arch/mips/bcm63xx/Kconfig" 788source "arch/mips/bcm63xx/Kconfig"
744source "arch/mips/jazz/Kconfig" 789source "arch/mips/jazz/Kconfig"
745source "arch/mips/jz4740/Kconfig" 790source "arch/mips/jz4740/Kconfig"
791source "arch/mips/lantiq/Kconfig"
746source "arch/mips/lasat/Kconfig" 792source "arch/mips/lasat/Kconfig"
747source "arch/mips/pmc-sierra/Kconfig" 793source "arch/mips/pmc-sierra/Kconfig"
748source "arch/mips/powertv/Kconfig" 794source "arch/mips/powertv/Kconfig"
@@ -752,6 +798,7 @@ source "arch/mips/txx9/Kconfig"
752source "arch/mips/vr41xx/Kconfig" 798source "arch/mips/vr41xx/Kconfig"
753source "arch/mips/cavium-octeon/Kconfig" 799source "arch/mips/cavium-octeon/Kconfig"
754source "arch/mips/loongson/Kconfig" 800source "arch/mips/loongson/Kconfig"
801source "arch/mips/netlogic/Kconfig"
755 802
756endmenu 803endmenu
757 804
@@ -997,9 +1044,6 @@ config IRQ_GT641XX
997config IRQ_GIC 1044config IRQ_GIC
998 bool 1045 bool
999 1046
1000config IRQ_CPU_OCTEON
1001 bool
1002
1003config MIPS_BOARDS_GEN 1047config MIPS_BOARDS_GEN
1004 bool 1048 bool
1005 1049
@@ -1359,8 +1403,6 @@ config CPU_SB1
1359config CPU_CAVIUM_OCTEON 1403config CPU_CAVIUM_OCTEON
1360 bool "Cavium Octeon processor" 1404 bool "Cavium Octeon processor"
1361 depends on SYS_HAS_CPU_CAVIUM_OCTEON 1405 depends on SYS_HAS_CPU_CAVIUM_OCTEON
1362 select IRQ_CPU
1363 select IRQ_CPU_OCTEON
1364 select CPU_HAS_PREFETCH 1406 select CPU_HAS_PREFETCH
1365 select CPU_SUPPORTS_64BIT_KERNEL 1407 select CPU_SUPPORTS_64BIT_KERNEL
1366 select SYS_SUPPORTS_SMP 1408 select SYS_SUPPORTS_SMP
@@ -1425,6 +1467,17 @@ config CPU_BMIPS5000
1425 help 1467 help
1426 Broadcom BMIPS5000 processors. 1468 Broadcom BMIPS5000 processors.
1427 1469
1470config CPU_XLR
1471 bool "Netlogic XLR SoC"
1472 depends on SYS_HAS_CPU_XLR
1473 select CPU_SUPPORTS_32BIT_KERNEL
1474 select CPU_SUPPORTS_64BIT_KERNEL
1475 select CPU_SUPPORTS_HIGHMEM
1476 select WEAK_ORDERING
1477 select WEAK_REORDERING_BEYOND_LLSC
1478 select CPU_SUPPORTS_HUGEPAGES
1479 help
1480 Netlogic Microsystems XLR/XLS processors.
1428endchoice 1481endchoice
1429 1482
1430if CPU_LOONGSON2F 1483if CPU_LOONGSON2F
@@ -1555,6 +1608,9 @@ config SYS_HAS_CPU_BMIPS4380
1555config SYS_HAS_CPU_BMIPS5000 1608config SYS_HAS_CPU_BMIPS5000
1556 bool 1609 bool
1557 1610
1611config SYS_HAS_CPU_XLR
1612 bool
1613
1558# 1614#
1559# CPU may reorder R->R, R->W, W->R, W->W 1615# CPU may reorder R->R, R->W, W->R, W->W
1560# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC 1616# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 53e3514ba10e..884819cd0607 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -191,6 +191,18 @@ endif
191# 191#
192include $(srctree)/arch/mips/Kbuild.platforms 192include $(srctree)/arch/mips/Kbuild.platforms
193 193
194#
195# NETLOGIC SOC Common (common)
196#
197cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
198cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
199
200#
201# NETLOGIC XLR/XLS SoC, Simulator and boards
202#
203core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/
204load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000
205
194cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic 206cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
195drivers-$(CONFIG_PCI) += arch/mips/pci/ 207drivers-$(CONFIG_PCI) += arch/mips/pci/
196 208
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index ca0506a8585a..3a5abb54d505 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -36,7 +36,7 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/sysdev.h> 39#include <linux/syscore_ops.h>
40#include <asm/mach-au1x00/au1000.h> 40#include <asm/mach-au1x00/au1000.h>
41#include <asm/mach-au1x00/au1xxx_dbdma.h> 41#include <asm/mach-au1x00/au1xxx_dbdma.h>
42 42
@@ -58,7 +58,8 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
58/* I couldn't find a macro that did this... */ 58/* I couldn't find a macro that did this... */
59#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) 59#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
60 60
61static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; 61static dbdma_global_t *dbdma_gptr =
62 (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
62static int dbdma_initialized; 63static int dbdma_initialized;
63 64
64static dbdev_tab_t dbdev_tab[] = { 65static dbdev_tab_t dbdev_tab[] = {
@@ -299,7 +300,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
299 if (ctp != NULL) { 300 if (ctp != NULL) {
300 memset(ctp, 0, sizeof(chan_tab_t)); 301 memset(ctp, 0, sizeof(chan_tab_t));
301 ctp->chan_index = chan = i; 302 ctp->chan_index = chan = i;
302 dcp = DDMA_CHANNEL_BASE; 303 dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
303 dcp += (0x0100 * chan); 304 dcp += (0x0100 * chan);
304 ctp->chan_ptr = (au1x_dma_chan_t *)dcp; 305 ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
305 cp = (au1x_dma_chan_t *)dcp; 306 cp = (au1x_dma_chan_t *)dcp;
@@ -958,105 +959,75 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
958} 959}
959 960
960 961
961struct alchemy_dbdma_sysdev { 962static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
962 struct sys_device sysdev;
963 u32 pm_regs[NUM_DBDMA_CHANS + 1][6];
964};
965 963
966static int alchemy_dbdma_suspend(struct sys_device *dev, 964static int alchemy_dbdma_suspend(void)
967 pm_message_t state)
968{ 965{
969 struct alchemy_dbdma_sysdev *sdev =
970 container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
971 int i; 966 int i;
972 u32 addr; 967 void __iomem *addr;
973 968
974 addr = DDMA_GLOBAL_BASE; 969 addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
975 sdev->pm_regs[0][0] = au_readl(addr + 0x00); 970 alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
976 sdev->pm_regs[0][1] = au_readl(addr + 0x04); 971 alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
977 sdev->pm_regs[0][2] = au_readl(addr + 0x08); 972 alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
978 sdev->pm_regs[0][3] = au_readl(addr + 0x0c); 973 alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
979 974
980 /* save channel configurations */ 975 /* save channel configurations */
981 for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { 976 addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
982 sdev->pm_regs[i][0] = au_readl(addr + 0x00); 977 for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
983 sdev->pm_regs[i][1] = au_readl(addr + 0x04); 978 alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
984 sdev->pm_regs[i][2] = au_readl(addr + 0x08); 979 alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
985 sdev->pm_regs[i][3] = au_readl(addr + 0x0c); 980 alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
986 sdev->pm_regs[i][4] = au_readl(addr + 0x10); 981 alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
987 sdev->pm_regs[i][5] = au_readl(addr + 0x14); 982 alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
983 alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
988 984
989 /* halt channel */ 985 /* halt channel */
990 au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00); 986 __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
991 au_sync(); 987 wmb();
992 while (!(au_readl(addr + 0x14) & 1)) 988 while (!(__raw_readl(addr + 0x14) & 1))
993 au_sync(); 989 wmb();
994 990
995 addr += 0x100; /* next channel base */ 991 addr += 0x100; /* next channel base */
996 } 992 }
997 /* disable channel interrupts */ 993 /* disable channel interrupts */
998 au_writel(0, DDMA_GLOBAL_BASE + 0x0c); 994 addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
999 au_sync(); 995 __raw_writel(0, addr + 0x0c);
996 wmb();
1000 997
1001 return 0; 998 return 0;
1002} 999}
1003 1000
1004static int alchemy_dbdma_resume(struct sys_device *dev) 1001static void alchemy_dbdma_resume(void)
1005{ 1002{
1006 struct alchemy_dbdma_sysdev *sdev =
1007 container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
1008 int i; 1003 int i;
1009 u32 addr; 1004 void __iomem *addr;
1010 1005
1011 addr = DDMA_GLOBAL_BASE; 1006 addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
1012 au_writel(sdev->pm_regs[0][0], addr + 0x00); 1007 __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
1013 au_writel(sdev->pm_regs[0][1], addr + 0x04); 1008 __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
1014 au_writel(sdev->pm_regs[0][2], addr + 0x08); 1009 __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
1015 au_writel(sdev->pm_regs[0][3], addr + 0x0c); 1010 __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
1016 1011
1017 /* restore channel configurations */ 1012 /* restore channel configurations */
1018 for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { 1013 addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
1019 au_writel(sdev->pm_regs[i][0], addr + 0x00); 1014 for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
1020 au_writel(sdev->pm_regs[i][1], addr + 0x04); 1015 __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
1021 au_writel(sdev->pm_regs[i][2], addr + 0x08); 1016 __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
1022 au_writel(sdev->pm_regs[i][3], addr + 0x0c); 1017 __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
1023 au_writel(sdev->pm_regs[i][4], addr + 0x10); 1018 __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
1024 au_writel(sdev->pm_regs[i][5], addr + 0x14); 1019 __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
1025 au_sync(); 1020 __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
1021 wmb();
1026 addr += 0x100; /* next channel base */ 1022 addr += 0x100; /* next channel base */
1027 } 1023 }
1028
1029 return 0;
1030} 1024}
1031 1025
1032static struct sysdev_class alchemy_dbdma_sysdev_class = { 1026static struct syscore_ops alchemy_dbdma_syscore_ops = {
1033 .name = "dbdma",
1034 .suspend = alchemy_dbdma_suspend, 1027 .suspend = alchemy_dbdma_suspend,
1035 .resume = alchemy_dbdma_resume, 1028 .resume = alchemy_dbdma_resume,
1036}; 1029};
1037 1030
1038static int __init alchemy_dbdma_sysdev_init(void)
1039{
1040 struct alchemy_dbdma_sysdev *sdev;
1041 int ret;
1042
1043 ret = sysdev_class_register(&alchemy_dbdma_sysdev_class);
1044 if (ret)
1045 return ret;
1046
1047 sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL);
1048 if (!sdev)
1049 return -ENOMEM;
1050
1051 sdev->sysdev.id = -1;
1052 sdev->sysdev.cls = &alchemy_dbdma_sysdev_class;
1053 ret = sysdev_register(&sdev->sysdev);
1054 if (ret)
1055 kfree(sdev);
1056
1057 return ret;
1058}
1059
1060static int __init au1xxx_dbdma_init(void) 1031static int __init au1xxx_dbdma_init(void)
1061{ 1032{
1062 int irq_nr, ret; 1033 int irq_nr, ret;
@@ -1084,11 +1055,7 @@ static int __init au1xxx_dbdma_init(void)
1084 else { 1055 else {
1085 dbdma_initialized = 1; 1056 dbdma_initialized = 1;
1086 printk(KERN_INFO "Alchemy DBDMA initialized\n"); 1057 printk(KERN_INFO "Alchemy DBDMA initialized\n");
1087 ret = alchemy_dbdma_sysdev_init(); 1058 register_syscore_ops(&alchemy_dbdma_syscore_ops);
1088 if (ret) {
1089 printk(KERN_ERR "DBDMA PM init failed\n");
1090 ret = 0;
1091 }
1092 } 1059 }
1093 1060
1094 return ret; 1061 return ret;
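
The dbdma.c hunk above replaces the old sysdev-based suspend/resume hooks with syscore_ops. A minimal sketch of that conversion pattern follows; the foo_* names are hypothetical stand-ins for the driver-specific parts and are not taken from the patch. Note that syscore callbacks receive no device argument and run with interrupts disabled, so per-device state moves into static storage.

#include <linux/init.h>
#include <linux/syscore_ops.h>

static unsigned long foo_pm_data[4];	/* replaces the per-device sysdev wrapper */

static int foo_suspend(void)
{
	/* save hardware registers into the static buffer, e.g.
	 * foo_pm_data[0] = __raw_readl(base + REG); */
	return 0;
}

static void foo_resume(void)
{
	/* restore hardware registers from the static buffer, e.g.
	 * __raw_writel(foo_pm_data[0], base + REG); resume cannot fail */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static int __init foo_pm_init(void)
{
	register_syscore_ops(&foo_syscore_ops);	/* returns void, never fails */
	return 0;
}
device_initcall(foo_pm_init);

This is also why the error handling around the old alchemy_dbdma_sysdev_init() disappears above: registering syscore ops cannot fail, so the init path only has to call register_syscore_ops().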
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
index d5278877891d..347980e79a89 100644
--- a/arch/mips/alchemy/common/dma.c
+++ b/arch/mips/alchemy/common/dma.c
@@ -58,6 +58,9 @@
58 * returned from request_dma. 58 * returned from request_dma.
59 */ 59 */
60 60
61/* DMA Channel register block spacing */
62#define DMA_CHANNEL_LEN 0x00000100
63
61DEFINE_SPINLOCK(au1000_dma_spin_lock); 64DEFINE_SPINLOCK(au1000_dma_spin_lock);
62 65
63struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { 66struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
@@ -77,22 +80,23 @@ static const struct dma_dev {
77 unsigned int fifo_addr; 80 unsigned int fifo_addr;
78 unsigned int dma_mode; 81 unsigned int dma_mode;
79} dma_dev_table[DMA_NUM_DEV] = { 82} dma_dev_table[DMA_NUM_DEV] = {
80 {UART0_ADDR + UART_TX, 0}, 83 { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */
81 {UART0_ADDR + UART_RX, 0}, 84 { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */
82 {0, 0}, 85 { 0, 0 }, /* DMA_REQ0 */
83 {0, 0}, 86 { 0, 0 }, /* DMA_REQ1 */
84 {AC97C_DATA, DMA_DW16 }, /* coherent */ 87 { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */
85 {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */ 88 { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */
86 {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC}, 89 { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */
87 {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC}, 90 { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
88 {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC}, 91 { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
89 {USBD_EP0WR, DMA_DW8 | DMA_NC}, 92 { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
90 {USBD_EP2WR, DMA_DW8 | DMA_NC}, 93 { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
91 {USBD_EP3WR, DMA_DW8 | DMA_NC}, 94 { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
92 {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC}, 95 { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
93 {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC}, 96 { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
94 {I2S_DATA, DMA_DW32 | DMA_NC}, 97 /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
95 {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC} 98 { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */
99 { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
96}; 100};
97 101
98int au1000_dma_read_proc(char *buf, char **start, off_t fpos, 102int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
@@ -123,10 +127,10 @@ int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
123 127
124/* Device FIFO addresses and default DMA modes - 2nd bank */ 128/* Device FIFO addresses and default DMA modes - 2nd bank */
125static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { 129static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
126 { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ 130 { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
127 { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */ 131 { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */
128 { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ 132 { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
129 { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */ 133 { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
130}; 134};
131 135
132void dump_au1000_dma_channel(unsigned int dmanr) 136void dump_au1000_dma_channel(unsigned int dmanr)
@@ -202,7 +206,7 @@ int request_au1000_dma(int dev_id, const char *dev_str,
202 } 206 }
203 207
204 /* fill it in */ 208 /* fill it in */
205 chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN; 209 chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN;
206 chan->dev_id = dev_id; 210 chan->dev_id = dev_id;
207 chan->dev_str = dev_str; 211 chan->dev_str = dev_str;
208 chan->fifo_addr = dev->fifo_addr; 212 chan->fifo_addr = dev->fifo_addr;
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 55dd7c888517..8b60ba0675e2 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -30,7 +30,7 @@
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/sysdev.h> 33#include <linux/syscore_ops.h>
34 34
35#include <asm/irq_cpu.h> 35#include <asm/irq_cpu.h>
36#include <asm/mipsregs.h> 36#include <asm/mipsregs.h>
@@ -39,6 +39,36 @@
39#include <asm/mach-pb1x00/pb1000.h> 39#include <asm/mach-pb1x00/pb1000.h>
40#endif 40#endif
41 41
42/* Interrupt Controller register offsets */
43#define IC_CFG0RD 0x40
44#define IC_CFG0SET 0x40
45#define IC_CFG0CLR 0x44
46#define IC_CFG1RD 0x48
47#define IC_CFG1SET 0x48
48#define IC_CFG1CLR 0x4C
49#define IC_CFG2RD 0x50
50#define IC_CFG2SET 0x50
51#define IC_CFG2CLR 0x54
52#define IC_REQ0INT 0x54
53#define IC_SRCRD 0x58
54#define IC_SRCSET 0x58
55#define IC_SRCCLR 0x5C
56#define IC_REQ1INT 0x5C
57#define IC_ASSIGNRD 0x60
58#define IC_ASSIGNSET 0x60
59#define IC_ASSIGNCLR 0x64
60#define IC_WAKERD 0x68
61#define IC_WAKESET 0x68
62#define IC_WAKECLR 0x6C
63#define IC_MASKRD 0x70
64#define IC_MASKSET 0x70
65#define IC_MASKCLR 0x74
66#define IC_RISINGRD 0x78
67#define IC_RISINGCLR 0x78
68#define IC_FALLINGRD 0x7C
69#define IC_FALLINGCLR 0x7C
70#define IC_TESTBIT 0x80
71
42static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); 72static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type);
43 73
44/* NOTE on interrupt priorities: The original writers of this code said: 74/* NOTE on interrupt priorities: The original writers of this code said:
@@ -221,89 +251,101 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = {
221static void au1x_ic0_unmask(struct irq_data *d) 251static void au1x_ic0_unmask(struct irq_data *d)
222{ 252{
223 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; 253 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
224 au_writel(1 << bit, IC0_MASKSET); 254 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
225 au_writel(1 << bit, IC0_WAKESET); 255
226 au_sync(); 256 __raw_writel(1 << bit, base + IC_MASKSET);
257 __raw_writel(1 << bit, base + IC_WAKESET);
258 wmb();
227} 259}
228 260
229static void au1x_ic1_unmask(struct irq_data *d) 261static void au1x_ic1_unmask(struct irq_data *d)
230{ 262{
231 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; 263 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
232 au_writel(1 << bit, IC1_MASKSET); 264 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
233 au_writel(1 << bit, IC1_WAKESET); 265
266 __raw_writel(1 << bit, base + IC_MASKSET);
267 __raw_writel(1 << bit, base + IC_WAKESET);
234 268
235/* very hacky. does the pb1000 cpld auto-disable this int? 269/* very hacky. does the pb1000 cpld auto-disable this int?
236 * nowhere in the current kernel sources is it disabled. --mlau 270 * nowhere in the current kernel sources is it disabled. --mlau
237 */ 271 */
238#if defined(CONFIG_MIPS_PB1000) 272#if defined(CONFIG_MIPS_PB1000)
239 if (d->irq == AU1000_GPIO15_INT) 273 if (d->irq == AU1000_GPIO15_INT)
240 au_writel(0x4000, PB1000_MDR); /* enable int */ 274 __raw_writel(0x4000, (void __iomem *)PB1000_MDR); /* enable int */
241#endif 275#endif
242 au_sync(); 276 wmb();
243} 277}
244 278
245static void au1x_ic0_mask(struct irq_data *d) 279static void au1x_ic0_mask(struct irq_data *d)
246{ 280{
247 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; 281 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
248 au_writel(1 << bit, IC0_MASKCLR); 282 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
249 au_writel(1 << bit, IC0_WAKECLR); 283
250 au_sync(); 284 __raw_writel(1 << bit, base + IC_MASKCLR);
285 __raw_writel(1 << bit, base + IC_WAKECLR);
286 wmb();
251} 287}
252 288
253static void au1x_ic1_mask(struct irq_data *d) 289static void au1x_ic1_mask(struct irq_data *d)
254{ 290{
255 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; 291 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
256 au_writel(1 << bit, IC1_MASKCLR); 292 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
257 au_writel(1 << bit, IC1_WAKECLR); 293
258 au_sync(); 294 __raw_writel(1 << bit, base + IC_MASKCLR);
295 __raw_writel(1 << bit, base + IC_WAKECLR);
296 wmb();
259} 297}
260 298
261static void au1x_ic0_ack(struct irq_data *d) 299static void au1x_ic0_ack(struct irq_data *d)
262{ 300{
263 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; 301 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
302 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
264 303
265 /* 304 /*
266 * This may assume that we don't get interrupts from 305 * This may assume that we don't get interrupts from
267 * both edges at once, or if we do, that we don't care. 306 * both edges at once, or if we do, that we don't care.
268 */ 307 */
269 au_writel(1 << bit, IC0_FALLINGCLR); 308 __raw_writel(1 << bit, base + IC_FALLINGCLR);
270 au_writel(1 << bit, IC0_RISINGCLR); 309 __raw_writel(1 << bit, base + IC_RISINGCLR);
271 au_sync(); 310 wmb();
272} 311}
273 312
274static void au1x_ic1_ack(struct irq_data *d) 313static void au1x_ic1_ack(struct irq_data *d)
275{ 314{
276 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; 315 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
316 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
277 317
278 /* 318 /*
279 * This may assume that we don't get interrupts from 319 * This may assume that we don't get interrupts from
280 * both edges at once, or if we do, that we don't care. 320 * both edges at once, or if we do, that we don't care.
281 */ 321 */
282 au_writel(1 << bit, IC1_FALLINGCLR); 322 __raw_writel(1 << bit, base + IC_FALLINGCLR);
283 au_writel(1 << bit, IC1_RISINGCLR); 323 __raw_writel(1 << bit, base + IC_RISINGCLR);
284 au_sync(); 324 wmb();
285} 325}
286 326
287static void au1x_ic0_maskack(struct irq_data *d) 327static void au1x_ic0_maskack(struct irq_data *d)
288{ 328{
289 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; 329 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
330 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
290 331
291 au_writel(1 << bit, IC0_WAKECLR); 332 __raw_writel(1 << bit, base + IC_WAKECLR);
292 au_writel(1 << bit, IC0_MASKCLR); 333 __raw_writel(1 << bit, base + IC_MASKCLR);
293 au_writel(1 << bit, IC0_RISINGCLR); 334 __raw_writel(1 << bit, base + IC_RISINGCLR);
294 au_writel(1 << bit, IC0_FALLINGCLR); 335 __raw_writel(1 << bit, base + IC_FALLINGCLR);
295 au_sync(); 336 wmb();
296} 337}
297 338
298static void au1x_ic1_maskack(struct irq_data *d) 339static void au1x_ic1_maskack(struct irq_data *d)
299{ 340{
300 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; 341 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
342 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
301 343
302 au_writel(1 << bit, IC1_WAKECLR); 344 __raw_writel(1 << bit, base + IC_WAKECLR);
303 au_writel(1 << bit, IC1_MASKCLR); 345 __raw_writel(1 << bit, base + IC_MASKCLR);
304 au_writel(1 << bit, IC1_RISINGCLR); 346 __raw_writel(1 << bit, base + IC_RISINGCLR);
305 au_writel(1 << bit, IC1_FALLINGCLR); 347 __raw_writel(1 << bit, base + IC_FALLINGCLR);
306 au_sync(); 348 wmb();
307} 349}
308 350
309static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) 351static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
@@ -318,13 +360,13 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
318 return -EINVAL; 360 return -EINVAL;
319 361
320 local_irq_save(flags); 362 local_irq_save(flags);
321 wakemsk = au_readl(SYS_WAKEMSK); 363 wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK);
322 if (on) 364 if (on)
323 wakemsk |= 1 << bit; 365 wakemsk |= 1 << bit;
324 else 366 else
325 wakemsk &= ~(1 << bit); 367 wakemsk &= ~(1 << bit);
326 au_writel(wakemsk, SYS_WAKEMSK); 368 __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK);
327 au_sync(); 369 wmb();
328 local_irq_restore(flags); 370 local_irq_restore(flags);
329 371
330 return 0; 372 return 0;
@@ -356,81 +398,74 @@ static struct irq_chip au1x_ic1_chip = {
356static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) 398static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
357{ 399{
358 struct irq_chip *chip; 400 struct irq_chip *chip;
359 unsigned long icr[6]; 401 unsigned int bit, irq = d->irq;
360 unsigned int bit, ic, irq = d->irq;
361 irq_flow_handler_t handler = NULL; 402 irq_flow_handler_t handler = NULL;
362 unsigned char *name = NULL; 403 unsigned char *name = NULL;
404 void __iomem *base;
363 int ret; 405 int ret;
364 406
365 if (irq >= AU1000_INTC1_INT_BASE) { 407 if (irq >= AU1000_INTC1_INT_BASE) {
366 bit = irq - AU1000_INTC1_INT_BASE; 408 bit = irq - AU1000_INTC1_INT_BASE;
367 chip = &au1x_ic1_chip; 409 chip = &au1x_ic1_chip;
368 ic = 1; 410 base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
369 } else { 411 } else {
370 bit = irq - AU1000_INTC0_INT_BASE; 412 bit = irq - AU1000_INTC0_INT_BASE;
371 chip = &au1x_ic0_chip; 413 chip = &au1x_ic0_chip;
372 ic = 0; 414 base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
373 } 415 }
374 416
375 if (bit > 31) 417 if (bit > 31)
376 return -EINVAL; 418 return -EINVAL;
377 419
378 icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET;
379 icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET;
380 icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET;
381 icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR;
382 icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR;
383 icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR;
384
385 ret = 0; 420 ret = 0;
386 421
387 switch (flow_type) { /* cfgregs 2:1:0 */ 422 switch (flow_type) { /* cfgregs 2:1:0 */
388 case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ 423 case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
389 au_writel(1 << bit, icr[5]); 424 __raw_writel(1 << bit, base + IC_CFG2CLR);
390 au_writel(1 << bit, icr[4]); 425 __raw_writel(1 << bit, base + IC_CFG1CLR);
391 au_writel(1 << bit, icr[0]); 426 __raw_writel(1 << bit, base + IC_CFG0SET);
392 handler = handle_edge_irq; 427 handler = handle_edge_irq;
393 name = "riseedge"; 428 name = "riseedge";
394 break; 429 break;
395 case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ 430 case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
396 au_writel(1 << bit, icr[5]); 431 __raw_writel(1 << bit, base + IC_CFG2CLR);
397 au_writel(1 << bit, icr[1]); 432 __raw_writel(1 << bit, base + IC_CFG1SET);
398 au_writel(1 << bit, icr[3]); 433 __raw_writel(1 << bit, base + IC_CFG0CLR);
399 handler = handle_edge_irq; 434 handler = handle_edge_irq;
400 name = "falledge"; 435 name = "falledge";
401 break; 436 break;
402 case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ 437 case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
403 au_writel(1 << bit, icr[5]); 438 __raw_writel(1 << bit, base + IC_CFG2CLR);
404 au_writel(1 << bit, icr[1]); 439 __raw_writel(1 << bit, base + IC_CFG1SET);
405 au_writel(1 << bit, icr[0]); 440 __raw_writel(1 << bit, base + IC_CFG0SET);
406 handler = handle_edge_irq; 441 handler = handle_edge_irq;
407 name = "bothedge"; 442 name = "bothedge";
408 break; 443 break;
409 case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ 444 case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
410 au_writel(1 << bit, icr[2]); 445 __raw_writel(1 << bit, base + IC_CFG2SET);
411 au_writel(1 << bit, icr[4]); 446 __raw_writel(1 << bit, base + IC_CFG1CLR);
412 au_writel(1 << bit, icr[0]); 447 __raw_writel(1 << bit, base + IC_CFG0SET);
413 handler = handle_level_irq; 448 handler = handle_level_irq;
414 name = "hilevel"; 449 name = "hilevel";
415 break; 450 break;
416 case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ 451 case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
417 au_writel(1 << bit, icr[2]); 452 __raw_writel(1 << bit, base + IC_CFG2SET);
418 au_writel(1 << bit, icr[1]); 453 __raw_writel(1 << bit, base + IC_CFG1SET);
419 au_writel(1 << bit, icr[3]); 454 __raw_writel(1 << bit, base + IC_CFG0CLR);
420 handler = handle_level_irq; 455 handler = handle_level_irq;
421 name = "lowlevel"; 456 name = "lowlevel";
422 break; 457 break;
423 case IRQ_TYPE_NONE: /* 0:0:0 */ 458 case IRQ_TYPE_NONE: /* 0:0:0 */
424 au_writel(1 << bit, icr[5]); 459 __raw_writel(1 << bit, base + IC_CFG2CLR);
425 au_writel(1 << bit, icr[4]); 460 __raw_writel(1 << bit, base + IC_CFG1CLR);
426 au_writel(1 << bit, icr[3]); 461 __raw_writel(1 << bit, base + IC_CFG0CLR);
427 break; 462 break;
428 default: 463 default:
429 ret = -EINVAL; 464 ret = -EINVAL;
430 } 465 }
431 __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); 466 __irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
432 467
433 au_sync(); 468 wmb();
434 469
435 return ret; 470 return ret;
436} 471}
@@ -444,21 +479,21 @@ asmlinkage void plat_irq_dispatch(void)
444 off = MIPS_CPU_IRQ_BASE + 7; 479 off = MIPS_CPU_IRQ_BASE + 7;
445 goto handle; 480 goto handle;
446 } else if (pending & CAUSEF_IP2) { 481 } else if (pending & CAUSEF_IP2) {
447 s = IC0_REQ0INT; 482 s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ0INT;
448 off = AU1000_INTC0_INT_BASE; 483 off = AU1000_INTC0_INT_BASE;
449 } else if (pending & CAUSEF_IP3) { 484 } else if (pending & CAUSEF_IP3) {
450 s = IC0_REQ1INT; 485 s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ1INT;
451 off = AU1000_INTC0_INT_BASE; 486 off = AU1000_INTC0_INT_BASE;
452 } else if (pending & CAUSEF_IP4) { 487 } else if (pending & CAUSEF_IP4) {
453 s = IC1_REQ0INT; 488 s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ0INT;
454 off = AU1000_INTC1_INT_BASE; 489 off = AU1000_INTC1_INT_BASE;
455 } else if (pending & CAUSEF_IP5) { 490 } else if (pending & CAUSEF_IP5) {
456 s = IC1_REQ1INT; 491 s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ1INT;
457 off = AU1000_INTC1_INT_BASE; 492 off = AU1000_INTC1_INT_BASE;
458 } else 493 } else
459 goto spurious; 494 goto spurious;
460 495
461 s = au_readl(s); 496 s = __raw_readl((void __iomem *)s);
462 if (unlikely(!s)) { 497 if (unlikely(!s)) {
463spurious: 498spurious:
464 spurious_interrupt(); 499 spurious_interrupt();
@@ -469,48 +504,42 @@ handle:
469 do_IRQ(off); 504 do_IRQ(off);
470} 505}
471 506
507
508static inline void ic_init(void __iomem *base)
509{
510 /* initialize interrupt controller to a safe state */
511 __raw_writel(0xffffffff, base + IC_CFG0CLR);
512 __raw_writel(0xffffffff, base + IC_CFG1CLR);
513 __raw_writel(0xffffffff, base + IC_CFG2CLR);
514 __raw_writel(0xffffffff, base + IC_MASKCLR);
515 __raw_writel(0xffffffff, base + IC_ASSIGNCLR);
516 __raw_writel(0xffffffff, base + IC_WAKECLR);
517 __raw_writel(0xffffffff, base + IC_SRCSET);
518 __raw_writel(0xffffffff, base + IC_FALLINGCLR);
519 __raw_writel(0xffffffff, base + IC_RISINGCLR);
520 __raw_writel(0x00000000, base + IC_TESTBIT);
521 wmb();
522}
523
472static void __init au1000_init_irq(struct au1xxx_irqmap *map) 524static void __init au1000_init_irq(struct au1xxx_irqmap *map)
473{ 525{
474 unsigned int bit, irq_nr; 526 unsigned int bit, irq_nr;
475 int i; 527 void __iomem *base;
476
477 /*
478 * Initialize interrupt controllers to a safe state.
479 */
480 au_writel(0xffffffff, IC0_CFG0CLR);
481 au_writel(0xffffffff, IC0_CFG1CLR);
482 au_writel(0xffffffff, IC0_CFG2CLR);
483 au_writel(0xffffffff, IC0_MASKCLR);
484 au_writel(0xffffffff, IC0_ASSIGNCLR);
485 au_writel(0xffffffff, IC0_WAKECLR);
486 au_writel(0xffffffff, IC0_SRCSET);
487 au_writel(0xffffffff, IC0_FALLINGCLR);
488 au_writel(0xffffffff, IC0_RISINGCLR);
489 au_writel(0x00000000, IC0_TESTBIT);
490
491 au_writel(0xffffffff, IC1_CFG0CLR);
492 au_writel(0xffffffff, IC1_CFG1CLR);
493 au_writel(0xffffffff, IC1_CFG2CLR);
494 au_writel(0xffffffff, IC1_MASKCLR);
495 au_writel(0xffffffff, IC1_ASSIGNCLR);
496 au_writel(0xffffffff, IC1_WAKECLR);
497 au_writel(0xffffffff, IC1_SRCSET);
498 au_writel(0xffffffff, IC1_FALLINGCLR);
499 au_writel(0xffffffff, IC1_RISINGCLR);
500 au_writel(0x00000000, IC1_TESTBIT);
501 528
529 ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR));
530 ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR));
502 mips_cpu_irq_init(); 531 mips_cpu_irq_init();
503 532
504 /* register all 64 possible IC0+IC1 irq sources as type "none". 533 /* register all 64 possible IC0+IC1 irq sources as type "none".
505 * Use set_irq_type() to set edge/level behaviour at runtime. 534 * Use set_irq_type() to set edge/level behaviour at runtime.
506 */ 535 */
507 for (i = AU1000_INTC0_INT_BASE; 536 for (irq_nr = AU1000_INTC0_INT_BASE;
508 (i < AU1000_INTC0_INT_BASE + 32); i++) 537 (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++)
509 au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); 538 au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
510 539
511 for (i = AU1000_INTC1_INT_BASE; 540 for (irq_nr = AU1000_INTC1_INT_BASE;
512 (i < AU1000_INTC1_INT_BASE + 32); i++) 541 (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++)
513 au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); 542 au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
514 543
515 /* 544 /*
516 * Initialize IC0, which is fixed per processor. 545 * Initialize IC0, which is fixed per processor.
@@ -520,13 +549,13 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map)
520 549
521 if (irq_nr >= AU1000_INTC1_INT_BASE) { 550 if (irq_nr >= AU1000_INTC1_INT_BASE) {
522 bit = irq_nr - AU1000_INTC1_INT_BASE; 551 bit = irq_nr - AU1000_INTC1_INT_BASE;
523 if (map->im_request) 552 base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
524 au_writel(1 << bit, IC1_ASSIGNSET);
525 } else { 553 } else {
526 bit = irq_nr - AU1000_INTC0_INT_BASE; 554 bit = irq_nr - AU1000_INTC0_INT_BASE;
527 if (map->im_request) 555 base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
528 au_writel(1 << bit, IC0_ASSIGNSET);
529 } 556 }
557 if (map->im_request)
558 __raw_writel(1 << bit, base + IC_ASSIGNSET);
530 559
531 au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); 560 au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type);
532 ++map; 561 ++map;
@@ -556,90 +585,62 @@ void __init arch_init_irq(void)
556 } 585 }
557} 586}
558 587
559struct alchemy_ic_sysdev {
560 struct sys_device sysdev;
561 void __iomem *base;
562 unsigned long pmdata[7];
563};
564 588
565static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state) 589static unsigned long alchemy_ic_pmdata[7 * 2];
566{
567 struct alchemy_ic_sysdev *icdev =
568 container_of(dev, struct alchemy_ic_sysdev, sysdev);
569 590
570 icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD); 591static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d)
571 icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD); 592{
572 icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD); 593 d[0] = __raw_readl(base + IC_CFG0RD);
573 icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD); 594 d[1] = __raw_readl(base + IC_CFG1RD);
574 icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD); 595 d[2] = __raw_readl(base + IC_CFG2RD);
575 icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD); 596 d[3] = __raw_readl(base + IC_SRCRD);
576 icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD); 597 d[4] = __raw_readl(base + IC_ASSIGNRD);
577 598 d[5] = __raw_readl(base + IC_WAKERD);
578 return 0; 599 d[6] = __raw_readl(base + IC_MASKRD);
600 ic_init(base); /* shut it up too while at it */
579} 601}
580 602
581static int alchemy_ic_resume(struct sys_device *dev) 603static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d)
582{ 604{
583 struct alchemy_ic_sysdev *icdev = 605 ic_init(base);
584 container_of(dev, struct alchemy_ic_sysdev, sysdev); 606
585 607 __raw_writel(d[0], base + IC_CFG0SET);
586 __raw_writel(0xffffffff, icdev->base + IC_MASKCLR); 608 __raw_writel(d[1], base + IC_CFG1SET);
587 __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR); 609 __raw_writel(d[2], base + IC_CFG2SET);
588 __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR); 610 __raw_writel(d[3], base + IC_SRCSET);
589 __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR); 611 __raw_writel(d[4], base + IC_ASSIGNSET);
590 __raw_writel(0xffffffff, icdev->base + IC_SRCCLR); 612 __raw_writel(d[5], base + IC_WAKESET);
591 __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR);
592 __raw_writel(0xffffffff, icdev->base + IC_WAKECLR);
593 __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR);
594 __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR);
595 __raw_writel(0x00000000, icdev->base + IC_TESTBIT);
596 wmb();
597 __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET);
598 __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET);
599 __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET);
600 __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET);
601 __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET);
602 __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET);
603 wmb(); 613 wmb();
604 614
605 __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET); 615 __raw_writel(d[6], base + IC_MASKSET);
606 wmb(); 616 wmb();
617}
607 618
619static int alchemy_ic_suspend(void)
620{
621 alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
622 alchemy_ic_pmdata);
623 alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
624 &alchemy_ic_pmdata[7]);
608 return 0; 625 return 0;
609} 626}
610 627
611static struct sysdev_class alchemy_ic_sysdev_class = { 628static void alchemy_ic_resume(void)
612 .name = "ic", 629{
630 alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
631 &alchemy_ic_pmdata[7]);
632 alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
633 alchemy_ic_pmdata);
634}
635
636static struct syscore_ops alchemy_ic_syscore_ops = {
613 .suspend = alchemy_ic_suspend, 637 .suspend = alchemy_ic_suspend,
614 .resume = alchemy_ic_resume, 638 .resume = alchemy_ic_resume,
615}; 639};
616 640
617static int __init alchemy_ic_sysdev_init(void) 641static int __init alchemy_ic_pm_init(void)
618{ 642{
619 struct alchemy_ic_sysdev *icdev; 643 register_syscore_ops(&alchemy_ic_syscore_ops);
620 unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR };
621 int err, i;
622
623 err = sysdev_class_register(&alchemy_ic_sysdev_class);
624 if (err)
625 return err;
626
627 for (i = 0; i < 2; i++) {
628 icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL);
629 if (!icdev)
630 return -ENOMEM;
631
632 icdev->base = ioremap(icbase[i], 0x1000);
633
634 icdev->sysdev.id = i;
635 icdev->sysdev.cls = &alchemy_ic_sysdev_class;
636 err = sysdev_register(&icdev->sysdev);
637 if (err) {
638 kfree(icdev);
639 return err;
640 }
641 }
642
643 return 0; 644 return 0;
644} 645}
645device_initcall(alchemy_ic_sysdev_init); 646device_initcall(alchemy_ic_pm_init);
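
Throughout irq.c the patch trades the legacy au_writel()/au_readl()/au_sync() accessors for __raw_writel()/__raw_readl() on a KSEG1-mapped base plus a register offset, followed by an explicit wmb(). A minimal sketch of that idiom is shown below; EXAMPLE_PHYS_ADDR and EXAMPLE_SET_REG are hypothetical placeholders, not registers from the patch.

#include <linux/io.h>
#include <asm/addrspace.h>

#define EXAMPLE_PHYS_ADDR	0x10400000	/* hypothetical device base */
#define EXAMPLE_SET_REG		0x70		/* hypothetical "set bits" register */

static void example_set_bit(unsigned int bit)
{
	/* uncached KSEG1 mapping of the register block, computed at the call site */
	void __iomem *base = (void __iomem *)KSEG1ADDR(EXAMPLE_PHYS_ADDR);

	__raw_writel(1 << bit, base + EXAMPLE_SET_REG);
	wmb();	/* stands in for the old au_sync(): order the write before continuing */
}

Passing the base pointer around (as au1x_ic_settype() now does) also removes the per-controller icr[] lookup table: one set of IC_* offsets serves both interrupt controllers.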
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 9e7814db3d03..3b2c18b14341 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -13,9 +13,10 @@
13 13
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/init.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/serial_8250.h> 18#include <linux/serial_8250.h>
18#include <linux/init.h> 19#include <linux/slab.h>
19 20
20#include <asm/mach-au1x00/au1xxx.h> 21#include <asm/mach-au1x00/au1xxx.h>
21#include <asm/mach-au1x00/au1xxx_dbdma.h> 22#include <asm/mach-au1x00/au1xxx_dbdma.h>
@@ -30,21 +31,12 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
30#ifdef CONFIG_SERIAL_8250 31#ifdef CONFIG_SERIAL_8250
31 switch (state) { 32 switch (state) {
32 case 0: 33 case 0:
33 if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { 34 alchemy_uart_enable(CPHYSADDR(port->membase));
34 /* power-on sequence as suggested in the databooks */
35 __raw_writel(0, port->membase + UART_MOD_CNTRL);
36 wmb();
37 __raw_writel(1, port->membase + UART_MOD_CNTRL);
38 wmb();
39 }
40 __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */
41 wmb();
42 serial8250_do_pm(port, state, old_state); 35 serial8250_do_pm(port, state, old_state);
43 break; 36 break;
44 case 3: /* power off */ 37 case 3: /* power off */
45 serial8250_do_pm(port, state, old_state); 38 serial8250_do_pm(port, state, old_state);
46 __raw_writel(0, port->membase + UART_MOD_CNTRL); 39 alchemy_uart_disable(CPHYSADDR(port->membase));
47 wmb();
48 break; 40 break;
49 default: 41 default:
50 serial8250_do_pm(port, state, old_state); 42 serial8250_do_pm(port, state, old_state);
@@ -65,38 +57,60 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
65 .pm = alchemy_8250_pm, \ 57 .pm = alchemy_8250_pm, \
66 } 58 }
67 59
68static struct plat_serial8250_port au1x00_uart_data[] = { 60static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
69#if defined(CONFIG_SOC_AU1000) 61 [ALCHEMY_CPU_AU1000] = {
70 PORT(UART0_PHYS_ADDR, AU1000_UART0_INT), 62 PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
71 PORT(UART1_PHYS_ADDR, AU1000_UART1_INT), 63 PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
72 PORT(UART2_PHYS_ADDR, AU1000_UART2_INT), 64 PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
73 PORT(UART3_PHYS_ADDR, AU1000_UART3_INT), 65 PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
74#elif defined(CONFIG_SOC_AU1500) 66 },
75 PORT(UART0_PHYS_ADDR, AU1500_UART0_INT), 67 [ALCHEMY_CPU_AU1500] = {
76 PORT(UART3_PHYS_ADDR, AU1500_UART3_INT), 68 PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
77#elif defined(CONFIG_SOC_AU1100) 69 PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
78 PORT(UART0_PHYS_ADDR, AU1100_UART0_INT), 70 },
79 PORT(UART1_PHYS_ADDR, AU1100_UART1_INT), 71 [ALCHEMY_CPU_AU1100] = {
80 PORT(UART3_PHYS_ADDR, AU1100_UART3_INT), 72 PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
81#elif defined(CONFIG_SOC_AU1550) 73 PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
82 PORT(UART0_PHYS_ADDR, AU1550_UART0_INT), 74 PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
83 PORT(UART1_PHYS_ADDR, AU1550_UART1_INT), 75 },
84 PORT(UART3_PHYS_ADDR, AU1550_UART3_INT), 76 [ALCHEMY_CPU_AU1550] = {
85#elif defined(CONFIG_SOC_AU1200) 77 PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
86 PORT(UART0_PHYS_ADDR, AU1200_UART0_INT), 78 PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
87 PORT(UART1_PHYS_ADDR, AU1200_UART1_INT), 79 PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
88#endif 80 },
89 { }, 81 [ALCHEMY_CPU_AU1200] = {
82 PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
83 PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
84 },
90}; 85};
91 86
92static struct platform_device au1xx0_uart_device = { 87static struct platform_device au1xx0_uart_device = {
93 .name = "serial8250", 88 .name = "serial8250",
94 .id = PLAT8250_DEV_AU1X00, 89 .id = PLAT8250_DEV_AU1X00,
95 .dev = {
96 .platform_data = au1x00_uart_data,
97 },
98}; 90};
99 91
92static void __init alchemy_setup_uarts(int ctype)
93{
94 unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
95 int s = sizeof(struct plat_serial8250_port);
96 int c = alchemy_get_uarts(ctype);
97 struct plat_serial8250_port *ports;
98
99 ports = kzalloc(s * (c + 1), GFP_KERNEL);
100 if (!ports) {
101 printk(KERN_INFO "Alchemy: no memory for UART data\n");
102 return;
103 }
104 memcpy(ports, au1x00_uart_data[ctype], s * c);
105 au1xx0_uart_device.dev.platform_data = ports;
106
107 /* Fill up uartclk. */
108 for (s = 0; s < c; s++)
109 ports[s].uartclk = uartclk;
110 if (platform_device_register(&au1xx0_uart_device))
111 printk(KERN_INFO "Alchemy: failed to register UARTs\n");
112}
113
100/* OHCI (USB full speed host controller) */ 114/* OHCI (USB full speed host controller) */
101static struct resource au1xxx_usb_ohci_resources[] = { 115static struct resource au1xxx_usb_ohci_resources[] = {
102 [0] = { 116 [0] = {
@@ -269,8 +283,8 @@ extern struct au1xmmc_platform_data au1xmmc_platdata[2];
269 283
270static struct resource au1200_mmc0_resources[] = { 284static struct resource au1200_mmc0_resources[] = {
271 [0] = { 285 [0] = {
272 .start = SD0_PHYS_ADDR, 286 .start = AU1100_SD0_PHYS_ADDR,
273 .end = SD0_PHYS_ADDR + 0x7ffff, 287 .end = AU1100_SD0_PHYS_ADDR + 0xfff,
274 .flags = IORESOURCE_MEM, 288 .flags = IORESOURCE_MEM,
275 }, 289 },
276 [1] = { 290 [1] = {
@@ -305,8 +319,8 @@ static struct platform_device au1200_mmc0_device = {
305#ifndef CONFIG_MIPS_DB1200 319#ifndef CONFIG_MIPS_DB1200
306static struct resource au1200_mmc1_resources[] = { 320static struct resource au1200_mmc1_resources[] = {
307 [0] = { 321 [0] = {
308 .start = SD1_PHYS_ADDR, 322 .start = AU1100_SD1_PHYS_ADDR,
309 .end = SD1_PHYS_ADDR + 0x7ffff, 323 .end = AU1100_SD1_PHYS_ADDR + 0xfff,
310 .flags = IORESOURCE_MEM, 324 .flags = IORESOURCE_MEM,
311 }, 325 },
312 [1] = { 326 [1] = {
@@ -359,15 +373,16 @@ static struct platform_device pbdb_smbus_device = {
359#endif 373#endif
360 374
361/* Macro to help defining the Ethernet MAC resources */ 375/* Macro to help defining the Ethernet MAC resources */
376#define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */
362#define MAC_RES(_base, _enable, _irq) \ 377#define MAC_RES(_base, _enable, _irq) \
363 { \ 378 { \
364 .start = CPHYSADDR(_base), \ 379 .start = _base, \
365 .end = CPHYSADDR(_base + 0xffff), \ 380 .end = _base + 0xffff, \
366 .flags = IORESOURCE_MEM, \ 381 .flags = IORESOURCE_MEM, \
367 }, \ 382 }, \
368 { \ 383 { \
369 .start = CPHYSADDR(_enable), \ 384 .start = _enable, \
370 .end = CPHYSADDR(_enable + 0x3), \ 385 .end = _enable + 0x3, \
371 .flags = IORESOURCE_MEM, \ 386 .flags = IORESOURCE_MEM, \
372 }, \ 387 }, \
373 { \ 388 { \
@@ -376,19 +391,29 @@ static struct platform_device pbdb_smbus_device = {
376 .flags = IORESOURCE_IRQ \ 391 .flags = IORESOURCE_IRQ \
377 } 392 }
378 393
379static struct resource au1xxx_eth0_resources[] = { 394static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
380#if defined(CONFIG_SOC_AU1000) 395 [ALCHEMY_CPU_AU1000] = {
381 MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT), 396 MAC_RES(AU1000_MAC0_PHYS_ADDR,
382#elif defined(CONFIG_SOC_AU1100) 397 AU1000_MACEN_PHYS_ADDR,
383 MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT), 398 AU1000_MAC0_DMA_INT)
384#elif defined(CONFIG_SOC_AU1550) 399 },
385 MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT), 400 [ALCHEMY_CPU_AU1500] = {
386#elif defined(CONFIG_SOC_AU1500) 401 MAC_RES(AU1500_MAC0_PHYS_ADDR,
387 MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT), 402 AU1500_MACEN_PHYS_ADDR,
388#endif 403 AU1500_MAC0_DMA_INT)
404 },
405 [ALCHEMY_CPU_AU1100] = {
406 MAC_RES(AU1000_MAC0_PHYS_ADDR,
407 AU1000_MACEN_PHYS_ADDR,
408 AU1100_MAC0_DMA_INT)
409 },
410 [ALCHEMY_CPU_AU1550] = {
411 MAC_RES(AU1000_MAC0_PHYS_ADDR,
412 AU1000_MACEN_PHYS_ADDR,
413 AU1550_MAC0_DMA_INT)
414 },
389}; 415};
390 416
391
392static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { 417static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
393 .phy1_search_mac0 = 1, 418 .phy1_search_mac0 = 1,
394}; 419};
@@ -396,20 +421,26 @@ static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
396static struct platform_device au1xxx_eth0_device = { 421static struct platform_device au1xxx_eth0_device = {
397 .name = "au1000-eth", 422 .name = "au1000-eth",
398 .id = 0, 423 .id = 0,
399 .num_resources = ARRAY_SIZE(au1xxx_eth0_resources), 424 .num_resources = MAC_RES_COUNT,
400 .resource = au1xxx_eth0_resources,
401 .dev.platform_data = &au1xxx_eth0_platform_data, 425 .dev.platform_data = &au1xxx_eth0_platform_data,
402}; 426};
403 427
404#ifndef CONFIG_SOC_AU1100 428static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
405static struct resource au1xxx_eth1_resources[] = { 429 [ALCHEMY_CPU_AU1000] = {
406#if defined(CONFIG_SOC_AU1000) 430 MAC_RES(AU1000_MAC1_PHYS_ADDR,
407 MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT), 431 AU1000_MACEN_PHYS_ADDR + 4,
408#elif defined(CONFIG_SOC_AU1550) 432 AU1000_MAC1_DMA_INT)
409 MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT), 433 },
410#elif defined(CONFIG_SOC_AU1500) 434 [ALCHEMY_CPU_AU1500] = {
411 MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT), 435 MAC_RES(AU1500_MAC1_PHYS_ADDR,
412#endif 436 AU1500_MACEN_PHYS_ADDR + 4,
437 AU1500_MAC1_DMA_INT)
438 },
439 [ALCHEMY_CPU_AU1550] = {
440 MAC_RES(AU1000_MAC1_PHYS_ADDR,
441 AU1000_MACEN_PHYS_ADDR + 4,
442 AU1550_MAC1_DMA_INT)
443 },
413}; 444};
414 445
415static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { 446static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
@@ -419,11 +450,9 @@ static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
419static struct platform_device au1xxx_eth1_device = { 450static struct platform_device au1xxx_eth1_device = {
420 .name = "au1000-eth", 451 .name = "au1000-eth",
421 .id = 1, 452 .id = 1,
422 .num_resources = ARRAY_SIZE(au1xxx_eth1_resources), 453 .num_resources = MAC_RES_COUNT,
423 .resource = au1xxx_eth1_resources,
424 .dev.platform_data = &au1xxx_eth1_platform_data, 454 .dev.platform_data = &au1xxx_eth1_platform_data,
425}; 455};
426#endif
427 456
428void __init au1xxx_override_eth_cfg(unsigned int port, 457void __init au1xxx_override_eth_cfg(unsigned int port,
429 struct au1000_eth_platform_data *eth_data) 458 struct au1000_eth_platform_data *eth_data)
@@ -434,15 +463,65 @@ void __init au1xxx_override_eth_cfg(unsigned int port,
434 if (port == 0) 463 if (port == 0)
435 memcpy(&au1xxx_eth0_platform_data, eth_data, 464 memcpy(&au1xxx_eth0_platform_data, eth_data,
436 sizeof(struct au1000_eth_platform_data)); 465 sizeof(struct au1000_eth_platform_data));
437#ifndef CONFIG_SOC_AU1100
438 else 466 else
439 memcpy(&au1xxx_eth1_platform_data, eth_data, 467 memcpy(&au1xxx_eth1_platform_data, eth_data,
440 sizeof(struct au1000_eth_platform_data)); 468 sizeof(struct au1000_eth_platform_data));
441#endif 469}
470
471static void __init alchemy_setup_macs(int ctype)
472{
473 int ret, i;
474 unsigned char ethaddr[6];
475 struct resource *macres;
476
477 /* Handle 1st MAC */
478 if (alchemy_get_macs(ctype) < 1)
479 return;
480
481 macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
482 if (!macres) {
483 printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
484 return;
485 }
486 memcpy(macres, au1xxx_eth0_resources[ctype],
487 sizeof(struct resource) * MAC_RES_COUNT);
488 au1xxx_eth0_device.resource = macres;
489
490 i = prom_get_ethernet_addr(ethaddr);
491 if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
492 memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
493
494 ret = platform_device_register(&au1xxx_eth0_device);
495 if (ret)
496 printk(KERN_INFO "Alchemy: failed to register MAC0\n");
497
498
499 /* Handle 2nd MAC */
500 if (alchemy_get_macs(ctype) < 2)
501 return;
502
503 macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
504 if (!macres) {
505 printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
506 return;
507 }
508 memcpy(macres, au1xxx_eth1_resources[ctype],
509 sizeof(struct resource) * MAC_RES_COUNT);
510 au1xxx_eth1_device.resource = macres;
511
512 ethaddr[5] += 1; /* next addr for 2nd MAC */
513 if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
514 memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
515
516 /* Register second MAC if enabled in pinfunc */
517 if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) {
518 ret = platform_device_register(&au1xxx_eth1_device);
519 if (ret)
520 printk(KERN_INFO "Alchemy: failed to register MAC1\n");
521 }
442} 522}
443 523
444static struct platform_device *au1xxx_platform_devices[] __initdata = { 524static struct platform_device *au1xxx_platform_devices[] __initdata = {
445 &au1xx0_uart_device,
446 &au1xxx_usb_ohci_device, 525 &au1xxx_usb_ohci_device,
447#ifdef CONFIG_FB_AU1100 526#ifdef CONFIG_FB_AU1100
448 &au1100_lcd_device, 527 &au1100_lcd_device,
@@ -460,36 +539,17 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
460#ifdef SMBUS_PSC_BASE 539#ifdef SMBUS_PSC_BASE
461 &pbdb_smbus_device, 540 &pbdb_smbus_device,
462#endif 541#endif
463 &au1xxx_eth0_device,
464}; 542};
465 543
466static int __init au1xxx_platform_init(void) 544static int __init au1xxx_platform_init(void)
467{ 545{
468 unsigned int uartclk = get_au1x00_uart_baud_base() * 16; 546 int err, ctype = alchemy_get_cputype();
469 int err, i;
470 unsigned char ethaddr[6];
471 547
472 /* Fill up uartclk. */ 548 alchemy_setup_uarts(ctype);
473 for (i = 0; au1x00_uart_data[i].flags; i++) 549 alchemy_setup_macs(ctype);
474 au1x00_uart_data[i].uartclk = uartclk;
475
476 /* use firmware-provided mac addr if available and necessary */
477 i = prom_get_ethernet_addr(ethaddr);
478 if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
479 memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
480 550
481 err = platform_add_devices(au1xxx_platform_devices, 551 err = platform_add_devices(au1xxx_platform_devices,
482 ARRAY_SIZE(au1xxx_platform_devices)); 552 ARRAY_SIZE(au1xxx_platform_devices));
483#ifndef CONFIG_SOC_AU1100
484 ethaddr[5] += 1; /* next addr for 2nd MAC */
485 if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
486 memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
487
488 /* Register second MAC if enabled in pinfunc */
489 if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
490 err = platform_device_register(&au1xxx_eth1_device);
491#endif
492
493 return err; 553 return err;
494} 554}
495 555
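
platform.c above moves from #ifdef-selected static tables to per-CPU templates indexed by the detected CPU type and copied into heap memory at boot, before the platform device is registered. A rough sketch of that pattern, under hypothetical bar_* names and made-up addresses (kmemdup() is used here as shorthand for the kmalloc()+memcpy() pair in the patch):

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

#define BAR_NUM_RES	3

/* per-SoC resource templates, discarded after boot (__initdata) */
static struct resource bar_res_templates[2][BAR_NUM_RES] __initdata = {
	[0] = { { .start = 0x11100000, .end = 0x1110ffff,
		  .flags = IORESOURCE_MEM } },
	[1] = { { .start = 0x12200000, .end = 0x1220ffff,
		  .flags = IORESOURCE_MEM } },
};

static struct platform_device bar_device = {
	.name		= "bar",
	.id		= 0,
	.num_resources	= BAR_NUM_RES,
};

/* called from the board/platform init code with the detected CPU type */
static int __init bar_setup(int ctype)
{
	struct resource *res;

	/* copy the chosen template out of __initdata before registering */
	res = kmemdup(bar_res_templates[ctype],
		      sizeof(struct resource) * BAR_NUM_RES, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bar_device.resource = res;
	return platform_device_register(&bar_device);
}

The same idea drives alchemy_setup_uarts() and alchemy_setup_macs() above: one kernel image can then serve all Au1x00 variants instead of being fixed at compile time by CONFIG_SOC_AU1xx0.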
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 561e5da2658b..1b887c868417 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -52,8 +52,6 @@ void __init plat_mem_setup(void)
52 /* this is faster than wasting cycles trying to approximate it */ 52 /* this is faster than wasting cycles trying to approximate it */
53 preset_lpj = (est_freq >> 1) / HZ; 53 preset_lpj = (est_freq >> 1) / HZ;
54 54
55 board_setup(); /* board specific setup */
56
57 if (au1xxx_cpu_needs_config_od()) 55 if (au1xxx_cpu_needs_config_od())
58 /* Various early Au1xx0 errata corrected by this */ 56 /* Various early Au1xx0 errata corrected by this */
59 set_c0_config(1 << 19); /* Set Config[OD] */ 57 set_c0_config(1 << 19); /* Set Config[OD] */
@@ -61,6 +59,8 @@ void __init plat_mem_setup(void)
61 /* Clear to obtain best system bus performance */ 59 /* Clear to obtain best system bus performance */
62 clear_c0_config(1 << 19); /* Clear Config[OD] */ 60 clear_c0_config(1 << 19); /* Clear Config[OD] */
63 61
62 board_setup(); /* board specific setup */
63
64 /* IO/MEM resources. */ 64 /* IO/MEM resources. */
65 set_io_port_base(0); 65 set_io_port_base(0);
66 ioport_resource.start = IOPORT_RESOURCE_START; 66 ioport_resource.start = IOPORT_RESOURCE_START;
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 4a8980027ecf..1dac4f27d334 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -23,6 +23,13 @@ void __init board_setup(void)
23 unsigned long freq0, clksrc, div, pfc; 23 unsigned long freq0, clksrc, div, pfc;
24 unsigned short whoami; 24 unsigned short whoami;
25 25
26 /* Set Config[OD] (disable overlapping bus transaction):
27 * This gets rid of a _lot_ of spurious interrupts (especially
28 * wrt. IDE); but incurs ~10% performance hit in some
29 * cpu-bound applications.
30 */
31 set_c0_config(1 << 19);
32
26 bcsr_init(DB1200_BCSR_PHYS_ADDR, 33 bcsr_init(DB1200_BCSR_PHYS_ADDR,
27 DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); 34 DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
28 35
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
index 05f120ff90f9..5c956fe8760f 100644
--- a/arch/mips/alchemy/devboards/db1x00/board_setup.c
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -127,13 +127,10 @@ const char *get_system_type(void)
127void __init board_setup(void) 127void __init board_setup(void)
128{ 128{
129 unsigned long bcsr1, bcsr2; 129 unsigned long bcsr1, bcsr2;
130 u32 pin_func;
131 130
132 bcsr1 = DB1000_BCSR_PHYS_ADDR; 131 bcsr1 = DB1000_BCSR_PHYS_ADDR;
133 bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; 132 bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS;
134 133
135 pin_func = 0;
136
137#ifdef CONFIG_MIPS_DB1000 134#ifdef CONFIG_MIPS_DB1000
138 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); 135 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
139#endif 136#endif
@@ -164,12 +161,16 @@ void __init board_setup(void)
164 /* Not valid for Au1550 */ 161 /* Not valid for Au1550 */
165#if defined(CONFIG_IRDA) && \ 162#if defined(CONFIG_IRDA) && \
166 (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) 163 (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100))
167 /* Set IRFIRSEL instead of GPIO15 */ 164 {
168 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; 165 u32 pin_func;
169 au_writel(pin_func, SYS_PINFUNC); 166
170 /* Power off until the driver is in use */ 167 /* Set IRFIRSEL instead of GPIO15 */
171 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, 168 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
172 BCSR_RESETS_IRDA_MODE_OFF); 169 au_writel(pin_func, SYS_PINFUNC);
170 /* Power off until the driver is in use */
171 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
172 BCSR_RESETS_IRDA_MODE_OFF);
173 }
173#endif 174#endif
174 bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ 175 bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */
175 176
@@ -177,31 +178,35 @@ void __init board_setup(void)
177 alchemy_gpio1_input_enable(); 178 alchemy_gpio1_input_enable();
178 179
179#ifdef CONFIG_MIPS_MIRAGE 180#ifdef CONFIG_MIPS_MIRAGE
180 /* GPIO[20] is output */ 181 {
181 alchemy_gpio_direction_output(20, 0); 182 u32 pin_func;
182 183
183 /* Set GPIO[210:208] instead of SSI_0 */ 184 /* GPIO[20] is output */
184 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; 185 alchemy_gpio_direction_output(20, 0);
185 186
186 /* Set GPIO[215:211] for LEDs */ 187 /* Set GPIO[210:208] instead of SSI_0 */
187 pin_func |= 5 << 2; 188 pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
188 189
189 /* Set GPIO[214:213] for more LEDs */ 190 /* Set GPIO[215:211] for LEDs */
190 pin_func |= 5 << 12; 191 pin_func |= 5 << 2;
191 192
192 /* Set GPIO[207:200] instead of PCMCIA/LCD */ 193 /* Set GPIO[214:213] for more LEDs */
193 pin_func |= SYS_PF_LCD | SYS_PF_PC; 194 pin_func |= 5 << 12;
194 au_writel(pin_func, SYS_PINFUNC);
195 195
196 /* 196 /* Set GPIO[207:200] instead of PCMCIA/LCD */
197 * Enable speaker amplifier. This should 197 pin_func |= SYS_PF_LCD | SYS_PF_PC;
198 * be part of the audio driver. 198 au_writel(pin_func, SYS_PINFUNC);
199 */
200 alchemy_gpio_direction_output(209, 1);
201 199
202 pm_power_off = mirage_power_off; 200 /*
203 _machine_halt = mirage_power_off; 201 * Enable speaker amplifier. This should
204 _machine_restart = (void(*)(char *))mips_softreset; 202 * be part of the audio driver.
203 */
204 alchemy_gpio_direction_output(209, 1);
205
206 pm_power_off = mirage_power_off;
207 _machine_halt = mirage_power_off;
208 _machine_restart = (void(*)(char *))mips_softreset;
209 }
205#endif 210#endif
206 211
207#ifdef CONFIG_MIPS_BOSPORUS 212#ifdef CONFIG_MIPS_BOSPORUS
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index 2d85c4b5be09..e64fdcbf75d0 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -65,7 +65,7 @@ void __init board_setup(void)
65 65
66 /* Set AUX clock to 12 MHz * 8 = 96 MHz */ 66 /* Set AUX clock to 12 MHz * 8 = 96 MHz */
67 au_writel(8, SYS_AUXPLL); 67 au_writel(8, SYS_AUXPLL);
68 au_writel(0, SYS_PINSTATERD); 68 alchemy_gpio1_input_enable();
69 udelay(100); 69 udelay(100);
70 70
71#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) 71#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index 83f46215eb0c..3b4fa3206969 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -56,7 +56,7 @@ void __init board_setup(void)
56 sys_clksrc = sys_freqctrl = pin_func = 0; 56 sys_clksrc = sys_freqctrl = pin_func = 0;
57 /* Set AUX clock to 12 MHz * 8 = 96 MHz */ 57 /* Set AUX clock to 12 MHz * 8 = 96 MHz */
58 au_writel(8, SYS_AUXPLL); 58 au_writel(8, SYS_AUXPLL);
59 au_writel(0, SYS_PINSTATERD); 59 alchemy_gpio1_input_enable();
60 udelay(100); 60 udelay(100);
61 61
62 /* GPIO201 is input for PCMCIA card detect */ 62 /* GPIO201 is input for PCMCIA card detect */
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c
index baeb21385058..e5306b56da6d 100644
--- a/arch/mips/alchemy/devboards/prom.c
+++ b/arch/mips/alchemy/devboards/prom.c
@@ -62,5 +62,5 @@ void __init prom_init(void)
62 62
63void prom_putchar(unsigned char c) 63void prom_putchar(unsigned char c)
64{ 64{
65 alchemy_uart_putchar(UART0_PHYS_ADDR, c); 65 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
66} 66}
diff --git a/arch/mips/alchemy/gpr/board_setup.c b/arch/mips/alchemy/gpr/board_setup.c
index ad2e3f137933..5f8f0691ed2d 100644
--- a/arch/mips/alchemy/gpr/board_setup.c
+++ b/arch/mips/alchemy/gpr/board_setup.c
@@ -36,9 +36,6 @@
36 36
37#include <prom.h> 37#include <prom.h>
38 38
39#define UART1_ADDR KSEG1ADDR(UART1_PHYS_ADDR)
40#define UART3_ADDR KSEG1ADDR(UART3_PHYS_ADDR)
41
42char irq_tab_alchemy[][5] __initdata = { 39char irq_tab_alchemy[][5] __initdata = {
43 [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, 40 [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff },
44}; 41};
@@ -67,18 +64,15 @@ static void gpr_power_off(void)
67 64
68void __init board_setup(void) 65void __init board_setup(void)
69{ 66{
70 printk(KERN_INFO "Tarpeze ITS GPR board\n"); 67 printk(KERN_INFO "Trapeze ITS GPR board\n");
71 68
72 pm_power_off = gpr_power_off; 69 pm_power_off = gpr_power_off;
73 _machine_halt = gpr_power_off; 70 _machine_halt = gpr_power_off;
74 _machine_restart = gpr_reset; 71 _machine_restart = gpr_reset;
75 72
76 /* Enable UART3 */ 73 /* Enable UART1/3 */
77 au_writel(0x1, UART3_ADDR + UART_MOD_CNTRL);/* clock enable (CE) */ 74 alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
78 au_writel(0x3, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ 75 alchemy_uart_enable(AU1000_UART1_PHYS_ADDR);
79 /* Enable UART1 */
80 au_writel(0x1, UART1_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */
81 au_writel(0x3, UART1_ADDR + UART_MOD_CNTRL); /* CE and "enable" */
82 76
83 /* Take away Reset of UMTS-card */ 77 /* Take away Reset of UMTS-card */
84 alchemy_gpio_direction_output(215, 1); 78 alchemy_gpio_direction_output(215, 1);
diff --git a/arch/mips/alchemy/gpr/init.c b/arch/mips/alchemy/gpr/init.c
index f044f4c541d7..229aafae680c 100644
--- a/arch/mips/alchemy/gpr/init.c
+++ b/arch/mips/alchemy/gpr/init.c
@@ -59,5 +59,5 @@ void __init prom_init(void)
59 59
60void prom_putchar(unsigned char c) 60void prom_putchar(unsigned char c)
61{ 61{
62 alchemy_uart_putchar(UART0_PHYS_ADDR, c); 62 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
63} 63}
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index cf436ab679ae..3ae984cf98cf 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -87,7 +87,7 @@ void __init board_setup(void)
87 au_writel(SYS_PF_NI2, SYS_PINFUNC); 87 au_writel(SYS_PF_NI2, SYS_PINFUNC);
88 88
89 /* Initialize GPIO */ 89 /* Initialize GPIO */
90 au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); 90 au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR);
91 alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ 91 alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */
92 alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ 92 alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */
93 alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ 93 alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c
index f8d25575fa05..2e81cc7f3422 100644
--- a/arch/mips/alchemy/mtx-1/init.c
+++ b/arch/mips/alchemy/mtx-1/init.c
@@ -62,5 +62,5 @@ void __init prom_init(void)
62 62
63void prom_putchar(unsigned char c) 63void prom_putchar(unsigned char c)
64{ 64{
65 alchemy_uart_putchar(UART0_PHYS_ADDR, c); 65 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
66} 66}
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index 956f946218c5..55628e390fd7 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -53,8 +53,8 @@ static struct platform_device mtx1_button = {
53 53
54static struct resource mtx1_wdt_res[] = { 54static struct resource mtx1_wdt_res[] = {
55 [0] = { 55 [0] = {
56 .start = 15, 56 .start = 215,
57 .end = 15, 57 .end = 215,
58 .name = "mtx1-wdt-gpio", 58 .name = "mtx1-wdt-gpio",
59 .flags = IORESOURCE_IRQ, 59 .flags = IORESOURCE_IRQ,
60 } 60 }
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index febfb0fb0896..81e57fad07ab 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -66,13 +66,10 @@ void __init board_setup(void)
66 au_writel(pin_func, SYS_PINFUNC); 66 au_writel(pin_func, SYS_PINFUNC);
67 67
68 /* Enable UART */ 68 /* Enable UART */
69 au_writel(0x01, UART3_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ 69 alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
70 mdelay(10); 70 /* Enable DTR (MCR bit 0) = USB power up */
71 au_writel(0x03, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ 71 __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18));
72 mdelay(10); 72 wmb();
73
74 /* Enable DTR = USB power up */
75 au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? */
76 73
77#ifdef CONFIG_PCI 74#ifdef CONFIG_PCI
78#if defined(__MIPSEB__) 75#if defined(__MIPSEB__)
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c
index 15125c2fda7d..0ee02cfa989d 100644
--- a/arch/mips/alchemy/xxs1500/init.c
+++ b/arch/mips/alchemy/xxs1500/init.c
@@ -51,14 +51,13 @@ void __init prom_init(void)
51 prom_init_cmdline(); 51 prom_init_cmdline();
52 52
53 memsize_str = prom_getenv("memsize"); 53 memsize_str = prom_getenv("memsize");
54 if (!memsize_str) 54 if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
55 memsize = 0x04000000; 55 memsize = 0x04000000;
56 else 56
57 strict_strtoul(memsize_str, 0, &memsize);
58 add_memory_region(0, memsize, BOOT_MEM_RAM); 57 add_memory_region(0, memsize, BOOT_MEM_RAM);
59} 58}
60 59
61void prom_putchar(unsigned char c) 60void prom_putchar(unsigned char c)
62{ 61{
63 alchemy_uart_putchar(UART0_PHYS_ADDR, c); 62 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
64} 63}
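Note on the xxs1500 prom_init() hunk above: the memsize parsing is folded into a single test, so a missing or unparsable "memsize" environment variable now falls straight back to the 64 MB default. A worked illustration with made-up values (not part of the patch):

/* Illustrative only -- behaviour of the merged test for a few
 * hypothetical "memsize" strings:
 *
 *   memsize_str == NULL          -> memsize = 0x04000000 (64 MB default)
 *   memsize_str == "garbage"     -> strict_strtoul() fails -> 64 MB default
 *   memsize_str == "0x08000000"  -> strict_strtoul() succeeds -> 128 MB
 */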
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index 425dfa5d6e12..bb571bcdb8f2 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void)
325 size = 0x1f; 325 size = 0x1f;
326 } 326 }
327 327
328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, 328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
329 AR7_REGS_GPIO + 0x10);
330
331 if (!gpch->regs) { 329 if (!gpch->regs) {
332 printk(KERN_ERR "%s: failed to ioremap regs\n", 330 printk(KERN_ERR "%s: failed to ioremap regs\n",
333 gpch->chip.label); 331 gpch->chip.label);
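Note on the ar7_gpio_init() hunk above: the second argument to ioremap_nocache() is a mapping length, and the old call passed an address-sized expression there. Illustrative comparison only (size is the small register window computed just above, e.g. 0x1f):

/* old: length argument was an address expression, far larger than the
 *      GPIO register window actually needed
 *   gpch->regs = ioremap_nocache(AR7_REGS_GPIO, AR7_REGS_GPIO + 0x10);
 *
 * new: length argument is the window size computed above
 *   gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
 */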
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index e5b6615731e5..54db815bc86c 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2005 Broadcom Corporation 4 * Copyright (C) 2005 Broadcom Corporation
5 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> 5 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -23,7 +24,7 @@
23static char nvram_buf[NVRAM_SPACE]; 24static char nvram_buf[NVRAM_SPACE];
24 25
25/* Probe for NVRAM header */ 26/* Probe for NVRAM header */
26static void __init early_nvram_init(void) 27static void early_nvram_init(void)
27{ 28{
28 struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; 29 struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore;
29 struct nvram_header *header; 30 struct nvram_header *header;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index c95f90bf734c..73b529b57433 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -3,6 +3,7 @@
3 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> 3 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de> 4 * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de>
5 * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> 5 * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
6 * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -57,10 +58,49 @@ static void bcm47xx_machine_halt(void)
57} 58}
58 59
59#define READ_FROM_NVRAM(_outvar, name, buf) \ 60#define READ_FROM_NVRAM(_outvar, name, buf) \
60 if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ 61 if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\
61 sprom->_outvar = simple_strtoul(buf, NULL, 0); 62 sprom->_outvar = simple_strtoul(buf, NULL, 0);
62 63
63static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) 64#define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \
65 if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \
66 nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\
67 sprom->_outvar = simple_strtoul(buf, NULL, 0);
68
69static inline int nvram_getprefix(const char *prefix, char *name,
70 char *buf, int len)
71{
72 if (prefix) {
73 char key[100];
74
75 snprintf(key, sizeof(key), "%s%s", prefix, name);
76 return nvram_getenv(key, buf, len);
77 }
78
79 return nvram_getenv(name, buf, len);
80}
81
82static u32 nvram_getu32(const char *name, char *buf, int len)
83{
84 int rv;
85 char key[100];
86 u16 var0, var1;
87
88 snprintf(key, sizeof(key), "%s0", name);
89 rv = nvram_getenv(key, buf, len);
90 /* return 0 here so this looks like unset */
91 if (rv < 0)
92 return 0;
93 var0 = simple_strtoul(buf, NULL, 0);
94
95 snprintf(key, sizeof(key), "%s1", name);
96 rv = nvram_getenv(key, buf, len);
97 if (rv < 0)
98 return 0;
99 var1 = simple_strtoul(buf, NULL, 0);
100 return var1 << 16 | var0;
101}
102
103static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
64{ 104{
65 char buf[100]; 105 char buf[100];
66 u32 boardflags; 106 u32 boardflags;
@@ -69,11 +109,12 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
69 109
70 sprom->revision = 1; /* Fallback: Old hardware does not define this. */ 110 sprom->revision = 1; /* Fallback: Old hardware does not define this. */
71 READ_FROM_NVRAM(revision, "sromrev", buf); 111 READ_FROM_NVRAM(revision, "sromrev", buf);
72 if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) 112 if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 ||
113 nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0)
73 nvram_parse_macaddr(buf, sprom->il0mac); 114 nvram_parse_macaddr(buf, sprom->il0mac);
74 if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) 115 if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0)
75 nvram_parse_macaddr(buf, sprom->et0mac); 116 nvram_parse_macaddr(buf, sprom->et0mac);
76 if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) 117 if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0)
77 nvram_parse_macaddr(buf, sprom->et1mac); 118 nvram_parse_macaddr(buf, sprom->et1mac);
78 READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); 119 READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf);
79 READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); 120 READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf);
@@ -95,20 +136,36 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
95 READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); 136 READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf);
96 READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); 137 READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf);
97 READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); 138 READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf);
98 READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); 139 READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf);
99 READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); 140 READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf);
100 READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); 141 READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf);
101 READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); 142 READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf);
102 READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); 143 READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf);
103 READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); 144 READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf);
104 READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); 145 READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf);
105 READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); 146 READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf);
106 READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); 147 READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf);
107 READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); 148 READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf);
108 READ_FROM_NVRAM(tri2g, "tri2g", buf); 149 READ_FROM_NVRAM(tri2g, "tri2g", buf);
109 READ_FROM_NVRAM(tri5gl, "tri5gl", buf); 150 READ_FROM_NVRAM(tri5gl, "tri5gl", buf);
110 READ_FROM_NVRAM(tri5g, "tri5g", buf); 151 READ_FROM_NVRAM(tri5g, "tri5g", buf);
111 READ_FROM_NVRAM(tri5gh, "tri5gh", buf); 152 READ_FROM_NVRAM(tri5gh, "tri5gh", buf);
153 READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf);
154 READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf);
155 READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf);
156 READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf);
157 READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf);
158 READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf);
159 READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf);
160 READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf);
161 READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf);
162 READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf);
163 READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf);
164 READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf);
165 READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf);
166 READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf);
167 READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf);
168 READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf);
112 READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); 169 READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf);
113 READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); 170 READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf);
114 READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); 171 READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf);
@@ -120,19 +177,27 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
120 READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); 177 READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf);
121 READ_FROM_NVRAM(bxa5g, "bxa5g", buf); 178 READ_FROM_NVRAM(bxa5g, "bxa5g", buf);
122 READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); 179 READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf);
123 READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf);
124 READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf);
125 READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf);
126 READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf);
127 180
128 if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { 181 sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf));
182 sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf));
183 sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf));
184 sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf));
185
186 READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf);
187 READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf);
188 READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf);
189 READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf);
190 memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24,
191 sizeof(sprom->antenna_gain.ghz5));
192
193 if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) {
129 boardflags = simple_strtoul(buf, NULL, 0); 194 boardflags = simple_strtoul(buf, NULL, 0);
130 if (boardflags) { 195 if (boardflags) {
131 sprom->boardflags_lo = (boardflags & 0x0000FFFFU); 196 sprom->boardflags_lo = (boardflags & 0x0000FFFFU);
132 sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; 197 sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16;
133 } 198 }
134 } 199 }
135 if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) { 200 if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) {
136 boardflags = simple_strtoul(buf, NULL, 0); 201 boardflags = simple_strtoul(buf, NULL, 0);
137 if (boardflags) { 202 if (boardflags) {
138 sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); 203 sprom->boardflags2_lo = (boardflags & 0x0000FFFFU);
@@ -141,6 +206,22 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
141 } 206 }
142} 207}
143 208
209int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
210{
211 char prefix[10];
212
213 if (bus->bustype == SSB_BUSTYPE_PCI) {
214 snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
215 bus->host_pci->bus->number + 1,
216 PCI_SLOT(bus->host_pci->devfn));
217 bcm47xx_fill_sprom(out, prefix);
218 return 0;
219 } else {
220 printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n");
221 return -EINVAL;
222 }
223}
224
144static int bcm47xx_get_invariants(struct ssb_bus *bus, 225static int bcm47xx_get_invariants(struct ssb_bus *bus,
145 struct ssb_init_invariants *iv) 226 struct ssb_init_invariants *iv)
146{ 227{
@@ -158,7 +239,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
158 if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) 239 if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
159 iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); 240 iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
160 241
161 bcm47xx_fill_sprom(&iv->sprom); 242 bcm47xx_fill_sprom(&iv->sprom, NULL);
162 243
163 if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 244 if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
164 iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); 245 iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
@@ -172,6 +253,11 @@ void __init plat_mem_setup(void)
172 char buf[100]; 253 char buf[100];
173 struct ssb_mipscore *mcore; 254 struct ssb_mipscore *mcore;
174 255
256 err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom);
257 if (err)
258 printk(KERN_WARNING "bcm47xx: someone else already registered"
259 " a ssb SPROM callback handler (err %d)\n", err);
260
175 err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, 261 err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE,
176 bcm47xx_get_invariants); 262 bcm47xx_get_invariants);
177 if (err) 263 if (err)
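Note on the bcm47xx changes above: the ofdm*gpo SPROM words are no longer read through READ_FROM_NVRAM; nvram_getu32() reassembles each 32-bit value from a pair of 16-bit NVRAM variables named "<name>0" and "<name>1". A worked example with made-up NVRAM contents:

/* Hypothetical NVRAM contents:
 *   ofdm2gpo0=0x1122   ofdm2gpo1=0x3344
 *
 * nvram_getu32("ofdm2gpo", buf, sizeof(buf)) then computes:
 *   var0 = 0x1122, var1 = 0x3344
 *   return var1 << 16 | var0;     ->  0x33441122
 *
 * If either variable is missing, 0 is returned so the field looks unset.
 */
sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf));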
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 8dba8cfb752f..40b223b603be 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -643,6 +643,17 @@ static struct ssb_sprom bcm63xx_sprom = {
643 .boardflags_lo = 0x2848, 643 .boardflags_lo = 0x2848,
644 .boardflags_hi = 0x0000, 644 .boardflags_hi = 0x0000,
645}; 645};
646
647int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
648{
649 if (bus->bustype == SSB_BUSTYPE_PCI) {
650 memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
651 return 0;
652 } else {
653 printk(KERN_ERR PFX "unable to fill SPROM for given bustype.\n");
654 return -EINVAL;
655 }
656}
646#endif 657#endif
647 658
648/* 659/*
@@ -793,8 +804,9 @@ void __init board_prom_init(void)
793 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { 804 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
794 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); 805 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
795 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); 806 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
796 if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) 807 if (ssb_arch_register_fallback_sprom(
797 printk(KERN_ERR "failed to register fallback SPROM\n"); 808 &bcm63xx_get_fallback_sprom) < 0)
809 printk(KERN_ERR PFX "failed to register fallback SPROM\n");
798 } 810 }
799#endif 811#endif
800} 812}
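Note on the two SPROM hunks above: the static fallback struct is replaced by a callback, so ssb can ask the platform for SPROM data per bus (bcm47xx picks an NVRAM prefix from the PCI slot, bcm63xx copies its static struct). A minimal sketch of the pattern; the board_* names below are hypothetical, the ssb_* names come from this diff:

static int board_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -EINVAL;
	memcpy(out, &board_sprom, sizeof(*out));	/* board_sprom: board-provided data */
	return 0;
}

	/* during platform setup: */
	if (ssb_arch_register_fallback_sprom(&board_get_fallback_sprom) < 0)
		pr_err("failed to register fallback SPROM\n");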
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 88c9d963be88..9a6243676e22 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -16,8 +16,8 @@
16 16
17int main(int argc, char *argv[]) 17int main(int argc, char *argv[])
18{ 18{
19 unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
19 struct stat sb; 20 struct stat sb;
20 uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
21 21
22 if (argc != 3) { 22 if (argc != 3) {
23 fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", 23 fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n",
diff --git a/arch/mips/boot/compressed/uart-alchemy.c b/arch/mips/boot/compressed/uart-alchemy.c
index 1bff22fa089b..eb063e6dead9 100644
--- a/arch/mips/boot/compressed/uart-alchemy.c
+++ b/arch/mips/boot/compressed/uart-alchemy.c
@@ -3,5 +3,5 @@
3void putc(char c) 3void putc(char c)
4{ 4{
5 /* all current (Jan. 2010) in-kernel boards */ 5 /* all current (Jan. 2010) in-kernel boards */
6 alchemy_uart_putchar(UART0_PHYS_ADDR, c); 6 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
7} 7}
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index caae22858163..cad555ebeca3 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -1,11 +1,7 @@
1config CAVIUM_OCTEON_SPECIFIC_OPTIONS 1if CPU_CAVIUM_OCTEON
2 bool "Enable Octeon specific options"
3 depends on CPU_CAVIUM_OCTEON
4 default "y"
5 2
6config CAVIUM_CN63XXP1 3config CAVIUM_CN63XXP1
7 bool "Enable CN63XXP1 errata worarounds" 4 bool "Enable CN63XXP1 errata worarounds"
8 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
9 default "n" 5 default "n"
10 help 6 help
11 The CN63XXP1 chip requires build time workarounds to 7 The CN63XXP1 chip requires build time workarounds to
@@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1
16 12
17config CAVIUM_OCTEON_2ND_KERNEL 13config CAVIUM_OCTEON_2ND_KERNEL
18 bool "Build the kernel to be used as a 2nd kernel on the same chip" 14 bool "Build the kernel to be used as a 2nd kernel on the same chip"
19 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
20 default "n" 15 default "n"
21 help 16 help
22 This option configures this kernel to be linked at a different 17 This option configures this kernel to be linked at a different
@@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL
26 21
27config CAVIUM_OCTEON_HW_FIX_UNALIGNED 22config CAVIUM_OCTEON_HW_FIX_UNALIGNED
28 bool "Enable hardware fixups of unaligned loads and stores" 23 bool "Enable hardware fixups of unaligned loads and stores"
29 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
30 default "y" 24 default "y"
31 help 25 help
32 Configure the Octeon hardware to automatically fix unaligned loads 26 Configure the Octeon hardware to automatically fix unaligned loads
@@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED
38 32
39config CAVIUM_OCTEON_CVMSEG_SIZE 33config CAVIUM_OCTEON_CVMSEG_SIZE
40 int "Number of L1 cache lines reserved for CVMSEG memory" 34 int "Number of L1 cache lines reserved for CVMSEG memory"
41 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
42 range 0 54 35 range 0 54
43 default 1 36 default 1
44 help 37 help
@@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE
50 43
51config CAVIUM_OCTEON_LOCK_L2 44config CAVIUM_OCTEON_LOCK_L2
52 bool "Lock often used kernel code in the L2" 45 bool "Lock often used kernel code in the L2"
53 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
54 default "y" 46 default "y"
55 help 47 help
56 Enable locking parts of the kernel into the L2 cache. 48 Enable locking parts of the kernel into the L2 cache.
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
93config ARCH_SPARSEMEM_ENABLE 85config ARCH_SPARSEMEM_ENABLE
94 def_bool y 86 def_bool y
95 select SPARSEMEM_STATIC 87 select SPARSEMEM_STATIC
96 depends on CPU_CAVIUM_OCTEON
97 88
98config CAVIUM_OCTEON_HELPER 89config CAVIUM_OCTEON_HELPER
99 def_bool y 90 def_bool y
@@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH
107 98
108config SWIOTLB 99config SWIOTLB
109 def_bool y 100 def_bool y
110 depends on CPU_CAVIUM_OCTEON
111 select IOMMU_HELPER 101 select IOMMU_HELPER
112 select NEED_SG_DMA_LENGTH 102 select NEED_SG_DMA_LENGTH
103
104
105endif # CPU_CAVIUM_OCTEON
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 0707fae3f0ee..2d9028f1474c 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -288,7 +288,6 @@ void octeon_user_io_init(void)
288 union octeon_cvmemctl cvmmemctl; 288 union octeon_cvmemctl cvmmemctl;
289 union cvmx_iob_fau_timeout fau_timeout; 289 union cvmx_iob_fau_timeout fau_timeout;
290 union cvmx_pow_nw_tim nm_tim; 290 union cvmx_pow_nw_tim nm_tim;
291 uint64_t cvmctl;
292 291
293 /* Get the current settings for CP0_CVMMEMCTL_REG */ 292 /* Get the current settings for CP0_CVMMEMCTL_REG */
294 cvmmemctl.u64 = read_c0_cvmmemctl(); 293 cvmmemctl.u64 = read_c0_cvmmemctl();
@@ -392,12 +391,6 @@ void octeon_user_io_init(void)
392 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, 391 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
393 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); 392 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
394 393
395 /* Move the performance counter interrupts to IRQ 6 */
396 cvmctl = read_c0_cvmctl();
397 cvmctl &= ~(7 << 7);
398 cvmctl |= 6 << 7;
399 write_c0_cvmctl(cvmctl);
400
401 /* Set a default for the hardware timeouts */ 394 /* Set a default for the hardware timeouts */
402 fau_timeout.u64 = 0; 395 fau_timeout.u64 = 0;
403 fau_timeout.s.tout_val = 0xfff; 396 fau_timeout.s.tout_val = 0xfff;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 76923eeb58b9..8b606423bbd7 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -37,7 +37,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
37 uint64_t action; 37 uint64_t action;
38 38
39 /* Load the mailbox register to figure out what we're supposed to do */ 39 /* Load the mailbox register to figure out what we're supposed to do */
40 action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); 40 action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
41 41
42 /* Clear the mailbox to clear the interrupt */ 42 /* Clear the mailbox to clear the interrupt */
43 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); 43 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
@@ -202,16 +202,15 @@ void octeon_prepare_cpus(unsigned int max_cpus)
202 if (labi->labi_signature != LABI_SIGNATURE) 202 if (labi->labi_signature != LABI_SIGNATURE)
203 panic("The bootloader version on this board is incorrect."); 203 panic("The bootloader version on this board is incorrect.");
204#endif 204#endif
205 205 /*
206 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); 206 * Only the low order mailbox bits are used for IPIs, leave
207 * the other bits alone.
208 */
209 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
207 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 210 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
208 "mailbox0", mailbox_interrupt)) { 211 "SMP-IPI", mailbox_interrupt)) {
209 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); 212 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
210 } 213 }
211 if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED,
212 "mailbox1", mailbox_interrupt)) {
213 panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
214 }
215} 214}
216 215
217/** 216/**
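Note on the Octeon SMP hunks above: only the low 16 mailbox bits carry IPI actions, so both the read and the initial clear are masked with 0xffff and the acknowledge write no longer wipes the upper bits (the second mailbox IRQ is dropped as well). Illustrative only, with a made-up CSR value:

/*   cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid))  == 0x00030001
 *   action = 0x00030001 & 0xffff               == 0x0001   (IPI bits only)
 *   cvmx_write_csr(..., action)                clears bit 0,
 *                                              leaves bits 16/17 untouched
 */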
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 167c1d07b809..b6acd2f256b6 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -86,8 +86,8 @@ CONFIG_NET_SCHED=y
86CONFIG_NET_EMATCH=y 86CONFIG_NET_EMATCH=y
87CONFIG_NET_CLS_ACT=y 87CONFIG_NET_CLS_ACT=y
88CONFIG_BT=m 88CONFIG_BT=m
89CONFIG_BT_L2CAP=m 89CONFIG_BT_L2CAP=y
90CONFIG_BT_SCO=m 90CONFIG_BT_SCO=y
91CONFIG_BT_RFCOMM=m 91CONFIG_BT_RFCOMM=m
92CONFIG_BT_RFCOMM_TTY=y 92CONFIG_BT_RFCOMM_TTY=y
93CONFIG_BT_BNEP=m 93CONFIG_BT_BNEP=m
@@ -329,7 +329,7 @@ CONFIG_USB_LED=m
329CONFIG_USB_GADGET=m 329CONFIG_USB_GADGET=m
330CONFIG_USB_GADGET_M66592=y 330CONFIG_USB_GADGET_M66592=y
331CONFIG_MMC=m 331CONFIG_MMC=m
332CONFIG_LEDS_CLASS=m 332CONFIG_LEDS_CLASS=y
333CONFIG_STAGING=y 333CONFIG_STAGING=y
334# CONFIG_STAGING_EXCLUDE_BUILD is not set 334# CONFIG_STAGING_EXCLUDE_BUILD is not set
335CONFIG_FB_SM7XX=y 335CONFIG_FB_SM7XX=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 7270f3183bda..5527abbb7dea 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -374,7 +374,7 @@ CONFIG_FB_CIRRUS=y
374# CONFIG_VGA_CONSOLE is not set 374# CONFIG_VGA_CONSOLE is not set
375CONFIG_FRAMEBUFFER_CONSOLE=y 375CONFIG_FRAMEBUFFER_CONSOLE=y
376CONFIG_HID=m 376CONFIG_HID=m
377CONFIG_LEDS_CLASS=m 377CONFIG_LEDS_CLASS=y
378CONFIG_LEDS_TRIGGER_TIMER=m 378CONFIG_LEDS_TRIGGER_TIMER=m
379CONFIG_LEDS_TRIGGER_IDE_DISK=y 379CONFIG_LEDS_TRIGGER_IDE_DISK=y
380CONFIG_LEDS_TRIGGER_HEARTBEAT=m 380CONFIG_LEDS_TRIGGER_HEARTBEAT=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index a97a42c6b2c8..37862b2ce363 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -225,8 +225,8 @@ CONFIG_TOSHIBA_FIR=m
225CONFIG_VLSI_FIR=m 225CONFIG_VLSI_FIR=m
226CONFIG_MCS_FIR=m 226CONFIG_MCS_FIR=m
227CONFIG_BT=m 227CONFIG_BT=m
228CONFIG_BT_L2CAP=m 228CONFIG_BT_L2CAP=y
229CONFIG_BT_SCO=m 229CONFIG_BT_SCO=y
230CONFIG_BT_RFCOMM=m 230CONFIG_BT_RFCOMM=m
231CONFIG_BT_RFCOMM_TTY=y 231CONFIG_BT_RFCOMM_TTY=y
232CONFIG_BT_BNEP=m 232CONFIG_BT_BNEP=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
new file mode 100644
index 000000000000..e4b399fdaa61
--- /dev/null
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -0,0 +1,574 @@
1CONFIG_NLM_XLR_BOARD=y
2CONFIG_HIGHMEM=y
3CONFIG_KSM=y
4CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
5CONFIG_SMP=y
6CONFIG_NO_HZ=y
7CONFIG_HIGH_RES_TIMERS=y
8CONFIG_PREEMPT_VOLUNTARY=y
9CONFIG_KEXEC=y
10CONFIG_EXPERIMENTAL=y
11CONFIG_CROSS_COMPILE="mips64-unknown-linux-gnu-"
12# CONFIG_LOCALVERSION_AUTO is not set
13CONFIG_SYSVIPC=y
14CONFIG_POSIX_MQUEUE=y
15CONFIG_BSD_PROCESS_ACCT=y
16CONFIG_BSD_PROCESS_ACCT_V3=y
17CONFIG_TASKSTATS=y
18CONFIG_TASK_DELAY_ACCT=y
19CONFIG_TASK_XACCT=y
20CONFIG_TASK_IO_ACCOUNTING=y
21CONFIG_AUDIT=y
22CONFIG_NAMESPACES=y
23CONFIG_SCHED_AUTOGROUP=y
24CONFIG_BLK_DEV_INITRD=y
25CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs"
26CONFIG_RD_BZIP2=y
27CONFIG_RD_LZMA=y
28CONFIG_INITRAMFS_COMPRESSION_GZIP=y
29# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
30CONFIG_EXPERT=y
31CONFIG_KALLSYMS_ALL=y
32# CONFIG_ELF_CORE is not set
33# CONFIG_PCSPKR_PLATFORM is not set
34# CONFIG_PERF_EVENTS is not set
35# CONFIG_COMPAT_BRK is not set
36CONFIG_PROFILING=y
37CONFIG_MODULES=y
38CONFIG_MODULE_UNLOAD=y
39CONFIG_MODVERSIONS=y
40CONFIG_MODULE_SRCVERSION_ALL=y
41CONFIG_BLK_DEV_INTEGRITY=y
42CONFIG_BINFMT_MISC=m
43CONFIG_PM_RUNTIME=y
44CONFIG_PM_DEBUG=y
45CONFIG_PACKET=y
46CONFIG_UNIX=y
47CONFIG_XFRM_USER=m
48CONFIG_NET_KEY=m
49CONFIG_INET=y
50CONFIG_IP_MULTICAST=y
51CONFIG_IP_ADVANCED_ROUTER=y
52CONFIG_IP_MULTIPLE_TABLES=y
53CONFIG_IP_ROUTE_MULTIPATH=y
54CONFIG_IP_ROUTE_VERBOSE=y
55CONFIG_NET_IPIP=m
56CONFIG_IP_MROUTE=y
57CONFIG_IP_PIMSM_V1=y
58CONFIG_IP_PIMSM_V2=y
59CONFIG_SYN_COOKIES=y
60CONFIG_INET_AH=m
61CONFIG_INET_ESP=m
62CONFIG_INET_IPCOMP=m
63CONFIG_INET_XFRM_MODE_TRANSPORT=m
64CONFIG_INET_XFRM_MODE_TUNNEL=m
65CONFIG_INET_XFRM_MODE_BEET=m
66CONFIG_TCP_CONG_ADVANCED=y
67CONFIG_TCP_CONG_HSTCP=m
68CONFIG_TCP_CONG_HYBLA=m
69CONFIG_TCP_CONG_SCALABLE=m
70CONFIG_TCP_CONG_LP=m
71CONFIG_TCP_CONG_VENO=m
72CONFIG_TCP_CONG_YEAH=m
73CONFIG_TCP_CONG_ILLINOIS=m
74CONFIG_TCP_MD5SIG=y
75CONFIG_IPV6=y
76CONFIG_IPV6_PRIVACY=y
77CONFIG_INET6_AH=m
78CONFIG_INET6_ESP=m
79CONFIG_INET6_IPCOMP=m
80CONFIG_INET6_XFRM_MODE_TRANSPORT=m
81CONFIG_INET6_XFRM_MODE_TUNNEL=m
82CONFIG_INET6_XFRM_MODE_BEET=m
83CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
84CONFIG_IPV6_SIT=m
85CONFIG_IPV6_TUNNEL=m
86CONFIG_IPV6_MULTIPLE_TABLES=y
87CONFIG_NETLABEL=y
88CONFIG_NETFILTER=y
89CONFIG_NF_CONNTRACK=m
90CONFIG_NF_CONNTRACK_SECMARK=y
91CONFIG_NF_CONNTRACK_EVENTS=y
92CONFIG_NF_CT_PROTO_UDPLITE=m
93CONFIG_NF_CONNTRACK_AMANDA=m
94CONFIG_NF_CONNTRACK_FTP=m
95CONFIG_NF_CONNTRACK_H323=m
96CONFIG_NF_CONNTRACK_IRC=m
97CONFIG_NF_CONNTRACK_NETBIOS_NS=m
98CONFIG_NF_CONNTRACK_PPTP=m
99CONFIG_NF_CONNTRACK_SANE=m
100CONFIG_NF_CONNTRACK_SIP=m
101CONFIG_NF_CONNTRACK_TFTP=m
102CONFIG_NF_CT_NETLINK=m
103CONFIG_NETFILTER_TPROXY=m
104CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
105CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
106CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
107CONFIG_NETFILTER_XT_TARGET_DSCP=m
108CONFIG_NETFILTER_XT_TARGET_MARK=m
109CONFIG_NETFILTER_XT_TARGET_NFLOG=m
110CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
111CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
112CONFIG_NETFILTER_XT_TARGET_TPROXY=m
113CONFIG_NETFILTER_XT_TARGET_TRACE=m
114CONFIG_NETFILTER_XT_TARGET_SECMARK=m
115CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
116CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
117CONFIG_NETFILTER_XT_MATCH_COMMENT=m
118CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
119CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
120CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
121CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
122CONFIG_NETFILTER_XT_MATCH_DSCP=m
123CONFIG_NETFILTER_XT_MATCH_ESP=m
124CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
125CONFIG_NETFILTER_XT_MATCH_HELPER=m
126CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
127CONFIG_NETFILTER_XT_MATCH_LENGTH=m
128CONFIG_NETFILTER_XT_MATCH_LIMIT=m
129CONFIG_NETFILTER_XT_MATCH_MAC=m
130CONFIG_NETFILTER_XT_MATCH_MARK=m
131CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
132CONFIG_NETFILTER_XT_MATCH_OSF=m
133CONFIG_NETFILTER_XT_MATCH_OWNER=m
134CONFIG_NETFILTER_XT_MATCH_POLICY=m
135CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
136CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
137CONFIG_NETFILTER_XT_MATCH_QUOTA=m
138CONFIG_NETFILTER_XT_MATCH_RATEEST=m
139CONFIG_NETFILTER_XT_MATCH_REALM=m
140CONFIG_NETFILTER_XT_MATCH_RECENT=m
141CONFIG_NETFILTER_XT_MATCH_SOCKET=m
142CONFIG_NETFILTER_XT_MATCH_STATE=m
143CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
144CONFIG_NETFILTER_XT_MATCH_STRING=m
145CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
146CONFIG_NETFILTER_XT_MATCH_TIME=m
147CONFIG_NETFILTER_XT_MATCH_U32=m
148CONFIG_IP_VS=m
149CONFIG_IP_VS_IPV6=y
150CONFIG_IP_VS_PROTO_TCP=y
151CONFIG_IP_VS_PROTO_UDP=y
152CONFIG_IP_VS_PROTO_ESP=y
153CONFIG_IP_VS_PROTO_AH=y
154CONFIG_IP_VS_RR=m
155CONFIG_IP_VS_WRR=m
156CONFIG_IP_VS_LC=m
157CONFIG_IP_VS_WLC=m
158CONFIG_IP_VS_LBLC=m
159CONFIG_IP_VS_LBLCR=m
160CONFIG_IP_VS_DH=m
161CONFIG_IP_VS_SH=m
162CONFIG_IP_VS_SED=m
163CONFIG_IP_VS_NQ=m
164CONFIG_IP_VS_FTP=m
165CONFIG_NF_CONNTRACK_IPV4=m
166CONFIG_IP_NF_QUEUE=m
167CONFIG_IP_NF_IPTABLES=m
168CONFIG_IP_NF_MATCH_AH=m
169CONFIG_IP_NF_MATCH_ECN=m
170CONFIG_IP_NF_MATCH_TTL=m
171CONFIG_IP_NF_FILTER=m
172CONFIG_IP_NF_TARGET_REJECT=m
173CONFIG_IP_NF_TARGET_LOG=m
174CONFIG_IP_NF_TARGET_ULOG=m
175CONFIG_NF_NAT=m
176CONFIG_IP_NF_TARGET_MASQUERADE=m
177CONFIG_IP_NF_TARGET_NETMAP=m
178CONFIG_IP_NF_TARGET_REDIRECT=m
179CONFIG_IP_NF_MANGLE=m
180CONFIG_IP_NF_TARGET_CLUSTERIP=m
181CONFIG_IP_NF_TARGET_ECN=m
182CONFIG_IP_NF_TARGET_TTL=m
183CONFIG_IP_NF_RAW=m
184CONFIG_IP_NF_SECURITY=m
185CONFIG_IP_NF_ARPTABLES=m
186CONFIG_IP_NF_ARPFILTER=m
187CONFIG_IP_NF_ARP_MANGLE=m
188CONFIG_NF_CONNTRACK_IPV6=m
189CONFIG_IP6_NF_QUEUE=m
190CONFIG_IP6_NF_IPTABLES=m
191CONFIG_IP6_NF_MATCH_AH=m
192CONFIG_IP6_NF_MATCH_EUI64=m
193CONFIG_IP6_NF_MATCH_FRAG=m
194CONFIG_IP6_NF_MATCH_OPTS=m
195CONFIG_IP6_NF_MATCH_HL=m
196CONFIG_IP6_NF_MATCH_IPV6HEADER=m
197CONFIG_IP6_NF_MATCH_MH=m
198CONFIG_IP6_NF_MATCH_RT=m
199CONFIG_IP6_NF_TARGET_HL=m
200CONFIG_IP6_NF_TARGET_LOG=m
201CONFIG_IP6_NF_FILTER=m
202CONFIG_IP6_NF_TARGET_REJECT=m
203CONFIG_IP6_NF_MANGLE=m
204CONFIG_IP6_NF_RAW=m
205CONFIG_IP6_NF_SECURITY=m
206CONFIG_DECNET_NF_GRABULATOR=m
207CONFIG_BRIDGE_NF_EBTABLES=m
208CONFIG_BRIDGE_EBT_BROUTE=m
209CONFIG_BRIDGE_EBT_T_FILTER=m
210CONFIG_BRIDGE_EBT_T_NAT=m
211CONFIG_BRIDGE_EBT_802_3=m
212CONFIG_BRIDGE_EBT_AMONG=m
213CONFIG_BRIDGE_EBT_ARP=m
214CONFIG_BRIDGE_EBT_IP=m
215CONFIG_BRIDGE_EBT_IP6=m
216CONFIG_BRIDGE_EBT_LIMIT=m
217CONFIG_BRIDGE_EBT_MARK=m
218CONFIG_BRIDGE_EBT_PKTTYPE=m
219CONFIG_BRIDGE_EBT_STP=m
220CONFIG_BRIDGE_EBT_VLAN=m
221CONFIG_BRIDGE_EBT_ARPREPLY=m
222CONFIG_BRIDGE_EBT_DNAT=m
223CONFIG_BRIDGE_EBT_MARK_T=m
224CONFIG_BRIDGE_EBT_REDIRECT=m
225CONFIG_BRIDGE_EBT_SNAT=m
226CONFIG_BRIDGE_EBT_LOG=m
227CONFIG_BRIDGE_EBT_ULOG=m
228CONFIG_BRIDGE_EBT_NFLOG=m
229CONFIG_IP_DCCP=m
230CONFIG_RDS=m
231CONFIG_RDS_TCP=m
232CONFIG_TIPC=m
233CONFIG_ATM=m
234CONFIG_ATM_CLIP=m
235CONFIG_ATM_LANE=m
236CONFIG_ATM_MPOA=m
237CONFIG_ATM_BR2684=m
238CONFIG_BRIDGE=m
239CONFIG_VLAN_8021Q=m
240CONFIG_VLAN_8021Q_GVRP=y
241CONFIG_DECNET=m
242CONFIG_LLC2=m
243CONFIG_IPX=m
244CONFIG_ATALK=m
245CONFIG_DEV_APPLETALK=m
246CONFIG_IPDDP=m
247CONFIG_IPDDP_ENCAP=y
248CONFIG_IPDDP_DECAP=y
249CONFIG_X25=m
250CONFIG_LAPB=m
251CONFIG_ECONET=m
252CONFIG_ECONET_AUNUDP=y
253CONFIG_ECONET_NATIVE=y
254CONFIG_WAN_ROUTER=m
255CONFIG_PHONET=m
256CONFIG_IEEE802154=m
257CONFIG_NET_SCHED=y
258CONFIG_NET_SCH_CBQ=m
259CONFIG_NET_SCH_HTB=m
260CONFIG_NET_SCH_HFSC=m
261CONFIG_NET_SCH_ATM=m
262CONFIG_NET_SCH_PRIO=m
263CONFIG_NET_SCH_MULTIQ=m
264CONFIG_NET_SCH_RED=m
265CONFIG_NET_SCH_SFQ=m
266CONFIG_NET_SCH_TEQL=m
267CONFIG_NET_SCH_TBF=m
268CONFIG_NET_SCH_GRED=m
269CONFIG_NET_SCH_DSMARK=m
270CONFIG_NET_SCH_NETEM=m
271CONFIG_NET_SCH_DRR=m
272CONFIG_NET_SCH_INGRESS=m
273CONFIG_NET_CLS_BASIC=m
274CONFIG_NET_CLS_TCINDEX=m
275CONFIG_NET_CLS_ROUTE4=m
276CONFIG_NET_CLS_FW=m
277CONFIG_NET_CLS_U32=m
278CONFIG_CLS_U32_MARK=y
279CONFIG_NET_CLS_RSVP=m
280CONFIG_NET_CLS_RSVP6=m
281CONFIG_NET_CLS_FLOW=m
282CONFIG_NET_EMATCH=y
283CONFIG_NET_EMATCH_CMP=m
284CONFIG_NET_EMATCH_NBYTE=m
285CONFIG_NET_EMATCH_U32=m
286CONFIG_NET_EMATCH_META=m
287CONFIG_NET_EMATCH_TEXT=m
288CONFIG_NET_CLS_ACT=y
289CONFIG_NET_ACT_POLICE=m
290CONFIG_NET_ACT_GACT=m
291CONFIG_GACT_PROB=y
292CONFIG_NET_ACT_MIRRED=m
293CONFIG_NET_ACT_IPT=m
294CONFIG_NET_ACT_NAT=m
295CONFIG_NET_ACT_PEDIT=m
296CONFIG_NET_ACT_SIMP=m
297CONFIG_NET_ACT_SKBEDIT=m
298CONFIG_DCB=y
299CONFIG_NET_PKTGEN=m
300# CONFIG_WIRELESS is not set
301CONFIG_DEVTMPFS=y
302CONFIG_DEVTMPFS_MOUNT=y
303# CONFIG_STANDALONE is not set
304CONFIG_CONNECTOR=y
305CONFIG_MTD=m
306CONFIG_BLK_DEV_LOOP=y
307CONFIG_BLK_DEV_CRYPTOLOOP=m
308CONFIG_BLK_DEV_NBD=m
309CONFIG_BLK_DEV_OSD=m
310CONFIG_BLK_DEV_RAM=y
311CONFIG_BLK_DEV_RAM_SIZE=65536
312CONFIG_CDROM_PKTCDVD=y
313CONFIG_MISC_DEVICES=y
314CONFIG_RAID_ATTRS=m
315CONFIG_SCSI=y
316CONFIG_SCSI_TGT=m
317CONFIG_BLK_DEV_SD=y
318CONFIG_CHR_DEV_ST=m
319CONFIG_CHR_DEV_OSST=m
320CONFIG_BLK_DEV_SR=y
321CONFIG_CHR_DEV_SG=y
322CONFIG_CHR_DEV_SCH=m
323CONFIG_SCSI_MULTI_LUN=y
324CONFIG_SCSI_CONSTANTS=y
325CONFIG_SCSI_LOGGING=y
326CONFIG_SCSI_SCAN_ASYNC=y
327CONFIG_SCSI_SPI_ATTRS=m
328CONFIG_SCSI_FC_TGT_ATTRS=y
329CONFIG_SCSI_SAS_LIBSAS=m
330CONFIG_SCSI_SRP_ATTRS=m
331CONFIG_SCSI_SRP_TGT_ATTRS=y
332CONFIG_ISCSI_TCP=m
333CONFIG_LIBFCOE=m
334CONFIG_SCSI_DEBUG=m
335CONFIG_SCSI_DH=y
336CONFIG_SCSI_DH_RDAC=m
337CONFIG_SCSI_DH_HP_SW=m
338CONFIG_SCSI_DH_EMC=m
339CONFIG_SCSI_DH_ALUA=m
340CONFIG_SCSI_OSD_INITIATOR=m
341CONFIG_SCSI_OSD_ULD=m
342# CONFIG_INPUT_MOUSEDEV is not set
343CONFIG_INPUT_EVDEV=y
344CONFIG_INPUT_EVBUG=m
345# CONFIG_INPUT_KEYBOARD is not set
346# CONFIG_INPUT_MOUSE is not set
347# CONFIG_SERIO_I8042 is not set
348CONFIG_SERIO_SERPORT=m
349CONFIG_SERIO_LIBPS2=y
350CONFIG_SERIO_RAW=m
351CONFIG_VT_HW_CONSOLE_BINDING=y
352CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
353CONFIG_LEGACY_PTY_COUNT=0
354CONFIG_SERIAL_NONSTANDARD=y
355CONFIG_N_HDLC=m
356# CONFIG_DEVKMEM is not set
357CONFIG_STALDRV=y
358CONFIG_SERIAL_8250=y
359CONFIG_SERIAL_8250_CONSOLE=y
360CONFIG_SERIAL_8250_NR_UARTS=48
361CONFIG_SERIAL_8250_EXTENDED=y
362CONFIG_SERIAL_8250_MANY_PORTS=y
363CONFIG_SERIAL_8250_SHARE_IRQ=y
364CONFIG_SERIAL_8250_RSA=y
365CONFIG_HW_RANDOM=y
366CONFIG_HW_RANDOM_TIMERIOMEM=m
367CONFIG_RAW_DRIVER=m
368# CONFIG_HWMON is not set
369# CONFIG_VGA_CONSOLE is not set
370# CONFIG_HID_SUPPORT is not set
371# CONFIG_USB_SUPPORT is not set
372CONFIG_UIO=y
373CONFIG_UIO_PDRV=m
374CONFIG_UIO_PDRV_GENIRQ=m
375CONFIG_EXT2_FS=y
376CONFIG_EXT2_FS_XATTR=y
377CONFIG_EXT2_FS_POSIX_ACL=y
378CONFIG_EXT2_FS_SECURITY=y
379CONFIG_EXT3_FS=y
380CONFIG_EXT3_FS_POSIX_ACL=y
381CONFIG_EXT3_FS_SECURITY=y
382CONFIG_EXT4_FS=y
383CONFIG_EXT4_FS_POSIX_ACL=y
384CONFIG_EXT4_FS_SECURITY=y
385CONFIG_GFS2_FS=m
386CONFIG_GFS2_FS_LOCKING_DLM=y
387CONFIG_OCFS2_FS=m
388CONFIG_BTRFS_FS=m
389CONFIG_BTRFS_FS_POSIX_ACL=y
390CONFIG_NILFS2_FS=m
391CONFIG_QUOTA_NETLINK_INTERFACE=y
392# CONFIG_PRINT_QUOTA_WARNING is not set
393CONFIG_QFMT_V1=m
394CONFIG_QFMT_V2=m
395CONFIG_AUTOFS4_FS=m
396CONFIG_FUSE_FS=y
397CONFIG_CUSE=m
398CONFIG_FSCACHE=m
399CONFIG_FSCACHE_STATS=y
400CONFIG_FSCACHE_HISTOGRAM=y
401CONFIG_CACHEFILES=m
402CONFIG_ISO9660_FS=m
403CONFIG_JOLIET=y
404CONFIG_ZISOFS=y
405CONFIG_UDF_FS=m
406CONFIG_MSDOS_FS=m
407CONFIG_VFAT_FS=m
408CONFIG_NTFS_FS=m
409CONFIG_PROC_KCORE=y
410CONFIG_TMPFS=y
411CONFIG_TMPFS_POSIX_ACL=y
412CONFIG_CONFIGFS_FS=y
413CONFIG_ADFS_FS=m
414CONFIG_AFFS_FS=m
415CONFIG_ECRYPT_FS=y
416CONFIG_HFS_FS=m
417CONFIG_HFSPLUS_FS=m
418CONFIG_BEFS_FS=m
419CONFIG_BFS_FS=m
420CONFIG_EFS_FS=m
421CONFIG_CRAMFS=m
422CONFIG_SQUASHFS=m
423CONFIG_VXFS_FS=m
424CONFIG_MINIX_FS=m
425CONFIG_OMFS_FS=m
426CONFIG_HPFS_FS=m
427CONFIG_QNX4FS_FS=m
428CONFIG_ROMFS_FS=m
429CONFIG_SYSV_FS=m
430CONFIG_UFS_FS=m
431CONFIG_EXOFS_FS=m
432CONFIG_NFS_FS=m
433CONFIG_NFS_V3=y
434CONFIG_NFS_V3_ACL=y
435CONFIG_NFS_V4=y
436CONFIG_NFS_FSCACHE=y
437CONFIG_NFSD=m
438CONFIG_NFSD_V3_ACL=y
439CONFIG_NFSD_V4=y
440CONFIG_CIFS=m
441CONFIG_CIFS_WEAK_PW_HASH=y
442CONFIG_CIFS_UPCALL=y
443CONFIG_CIFS_XATTR=y
444CONFIG_CIFS_POSIX=y
445CONFIG_CIFS_DFS_UPCALL=y
446CONFIG_CIFS_EXPERIMENTAL=y
447CONFIG_NCP_FS=m
448CONFIG_NCPFS_PACKET_SIGNING=y
449CONFIG_NCPFS_IOCTL_LOCKING=y
450CONFIG_NCPFS_STRONG=y
451CONFIG_NCPFS_NFS_NS=y
452CONFIG_NCPFS_OS2_NS=y
453CONFIG_NCPFS_NLS=y
454CONFIG_NCPFS_EXTRAS=y
455CONFIG_CODA_FS=m
456CONFIG_AFS_FS=m
457CONFIG_PARTITION_ADVANCED=y
458CONFIG_ACORN_PARTITION=y
459CONFIG_ACORN_PARTITION_ICS=y
460CONFIG_ACORN_PARTITION_RISCIX=y
461CONFIG_OSF_PARTITION=y
462CONFIG_AMIGA_PARTITION=y
463CONFIG_ATARI_PARTITION=y
464CONFIG_MAC_PARTITION=y
465CONFIG_BSD_DISKLABEL=y
466CONFIG_MINIX_SUBPARTITION=y
467CONFIG_SOLARIS_X86_PARTITION=y
468CONFIG_UNIXWARE_DISKLABEL=y
469CONFIG_LDM_PARTITION=y
470CONFIG_SGI_PARTITION=y
471CONFIG_ULTRIX_PARTITION=y
472CONFIG_SUN_PARTITION=y
473CONFIG_KARMA_PARTITION=y
474CONFIG_EFI_PARTITION=y
475CONFIG_SYSV68_PARTITION=y
476CONFIG_NLS=y
477CONFIG_NLS_DEFAULT="cp437"
478CONFIG_NLS_CODEPAGE_437=m
479CONFIG_NLS_CODEPAGE_737=m
480CONFIG_NLS_CODEPAGE_775=m
481CONFIG_NLS_CODEPAGE_850=m
482CONFIG_NLS_CODEPAGE_852=m
483CONFIG_NLS_CODEPAGE_855=m
484CONFIG_NLS_CODEPAGE_857=m
485CONFIG_NLS_CODEPAGE_860=m
486CONFIG_NLS_CODEPAGE_861=m
487CONFIG_NLS_CODEPAGE_862=m
488CONFIG_NLS_CODEPAGE_863=m
489CONFIG_NLS_CODEPAGE_864=m
490CONFIG_NLS_CODEPAGE_865=m
491CONFIG_NLS_CODEPAGE_866=m
492CONFIG_NLS_CODEPAGE_869=m
493CONFIG_NLS_CODEPAGE_936=m
494CONFIG_NLS_CODEPAGE_950=m
495CONFIG_NLS_CODEPAGE_932=m
496CONFIG_NLS_CODEPAGE_949=m
497CONFIG_NLS_CODEPAGE_874=m
498CONFIG_NLS_ISO8859_8=m
499CONFIG_NLS_CODEPAGE_1250=m
500CONFIG_NLS_CODEPAGE_1251=m
501CONFIG_NLS_ASCII=m
502CONFIG_NLS_ISO8859_1=m
503CONFIG_NLS_ISO8859_2=m
504CONFIG_NLS_ISO8859_3=m
505CONFIG_NLS_ISO8859_4=m
506CONFIG_NLS_ISO8859_5=m
507CONFIG_NLS_ISO8859_6=m
508CONFIG_NLS_ISO8859_7=m
509CONFIG_NLS_ISO8859_9=m
510CONFIG_NLS_ISO8859_13=m
511CONFIG_NLS_ISO8859_14=m
512CONFIG_NLS_ISO8859_15=m
513CONFIG_NLS_KOI8_R=m
514CONFIG_NLS_KOI8_U=m
515CONFIG_PRINTK_TIME=y
516# CONFIG_ENABLE_WARN_DEPRECATED is not set
517# CONFIG_ENABLE_MUST_CHECK is not set
518CONFIG_UNUSED_SYMBOLS=y
519CONFIG_DEBUG_KERNEL=y
520CONFIG_DETECT_HUNG_TASK=y
521CONFIG_SCHEDSTATS=y
522CONFIG_TIMER_STATS=y
523CONFIG_DEBUG_INFO=y
524CONFIG_DEBUG_MEMORY_INIT=y
525CONFIG_SYSCTL_SYSCALL_CHECK=y
526CONFIG_SCHED_TRACER=y
527CONFIG_BLK_DEV_IO_TRACE=y
528CONFIG_KGDB=y
529CONFIG_SECURITY=y
530CONFIG_SECURITY_NETWORK=y
531CONFIG_LSM_MMAP_MIN_ADDR=0
532CONFIG_SECURITY_SELINUX=y
533CONFIG_SECURITY_SELINUX_BOOTPARAM=y
534CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
535CONFIG_SECURITY_SELINUX_DISABLE=y
536CONFIG_SECURITY_SMACK=y
537CONFIG_SECURITY_TOMOYO=y
538CONFIG_CRYPTO_NULL=m
539CONFIG_CRYPTO_CRYPTD=m
540CONFIG_CRYPTO_TEST=m
541CONFIG_CRYPTO_CCM=m
542CONFIG_CRYPTO_GCM=m
543CONFIG_CRYPTO_CTS=m
544CONFIG_CRYPTO_LRW=m
545CONFIG_CRYPTO_PCBC=m
546CONFIG_CRYPTO_XTS=m
547CONFIG_CRYPTO_HMAC=y
548CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m
551CONFIG_CRYPTO_RMD128=m
552CONFIG_CRYPTO_RMD160=m
553CONFIG_CRYPTO_RMD256=m
554CONFIG_CRYPTO_RMD320=m
555CONFIG_CRYPTO_SHA256=m
556CONFIG_CRYPTO_SHA512=m
557CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m
559CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m
562CONFIG_CRYPTO_CAST5=m
563CONFIG_CRYPTO_CAST6=m
564CONFIG_CRYPTO_FCRYPT=m
565CONFIG_CRYPTO_KHAZAD=m
566CONFIG_CRYPTO_SALSA20=m
567CONFIG_CRYPTO_SEED=m
568CONFIG_CRYPTO_SERPENT=m
569CONFIG_CRYPTO_TEA=m
570CONFIG_CRYPTO_TWOFISH=m
571CONFIG_CRYPTO_ZLIB=m
572CONFIG_CRYPTO_LZO=m
573CONFIG_CRC_CCITT=m
574CONFIG_CRC7=m
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index 650ac9ba734c..b4db69fbc40c 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -17,6 +17,6 @@
17#define SMP_CACHE_SHIFT L1_CACHE_SHIFT 17#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
18#define SMP_CACHE_BYTES L1_CACHE_BYTES 18#define SMP_CACHE_BYTES L1_CACHE_BYTES
19 19
20#define __read_mostly __attribute__((__section__(".data.read_mostly"))) 20#define __read_mostly __attribute__((__section__(".data..read_mostly")))
21 21
22#endif /* _ASM_CACHE_H */ 22#endif /* _ASM_CACHE_H */
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index fa4328f9124f..65f9bdd02f1f 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -14,6 +14,9 @@
14#ifndef __ASM_CEVT_R4K_H 14#ifndef __ASM_CEVT_R4K_H
15#define __ASM_CEVT_R4K_H 15#define __ASM_CEVT_R4K_H
16 16
17#include <linux/clockchips.h>
18#include <asm/time.h>
19
17DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); 20DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
18 21
19void mips_event_handler(struct clock_event_device *dev); 22void mips_event_handler(struct clock_event_device *dev);
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 86877539c6e8..34c0d3cb116f 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -33,6 +33,7 @@
33#define PRID_COMP_TOSHIBA 0x070000 33#define PRID_COMP_TOSHIBA 0x070000
34#define PRID_COMP_LSI 0x080000 34#define PRID_COMP_LSI 0x080000
35#define PRID_COMP_LEXRA 0x0b0000 35#define PRID_COMP_LEXRA 0x0b0000
36#define PRID_COMP_NETLOGIC 0x0c0000
36#define PRID_COMP_CAVIUM 0x0d0000 37#define PRID_COMP_CAVIUM 0x0d0000
37#define PRID_COMP_INGENIC 0xd00000 38#define PRID_COMP_INGENIC 0xd00000
38 39
@@ -142,6 +143,31 @@
142#define PRID_IMP_JZRISC 0x0200 143#define PRID_IMP_JZRISC 0x0200
143 144
144/* 145/*
146 * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
147 */
148#define PRID_IMP_NETLOGIC_XLR732 0x0000
149#define PRID_IMP_NETLOGIC_XLR716 0x0200
150#define PRID_IMP_NETLOGIC_XLR532 0x0900
151#define PRID_IMP_NETLOGIC_XLR308 0x0600
152#define PRID_IMP_NETLOGIC_XLR532C 0x0800
153#define PRID_IMP_NETLOGIC_XLR516C 0x0a00
154#define PRID_IMP_NETLOGIC_XLR508C 0x0b00
155#define PRID_IMP_NETLOGIC_XLR308C 0x0f00
156#define PRID_IMP_NETLOGIC_XLS608 0x8000
157#define PRID_IMP_NETLOGIC_XLS408 0x8800
158#define PRID_IMP_NETLOGIC_XLS404 0x8c00
159#define PRID_IMP_NETLOGIC_XLS208 0x8e00
160#define PRID_IMP_NETLOGIC_XLS204 0x8f00
161#define PRID_IMP_NETLOGIC_XLS108 0xce00
162#define PRID_IMP_NETLOGIC_XLS104 0xcf00
163#define PRID_IMP_NETLOGIC_XLS616B 0x4000
164#define PRID_IMP_NETLOGIC_XLS608B 0x4a00
165#define PRID_IMP_NETLOGIC_XLS416B 0x4400
166#define PRID_IMP_NETLOGIC_XLS412B 0x4c00
167#define PRID_IMP_NETLOGIC_XLS408B 0x4e00
168#define PRID_IMP_NETLOGIC_XLS404B 0x4f00
169
170/*
145 * Definitions for 7:0 on legacy processors 171 * Definitions for 7:0 on legacy processors
146 */ 172 */
147 173
@@ -234,6 +260,7 @@ enum cpu_type_enum {
234 */ 260 */
235 CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, 261 CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
236 CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, 262 CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
263 CPU_XLR,
237 264
238 CPU_LAST 265 CPU_LAST
239}; 266};
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 655f849bd08d..7aa37ddfca4b 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,7 +5,9 @@
5#include <asm/cache.h> 5#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h> 6#include <asm-generic/dma-coherent.h>
7 7
8#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
8#include <dma-coherence.h> 9#include <dma-coherence.h>
10#endif
9 11
10extern struct dma_map_ops *mips_dma_map_ops; 12extern struct dma_map_ops *mips_dma_map_ops;
11 13
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index f5e856015329..c565b7c3f0b5 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
70static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 70static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
71 unsigned long addr, pte_t *ptep) 71 unsigned long addr, pte_t *ptep)
72{ 72{
73 flush_tlb_mm(vma->vm_mm);
73} 74}
74 75
75static inline int huge_pte_none(pte_t pte) 76static inline int huge_pte_none(pte_t pte)
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 7622ccf75076..1881b316ca45 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,16 +20,18 @@
20#define WORD_INSN ".word" 20#define WORD_INSN ".word"
21#endif 21#endif
22 22
23#define JUMP_LABEL(key, label) \ 23static __always_inline bool arch_static_branch(struct jump_label_key *key)
24 do { \ 24{
25 asm goto("1:\tnop\n\t" \ 25 asm goto("1:\tnop\n\t"
26 "nop\n\t" \ 26 "nop\n\t"
27 ".pushsection __jump_table, \"a\"\n\t" \ 27 ".pushsection __jump_table, \"aw\"\n\t"
28 WORD_INSN " 1b, %l[" #label "], %0\n\t" \ 28 WORD_INSN " 1b, %l[l_yes], %0\n\t"
29 ".popsection\n\t" \ 29 ".popsection\n\t"
30 : : "i" (key) : : label); \ 30 : : "i" (key) : : l_yes);
31 } while (0) 31 return false;
32 32l_yes:
33 return true;
34}
33 35
34#endif /* __KERNEL__ */ 36#endif /* __KERNEL__ */
35 37
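Note on the jump_label.h hunk above: arch_static_branch() emits a two-nop fall-through to "return false" and records the site in __jump_table. A hedged sketch of how the generic jump-label core of this era consumes that record (field names are illustrative; the layout mirrors the three words written into __jump_table):

/*   code   = address of the "1:" nop pair        (what gets patched)
 *   target = address of the l_yes label          (branch destination)
 *   key    = the struct jump_label_key pointer   (%0, the "i" operand)
 *
 * While the key is disabled the nops fall through and the function
 * returns false; enabling the key patches the nop into a branch to
 * l_yes, so callers see true without a runtime load or compare.
 */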
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index a6976619160a..f260ebed713b 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -161,6 +161,45 @@ static inline int alchemy_get_cputype(void)
161 return ALCHEMY_CPU_UNKNOWN; 161 return ALCHEMY_CPU_UNKNOWN;
162} 162}
163 163
164/* return number of uarts on a given cputype */
165static inline int alchemy_get_uarts(int type)
166{
167 switch (type) {
168 case ALCHEMY_CPU_AU1000:
169 return 4;
170 case ALCHEMY_CPU_AU1500:
171 case ALCHEMY_CPU_AU1200:
172 return 2;
173 case ALCHEMY_CPU_AU1100:
174 case ALCHEMY_CPU_AU1550:
175 return 3;
176 }
177 return 0;
178}
179
180/* enable an UART block if it isn't already */
181static inline void alchemy_uart_enable(u32 uart_phys)
182{
183 void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys);
184
185 /* reset, enable clock, deassert reset */
186 if ((__raw_readl(addr + 0x100) & 3) != 3) {
187 __raw_writel(0, addr + 0x100);
188 wmb();
189 __raw_writel(1, addr + 0x100);
190 wmb();
191 }
192 __raw_writel(3, addr + 0x100);
193 wmb();
194}
195
196static inline void alchemy_uart_disable(u32 uart_phys)
197{
198 void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys);
199 __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */
200 wmb();
201}
202
164static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) 203static inline void alchemy_uart_putchar(u32 uart_phys, u8 c)
165{ 204{
166 void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); 205 void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys);
@@ -180,6 +219,20 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c)
180 wmb(); 219 wmb();
181} 220}
182 221
222/* return number of ethernet MACs on a given cputype */
223static inline int alchemy_get_macs(int type)
224{
225 switch (type) {
226 case ALCHEMY_CPU_AU1000:
227 case ALCHEMY_CPU_AU1500:
228 case ALCHEMY_CPU_AU1550:
229 return 2;
230 case ALCHEMY_CPU_AU1100:
231 return 1;
232 }
233 return 0;
234}
235
183/* arch/mips/au1000/common/clocks.c */ 236/* arch/mips/au1000/common/clocks.c */
184extern void set_au1x00_speed(unsigned int new_freq); 237extern void set_au1x00_speed(unsigned int new_freq);
185extern unsigned int get_au1x00_speed(void); 238extern unsigned int get_au1x00_speed(void);
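Note on the au1000.h helpers added above: alchemy_get_uarts(), alchemy_uart_enable()/alchemy_uart_disable() and alchemy_get_macs() let board and platform code key off the detected CPU type instead of per-SoC UARTx_PHYS_ADDR #ifdefs. A hedged usage sketch; alchemy_get_cputype() already exists in this header, and the pr_info() call plus the choice of UART0 (present on every listed variant) are for illustration only:

	int cpu = alchemy_get_cputype();

	pr_info("Alchemy: %d UART(s), %d Ethernet MAC(s)\n",
		alchemy_get_uarts(cpu), alchemy_get_macs(cpu));

	alchemy_uart_enable(AU1000_UART0_PHYS_ADDR);	/* clock on, reset released */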
@@ -630,38 +683,42 @@ enum soc_au1200_ints {
630 683
631/* 684/*
632 * Physical base addresses for integrated peripherals 685 * Physical base addresses for integrated peripherals
686 * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200
633 */ 687 */
634 688
689#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */
690#define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */
691#define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */
692#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */
693#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */
694#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */
695#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */
696#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */
697#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */
698#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */
699#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */
700#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */
701#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */
702#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */
703#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */
704#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */
705#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */
706#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */
707#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */
708#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */
709#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */
710#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */
711#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */
712#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */
713
714
635#ifdef CONFIG_SOC_AU1000 715#ifdef CONFIG_SOC_AU1000
636#define MEM_PHYS_ADDR 0x14000000 716#define MEM_PHYS_ADDR 0x14000000
637#define STATIC_MEM_PHYS_ADDR 0x14001000 717#define STATIC_MEM_PHYS_ADDR 0x14001000
638#define DMA0_PHYS_ADDR 0x14002000
639#define DMA1_PHYS_ADDR 0x14002100
640#define DMA2_PHYS_ADDR 0x14002200
641#define DMA3_PHYS_ADDR 0x14002300
642#define DMA4_PHYS_ADDR 0x14002400
643#define DMA5_PHYS_ADDR 0x14002500
644#define DMA6_PHYS_ADDR 0x14002600
645#define DMA7_PHYS_ADDR 0x14002700
646#define IC0_PHYS_ADDR 0x10400000
647#define IC1_PHYS_ADDR 0x11800000
648#define AC97_PHYS_ADDR 0x10000000
649#define USBH_PHYS_ADDR 0x10100000 718#define USBH_PHYS_ADDR 0x10100000
650#define USBD_PHYS_ADDR 0x10200000
651#define IRDA_PHYS_ADDR 0x10300000 719#define IRDA_PHYS_ADDR 0x10300000
652#define MAC0_PHYS_ADDR 0x10500000
653#define MAC1_PHYS_ADDR 0x10510000
654#define MACEN_PHYS_ADDR 0x10520000
655#define MACDMA0_PHYS_ADDR 0x14004000
656#define MACDMA1_PHYS_ADDR 0x14004200
657#define I2S_PHYS_ADDR 0x11000000
658#define UART0_PHYS_ADDR 0x11100000
659#define UART1_PHYS_ADDR 0x11200000
660#define UART2_PHYS_ADDR 0x11300000
661#define UART3_PHYS_ADDR 0x11400000
662#define SSI0_PHYS_ADDR 0x11600000 720#define SSI0_PHYS_ADDR 0x11600000
663#define SSI1_PHYS_ADDR 0x11680000 721#define SSI1_PHYS_ADDR 0x11680000
664#define SYS_PHYS_ADDR 0x11900000
665#define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 722#define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL
666#define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 723#define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL
667#define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 724#define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL
@@ -672,30 +729,8 @@ enum soc_au1200_ints {
672#ifdef CONFIG_SOC_AU1500 729#ifdef CONFIG_SOC_AU1500
673#define MEM_PHYS_ADDR 0x14000000 730#define MEM_PHYS_ADDR 0x14000000
674#define STATIC_MEM_PHYS_ADDR 0x14001000 731#define STATIC_MEM_PHYS_ADDR 0x14001000
675#define DMA0_PHYS_ADDR 0x14002000
676#define DMA1_PHYS_ADDR 0x14002100
677#define DMA2_PHYS_ADDR 0x14002200
678#define DMA3_PHYS_ADDR 0x14002300
679#define DMA4_PHYS_ADDR 0x14002400
680#define DMA5_PHYS_ADDR 0x14002500
681#define DMA6_PHYS_ADDR 0x14002600
682#define DMA7_PHYS_ADDR 0x14002700
683#define IC0_PHYS_ADDR 0x10400000
684#define IC1_PHYS_ADDR 0x11800000
685#define AC97_PHYS_ADDR 0x10000000
686#define USBH_PHYS_ADDR 0x10100000 732#define USBH_PHYS_ADDR 0x10100000
687#define USBD_PHYS_ADDR 0x10200000
688#define PCI_PHYS_ADDR 0x14005000 733#define PCI_PHYS_ADDR 0x14005000
689#define MAC0_PHYS_ADDR 0x11500000
690#define MAC1_PHYS_ADDR 0x11510000
691#define MACEN_PHYS_ADDR 0x11520000
692#define MACDMA0_PHYS_ADDR 0x14004000
693#define MACDMA1_PHYS_ADDR 0x14004200
694#define I2S_PHYS_ADDR 0x11000000
695#define UART0_PHYS_ADDR 0x11100000
696#define UART3_PHYS_ADDR 0x11400000
697#define GPIO2_PHYS_ADDR 0x11700000
698#define SYS_PHYS_ADDR 0x11900000
699#define PCI_MEM_PHYS_ADDR 0x400000000ULL 734#define PCI_MEM_PHYS_ADDR 0x400000000ULL
700#define PCI_IO_PHYS_ADDR 0x500000000ULL 735#define PCI_IO_PHYS_ADDR 0x500000000ULL
701#define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL 736#define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL
@@ -710,34 +745,10 @@ enum soc_au1200_ints {
710#ifdef CONFIG_SOC_AU1100 745#ifdef CONFIG_SOC_AU1100
711#define MEM_PHYS_ADDR 0x14000000 746#define MEM_PHYS_ADDR 0x14000000
712#define STATIC_MEM_PHYS_ADDR 0x14001000 747#define STATIC_MEM_PHYS_ADDR 0x14001000
713#define DMA0_PHYS_ADDR 0x14002000
714#define DMA1_PHYS_ADDR 0x14002100
715#define DMA2_PHYS_ADDR 0x14002200
716#define DMA3_PHYS_ADDR 0x14002300
717#define DMA4_PHYS_ADDR 0x14002400
718#define DMA5_PHYS_ADDR 0x14002500
719#define DMA6_PHYS_ADDR 0x14002600
720#define DMA7_PHYS_ADDR 0x14002700
721#define IC0_PHYS_ADDR 0x10400000
722#define SD0_PHYS_ADDR 0x10600000
723#define SD1_PHYS_ADDR 0x10680000
724#define IC1_PHYS_ADDR 0x11800000
725#define AC97_PHYS_ADDR 0x10000000
726#define USBH_PHYS_ADDR 0x10100000 748#define USBH_PHYS_ADDR 0x10100000
727#define USBD_PHYS_ADDR 0x10200000
728#define IRDA_PHYS_ADDR 0x10300000 749#define IRDA_PHYS_ADDR 0x10300000
729#define MAC0_PHYS_ADDR 0x10500000
730#define MACEN_PHYS_ADDR 0x10520000
731#define MACDMA0_PHYS_ADDR 0x14004000
732#define MACDMA1_PHYS_ADDR 0x14004200
733#define I2S_PHYS_ADDR 0x11000000
734#define UART0_PHYS_ADDR 0x11100000
735#define UART1_PHYS_ADDR 0x11200000
736#define UART3_PHYS_ADDR 0x11400000
737#define SSI0_PHYS_ADDR 0x11600000 750#define SSI0_PHYS_ADDR 0x11600000
738#define SSI1_PHYS_ADDR 0x11680000 751#define SSI1_PHYS_ADDR 0x11680000
739#define GPIO2_PHYS_ADDR 0x11700000
740#define SYS_PHYS_ADDR 0x11900000
741#define LCD_PHYS_ADDR 0x15000000 752#define LCD_PHYS_ADDR 0x15000000
742#define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 753#define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL
743#define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 754#define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL
@@ -749,22 +760,8 @@ enum soc_au1200_ints {
749#ifdef CONFIG_SOC_AU1550 760#ifdef CONFIG_SOC_AU1550
750#define MEM_PHYS_ADDR 0x14000000 761#define MEM_PHYS_ADDR 0x14000000
751#define STATIC_MEM_PHYS_ADDR 0x14001000 762#define STATIC_MEM_PHYS_ADDR 0x14001000
752#define IC0_PHYS_ADDR 0x10400000
753#define IC1_PHYS_ADDR 0x11800000
754#define USBH_PHYS_ADDR 0x14020000 763#define USBH_PHYS_ADDR 0x14020000
755#define USBD_PHYS_ADDR 0x10200000
756#define PCI_PHYS_ADDR 0x14005000 764#define PCI_PHYS_ADDR 0x14005000
757#define MAC0_PHYS_ADDR 0x10500000
758#define MAC1_PHYS_ADDR 0x10510000
759#define MACEN_PHYS_ADDR 0x10520000
760#define MACDMA0_PHYS_ADDR 0x14004000
761#define MACDMA1_PHYS_ADDR 0x14004200
762#define UART0_PHYS_ADDR 0x11100000
763#define UART1_PHYS_ADDR 0x11200000
764#define UART3_PHYS_ADDR 0x11400000
765#define GPIO2_PHYS_ADDR 0x11700000
766#define SYS_PHYS_ADDR 0x11900000
767#define DDMA_PHYS_ADDR 0x14002000
768#define PE_PHYS_ADDR 0x14008000 765#define PE_PHYS_ADDR 0x14008000
769#define PSC0_PHYS_ADDR 0x11A00000 766#define PSC0_PHYS_ADDR 0x11A00000
770#define PSC1_PHYS_ADDR 0x11B00000 767#define PSC1_PHYS_ADDR 0x11B00000
@@ -786,19 +783,10 @@ enum soc_au1200_ints {
786#define STATIC_MEM_PHYS_ADDR 0x14001000 783#define STATIC_MEM_PHYS_ADDR 0x14001000
787#define AES_PHYS_ADDR 0x10300000 784#define AES_PHYS_ADDR 0x10300000
788#define CIM_PHYS_ADDR 0x14004000 785#define CIM_PHYS_ADDR 0x14004000
789#define IC0_PHYS_ADDR 0x10400000
790#define IC1_PHYS_ADDR 0x11800000
791#define USBM_PHYS_ADDR 0x14020000 786#define USBM_PHYS_ADDR 0x14020000
792#define USBH_PHYS_ADDR 0x14020100 787#define USBH_PHYS_ADDR 0x14020100
793#define UART0_PHYS_ADDR 0x11100000
794#define UART1_PHYS_ADDR 0x11200000
795#define GPIO2_PHYS_ADDR 0x11700000
796#define SYS_PHYS_ADDR 0x11900000
797#define DDMA_PHYS_ADDR 0x14002000
798#define PSC0_PHYS_ADDR 0x11A00000 788#define PSC0_PHYS_ADDR 0x11A00000
799#define PSC1_PHYS_ADDR 0x11B00000 789#define PSC1_PHYS_ADDR 0x11B00000
800#define SD0_PHYS_ADDR 0x10600000
801#define SD1_PHYS_ADDR 0x10680000
802#define LCD_PHYS_ADDR 0x15000000 790#define LCD_PHYS_ADDR 0x15000000
803#define SWCNT_PHYS_ADDR 0x1110010C 791#define SWCNT_PHYS_ADDR 0x1110010C
804#define MAEFE_PHYS_ADDR 0x14012000 792#define MAEFE_PHYS_ADDR 0x14012000
@@ -835,183 +823,43 @@ enum soc_au1200_ints {
835#endif 823#endif
836 824
837 825
838/* Interrupt Controller register offsets */
839#define IC_CFG0RD 0x40
840#define IC_CFG0SET 0x40
841#define IC_CFG0CLR 0x44
842#define IC_CFG1RD 0x48
843#define IC_CFG1SET 0x48
844#define IC_CFG1CLR 0x4C
845#define IC_CFG2RD 0x50
846#define IC_CFG2SET 0x50
847#define IC_CFG2CLR 0x54
848#define IC_REQ0INT 0x54
849#define IC_SRCRD 0x58
850#define IC_SRCSET 0x58
851#define IC_SRCCLR 0x5C
852#define IC_REQ1INT 0x5C
853#define IC_ASSIGNRD 0x60
854#define IC_ASSIGNSET 0x60
855#define IC_ASSIGNCLR 0x64
856#define IC_WAKERD 0x68
857#define IC_WAKESET 0x68
858#define IC_WAKECLR 0x6C
859#define IC_MASKRD 0x70
860#define IC_MASKSET 0x70
861#define IC_MASKCLR 0x74
862#define IC_RISINGRD 0x78
863#define IC_RISINGCLR 0x78
864#define IC_FALLINGRD 0x7C
865#define IC_FALLINGCLR 0x7C
866#define IC_TESTBIT 0x80
867
868
869/* Interrupt Controller 0 */
870#define IC0_CFG0RD 0xB0400040
871#define IC0_CFG0SET 0xB0400040
872#define IC0_CFG0CLR 0xB0400044
873
874#define IC0_CFG1RD 0xB0400048
875#define IC0_CFG1SET 0xB0400048
876#define IC0_CFG1CLR 0xB040004C
877
878#define IC0_CFG2RD 0xB0400050
879#define IC0_CFG2SET 0xB0400050
880#define IC0_CFG2CLR 0xB0400054
881
882#define IC0_REQ0INT 0xB0400054
883#define IC0_SRCRD 0xB0400058
884#define IC0_SRCSET 0xB0400058
885#define IC0_SRCCLR 0xB040005C
886#define IC0_REQ1INT 0xB040005C
887
888#define IC0_ASSIGNRD 0xB0400060
889#define IC0_ASSIGNSET 0xB0400060
890#define IC0_ASSIGNCLR 0xB0400064
891
892#define IC0_WAKERD 0xB0400068
893#define IC0_WAKESET 0xB0400068
894#define IC0_WAKECLR 0xB040006C
895
896#define IC0_MASKRD 0xB0400070
897#define IC0_MASKSET 0xB0400070
898#define IC0_MASKCLR 0xB0400074
899
900#define IC0_RISINGRD 0xB0400078
901#define IC0_RISINGCLR 0xB0400078
902#define IC0_FALLINGRD 0xB040007C
903#define IC0_FALLINGCLR 0xB040007C
904
905#define IC0_TESTBIT 0xB0400080
906
907/* Interrupt Controller 1 */
908#define IC1_CFG0RD 0xB1800040
909#define IC1_CFG0SET 0xB1800040
910#define IC1_CFG0CLR 0xB1800044
911
912#define IC1_CFG1RD 0xB1800048
913#define IC1_CFG1SET 0xB1800048
914#define IC1_CFG1CLR 0xB180004C
915
916#define IC1_CFG2RD 0xB1800050
917#define IC1_CFG2SET 0xB1800050
918#define IC1_CFG2CLR 0xB1800054
919
920#define IC1_REQ0INT 0xB1800054
921#define IC1_SRCRD 0xB1800058
922#define IC1_SRCSET 0xB1800058
923#define IC1_SRCCLR 0xB180005C
924#define IC1_REQ1INT 0xB180005C
925
926#define IC1_ASSIGNRD 0xB1800060
927#define IC1_ASSIGNSET 0xB1800060
928#define IC1_ASSIGNCLR 0xB1800064
929
930#define IC1_WAKERD 0xB1800068
931#define IC1_WAKESET 0xB1800068
932#define IC1_WAKECLR 0xB180006C
933
934#define IC1_MASKRD 0xB1800070
935#define IC1_MASKSET 0xB1800070
936#define IC1_MASKCLR 0xB1800074
937
938#define IC1_RISINGRD 0xB1800078
939#define IC1_RISINGCLR 0xB1800078
940#define IC1_FALLINGRD 0xB180007C
941#define IC1_FALLINGCLR 0xB180007C
942
943#define IC1_TESTBIT 0xB1800080
944 826
945 827
946/* Au1000 */ 828/* Au1000 */
947#ifdef CONFIG_SOC_AU1000 829#ifdef CONFIG_SOC_AU1000
948 830
949#define UART0_ADDR 0xB1100000
950#define UART3_ADDR 0xB1400000
951
952#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 831#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
953#define USB_HOST_CONFIG 0xB017FFFC 832#define USB_HOST_CONFIG 0xB017FFFC
954#define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT 833#define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT
955
956#define AU1000_ETH0_BASE 0xB0500000
957#define AU1000_ETH1_BASE 0xB0510000
958#define AU1000_MAC0_ENABLE 0xB0520000
959#define AU1000_MAC1_ENABLE 0xB0520004
960#define NUM_ETH_INTERFACES 2
961#endif /* CONFIG_SOC_AU1000 */ 834#endif /* CONFIG_SOC_AU1000 */
962 835
963/* Au1500 */ 836/* Au1500 */
964#ifdef CONFIG_SOC_AU1500 837#ifdef CONFIG_SOC_AU1500
965 838
966#define UART0_ADDR 0xB1100000
967#define UART3_ADDR 0xB1400000
968
969#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 839#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
970#define USB_HOST_CONFIG 0xB017fffc 840#define USB_HOST_CONFIG 0xB017fffc
971#define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT 841#define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT
972
973#define AU1500_ETH0_BASE 0xB1500000
974#define AU1500_ETH1_BASE 0xB1510000
975#define AU1500_MAC0_ENABLE 0xB1520000
976#define AU1500_MAC1_ENABLE 0xB1520004
977#define NUM_ETH_INTERFACES 2
978#endif /* CONFIG_SOC_AU1500 */ 842#endif /* CONFIG_SOC_AU1500 */
979 843
980/* Au1100 */ 844/* Au1100 */
981#ifdef CONFIG_SOC_AU1100 845#ifdef CONFIG_SOC_AU1100
982 846
983#define UART0_ADDR 0xB1100000
984#define UART3_ADDR 0xB1400000
985
986#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 847#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
987#define USB_HOST_CONFIG 0xB017FFFC 848#define USB_HOST_CONFIG 0xB017FFFC
988#define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT 849#define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT
989
990#define AU1100_ETH0_BASE 0xB0500000
991#define AU1100_MAC0_ENABLE 0xB0520000
992#define NUM_ETH_INTERFACES 1
993#endif /* CONFIG_SOC_AU1100 */ 850#endif /* CONFIG_SOC_AU1100 */
994 851
995#ifdef CONFIG_SOC_AU1550 852#ifdef CONFIG_SOC_AU1550
996#define UART0_ADDR 0xB1100000
997 853
998#define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ 854#define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */
999#define USB_OHCI_LEN 0x00060000 855#define USB_OHCI_LEN 0x00060000
1000#define USB_HOST_CONFIG 0xB4027ffc 856#define USB_HOST_CONFIG 0xB4027ffc
1001#define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT 857#define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT
1002
1003#define AU1550_ETH0_BASE 0xB0500000
1004#define AU1550_ETH1_BASE 0xB0510000
1005#define AU1550_MAC0_ENABLE 0xB0520000
1006#define AU1550_MAC1_ENABLE 0xB0520004
1007#define NUM_ETH_INTERFACES 2
1008#endif /* CONFIG_SOC_AU1550 */ 858#endif /* CONFIG_SOC_AU1550 */
1009 859
1010 860
1011#ifdef CONFIG_SOC_AU1200 861#ifdef CONFIG_SOC_AU1200
1012 862
1013#define UART0_ADDR 0xB1100000
1014
1015#define USB_UOC_BASE 0x14020020 863#define USB_UOC_BASE 0x14020020
1016#define USB_UOC_LEN 0x20 864#define USB_UOC_LEN 0x20
1017#define USB_OHCI_BASE 0x14020100 865#define USB_OHCI_BASE 0x14020100
@@ -1504,22 +1352,6 @@ enum soc_au1200_ints {
1504#define SYS_PINFUNC_S1B (1 << 2) 1352#define SYS_PINFUNC_S1B (1 << 2)
1505#endif 1353#endif
1506 1354
1507#define SYS_TRIOUTRD 0xB1900100
1508#define SYS_TRIOUTCLR 0xB1900100
1509#define SYS_OUTPUTRD 0xB1900108
1510#define SYS_OUTPUTSET 0xB1900108
1511#define SYS_OUTPUTCLR 0xB190010C
1512#define SYS_PINSTATERD 0xB1900110
1513#define SYS_PININPUTEN 0xB1900110
1514
1515/* GPIO2, Au1500, Au1550 only */
1516#define GPIO2_BASE 0xB1700000
1517#define GPIO2_DIR (GPIO2_BASE + 0)
1518#define GPIO2_OUTPUT (GPIO2_BASE + 8)
1519#define GPIO2_PINSTATE (GPIO2_BASE + 0xC)
1520#define GPIO2_INTENABLE (GPIO2_BASE + 0x10)
1521#define GPIO2_ENABLE (GPIO2_BASE + 0x14)
1522
1523/* Power Management */ 1355/* Power Management */
1524#define SYS_SCRATCH0 0xB1900018 1356#define SYS_SCRATCH0 0xB1900018
1525#define SYS_SCRATCH1 0xB190001C 1357#define SYS_SCRATCH1 0xB190001C
@@ -1635,12 +1467,6 @@ enum soc_au1200_ints {
1635# define AC97C_RS (1 << 1) 1467# define AC97C_RS (1 << 1)
1636# define AC97C_CE (1 << 0) 1468# define AC97C_CE (1 << 0)
1637 1469
1638/* Secure Digital (SD) Controller */
1639#define SD0_XMIT_FIFO 0xB0600000
1640#define SD0_RECV_FIFO 0xB0600004
1641#define SD1_XMIT_FIFO 0xB0680000
1642#define SD1_RECV_FIFO 0xB0680004
1643
1644#if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) 1470#if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550)
1645/* Au1500 PCI Controller */ 1471/* Au1500 PCI Controller */
1646#define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ 1472#define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */
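The hunk above drops the absolute IC0_*/IC1_* register addresses and keeps only the IC_* offsets, so code is now expected to pair those offsets with the controller's physical base itself. A minimal sketch of that access pattern (illustrative only, not part of the patch; the helper name is made up and the 0x10400000 base is simply quoted from the removed IC0_PHYS_ADDR define):

static inline u32 ic0_read(unsigned long offs)	/* hypothetical helper, kernel context */
{
	void __iomem *base = (void __iomem *)KSEG1ADDR(0x10400000);

	return __raw_readl(base + offs);	/* e.g. offs == IC_MASKRD */
}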
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index c333b4e1cd44..59f5b55b2200 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -37,10 +37,6 @@
37 37
38#define NUM_AU1000_DMA_CHANNELS 8 38#define NUM_AU1000_DMA_CHANNELS 8
39 39
40/* DMA Channel Base Addresses */
41#define DMA_CHANNEL_BASE 0xB4002000
42#define DMA_CHANNEL_LEN 0x00000100
43
44/* DMA Channel Register Offsets */ 40/* DMA Channel Register Offsets */
45#define DMA_MODE_SET 0x00000000 41#define DMA_MODE_SET 0x00000000
46#define DMA_MODE_READ DMA_MODE_SET 42#define DMA_MODE_READ DMA_MODE_SET
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index c8a553a36ba4..2fdacfe85e23 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -37,14 +37,6 @@
37 37
38#ifndef _LANGUAGE_ASSEMBLY 38#ifndef _LANGUAGE_ASSEMBLY
39 39
40/*
41 * The DMA base addresses.
42 * The channels are every 256 bytes (0x0100) from the channel 0 base.
43 * Interrupt status/enable is bits 15:0 for channels 15 to zero.
44 */
45#define DDMA_GLOBAL_BASE 0xb4003000
46#define DDMA_CHANNEL_BASE 0xb4002000
47
48typedef volatile struct dbdma_global { 40typedef volatile struct dbdma_global {
49 u32 ddma_config; 41 u32 ddma_config;
50 u32 ddma_intstat; 42 u32 ddma_intstat;
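The comment removed here documented the DBDMA layout: channel register blocks sit every 0x100 bytes above the channel-0 base, which the dropped define placed at 0xb4002000. A small sketch of that address arithmetic (illustrative only, not part of the patch; the helper name is made up):

static inline void __iomem *dbdma_chan_base(unsigned int chan)
{
	/* base and 0x100 spacing quoted from the removed defines and comment */
	return (void __iomem *)(0xb4002000UL + chan * 0x100);
}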
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 62d2f136d941..1f41a522906d 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -24,6 +24,23 @@
24 24
25#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) 25#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off))
26 26
27/* GPIO1 registers within SYS_ area */
28#define SYS_TRIOUTRD 0x100
29#define SYS_TRIOUTCLR 0x100
30#define SYS_OUTPUTRD 0x108
31#define SYS_OUTPUTSET 0x108
32#define SYS_OUTPUTCLR 0x10C
33#define SYS_PINSTATERD 0x110
34#define SYS_PININPUTEN 0x110
35
36/* register offsets within GPIO2 block */
37#define GPIO2_DIR 0x00
38#define GPIO2_OUTPUT 0x08
39#define GPIO2_PINSTATE 0x0C
40#define GPIO2_INTENABLE 0x10
41#define GPIO2_ENABLE 0x14
42
43struct gpio;
27 44
28static inline int au1000_gpio1_to_irq(int gpio) 45static inline int au1000_gpio1_to_irq(int gpio)
29{ 46{
@@ -200,23 +217,26 @@ static inline int au1200_irq_to_gpio(int irq)
200 */ 217 */
201static inline void alchemy_gpio1_set_value(int gpio, int v) 218static inline void alchemy_gpio1_set_value(int gpio, int v)
202{ 219{
220 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
203 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); 221 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
204 unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; 222 unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR;
205 au_writel(mask, r); 223 __raw_writel(mask, base + r);
206 au_sync(); 224 wmb();
207} 225}
208 226
209static inline int alchemy_gpio1_get_value(int gpio) 227static inline int alchemy_gpio1_get_value(int gpio)
210{ 228{
229 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
211 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); 230 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
212 return au_readl(SYS_PINSTATERD) & mask; 231 return __raw_readl(base + SYS_PINSTATERD) & mask;
213} 232}
214 233
215static inline int alchemy_gpio1_direction_input(int gpio) 234static inline int alchemy_gpio1_direction_input(int gpio)
216{ 235{
236 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
217 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); 237 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
218 au_writel(mask, SYS_TRIOUTCLR); 238 __raw_writel(mask, base + SYS_TRIOUTCLR);
219 au_sync(); 239 wmb();
220 return 0; 240 return 0;
221} 241}
222 242
@@ -257,27 +277,31 @@ static inline int alchemy_gpio1_to_irq(int gpio)
257 */ 277 */
258static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) 278static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out)
259{ 279{
280 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
260 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); 281 unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE);
261 unsigned long d = au_readl(GPIO2_DIR); 282 unsigned long d = __raw_readl(base + GPIO2_DIR);
283
262 if (to_out) 284 if (to_out)
263 d |= mask; 285 d |= mask;
264 else 286 else
265 d &= ~mask; 287 d &= ~mask;
266 au_writel(d, GPIO2_DIR); 288 __raw_writel(d, base + GPIO2_DIR);
267 au_sync(); 289 wmb();
268} 290}
269 291
270static inline void alchemy_gpio2_set_value(int gpio, int v) 292static inline void alchemy_gpio2_set_value(int gpio, int v)
271{ 293{
294 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
272 unsigned long mask; 295 unsigned long mask;
273 mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); 296 mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE);
274 au_writel(mask, GPIO2_OUTPUT); 297 __raw_writel(mask, base + GPIO2_OUTPUT);
275 au_sync(); 298 wmb();
276} 299}
277 300
278static inline int alchemy_gpio2_get_value(int gpio) 301static inline int alchemy_gpio2_get_value(int gpio)
279{ 302{
280 return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); 303 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
304 return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE));
281} 305}
282 306
283static inline int alchemy_gpio2_direction_input(int gpio) 307static inline int alchemy_gpio2_direction_input(int gpio)
@@ -329,21 +353,23 @@ static inline int alchemy_gpio2_to_irq(int gpio)
329 */ 353 */
330static inline void alchemy_gpio1_input_enable(void) 354static inline void alchemy_gpio1_input_enable(void)
331{ 355{
332 au_writel(0, SYS_PININPUTEN); /* the write op is key */ 356 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
333 au_sync(); 357 __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */
358 wmb();
334} 359}
335 360
336/* GPIO2 shared interrupts and control */ 361/* GPIO2 shared interrupts and control */
337 362
338static inline void __alchemy_gpio2_mod_int(int gpio2, int en) 363static inline void __alchemy_gpio2_mod_int(int gpio2, int en)
339{ 364{
340 unsigned long r = au_readl(GPIO2_INTENABLE); 365 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
366 unsigned long r = __raw_readl(base + GPIO2_INTENABLE);
341 if (en) 367 if (en)
342 r |= 1 << gpio2; 368 r |= 1 << gpio2;
343 else 369 else
344 r &= ~(1 << gpio2); 370 r &= ~(1 << gpio2);
345 au_writel(r, GPIO2_INTENABLE); 371 __raw_writel(r, base + GPIO2_INTENABLE);
346 au_sync(); 372 wmb();
347} 373}
348 374
349/** 375/**
@@ -418,10 +444,11 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
418 */ 444 */
419static inline void alchemy_gpio2_enable(void) 445static inline void alchemy_gpio2_enable(void)
420{ 446{
421 au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ 447 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
422 au_sync(); 448 __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */
423 au_writel(1, GPIO2_ENABLE); /* clock enabled */ 449 wmb();
424 au_sync(); 450 __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */
451 wmb();
425} 452}
426 453
427/** 454/**
@@ -431,8 +458,9 @@ static inline void alchemy_gpio2_enable(void)
431 */ 458 */
432static inline void alchemy_gpio2_disable(void) 459static inline void alchemy_gpio2_disable(void)
433{ 460{
434 au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ 461 void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
435 au_sync(); 462 __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */
463 wmb();
436} 464}
437 465
438/**********************************************************************/ 466/**********************************************************************/
@@ -556,6 +584,16 @@ static inline void gpio_set_value(int gpio, int v)
556 alchemy_gpio_set_value(gpio, v); 584 alchemy_gpio_set_value(gpio, v);
557} 585}
558 586
587static inline int gpio_get_value_cansleep(unsigned gpio)
588{
589 return gpio_get_value(gpio);
590}
591
592static inline void gpio_set_value_cansleep(unsigned gpio, int value)
593{
594 gpio_set_value(gpio, value);
595}
596
559static inline int gpio_is_valid(int gpio) 597static inline int gpio_is_valid(int gpio)
560{ 598{
561 return alchemy_gpio_is_valid(gpio); 599 return alchemy_gpio_is_valid(gpio);
@@ -581,10 +619,50 @@ static inline int gpio_request(unsigned gpio, const char *label)
581 return 0; 619 return 0;
582} 620}
583 621
622static inline int gpio_request_one(unsigned gpio,
623 unsigned long flags, const char *label)
624{
625 return 0;
626}
627
628static inline int gpio_request_array(struct gpio *array, size_t num)
629{
630 return 0;
631}
632
584static inline void gpio_free(unsigned gpio) 633static inline void gpio_free(unsigned gpio)
585{ 634{
586} 635}
587 636
637static inline void gpio_free_array(struct gpio *array, size_t num)
638{
639}
640
641static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
642{
643 return -ENOSYS;
644}
645
646static inline int gpio_export(unsigned gpio, bool direction_may_change)
647{
648 return -ENOSYS;
649}
650
651static inline int gpio_export_link(struct device *dev, const char *name,
652 unsigned gpio)
653{
654 return -ENOSYS;
655}
656
657static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
658{
659 return -ENOSYS;
660}
661
662static inline void gpio_unexport(unsigned gpio)
663{
664}
665
588#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ 666#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */
589 667
590 668
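The added !CONFIG_ALCHEMY_GPIO_INDIRECT stubs let gpio_request_one()/gpio_request_array() always succeed, while the sysfs and debounce helpers report -ENOSYS, so callers should treat those as optional features. A hedged usage sketch (kernel context assumed; the pin number, the label and the GPIOF_OUT_INIT_LOW flag from <linux/gpio.h> are illustrative, not taken from this patch):

static int example_setup(void)			/* hypothetical caller */
{
	int err = gpio_request_one(12, GPIOF_OUT_INIT_LOW, "example-led");

	if (err)
		return err;			/* the stub above always returns 0 */

	if (gpio_set_debounce(12, 1000) == -ENOSYS) {
		/* debouncing is simply unavailable here; carry on */
	}

	gpio_set_value_cansleep(12, 1);		/* falls through to gpio_set_value() */
	return 0;
}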
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h
index 9759588ba3cf..184d5ecb5f51 100644
--- a/arch/mips/include/asm/mach-bcm47xx/nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h
@@ -39,8 +39,16 @@ extern int nvram_getenv(char *name, char *val, size_t val_len);
39 39
40static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) 40static inline void nvram_parse_macaddr(char *buf, u8 *macaddr)
41{ 41{
42 sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], 42 if (strchr(buf, ':'))
43 &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); 43 sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
44 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
45 &macaddr[5]);
46 else if (strchr(buf, '-'))
47 sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
48 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
49 &macaddr[5]);
50 else
51 printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
44} 52}
45 53
46#endif 54#endif
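With this change nvram_parse_macaddr() accepts both colon- and dash-separated MAC strings and warns on anything else. A standalone mirror of that behaviour, runnable in userspace (illustrative only; the kernel helper itself uses printk rather than stderr):

#include <stdio.h>
#include <string.h>

static void parse_macaddr(const char *buf, unsigned char *mac)
{
	if (strchr(buf, ':'))
		sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		       &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
	else if (strchr(buf, '-'))
		sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx",
		       &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
	else
		fprintf(stderr, "Can not parse mac address: %s\n", buf);
}

int main(void)
{
	unsigned char mac[6] = { 0 };

	parse_macaddr("00-11-22-33-44-55", mac);	/* dash form is now accepted */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}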
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
index 32978d32561a..ed72e6a26b73 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -88,7 +88,7 @@ struct bcm_tag {
88 char kernel_crc[CRC_LEN]; 88 char kernel_crc[CRC_LEN];
89 /* 228-235: Unused at present */ 89 /* 228-235: Unused at present */
90 char reserved1[8]; 90 char reserved1[8];
91 /* 236-239: CRC32 of header excluding tagVersion */ 91 /* 236-239: CRC32 of header excluding last 20 bytes */
92 char header_crc[CRC_LEN]; 92 char header_crc[CRC_LEN];
93 /* 240-255: Unused at present */ 93 /* 240-255: Unused at present */
94 char reserved2[16]; 94 char reserved2[16];
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index 0b2b5eb22e9b..dedef7d2b01f 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -63,6 +63,11 @@
63 # CN30XX Disable instruction prefetching 63 # CN30XX Disable instruction prefetching
64 or v0, v0, 0x2000 64 or v0, v0, 0x2000
65skip: 65skip:
66 # First clear off CvmCtl[IPPCI] bit and move the performance
67 # counters interrupt to IRQ 6
68 li v1, ~(7 << 7)
69 and v0, v0, v1
70 ori v0, v0, (6 << 7)
66 # Write the cavium control register 71 # Write the cavium control register
67 dmtc0 v0, CP0_CVMCTL_REG 72 dmtc0 v0, CP0_CVMCTL_REG
68 sync 73 sync
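The five new assembly lines clear the 3-bit field at CvmCtl bits 9:7 (taking the CvmCtl[IPPCI] bit with it) and then write 6 into that field, routing the performance-counter interrupt to IRQ 6. The same bit arithmetic in C, for clarity (illustrative only; the helper name is made up):

static inline uint64_t cvmctl_route_perf_to_irq6(uint64_t cvmctl)
{
	cvmctl &= ~(7ULL << 7);		/* li v1, ~(7 << 7); and v0, v0, v1 */
	cvmctl |= 6ULL << 7;		/* ori v0, v0, (6 << 7) */
	return cvmctl;
}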
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
new file mode 100644
index 000000000000..ce2f02929d22
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -0,0 +1,63 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8#ifndef _LANTIQ_H__
9#define _LANTIQ_H__
10
11#include <linux/irq.h>
12
13/* generic reg access functions */
14#define ltq_r32(reg) __raw_readl(reg)
15#define ltq_w32(val, reg) __raw_writel(val, reg)
16#define ltq_w32_mask(clear, set, reg) \
17 ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg)
18#define ltq_r8(reg) __raw_readb(reg)
19#define ltq_w8(val, reg) __raw_writeb(val, reg)
20
21/* register access macros for EBU and CGU */
22#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
23#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
24#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
25#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
26
27extern __iomem void *ltq_ebu_membase;
28extern __iomem void *ltq_cgu_membase;
29
30extern unsigned int ltq_get_cpu_ver(void);
31extern unsigned int ltq_get_soc_type(void);
32
33/* clock speeds */
34#define CLOCK_60M 60000000
35#define CLOCK_83M 83333333
36#define CLOCK_111M 111111111
37#define CLOCK_133M 133333333
38#define CLOCK_167M 166666667
39#define CLOCK_200M 200000000
40#define CLOCK_266M 266666666
41#define CLOCK_333M 333333333
42#define CLOCK_400M 400000000
43
44/* spinlock all ebu i/o */
45extern spinlock_t ebu_lock;
46
47/* some irq helpers */
48extern void ltq_disable_irq(struct irq_data *data);
49extern void ltq_mask_and_ack_irq(struct irq_data *data);
50extern void ltq_enable_irq(struct irq_data *data);
51
52/* find out what caused the last cpu reset */
53extern int ltq_reset_cause(void);
54#define LTQ_RST_CAUSE_WDTRST 0x20
55
56#define IOPORT_RESOURCE_START 0x10000000
57#define IOPORT_RESOURCE_END 0xffffffff
58#define IOMEM_RESOURCE_START 0x10000000
59#define IOMEM_RESOURCE_END 0xffffffff
60#define LTQ_FLASH_START 0x10000000
61#define LTQ_FLASH_MAX 0x04000000
62
63#endif
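ltq_w32_mask() above is a read-modify-write helper: it reads the register, clears the bits in 'clear', ORs in 'set' and writes the result back. A hedged example of a caller using it together with the EBU definitions that appear later in this patch (the helper name is made up, and the assumption that clearing EBU_WRDIS re-enables external-bus writes follows only from the bit's name):

static void ebu_allow_writes(void)		/* hypothetical helper, kernel context */
{
	unsigned long flags;

	spin_lock_irqsave(&ebu_lock, flags);	/* all EBU i/o is serialised by this lock */
	ltq_w32_mask(EBU_WRDIS, 0, ltq_ebu_membase + LTQ_EBU_BUSCON1);
	spin_unlock_irqrestore(&ebu_lock, flags);
}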
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
new file mode 100644
index 000000000000..a305f1d0259e
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LANTIQ_PLATFORM_H__
10#define _LANTIQ_PLATFORM_H__
11
12#include <linux/mtd/partitions.h>
13#include <linux/socket.h>
14
15/* struct used to pass info to the pci core */
16enum {
17 PCI_CLOCK_INT = 0,
18 PCI_CLOCK_EXT
19};
20
21#define PCI_EXIN0 0x0001
22#define PCI_EXIN1 0x0002
23#define PCI_EXIN2 0x0004
24#define PCI_EXIN3 0x0008
25#define PCI_EXIN4 0x0010
26#define PCI_EXIN5 0x0020
27#define PCI_EXIN_MAX 6
28
29#define PCI_GNT1 0x0040
30#define PCI_GNT2 0x0080
31#define PCI_GNT3 0x0100
32#define PCI_GNT4 0x0200
33
34#define PCI_REQ1 0x0400
35#define PCI_REQ2 0x0800
36#define PCI_REQ3 0x1000
37#define PCI_REQ4 0x2000
38#define PCI_REQ_SHIFT 10
39#define PCI_REQ_MASK 0xf
40
41struct ltq_pci_data {
42 int clock;
43 int gpio;
44 int irq[16];
45};
46
47/* struct used to pass info to network drivers */
48struct ltq_eth_data {
49 struct sockaddr mac;
50 int mii_mode;
51};
52
53#endif
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h
new file mode 100644
index 000000000000..01b08ef368d1
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/war.h
@@ -0,0 +1,24 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 */
7#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H
8#define __ASM_MIPS_MACH_LANTIQ_WAR_H
9
10#define R4600_V1_INDEX_ICACHEOP_WAR 0
11#define R4600_V1_HIT_CACHEOP_WAR 0
12#define R4600_V2_HIT_CACHEOP_WAR 0
13#define R5432_CP0_INTERRUPT_WAR 0
14#define BCM1250_M3_WAR 0
15#define SIBYTE_1956_WAR 0
16#define MIPS4K_ICACHE_REFILL_WAR 0
17#define MIPS_CACHE_SYNC_WAR 0
18#define TX49XX_ICACHE_INDEX_INV_WAR 0
19#define RM9000_CDEX_SMP_WAR 0
20#define ICACHE_REFILLS_WORKAROUND_WAR 0
21#define R10000_LLSC_WAR 0
22#define MIPS34K_MISSED_ITLB_WAR 0
23
24#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h
new file mode 100644
index 000000000000..a1471d2dd0d2
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h
@@ -0,0 +1,18 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef __LANTIQ_IRQ_H
10#define __LANTIQ_IRQ_H
11
12#include <lantiq_irq.h>
13
14#define NR_IRQS 256
15
16#include_next <irq.h>
17
18#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
new file mode 100644
index 000000000000..b4465a888e20
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -0,0 +1,66 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LANTIQ_XWAY_IRQ_H__
10#define _LANTIQ_XWAY_IRQ_H__
11
12#define INT_NUM_IRQ0 8
13#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
14#define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32)
15#define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64)
16#define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96)
17#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128)
18#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
19
20#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8))
21#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1)
22#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2)
23
24#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0
25#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2)
26#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3)
27
28#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15)
29#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14)
30#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16)
31
32#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
33#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23)
34
35#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23)
36#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22)
37#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23)
38
39#define MIPS_CPU_TIMER_IRQ 7
40
41#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
42#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1)
43#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2)
44#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3)
45#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4)
46#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5)
47#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6)
48#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7)
49#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8)
50#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9)
51#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10)
52#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11)
53#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25)
54#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26)
55#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27)
56#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28)
57#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29)
58#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30)
59#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16)
60#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21)
61
62#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24)
63
64#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14)
65
66#endif
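As a quick worked example of the numbering above: LTQ_ASC_TIR(1) expands to INT_NUM_IM3_IRL0 + 1 * 8 = (8 + 96) + 8 = 112, so the TX interrupt of the second serial port lands on Linux IRQ 112 under this scheme.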
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
new file mode 100644
index 000000000000..8a3c6be669d2
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -0,0 +1,141 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_XWAY_H__
10#define _LTQ_XWAY_H__
11
12#ifdef CONFIG_SOC_TYPE_XWAY
13
14#include <lantiq.h>
15
16/* Chip IDs */
17#define SOC_ID_DANUBE1 0x129
18#define SOC_ID_DANUBE2 0x12B
19#define SOC_ID_TWINPASS 0x12D
20#define SOC_ID_AMAZON_SE 0x152
21#define SOC_ID_ARX188 0x16C
22#define SOC_ID_ARX168 0x16D
23#define SOC_ID_ARX182 0x16F
24
25/* SoC Types */
26#define SOC_TYPE_DANUBE 0x01
27#define SOC_TYPE_TWINPASS 0x02
28#define SOC_TYPE_AR9 0x03
29#define SOC_TYPE_VR9 0x04
30#define SOC_TYPE_AMAZON_SE 0x05
31
32/* ASC0/1 - serial port */
33#define LTQ_ASC0_BASE_ADDR 0x1E100400
34#define LTQ_ASC1_BASE_ADDR 0x1E100C00
35#define LTQ_ASC_SIZE 0x400
36
37/* RCU - reset control unit */
38#define LTQ_RCU_BASE_ADDR 0x1F203000
39#define LTQ_RCU_SIZE 0x1000
40
41/* GPTU - general purpose timer unit */
42#define LTQ_GPTU_BASE_ADDR 0x18000300
43#define LTQ_GPTU_SIZE 0x100
44
45/* EBU - external bus unit */
46#define LTQ_EBU_GPIO_START 0x14000000
47#define LTQ_EBU_GPIO_SIZE 0x1000
48
49#define LTQ_EBU_BASE_ADDR 0x1E105300
50#define LTQ_EBU_SIZE 0x100
51
52#define LTQ_EBU_BUSCON0 0x0060
53#define LTQ_EBU_PCC_CON 0x0090
54#define LTQ_EBU_PCC_IEN 0x00A4
55#define LTQ_EBU_PCC_ISTAT 0x00A0
56#define LTQ_EBU_BUSCON1 0x0064
57#define LTQ_EBU_ADDRSEL1 0x0024
58#define EBU_WRDIS 0x80000000
59
60/* CGU - clock generation unit */
61#define LTQ_CGU_BASE_ADDR 0x1F103000
62#define LTQ_CGU_SIZE 0x1000
63
64/* ICU - interrupt control unit */
65#define LTQ_ICU_BASE_ADDR 0x1F880200
66#define LTQ_ICU_SIZE 0x100
67
68/* EIU - external interrupt unit */
69#define LTQ_EIU_BASE_ADDR 0x1F101000
70#define LTQ_EIU_SIZE 0x1000
71
72/* PMU - power management unit */
73#define LTQ_PMU_BASE_ADDR 0x1F102000
74#define LTQ_PMU_SIZE 0x1000
75
76#define PMU_DMA 0x0020
77#define PMU_USB 0x8041
78#define PMU_LED 0x0800
79#define PMU_GPT 0x1000
80#define PMU_PPE 0x2000
81#define PMU_FPI 0x4000
82#define PMU_SWITCH 0x10000000
83
84/* ETOP - ethernet */
85#define LTQ_ETOP_BASE_ADDR 0x1E180000
86#define LTQ_ETOP_SIZE 0x40000
87
88/* DMA */
89#define LTQ_DMA_BASE_ADDR 0x1E104100
90#define LTQ_DMA_SIZE 0x800
91
92/* PCI */
93#define PCI_CR_BASE_ADDR 0x1E105400
94#define PCI_CR_SIZE 0x400
95
96/* WDT */
97#define LTQ_WDT_BASE_ADDR 0x1F8803F0
98#define LTQ_WDT_SIZE 0x10
99
100/* STP - serial to parallel conversion unit */
101#define LTQ_STP_BASE_ADDR 0x1E100BB0
102#define LTQ_STP_SIZE 0x40
103
104/* GPIO */
105#define LTQ_GPIO0_BASE_ADDR 0x1E100B10
106#define LTQ_GPIO1_BASE_ADDR 0x1E100B40
107#define LTQ_GPIO2_BASE_ADDR 0x1E100B70
108#define LTQ_GPIO_SIZE 0x30
109
110/* SSC */
111#define LTQ_SSC_BASE_ADDR 0x1e100800
112#define LTQ_SSC_SIZE 0x100
113
114/* MEI - dsl core */
115#define LTQ_MEI_BASE_ADDR 0x1E116000
116
117/* DEU - data encryption unit */
118#define LTQ_DEU_BASE_ADDR 0x1E103100
119
120/* MPS - multi processor unit (voice) */
121#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
122#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
123
124/* request a non-gpio and set the PIO config */
125extern int ltq_gpio_request(unsigned int pin, unsigned int alt0,
126 unsigned int alt1, unsigned int dir, const char *name);
127extern void ltq_pmu_enable(unsigned int module);
128extern void ltq_pmu_disable(unsigned int module);
129
130static inline int ltq_is_ar9(void)
131{
132 return (ltq_get_soc_type() == SOC_TYPE_AR9);
133}
134
135static inline int ltq_is_vr9(void)
136{
137 return (ltq_get_soc_type() == SOC_TYPE_VR9);
138}
139
140#endif /* CONFIG_SOC_TYPE_XWAY */
141#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
new file mode 100644
index 000000000000..872943a4b90e
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -0,0 +1,60 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
16 */
17
18#ifndef LTQ_DMA_H__
19#define LTQ_DMA_H__
20
21#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */
22#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */
23
24#define LTQ_DMA_OWN BIT(31) /* owner bit */
25#define LTQ_DMA_C BIT(30) /* complete bit */
26#define LTQ_DMA_SOP BIT(29) /* start of packet */
27#define LTQ_DMA_EOP BIT(28) /* end of packet */
28#define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */
29#define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */
30#define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */
31
32struct ltq_dma_desc {
33 u32 ctl;
34 u32 addr;
35};
36
37struct ltq_dma_channel {
38 int nr; /* the channel number */
39 int irq; /* the mapped irq */
40 int desc; /* the current descriptor */
41 struct ltq_dma_desc *desc_base; /* the descriptor base */
42 int phys; /* physical addr */
43};
44
45enum {
46 DMA_PORT_ETOP = 0,
47 DMA_PORT_DEU,
48};
49
50extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch);
51extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch);
52extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch);
53extern void ltq_dma_open(struct ltq_dma_channel *ch);
54extern void ltq_dma_close(struct ltq_dma_channel *ch);
55extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch);
56extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch);
57extern void ltq_dma_free(struct ltq_dma_channel *ch);
58extern void ltq_dma_init_port(int p);
59
60#endif
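A descriptor's ctl word combines the ownership/SOP/EOP flags, the byte offset and the length field defined above. A minimal sketch of filling a TX descriptor with these bits (illustrative only, not the actual driver code; the function name and the zero byte offset are assumptions):

static void ltq_dma_tx_fill(struct ltq_dma_channel *ch, u32 buf_phys, int len)
{
	struct ltq_dma_desc *desc = &ch->desc_base[ch->desc];	/* current descriptor */

	desc->addr = buf_phys;
	desc->ctl  = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		     LTQ_DMA_TX_OFFSET(0) | (len & LTQ_DMA_SIZE_MASK);
}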
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
new file mode 100644
index 000000000000..3b728275b9b0
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
@@ -0,0 +1,47 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 Netlogic Microsystems
7 * Copyright (C) 2003 Ralf Baechle
8 */
9#ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H
10#define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H
11
12#define cpu_has_4kex 1
13#define cpu_has_4k_cache 1
14#define cpu_has_watch 1
15#define cpu_has_mips16 0
16#define cpu_has_counter 1
17#define cpu_has_divec 1
18#define cpu_has_vce 0
19#define cpu_has_cache_cdex_p 0
20#define cpu_has_cache_cdex_s 0
21#define cpu_has_prefetch 1
22#define cpu_has_mcheck 1
23#define cpu_has_ejtag 1
24
25#define cpu_has_llsc 1
26#define cpu_has_vtag_icache 0
27#define cpu_has_dc_aliases 0
28#define cpu_has_ic_fills_f_dc 0
29#define cpu_has_dsp 0
30#define cpu_has_mipsmt 0
31#define cpu_has_userlocal 0
32#define cpu_icache_snoops_remote_store 0
33
34#define cpu_has_nofpuex 0
35#define cpu_has_64bits 1
36
37#define cpu_has_mips32r1 1
38#define cpu_has_mips32r2 0
39#define cpu_has_mips64r1 1
40#define cpu_has_mips64r2 0
41
42#define cpu_has_inclusive_pcaches 0
43
44#define cpu_dcache_line_size() 32
45#define cpu_icache_line_size() 32
46
47#endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h
new file mode 100644
index 000000000000..b5902458e7c1
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/irq.h
@@ -0,0 +1,14 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 Netlogic Microsystems.
7 */
8#ifndef __ASM_NETLOGIC_IRQ_H
9#define __ASM_NETLOGIC_IRQ_H
10
11#define NR_IRQS 64
12#define MIPS_CPU_IRQ_BASE 0
13
14#endif /* __ASM_NETLOGIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-netlogic/war.h b/arch/mips/include/asm/mach-netlogic/war.h
new file mode 100644
index 000000000000..22da89327352
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/war.h
@@ -0,0 +1,26 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 Netlogic Microsystems.
7 * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
8 */
9#ifndef __ASM_MIPS_MACH_NLM_WAR_H
10#define __ASM_MIPS_MACH_NLM_WAR_H
11
12#define R4600_V1_INDEX_ICACHEOP_WAR 0
13#define R4600_V1_HIT_CACHEOP_WAR 0
14#define R4600_V2_HIT_CACHEOP_WAR 0
15#define R5432_CP0_INTERRUPT_WAR 0
16#define BCM1250_M3_WAR 0
17#define SIBYTE_1956_WAR 0
18#define MIPS4K_ICACHE_REFILL_WAR 0
19#define MIPS_CACHE_SYNC_WAR 0
20#define TX49XX_ICACHE_INDEX_INV_WAR 0
21#define RM9000_CDEX_SMP_WAR 0
22#define ICACHE_REFILLS_WORKAROUND_WAR 0
23#define R10000_LLSC_WAR 0
24#define MIPS34K_MISSED_ITLB_WAR 0
25
26#endif /* __ASM_MIPS_MACH_NLM_WAR_H */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index d94085a3eafb..bc01a02cacd8 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -118,6 +118,8 @@ search_module_dbetables(unsigned long addr)
118#define MODULE_PROC_FAMILY "LOONGSON2 " 118#define MODULE_PROC_FAMILY "LOONGSON2 "
119#elif defined CONFIG_CPU_CAVIUM_OCTEON 119#elif defined CONFIG_CPU_CAVIUM_OCTEON
120#define MODULE_PROC_FAMILY "OCTEON " 120#define MODULE_PROC_FAMILY "OCTEON "
121#elif defined CONFIG_CPU_XLR
122#define MODULE_PROC_FAMILY "XLR "
121#else 123#else
122#error MODULE_PROC_FAMILY undefined for your processor configuration 124#error MODULE_PROC_FAMILY undefined for your processor configuration
123#endif 125#endif
diff --git a/arch/mips/include/asm/netlogic/interrupt.h b/arch/mips/include/asm/netlogic/interrupt.h
new file mode 100644
index 000000000000..a85aadb6cfd7
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/interrupt.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_INTERRUPT_H
36#define _ASM_NLM_INTERRUPT_H
37
38/* Defines for the IRQ numbers */
39
40#define IRQ_IPI_SMP_FUNCTION 3
41#define IRQ_IPI_SMP_RESCHEDULE 4
42#define IRQ_MSGRING 6
43#define IRQ_TIMER 7
44
45#endif
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
new file mode 100644
index 000000000000..8c53d0ba4bf2
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_MIPS_EXTS_H
36#define _ASM_NLM_MIPS_EXTS_H
37
38/*
39 * XLR and XLP interrupt request and interrupt mask registers
40 */
41#define read_c0_eirr() __read_64bit_c0_register($9, 6)
42#define read_c0_eimr() __read_64bit_c0_register($9, 7)
43#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val)
44
45/*
46 * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
47 * EIMR is shadowed in the status register, so we cannot save and
48 * restore status register for split read.
49 */
50#define write_c0_eimr(val) \
51do { \
52 if (sizeof(unsigned long) == 4) { \
53 unsigned long __flags; \
54 \
55 local_irq_save(__flags); \
56 __asm__ __volatile__( \
57 ".set\tmips64\n\t" \
58 "dsll\t%L0, %L0, 32\n\t" \
59 "dsrl\t%L0, %L0, 32\n\t" \
60 "dsll\t%M0, %M0, 32\n\t" \
61 "or\t%L0, %L0, %M0\n\t" \
62 "dmtc0\t%L0, $9, 7\n\t" \
63 ".set\tmips0" \
64 : : "r" (val)); \
65 __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
66 local_irq_restore(__flags); \
67 } else \
68 __write_64bit_c0_register($9, 7, (val)); \
69} while (0)
70
71static inline int hard_smp_processor_id(void)
72{
73 return __read_32bit_c0_register($15, 1) & 0x3ff;
74}
75
76#endif /*_ASM_NLM_MIPS_EXTS_H */
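Given the read_c0_eimr()/write_c0_eimr() accessors above, masking a single extended interrupt source is a one-liner; a hedged sketch (the helper name is made up):

static inline void nlm_mask_eirq(int irq)
{
	write_c0_eimr(read_c0_eimr() & ~(1ULL << irq));	/* clear the enable bit */
}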
diff --git a/arch/mips/include/asm/netlogic/psb-bootinfo.h b/arch/mips/include/asm/netlogic/psb-bootinfo.h
new file mode 100644
index 000000000000..6878307f0ee6
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/psb-bootinfo.h
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NETLOGIC_BOOTINFO_H
36#define _ASM_NETLOGIC_BOOTINFO_H
37
38struct psb_info {
39 uint64_t boot_level;
40 uint64_t io_base;
41 uint64_t output_device;
42 uint64_t uart_print;
43 uint64_t led_output;
44 uint64_t init;
45 uint64_t exit;
46 uint64_t warm_reset;
47 uint64_t wakeup;
48 uint64_t online_cpu_map;
49 uint64_t master_reentry_sp;
50 uint64_t master_reentry_gp;
51 uint64_t master_reentry_fn;
52 uint64_t slave_reentry_fn;
53 uint64_t magic_dword;
54 uint64_t uart_putchar;
55 uint64_t size;
56 uint64_t uart_getchar;
57 uint64_t nmi_handler;
58 uint64_t psb_version;
59 uint64_t mac_addr;
60 uint64_t cpu_frequency;
61 uint64_t board_version;
62 uint64_t malloc;
63 uint64_t free;
64 uint64_t global_shmem_addr;
65 uint64_t global_shmem_size;
66 uint64_t psb_os_cpu_map;
67 uint64_t userapp_cpu_map;
68 uint64_t wakeup_os;
69 uint64_t psb_mem_map;
70 uint64_t board_major_version;
71 uint64_t board_minor_version;
72 uint64_t board_manf_revision;
73 uint64_t board_serial_number;
74 uint64_t psb_physaddr_map;
75 uint64_t xlr_loaderip_config;
76 uint64_t bldr_envp;
77 uint64_t avail_mem_map;
78};
79
80enum {
81 NETLOGIC_IO_SPACE = 0x10,
82 PCIX_IO_SPACE,
83 PCIX_CFG_SPACE,
84 PCIX_MEMORY_SPACE,
85 HT_IO_SPACE,
86 HT_CFG_SPACE,
87 HT_MEMORY_SPACE,
88 SRAM_SPACE,
89 FLASH_CONTROLLER_SPACE
90};
91
92#define NLM_MAX_ARGS 64
93#define NLM_MAX_ENVS 32
94
95/* This is what netlboot passes and linux boot_mem_map is subtly different */
96#define NLM_BOOT_MEM_MAP_MAX 32
97struct nlm_boot_mem_map {
98 int nr_map;
99 struct nlm_boot_mem_map_entry {
100 uint64_t addr; /* start of memory segment */
101 uint64_t size; /* size of memory segment */
102 uint32_t type; /* type of memory segment */
103 } map[NLM_BOOT_MEM_MAP_MAX];
104};
105
106/* Pointer to saved boot loader info */
107extern struct psb_info nlm_prom_info;
108
109#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/gpio.h b/arch/mips/include/asm/netlogic/xlr/gpio.h
new file mode 100644
index 000000000000..51f6ad4aeb14
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/gpio.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_GPIO_H
36#define _ASM_NLM_GPIO_H
37
38#define NETLOGIC_GPIO_INT_EN_REG 0
39#define NETLOGIC_GPIO_INPUT_INVERSION_REG 1
40#define NETLOGIC_GPIO_IO_DIR_REG 2
41#define NETLOGIC_GPIO_IO_DATA_WR_REG 3
42#define NETLOGIC_GPIO_IO_DATA_RD_REG 4
43
44#define NETLOGIC_GPIO_SWRESET_REG 8
45#define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9
46#define NETLOGIC_GPIO_DRAM1_RATIO_REG 10
47#define NETLOGIC_GPIO_DRAM1_RESET_REG 11
48#define NETLOGIC_GPIO_DRAM1_STATUS_REG 12
49#define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13
50#define NETLOGIC_GPIO_DRAM2_RATIO_REG 14
51#define NETLOGIC_GPIO_DRAM2_RESET_REG 15
52#define NETLOGIC_GPIO_DRAM2_STATUS_REG 16
53
54#define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21
55#define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24
56#define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25
57#define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26
58
59#define NETLOGIC_GPIO_FUSE_BANK_REG 35
60#define NETLOGIC_GPIO_CPU_RESET_REG 40
61#define NETLOGIC_GPIO_RNG_REG 43
62
63#define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17
64#define NETLOGIC_GPIO_LED_BITMAP 0x1700000
65#define NETLOGIC_GPIO_LED_0_SHIFT 20
66#define NETLOGIC_GPIO_LED_1_SHIFT 24
67
68#define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01
69#define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02
70#define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03
71#define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04
72
73#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h
new file mode 100644
index 000000000000..2e3a4dd53045
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/iomap.h
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_IOMAP_H
36#define _ASM_NLM_IOMAP_H
37
38#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
39#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
40#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
41#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
42#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
43#define NETLOGIC_IO_PIC_OFFSET 0x08000
44#define NETLOGIC_IO_UART_0_OFFSET 0x14000
45#define NETLOGIC_IO_UART_1_OFFSET 0x15100
46
47#define NETLOGIC_IO_SIZE 0x1000
48
49#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
50
51#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
52#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
53
54#define NETLOGIC_IO_SRAM_OFFSET 0x07000
55
56#define NETLOGIC_IO_PCIX_OFFSET 0x09000
57#define NETLOGIC_IO_HT_OFFSET 0x0A000
58
59#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
60
61#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
62#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
63#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
64#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
65
66/* XLS devices */
67#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
68#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
69#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
70#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
71
72#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
73#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
74#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
75#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
76
77#define NETLOGIC_IO_USB_0_OFFSET 0x24000
78#define NETLOGIC_IO_USB_1_OFFSET 0x25000
79
80#define NETLOGIC_IO_COMP_OFFSET 0x1D000
81/* end XLS devices */
82
83/* XLR devices */
84#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
85#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
86#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
87#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
88/* end XLR devices */
89
90#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
91#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
92
93#define NETLOGIC_IO_GPIO_OFFSET 0x18000
94#define NETLOGIC_IO_FLASH_OFFSET 0x19000
95#define NETLOGIC_IO_TB_OFFSET 0x1C000
96
97#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
98
99/*
100 * Base address (virtual) of the PCI config address space.
101 * For now, choose 256M phys in KSEG1 = 0xA0000000 + (1 << 28).
102 * Config space spans 256 buses * 256 devfns * 256 bytes,
103 * i.e. 1 << 24 = 16M.
104 */
105#define DEFAULT_PCI_CONFIG_BASE 0x18000000
106#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
107#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
108
109#ifndef __ASSEMBLY__
110#include <linux/types.h>
111#include <asm/byteorder.h>
112
113typedef volatile __u32 nlm_reg_t;
114extern unsigned long netlogic_io_base;
115
116/* FIXME read once in write_reg */
117#ifdef CONFIG_CPU_LITTLE_ENDIAN
118#define netlogic_read_reg(base, offset) ((base)[(offset)])
119#define netlogic_write_reg(base, offset, value) ((base)[(offset)] = (value))
120#else
121#define netlogic_read_reg(base, offset) (be32_to_cpu((base)[(offset)]))
122#define netlogic_write_reg(base, offset, value) \
123 ((base)[(offset)] = cpu_to_be32((value)))
124#endif
125
126#define netlogic_read_reg_le32(base, offset) (le32_to_cpu((base)[(offset)]))
127#define netlogic_write_reg_le32(base, offset, value) \
128 ((base)[(offset)] = cpu_to_le32((value)))
129#define netlogic_io_mmio(offset) ((nlm_reg_t *)(netlogic_io_base+(offset)))
130#endif /* __ASSEMBLY__ */
131#endif
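
For illustration only, a minimal sketch of how these accessors pair with the
GPIO block offsets from gpio.h above. The helper name nlm_gpio_dir_output()
is hypothetical, and the assumption that a set bit in
NETLOGIC_GPIO_IO_DIR_REG selects output is not confirmed by this patch.

	/* Illustrative only -- nlm_gpio_dir_output() is not part of this series. */
	#include <asm/netlogic/xlr/iomap.h>
	#include <asm/netlogic/xlr/gpio.h>

	static void nlm_gpio_dir_output(int pin)
	{
		nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);
		u32 dir = netlogic_read_reg(mmio, NETLOGIC_GPIO_IO_DIR_REG);

		/* assumes a set direction bit means "drive as output" */
		netlogic_write_reg(mmio, NETLOGIC_GPIO_IO_DIR_REG, dir | (1u << pin));
	}
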
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h
new file mode 100644
index 000000000000..5cceb746f080
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/pic.h
@@ -0,0 +1,231 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_XLR_PIC_H
36#define _ASM_NLM_XLR_PIC_H
37
38#define PIC_CLKS_PER_SEC 66666666ULL
39/* PIC hardware interrupt numbers */
40#define PIC_IRT_WD_INDEX 0
41#define PIC_IRT_TIMER_0_INDEX 1
42#define PIC_IRT_TIMER_1_INDEX 2
43#define PIC_IRT_TIMER_2_INDEX 3
44#define PIC_IRT_TIMER_3_INDEX 4
45#define PIC_IRT_TIMER_4_INDEX 5
46#define PIC_IRT_TIMER_5_INDEX 6
47#define PIC_IRT_TIMER_6_INDEX 7
48#define PIC_IRT_TIMER_7_INDEX 8
49#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX
50#define PIC_IRT_UART_0_INDEX 9
51#define PIC_IRT_UART_1_INDEX 10
52#define PIC_IRT_I2C_0_INDEX 11
53#define PIC_IRT_I2C_1_INDEX 12
54#define PIC_IRT_PCMCIA_INDEX 13
55#define PIC_IRT_GPIO_INDEX 14
56#define PIC_IRT_HYPER_INDEX 15
57#define PIC_IRT_PCIX_INDEX 16
58/* XLS */
59#define PIC_IRT_CDE_INDEX 15
60#define PIC_IRT_BRIDGE_TB_XLS_INDEX 16
61/* end XLS */
62#define PIC_IRT_GMAC0_INDEX 17
63#define PIC_IRT_GMAC1_INDEX 18
64#define PIC_IRT_GMAC2_INDEX 19
65#define PIC_IRT_GMAC3_INDEX 20
66#define PIC_IRT_XGS0_INDEX 21
67#define PIC_IRT_XGS1_INDEX 22
68#define PIC_IRT_HYPER_FATAL_INDEX 23
69#define PIC_IRT_PCIX_FATAL_INDEX 24
70#define PIC_IRT_BRIDGE_AERR_INDEX 25
71#define PIC_IRT_BRIDGE_BERR_INDEX 26
72#define PIC_IRT_BRIDGE_TB_XLR_INDEX 27
73#define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28
74/* XLS */
75#define PIC_IRT_GMAC4_INDEX 21
76#define PIC_IRT_GMAC5_INDEX 22
77#define PIC_IRT_GMAC6_INDEX 23
78#define PIC_IRT_GMAC7_INDEX 24
79#define PIC_IRT_BRIDGE_ERR_INDEX 25
80#define PIC_IRT_PCIE_LINK0_INDEX 26
81#define PIC_IRT_PCIE_LINK1_INDEX 27
82#define PIC_IRT_PCIE_LINK2_INDEX 23
83#define PIC_IRT_PCIE_LINK3_INDEX 24
84#define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28
85#define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29
86#define PIC_IRT_SRIO_LINK0_INDEX 26
87#define PIC_IRT_SRIO_LINK1_INDEX 27
88#define PIC_IRT_SRIO_LINK2_INDEX 28
89#define PIC_IRT_SRIO_LINK3_INDEX 29
90#define PIC_IRT_PCIE_INT_INDEX 28
91#define PIC_IRT_PCIE_FATAL_INDEX 29
92#define PIC_IRT_GPIO_B_INDEX 30
93#define PIC_IRT_USB_INDEX 31
94/* end XLS */
95#define PIC_NUM_IRTS 32
96
97
98#define PIC_CLOCK_TIMER 7
99
100/* PIC Registers */
101#define PIC_CTRL 0x00
102#define PIC_IPI 0x04
103#define PIC_INT_ACK 0x06
104
105#define WD_MAX_VAL_0 0x08
106#define WD_MAX_VAL_1 0x09
107#define WD_MASK_0 0x0a
108#define WD_MASK_1 0x0b
109#define WD_HEARTBEAT_0 0x0c
110#define WD_HEARTBEAT_1 0x0d
111
112#define PIC_IRT_0_BASE 0x40
113#define PIC_IRT_1_BASE 0x80
114#define PIC_TIMER_MAXVAL_0_BASE 0x100
115#define PIC_TIMER_MAXVAL_1_BASE 0x110
116#define PIC_TIMER_COUNT_0_BASE 0x120
117#define PIC_TIMER_COUNT_1_BASE 0x130
118
119#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
120#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr))
121
122#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i))
123#define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i))
124#define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i))
125#define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_1_BASE + (i))
126
127/*
128 * Mapping between hardware interrupt numbers and Linux IRQs.
129 * We use a simple scheme to map PIC interrupts 0-31 to IRQs
130 * 8-39, which leaves IRQs 0-7 free for CPU interrupts like
131 * count/compare and the FMN.
132 */
133#define PIC_IRQ_BASE 8
134#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
135#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
136
137#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE
138#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX)
139#define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX)
140#define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX)
141#define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX)
142#define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX)
143#define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX)
144#define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX)
145#define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX)
146#define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX)
147#define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ)
148#define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX)
149#define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX)
150#define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX)
151#define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX)
152#define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX)
153#define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX)
154#define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX)
155#define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX)
156/* XLS */
157#define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX)
158#define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX)
159/* end XLS */
160#define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX)
161#define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX)
162#define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX)
163#define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX)
164#define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX)
165#define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX)
166#define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX)
167#define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX)
168#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX)
169#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX)
170#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX)
171#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
172/* XLS defines */
173#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX)
174#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX)
175#define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX)
176#define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX)
177#define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX)
178#define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX)
179#define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX)
180#define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX)
181#define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX)
182#define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX)
183#define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX)
184#define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX)
185#define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX)
186#define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX)
187#define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX)
188#define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT_INDEX)
189#define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX)
190#define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX)
191#define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX)
192#define PIC_IRT_LAST_IRQ PIC_USB_IRQ
193/* end XLS */
194
195#ifndef __ASSEMBLY__
196static inline void pic_send_ipi(u32 ipi)
197{
198 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
199
200 netlogic_write_reg(mmio, PIC_IPI, ipi);
201}
202
203static inline u32 pic_read_control(void)
204{
205 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
206
207 return netlogic_read_reg(mmio, PIC_CTRL);
208}
209
210static inline void pic_write_control(u32 control)
211{
212 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
213
214 netlogic_write_reg(mmio, PIC_CTRL, control);
215}
216
217static inline void pic_update_control(u32 control)
218{
219 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
220
221 netlogic_write_reg(mmio, PIC_CTRL,
222 (control | netlogic_read_reg(mmio, PIC_CTRL)));
223}
224
225#define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \
226 ((irq) <= PIC_TIMER_7_IRQ))
227#define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \
228 ((irq) <= PIC_IRT_LAST_IRQ))
229#endif
230
231#endif /* _ASM_NLM_XLR_PIC_H */
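
A quick illustration of the mapping arithmetic above: with PIC_IRQ_BASE = 8,
the GPIO interrupt at IRT index 14 becomes Linux IRQ 22, and the reverse
macro recovers the index. pic_mapping_selftest() is a hypothetical name.

	/* Illustrative only: exercising the PIC index <-> Linux IRQ macros. */
	#include <linux/kernel.h>
	#include <asm/netlogic/xlr/iomap.h>
	#include <asm/netlogic/xlr/pic.h>

	static void pic_mapping_selftest(void)
	{
		int irq = PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX);	/* 8 + 14 = 22 */

		WARN_ON(PIC_IRQ_TO_INTR(irq) != PIC_IRT_GPIO_INDEX);
		WARN_ON(!PIC_IRQ_IS_IRT(irq));	/* 22 lies within [8, PIC_IRT_LAST_IRQ] */
	}
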
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h
new file mode 100644
index 000000000000..3e6372692a04
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/xlr.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _ASM_NLM_XLR_H
36#define _ASM_NLM_XLR_H
37
38/* Platform UART functions */
39struct uart_port;
40unsigned int nlm_xlr_uart_in(struct uart_port *, int);
41void nlm_xlr_uart_out(struct uart_port *, int, int);
42
43/* SMP support functions */
44struct irq_desc;
45void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
46void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
47int nlm_wakeup_secondary_cpus(u32 wakeup_mask);
48void nlm_smp_irq_init(void);
49void nlm_boot_smp_nmi(void);
50void prom_pre_boot_secondary_cpus(void);
51
52extern struct plat_smp_ops nlm_smp_ops;
53extern unsigned long nlm_common_ebase;
54
55/* XLS B silicon "Rook" */
56static inline unsigned int nlm_chip_is_xls_b(void)
57{
58 uint32_t prid = read_c0_prid();
59
60 return ((prid & 0xf000) == 0x4000);
61}
62
63/*
64 * XLR chip types
65 */
66/* The XLS product line has PRID bits [15:12] of 0x4, 0x8 or 0xc */
67static inline unsigned int nlm_chip_is_xls(void)
68{
69 uint32_t prid = read_c0_prid();
70
71 return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 ||
72 (prid & 0xf000) == 0xc000);
73}
74
75#endif /* _ASM_NLM_XLR_H */
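
A sketch only of how platform code might branch on the PRID helpers above;
nlm_board_setup() is a hypothetical caller and the feature notes in the
comments merely summarize the I/O offsets defined earlier in this series.

	/* Illustrative only -- nlm_board_setup() is not part of this patch. */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/mipsregs.h>
	#include <asm/netlogic/xlr/xlr.h>

	static void __init nlm_board_setup(void)
	{
		if (nlm_chip_is_xls()) {
			/* XLS parts carry the PCIe/USB/GMAC4-7 blocks */
			if (nlm_chip_is_xls_b())
				pr_info("XLS B-step (Rook) silicon\n");
		} else {
			/* original XLR parts use the PCI-X/HyperTransport bridge */
			pr_info("XLR silicon\n");
		}
	}
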
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 9f1b8dba2c81..de39b1f343ea 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -141,7 +141,8 @@ extern int ptrace_set_watch_regs(struct task_struct *child,
141#define instruction_pointer(regs) ((regs)->cp0_epc) 141#define instruction_pointer(regs) ((regs)->cp0_epc)
142#define profile_pc(regs) instruction_pointer(regs) 142#define profile_pc(regs) instruction_pointer(regs)
143 143
144extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); 144extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
145extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
145 146
146extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; 147extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET;
147 148
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d71160de4d10..97f8bf6639e7 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -149,6 +149,9 @@ register struct thread_info *__current_thread_info __asm__("$28");
149#define _TIF_FPUBOUND (1<<TIF_FPUBOUND) 149#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
150#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 150#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
151 151
152/* work to do in syscall_trace_leave() */
153#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
154
152/* work to do on interrupt/exception return */ 155/* work to do on interrupt/exception return */
153#define _TIF_WORK_MASK (0x0000ffef & \ 156#define _TIF_WORK_MASK (0x0000ffef & \
154 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) 157 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 9ce9f64cb76f..2d8e447cb828 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free);
211 */ 211 */
212int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) 212int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
213{ 213{
214 int first, pages, npages; 214 int first, pages;
215 215
216 if (laddr > 0xffffff) { 216 if (laddr > 0xffffff) {
217 if (vdma_debug) 217 if (vdma_debug)
@@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
228 return -EINVAL; /* invalid physical address */ 228 return -EINVAL; /* invalid physical address */
229 } 229 }
230 230
231 npages = pages = 231 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
232 (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
233 first = laddr >> 12; 232 first = laddr >> 12;
234 if (vdma_debug) 233 if (vdma_debug)
235 printk("vdma_remap: first=%x, pages=%x\n", first, pages); 234 printk("vdma_remap: first=%x, pages=%x\n", first, pages);
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c
index 5ebe75a68350..d7feb898692c 100644
--- a/arch/mips/jz4740/dma.c
+++ b/arch/mips/jz4740/dma.c
@@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue);
242 242
243static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) 243static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma)
244{ 244{
245 uint32_t status; 245 (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
246
247 status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
248 246
249 jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, 247 jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0,
250 JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); 248 JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 6a9e14dab91e..d97cfbf882f5 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> 2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
3 * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org>
3 * JZ4740 setup code 4 * JZ4740 setup code
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
@@ -14,13 +15,44 @@
14 */ 15 */
15 16
16#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/io.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18 20
21#include <asm/bootinfo.h>
22
23#include <asm/mach-jz4740/base.h>
24
19#include "reset.h" 25#include "reset.h"
20 26
27
28#define JZ4740_EMC_SDRAM_CTRL 0x80
29
30
31static void __init jz4740_detect_mem(void)
32{
33 void __iomem *jz_emc_base;
34 u32 ctrl, bus, bank, rows, cols;
35 phys_t size;
36
37 jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100);
38 ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL);
39 bus = 2 - ((ctrl >> 31) & 1);
40 bank = 1 + ((ctrl >> 19) & 1);
41 cols = 8 + ((ctrl >> 26) & 7);
42 rows = 11 + ((ctrl >> 20) & 3);
43 printk(KERN_DEBUG
44 "SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n",
45 bus, bank, rows, cols);
46 iounmap(jz_emc_base);
47
48 size = 1 << (bus + bank + cols + rows);
49 add_memory_region(0, size, BOOT_MEM_RAM);
50}
51
21void __init plat_mem_setup(void) 52void __init plat_mem_setup(void)
22{ 53{
23 jz4740_reset_init(); 54 jz4740_reset_init();
55 jz4740_detect_mem();
24} 56}
25 57
26const char *get_system_type(void) 58const char *get_system_type(void)
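
The EMC fields decoded in jz4740_detect_mem() are stored as log2
contributions, so the detected size reduces to a single shift. A restated
sketch of the arithmetic (the helper name is hypothetical):

	/* Illustrative only: restating the SDRAM size arithmetic used above. */
	#include <linux/types.h>

	static unsigned long jz4740_sdram_size(u32 ctrl)
	{
		u32 bus  = 2 - ((ctrl >> 31) & 1);	/* log2(bus width in bytes) */
		u32 bank = 1 + ((ctrl >> 19) & 1);	/* log2(number of banks) */
		u32 cols = 8 + ((ctrl >> 26) & 7);	/* column address bits */
		u32 rows = 11 + ((ctrl >> 20) & 3);	/* row address bits */

		/* e.g. bus = 2, bank = 2, cols = 9, rows = 13 -> 1 << 26 = 64 MiB */
		return 1UL << (bus + bank + cols + rows);
	}
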
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index fe01678d94fd..eaa853a54af6 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt,
89 89
90static struct clock_event_device jz4740_clockevent = { 90static struct clock_event_device jz4740_clockevent = {
91 .name = "jz4740-timer", 91 .name = "jz4740-timer",
92 .features = CLOCK_EVT_FEAT_PERIODIC, 92 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
93 .set_next_event = jz4740_clockevent_set_next, 93 .set_next_event = jz4740_clockevent_set_next,
94 .set_mode = jz4740_clockevent_set_mode, 94 .set_mode = jz4740_clockevent_set_mode,
95 .rating = 200, 95 .rating = 200,
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c
index b2c015129055..654d5c3900b6 100644
--- a/arch/mips/jz4740/timer.c
+++ b/arch/mips/jz4740/timer.c
@@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void)
27{ 27{
28 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); 28 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR);
29} 29}
30EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog);
30 31
31void jz4740_timer_disable_watchdog(void) 32void jz4740_timer_disable_watchdog(void)
32{ 33{
33 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); 34 writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET);
34} 35}
36EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog);
35 37
36void __init jz4740_timer_init(void) 38void __init jz4740_timer_init(void)
37{ 39{
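
With the two exports added above, a modular driver can now gate and ungate
the watchdog counter. A minimal sketch, assuming the declarations live in
<asm/mach-jz4740/timer.h>; the demo module itself is hypothetical.

	/* Illustrative only -- a hypothetical modular user of the new exports. */
	#include <linux/module.h>
	#include <asm/mach-jz4740/timer.h>	/* assumed location of the prototypes */

	static int __init wdt_demo_init(void)
	{
		jz4740_timer_enable_watchdog();		/* ungate the WDT counter */
		return 0;
	}

	static void __exit wdt_demo_exit(void)
	{
		jz4740_timer_disable_watchdog();	/* gate it again on unload */
	}

	module_init(wdt_demo_init);
	module_exit(wdt_demo_exit);
	MODULE_LICENSE("GPL");
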
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index cedee2bcbd18..83bba332bbfc 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
52obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o 52obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
53obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o 53obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
54obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o 54obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
55obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o
55 56
56obj-$(CONFIG_SMP) += smp.o 57obj-$(CONFIG_SMP) += smp.o
57obj-$(CONFIG_SMP_UP) += smp-up.o 58obj-$(CONFIG_SMP_UP) += smp-up.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f65d4c8c65a6..bb133d10b145 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -291,6 +291,12 @@ static inline int cpu_has_confreg(void)
291#endif 291#endif
292} 292}
293 293
294static inline void set_elf_platform(int cpu, const char *plat)
295{
296 if (cpu == 0)
297 __elf_platform = plat;
298}
299
294/* 300/*
295 * Get the FPU Implementation/Revision. 301 * Get the FPU Implementation/Revision.
296 */ 302 */
@@ -614,6 +620,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
614 case PRID_IMP_LOONGSON2: 620 case PRID_IMP_LOONGSON2:
615 c->cputype = CPU_LOONGSON2; 621 c->cputype = CPU_LOONGSON2;
616 __cpu_name[cpu] = "ICT Loongson-2"; 622 __cpu_name[cpu] = "ICT Loongson-2";
623
624 switch (c->processor_id & PRID_REV_MASK) {
625 case PRID_REV_LOONGSON2E:
626 set_elf_platform(cpu, "loongson2e");
627 break;
628 case PRID_REV_LOONGSON2F:
629 set_elf_platform(cpu, "loongson2f");
630 break;
631 }
632
617 c->isa_level = MIPS_CPU_ISA_III; 633 c->isa_level = MIPS_CPU_ISA_III;
618 c->options = R4K_OPTS | 634 c->options = R4K_OPTS |
619 MIPS_CPU_FPU | MIPS_CPU_LLSC | 635 MIPS_CPU_FPU | MIPS_CPU_LLSC |
@@ -911,12 +927,14 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
911 case PRID_IMP_BMIPS32_REV8: 927 case PRID_IMP_BMIPS32_REV8:
912 c->cputype = CPU_BMIPS32; 928 c->cputype = CPU_BMIPS32;
913 __cpu_name[cpu] = "Broadcom BMIPS32"; 929 __cpu_name[cpu] = "Broadcom BMIPS32";
930 set_elf_platform(cpu, "bmips32");
914 break; 931 break;
915 case PRID_IMP_BMIPS3300: 932 case PRID_IMP_BMIPS3300:
916 case PRID_IMP_BMIPS3300_ALT: 933 case PRID_IMP_BMIPS3300_ALT:
917 case PRID_IMP_BMIPS3300_BUG: 934 case PRID_IMP_BMIPS3300_BUG:
918 c->cputype = CPU_BMIPS3300; 935 c->cputype = CPU_BMIPS3300;
919 __cpu_name[cpu] = "Broadcom BMIPS3300"; 936 __cpu_name[cpu] = "Broadcom BMIPS3300";
937 set_elf_platform(cpu, "bmips3300");
920 break; 938 break;
921 case PRID_IMP_BMIPS43XX: { 939 case PRID_IMP_BMIPS43XX: {
922 int rev = c->processor_id & 0xff; 940 int rev = c->processor_id & 0xff;
@@ -925,15 +943,18 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
925 rev <= PRID_REV_BMIPS4380_HI) { 943 rev <= PRID_REV_BMIPS4380_HI) {
926 c->cputype = CPU_BMIPS4380; 944 c->cputype = CPU_BMIPS4380;
927 __cpu_name[cpu] = "Broadcom BMIPS4380"; 945 __cpu_name[cpu] = "Broadcom BMIPS4380";
946 set_elf_platform(cpu, "bmips4380");
928 } else { 947 } else {
929 c->cputype = CPU_BMIPS4350; 948 c->cputype = CPU_BMIPS4350;
930 __cpu_name[cpu] = "Broadcom BMIPS4350"; 949 __cpu_name[cpu] = "Broadcom BMIPS4350";
950 set_elf_platform(cpu, "bmips4350");
931 } 951 }
932 break; 952 break;
933 } 953 }
934 case PRID_IMP_BMIPS5000: 954 case PRID_IMP_BMIPS5000:
935 c->cputype = CPU_BMIPS5000; 955 c->cputype = CPU_BMIPS5000;
936 __cpu_name[cpu] = "Broadcom BMIPS5000"; 956 __cpu_name[cpu] = "Broadcom BMIPS5000";
957 set_elf_platform(cpu, "bmips5000");
937 c->options |= MIPS_CPU_ULRI; 958 c->options |= MIPS_CPU_ULRI;
938 break; 959 break;
939 } 960 }
@@ -956,14 +977,12 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
956 c->cputype = CPU_CAVIUM_OCTEON_PLUS; 977 c->cputype = CPU_CAVIUM_OCTEON_PLUS;
957 __cpu_name[cpu] = "Cavium Octeon+"; 978 __cpu_name[cpu] = "Cavium Octeon+";
958platform: 979platform:
959 if (cpu == 0) 980 set_elf_platform(cpu, "octeon");
960 __elf_platform = "octeon";
961 break; 981 break;
962 case PRID_IMP_CAVIUM_CN63XX: 982 case PRID_IMP_CAVIUM_CN63XX:
963 c->cputype = CPU_CAVIUM_OCTEON2; 983 c->cputype = CPU_CAVIUM_OCTEON2;
964 __cpu_name[cpu] = "Cavium Octeon II"; 984 __cpu_name[cpu] = "Cavium Octeon II";
965 if (cpu == 0) 985 set_elf_platform(cpu, "octeon2");
966 __elf_platform = "octeon2";
967 break; 986 break;
968 default: 987 default:
969 printk(KERN_INFO "Unknown Octeon chip!\n"); 988 printk(KERN_INFO "Unknown Octeon chip!\n");
@@ -988,6 +1007,59 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
988 } 1007 }
989} 1008}
990 1009
1010static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
1011{
1012 decode_configs(c);
1013
1014 c->options = (MIPS_CPU_TLB |
1015 MIPS_CPU_4KEX |
1016 MIPS_CPU_COUNTER |
1017 MIPS_CPU_DIVEC |
1018 MIPS_CPU_WATCH |
1019 MIPS_CPU_EJTAG |
1020 MIPS_CPU_LLSC);
1021
1022 switch (c->processor_id & 0xff00) {
1023 case PRID_IMP_NETLOGIC_XLR732:
1024 case PRID_IMP_NETLOGIC_XLR716:
1025 case PRID_IMP_NETLOGIC_XLR532:
1026 case PRID_IMP_NETLOGIC_XLR308:
1027 case PRID_IMP_NETLOGIC_XLR532C:
1028 case PRID_IMP_NETLOGIC_XLR516C:
1029 case PRID_IMP_NETLOGIC_XLR508C:
1030 case PRID_IMP_NETLOGIC_XLR308C:
1031 c->cputype = CPU_XLR;
1032 __cpu_name[cpu] = "Netlogic XLR";
1033 break;
1034
1035 case PRID_IMP_NETLOGIC_XLS608:
1036 case PRID_IMP_NETLOGIC_XLS408:
1037 case PRID_IMP_NETLOGIC_XLS404:
1038 case PRID_IMP_NETLOGIC_XLS208:
1039 case PRID_IMP_NETLOGIC_XLS204:
1040 case PRID_IMP_NETLOGIC_XLS108:
1041 case PRID_IMP_NETLOGIC_XLS104:
1042 case PRID_IMP_NETLOGIC_XLS616B:
1043 case PRID_IMP_NETLOGIC_XLS608B:
1044 case PRID_IMP_NETLOGIC_XLS416B:
1045 case PRID_IMP_NETLOGIC_XLS412B:
1046 case PRID_IMP_NETLOGIC_XLS408B:
1047 case PRID_IMP_NETLOGIC_XLS404B:
1048 c->cputype = CPU_XLR;
1049 __cpu_name[cpu] = "Netlogic XLS";
1050 break;
1051
1052 default:
1053 printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n",
1054 c->processor_id);
1055 c->cputype = CPU_XLR;
1056 break;
1057 }
1058
1059 c->isa_level = MIPS_CPU_ISA_M64R1;
1060 c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
1061}
1062
991#ifdef CONFIG_64BIT 1063#ifdef CONFIG_64BIT
992/* For use by uaccess.h */ 1064/* For use by uaccess.h */
993u64 __ua_limit; 1065u64 __ua_limit;
@@ -1035,6 +1107,9 @@ __cpuinit void cpu_probe(void)
1035 case PRID_COMP_INGENIC: 1107 case PRID_COMP_INGENIC:
1036 cpu_probe_ingenic(c, cpu); 1108 cpu_probe_ingenic(c, cpu);
1037 break; 1109 break;
1110 case PRID_COMP_NETLOGIC:
1111 cpu_probe_netlogic(c, cpu);
1112 break;
1038 } 1113 }
1039 1114
1040 BUG_ON(!__cpu_name[cpu]); 1115 BUG_ON(!__cpu_name[cpu]);
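
The string installed by set_elf_platform() is what the MIPS ELF_PLATFORM
definition hands to the loader, so userspace sees it in the AT_PLATFORM
auxiliary vector entry. A hedged userspace sketch, assuming glibc's
getauxval() (available since glibc 2.16):

	/* Illustrative only: userspace view of the new platform strings. */
	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		const char *plat = (const char *)getauxval(AT_PLATFORM);

		/* prints e.g. "octeon2", "bmips4380" or "loongson2f" */
		printf("AT_PLATFORM: %s\n", plat ? plat : "(none)");
		return 0;
	}
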
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ffa331029e08..37acfa036d44 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -167,14 +167,13 @@ work_notifysig: # deal with pending signals and
167FEXPORT(syscall_exit_work_partial) 167FEXPORT(syscall_exit_work_partial)
168 SAVE_STATIC 168 SAVE_STATIC
169syscall_exit_work: 169syscall_exit_work:
170 li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT 170 li t0, _TIF_WORK_SYSCALL_EXIT
171 and t0, a2 # a2 is preloaded with TI_FLAGS 171 and t0, a2 # a2 is preloaded with TI_FLAGS
172 beqz t0, work_pending # trace bit set? 172 beqz t0, work_pending # trace bit set?
173 local_irq_enable # could let do_syscall_trace() 173 local_irq_enable # could let syscall_trace_leave()
174 # call schedule() instead 174 # call schedule() instead
175 move a0, sp 175 move a0, sp
176 li a1, 1 176 jal syscall_trace_leave
177 jal do_syscall_trace
178 b resume_userspace 177 b resume_userspace
179 178
180#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) 179#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 94ca2b018af7..feb8021a305f 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -23,6 +23,7 @@
23 23
24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
26#define JUMP_RANGE_MASK ((1UL << 28) - 1)
26 27
27#define INSN_NOP 0x00000000 /* nop */ 28#define INSN_NOP 0x00000000 /* nop */
28#define INSN_JAL(addr) \ 29#define INSN_JAL(addr) \
@@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void)
44 45
45 /* jal (ftrace_caller + 8), jump over the first two instruction */ 46 /* jal (ftrace_caller + 8), jump over the first two instruction */
46 buf = (u32 *)&insn_jal_ftrace_caller; 47 buf = (u32 *)&insn_jal_ftrace_caller;
47 uasm_i_jal(&buf, (FTRACE_ADDR + 8)); 48 uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
48 49
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER 50#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50 /* j ftrace_graph_caller */ 51 /* j ftrace_graph_caller */
51 buf = (u32 *)&insn_j_ftrace_graph_caller; 52 buf = (u32 *)&insn_j_ftrace_graph_caller;
52 uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); 53 uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
53#endif 54#endif
54} 55}
55 56
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index d21c388c0116..4e6ea1ffad46 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -533,15 +533,10 @@ static inline int audit_arch(void)
533 * Notification of system call entry/exit 533 * Notification of system call entry/exit
534 * - triggered by current->work.syscall_trace 534 * - triggered by current->work.syscall_trace
535 */ 535 */
536asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) 536asmlinkage void syscall_trace_enter(struct pt_regs *regs)
537{ 537{
538 /* do the secure computing check first */ 538 /* do the secure computing check first */
539 if (!entryexit) 539 secure_computing(regs->regs[2]);
540 secure_computing(regs->regs[2]);
541
542 if (unlikely(current->audit_context) && entryexit)
543 audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
544 regs->regs[2]);
545 540
546 if (!(current->ptrace & PT_PTRACED)) 541 if (!(current->ptrace & PT_PTRACED))
547 goto out; 542 goto out;
@@ -565,8 +560,40 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
565 } 560 }
566 561
567out: 562out:
568 if (unlikely(current->audit_context) && !entryexit) 563 if (unlikely(current->audit_context))
569 audit_syscall_entry(audit_arch(), regs->regs[2], 564 audit_syscall_entry(audit_arch(), regs->regs[2],
570 regs->regs[4], regs->regs[5], 565 regs->regs[4], regs->regs[5],
571 regs->regs[6], regs->regs[7]); 566 regs->regs[6], regs->regs[7]);
572} 567}
568
569/*
570 * Notification of system call entry/exit
571 * - triggered by current->work.syscall_trace
572 */
573asmlinkage void syscall_trace_leave(struct pt_regs *regs)
574{
575 if (unlikely(current->audit_context))
576 audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
577 -regs->regs[2]);
578
579 if (!(current->ptrace & PT_PTRACED))
580 return;
581
582 if (!test_thread_flag(TIF_SYSCALL_TRACE))
583 return;
584
585 /* The 0x80 provides a way for the tracing parent to distinguish
586 between a syscall stop and SIGTRAP delivery */
587 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
588 0x80 : 0));
589
590 /*
591 * this isn't the same as continuing with a signal, but it will do
592 * for normal use. strace only continues with a signal if the
593 * stopping signal is not SIGTRAP. -brl
594 */
595 if (current->exit_code) {
596 send_sig(current->exit_code, current, 1);
597 current->exit_code = 0;
598 }
599}
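
The audit call in syscall_trace_leave() relies on the MIPS syscall return
convention: $a3 (regs[7]) is a non-zero error flag and $v0 (regs[2]) holds
the result, or the positive errno when $a3 is set. A restated sketch (the
helper name is hypothetical):

	/* Illustrative only: the return convention syscall_trace_leave() uses. */
	#include <asm/ptrace.h>

	static long mips_syscall_retval(struct pt_regs *regs)
	{
		/* $a3 set means failure; $v0 then carries the positive errno */
		return regs->regs[7] ? -(long)regs->regs[2] : (long)regs->regs[2];
	}
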
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7f5468b38d4c..7a8e1dd7f6f2 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -88,8 +88,7 @@ syscall_trace_entry:
88 SAVE_STATIC 88 SAVE_STATIC
89 move s0, t2 89 move s0, t2
90 move a0, sp 90 move a0, sp
91 li a1, 0 91 jal syscall_trace_enter
92 jal do_syscall_trace
93 92
94 move t0, s0 93 move t0, s0
95 RESTORE_STATIC 94 RESTORE_STATIC
@@ -565,7 +564,7 @@ einval: li v0, -ENOSYS
565 sys sys_ioprio_get 2 /* 4315 */ 564 sys sys_ioprio_get 2 /* 4315 */
566 sys sys_utimensat 4 565 sys sys_utimensat 4
567 sys sys_signalfd 3 566 sys sys_signalfd 3
568 sys sys_ni_syscall 0 567 sys sys_ni_syscall 0 /* was timerfd */
569 sys sys_eventfd 1 568 sys sys_eventfd 1
570 sys sys_fallocate 6 /* 4320 */ 569 sys sys_fallocate 6 /* 4320 */
571 sys sys_timerfd_create 2 570 sys sys_timerfd_create 2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a2e1fcbc41dc..2d31c83224f9 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -91,8 +91,7 @@ syscall_trace_entry:
91 SAVE_STATIC 91 SAVE_STATIC
92 move s0, t2 92 move s0, t2
93 move a0, sp 93 move a0, sp
94 li a1, 0 94 jal syscall_trace_enter
95 jal do_syscall_trace
96 95
97 move t0, s0 96 move t0, s0
98 RESTORE_STATIC 97 RESTORE_STATIC
@@ -404,7 +403,7 @@ sys_call_table:
404 PTR sys_ioprio_get 403 PTR sys_ioprio_get
405 PTR sys_utimensat /* 5275 */ 404 PTR sys_utimensat /* 5275 */
406 PTR sys_signalfd 405 PTR sys_signalfd
407 PTR sys_ni_syscall 406 PTR sys_ni_syscall /* was timerfd */
408 PTR sys_eventfd 407 PTR sys_eventfd
409 PTR sys_fallocate 408 PTR sys_fallocate
410 PTR sys_timerfd_create /* 5280 */ 409 PTR sys_timerfd_create /* 5280 */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index b2c7624995b8..38a0503b9a4a 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -89,8 +89,7 @@ n32_syscall_trace_entry:
89 SAVE_STATIC 89 SAVE_STATIC
90 move s0, t2 90 move s0, t2
91 move a0, sp 91 move a0, sp
92 li a1, 0 92 jal syscall_trace_enter
93 jal do_syscall_trace
94 93
95 move t0, s0 94 move t0, s0
96 RESTORE_STATIC 95 RESTORE_STATIC
@@ -403,7 +402,7 @@ EXPORT(sysn32_call_table)
403 PTR sys_ioprio_get 402 PTR sys_ioprio_get
404 PTR compat_sys_utimensat 403 PTR compat_sys_utimensat
405 PTR compat_sys_signalfd /* 6280 */ 404 PTR compat_sys_signalfd /* 6280 */
406 PTR sys_ni_syscall 405 PTR sys_ni_syscall /* was timerfd */
407 PTR sys_eventfd 406 PTR sys_eventfd
408 PTR sys_fallocate 407 PTR sys_fallocate
409 PTR sys_timerfd_create 408 PTR sys_timerfd_create
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 049a9c8c49a0..91ea5e4041dd 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -123,8 +123,7 @@ trace_a_syscall:
123 123
124 move s0, t2 # Save syscall pointer 124 move s0, t2 # Save syscall pointer
125 move a0, sp 125 move a0, sp
126 li a1, 0 126 jal syscall_trace_enter
127 jal do_syscall_trace
128 127
129 move t0, s0 128 move t0, s0
130 RESTORE_STATIC 129 RESTORE_STATIC
@@ -522,7 +521,7 @@ sys_call_table:
522 PTR sys_ioprio_get /* 4315 */ 521 PTR sys_ioprio_get /* 4315 */
523 PTR compat_sys_utimensat 522 PTR compat_sys_utimensat
524 PTR compat_sys_signalfd 523 PTR compat_sys_signalfd
525 PTR sys_ni_syscall 524 PTR sys_ni_syscall /* was timerfd */
526 PTR sys_eventfd 525 PTR sys_eventfd
527 PTR sys32_fallocate /* 4320 */ 526 PTR sys32_fallocate /* 4320 */
528 PTR sys_timerfd_create 527 PTR sys_timerfd_create
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 58beabf50b3c..d02765708ddb 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -10,12 +10,9 @@
10#include <linux/capability.h> 10#include <linux/capability.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/mm.h>
14#include <linux/fs.h> 13#include <linux/fs.h>
15#include <linux/smp.h> 14#include <linux/smp.h>
16#include <linux/mman.h>
17#include <linux/ptrace.h> 15#include <linux/ptrace.h>
18#include <linux/sched.h>
19#include <linux/string.h> 16#include <linux/string.h>
20#include <linux/syscalls.h> 17#include <linux/syscalls.h>
21#include <linux/file.h> 18#include <linux/file.h>
@@ -25,11 +22,9 @@
25#include <linux/msg.h> 22#include <linux/msg.h>
26#include <linux/shm.h> 23#include <linux/shm.h>
27#include <linux/compiler.h> 24#include <linux/compiler.h>
28#include <linux/module.h>
29#include <linux/ipc.h> 25#include <linux/ipc.h>
30#include <linux/uaccess.h> 26#include <linux/uaccess.h>
31#include <linux/slab.h> 27#include <linux/slab.h>
32#include <linux/random.h>
33#include <linux/elf.h> 28#include <linux/elf.h>
34 29
35#include <asm/asm.h> 30#include <asm/asm.h>
@@ -66,121 +61,6 @@ out:
66 return res; 61 return res;
67} 62}
68 63
69unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
70
71EXPORT_SYMBOL(shm_align_mask);
72
73#define COLOUR_ALIGN(addr,pgoff) \
74 ((((addr) + shm_align_mask) & ~shm_align_mask) + \
75 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
76
77unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 unsigned long len, unsigned long pgoff, unsigned long flags)
79{
80 struct vm_area_struct * vmm;
81 int do_color_align;
82 unsigned long task_size;
83
84#ifdef CONFIG_32BIT
85 task_size = TASK_SIZE;
86#else /* Must be CONFIG_64BIT*/
87 task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
88#endif
89
90 if (len > task_size)
91 return -ENOMEM;
92
93 if (flags & MAP_FIXED) {
94 /* Even MAP_FIXED mappings must reside within task_size. */
95 if (task_size - len < addr)
96 return -EINVAL;
97
98 /*
99 * We do not accept a shared mapping if it would violate
100 * cache aliasing constraints.
101 */
102 if ((flags & MAP_SHARED) &&
103 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
104 return -EINVAL;
105 return addr;
106 }
107
108 do_color_align = 0;
109 if (filp || (flags & MAP_SHARED))
110 do_color_align = 1;
111 if (addr) {
112 if (do_color_align)
113 addr = COLOUR_ALIGN(addr, pgoff);
114 else
115 addr = PAGE_ALIGN(addr);
116 vmm = find_vma(current->mm, addr);
117 if (task_size - len >= addr &&
118 (!vmm || addr + len <= vmm->vm_start))
119 return addr;
120 }
121 addr = current->mm->mmap_base;
122 if (do_color_align)
123 addr = COLOUR_ALIGN(addr, pgoff);
124 else
125 addr = PAGE_ALIGN(addr);
126
127 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
128 /* At this point: (!vmm || addr < vmm->vm_end). */
129 if (task_size - len < addr)
130 return -ENOMEM;
131 if (!vmm || addr + len <= vmm->vm_start)
132 return addr;
133 addr = vmm->vm_end;
134 if (do_color_align)
135 addr = COLOUR_ALIGN(addr, pgoff);
136 }
137}
138
139void arch_pick_mmap_layout(struct mm_struct *mm)
140{
141 unsigned long random_factor = 0UL;
142
143 if (current->flags & PF_RANDOMIZE) {
144 random_factor = get_random_int();
145 random_factor = random_factor << PAGE_SHIFT;
146 if (TASK_IS_32BIT_ADDR)
147 random_factor &= 0xfffffful;
148 else
149 random_factor &= 0xffffffful;
150 }
151
152 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
153 mm->get_unmapped_area = arch_get_unmapped_area;
154 mm->unmap_area = arch_unmap_area;
155}
156
157static inline unsigned long brk_rnd(void)
158{
159 unsigned long rnd = get_random_int();
160
161 rnd = rnd << PAGE_SHIFT;
162 /* 8MB for 32bit, 256MB for 64bit */
163 if (TASK_IS_32BIT_ADDR)
164 rnd = rnd & 0x7ffffful;
165 else
166 rnd = rnd & 0xffffffful;
167
168 return rnd;
169}
170
171unsigned long arch_randomize_brk(struct mm_struct *mm)
172{
173 unsigned long base = mm->brk;
174 unsigned long ret;
175
176 ret = PAGE_ALIGN(base + brk_rnd());
177
178 if (ret < mm->brk)
179 return mm->brk;
180
181 return ret;
182}
183
184SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, 64SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
185 unsigned long, prot, unsigned long, flags, unsigned long, 65 unsigned long, prot, unsigned long, flags, unsigned long,
186 fd, off_t, offset) 66 fd, off_t, offset)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 71350f7f2d88..e9b3af27d844 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
374 unsigned long dvpret = dvpe(); 374 unsigned long dvpret = dvpe();
375#endif /* CONFIG_MIPS_MT_SMTC */ 375#endif /* CONFIG_MIPS_MT_SMTC */
376 376
377 notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); 377 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
378 sig = 0;
378 379
379 console_verbose(); 380 console_verbose();
380 spin_lock_irq(&die_lock); 381 spin_lock_irq(&die_lock);
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
383 mips_mt_regdump(dvpret); 384 mips_mt_regdump(dvpret);
384#endif /* CONFIG_MIPS_MT_SMTC */ 385#endif /* CONFIG_MIPS_MT_SMTC */
385 386
386 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
387 sig = 0;
388
389 printk("%s[#%d]:\n", str, ++die_counter); 387 printk("%s[#%d]:\n", str, ++die_counter);
390 show_registers(regs); 388 show_registers(regs);
391 add_taint(TAINT_DIE); 389 add_taint(TAINT_DIE);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 832afbb87588..cd2ca544454b 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -68,12 +68,14 @@ SECTIONS
68 RODATA 68 RODATA
69 69
70 /* writeable */ 70 /* writeable */
71 _sdata = .; /* Start of data section */
71 .data : { /* Data */ 72 .data : { /* Data */
72 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ 73 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
73 74
74 INIT_TASK_DATA(PAGE_SIZE) 75 INIT_TASK_DATA(PAGE_SIZE)
75 NOSAVE_DATA 76 NOSAVE_DATA
76 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) 77 CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
78 READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
77 DATA_DATA 79 DATA_DATA
78 CONSTRUCTORS 80 CONSTRUCTORS
79 } 81 }
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
new file mode 100644
index 000000000000..3fccf2104513
--- /dev/null
+++ b/arch/mips/lantiq/Kconfig
@@ -0,0 +1,23 @@
1if LANTIQ
2
3config SOC_TYPE_XWAY
4 bool
5 default n
6
7choice
8 prompt "SoC Type"
9 default SOC_XWAY
10
11config SOC_AMAZON_SE
12 bool "Amazon SE"
13 select SOC_TYPE_XWAY
14
15config SOC_XWAY
16 bool "XWAY"
17 select SOC_TYPE_XWAY
18 select HW_HAS_PCI
19endchoice
20
21source "arch/mips/lantiq/xway/Kconfig"
22
23endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
new file mode 100644
index 000000000000..e5dae0e24b00
--- /dev/null
+++ b/arch/mips/lantiq/Makefile
@@ -0,0 +1,11 @@
1# Copyright (C) 2010 John Crispin <blogic@openwrt.org>
2#
3# This program is free software; you can redistribute it and/or modify it
4# under the terms of the GNU General Public License version 2 as published
5# by the Free Software Foundation.
6
7obj-y := irq.o setup.o clk.o prom.o devices.o
8
9obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
10
11obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
new file mode 100644
index 000000000000..f3dff05722de
--- /dev/null
+++ b/arch/mips/lantiq/Platform
@@ -0,0 +1,8 @@
1#
2# Lantiq
3#
4
5platform-$(CONFIG_LANTIQ) += lantiq/
6cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
7load-$(CONFIG_LANTIQ) = 0xffffffff80002000
8cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
new file mode 100644
index 000000000000..94560899d13e
--- /dev/null
+++ b/arch/mips/lantiq/clk.c
@@ -0,0 +1,140 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
8 */
9#include <linux/io.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/list.h>
17
18#include <asm/time.h>
19#include <asm/irq.h>
20#include <asm/div64.h>
21
22#include <lantiq_soc.h>
23
24#include "clk.h"
25
26struct clk {
27 const char *name;
28 unsigned long rate;
29 unsigned long (*get_rate) (void);
30};
31
32static struct clk *cpu_clk;
33static int cpu_clk_cnt;
34
35/* Lantiq SoCs have three static clocks */
36static struct clk cpu_clk_generic[] = {
37 {
38 .name = "cpu",
39 .get_rate = ltq_get_cpu_hz,
40 }, {
41 .name = "fpi",
42 .get_rate = ltq_get_fpi_hz,
43 }, {
44 .name = "io",
45 .get_rate = ltq_get_io_region_clock,
46 },
47};
48
49static struct resource ltq_cgu_resource = {
50 .name = "cgu",
51 .start = LTQ_CGU_BASE_ADDR,
52 .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
53 .flags = IORESOURCE_MEM,
54};
55
56/* remapped clock register range */
57void __iomem *ltq_cgu_membase;
58
59void clk_init(void)
60{
61 cpu_clk = cpu_clk_generic;
62 cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
63}
64
65static inline int clk_good(struct clk *clk)
66{
67 return clk && !IS_ERR(clk);
68}
69
70unsigned long clk_get_rate(struct clk *clk)
71{
72 if (unlikely(!clk_good(clk)))
73 return 0;
74
75 if (clk->rate != 0)
76 return clk->rate;
77
78 if (clk->get_rate != NULL)
79 return clk->get_rate();
80
81 return 0;
82}
83EXPORT_SYMBOL(clk_get_rate);
84
85struct clk *clk_get(struct device *dev, const char *id)
86{
87 int i;
88
89 for (i = 0; i < cpu_clk_cnt; i++)
90 if (!strcmp(id, cpu_clk[i].name))
91 return &cpu_clk[i];
92 BUG();
93 return ERR_PTR(-ENOENT);
94}
95EXPORT_SYMBOL(clk_get);
96
97void clk_put(struct clk *clk)
98{
99 /* not used */
100}
101EXPORT_SYMBOL(clk_put);
102
103static inline u32 ltq_get_counter_resolution(void)
104{
105 u32 res;
106
107 __asm__ __volatile__(
108 ".set push\n"
109 ".set mips32r2\n"
110 "rdhwr %0, $3\n"
111 ".set pop\n"
112 : "=&r" (res)
113 : /* no input */
114 : "memory");
115
116 return res;
117}
118
119void __init plat_time_init(void)
120{
121 struct clk *clk;
122
123 if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
124 panic("Failed to insert cgu memory\n");
125
126 if (request_mem_region(ltq_cgu_resource.start,
127 resource_size(&ltq_cgu_resource), "cgu") < 0)
128 panic("Failed to request cgu memory\n");
129
130 ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
131 resource_size(&ltq_cgu_resource));
132 if (!ltq_cgu_membase) {
133 pr_err("Failed to remap cgu memory\n");
134 unreachable();
135 }
136 clk = clk_get(0, "cpu");
137 mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
138 write_c0_compare(read_c0_count());
139 clk_put(clk);
140}
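
A usage sketch for the minimal clock API above; ltq_demo_fpi_rate() is a
hypothetical caller. Note that clk_get() BUG()s on any unknown id, so only
"cpu", "fpi" and "io" are valid here.

	/* Illustrative only: querying the FPI bus clock from a driver. */
	#include <linux/clk.h>

	static unsigned long ltq_demo_fpi_rate(void)
	{
		struct clk *clk = clk_get(NULL, "fpi");
		unsigned long rate = clk_get_rate(clk);

		clk_put(clk);	/* a no-op in this implementation */
		return rate;
	}
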
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
new file mode 100644
index 000000000000..3328925f2c3f
--- /dev/null
+++ b/arch/mips/lantiq/clk.h
@@ -0,0 +1,18 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_CLK_H__
10#define _LTQ_CLK_H__
11
12extern void clk_init(void);
13
14extern unsigned long ltq_get_cpu_hz(void);
15extern unsigned long ltq_get_fpi_hz(void);
16extern unsigned long ltq_get_io_region_clock(void);
17
18#endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
new file mode 100644
index 000000000000..7b82c34cb169
--- /dev/null
+++ b/arch/mips/lantiq/devices.c
@@ -0,0 +1,122 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/reboot.h>
15#include <linux/platform_device.h>
16#include <linux/leds.h>
17#include <linux/etherdevice.h>
18#include <linux/reboot.h>
19#include <linux/time.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/leds.h>
23
24#include <asm/bootinfo.h>
25#include <asm/irq.h>
26
27#include <lantiq_soc.h>
28
29#include "devices.h"
30
31/* nor flash */
32static struct resource ltq_nor_resource = {
33 .name = "nor",
34 .start = LTQ_FLASH_START,
35 .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
36 .flags = IORESOURCE_MEM,
37};
38
39static struct platform_device ltq_nor = {
40 .name = "ltq_nor",
41 .resource = &ltq_nor_resource,
42 .num_resources = 1,
43};
44
45void __init ltq_register_nor(struct physmap_flash_data *data)
46{
47 ltq_nor.dev.platform_data = data;
48 platform_device_register(&ltq_nor);
49}
50
51/* watchdog */
52static struct resource ltq_wdt_resource = {
53 .name = "watchdog",
54 .start = LTQ_WDT_BASE_ADDR,
55 .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
56 .flags = IORESOURCE_MEM,
57};
58
59void __init ltq_register_wdt(void)
60{
61 platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
62}
63
64/* asc ports */
65static struct resource ltq_asc0_resources[] = {
66 {
67 .name = "asc0",
68 .start = LTQ_ASC0_BASE_ADDR,
69 .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
70 .flags = IORESOURCE_MEM,
71 },
72 IRQ_RES(tx, LTQ_ASC_TIR(0)),
73 IRQ_RES(rx, LTQ_ASC_RIR(0)),
74 IRQ_RES(err, LTQ_ASC_EIR(0)),
75};
76
77static struct resource ltq_asc1_resources[] = {
78 {
79 .name = "asc1",
80 .start = LTQ_ASC1_BASE_ADDR,
81 .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
82 .flags = IORESOURCE_MEM,
83 },
84 IRQ_RES(tx, LTQ_ASC_TIR(1)),
85 IRQ_RES(rx, LTQ_ASC_RIR(1)),
86 IRQ_RES(err, LTQ_ASC_EIR(1)),
87};
88
89void __init ltq_register_asc(int port)
90{
91 switch (port) {
92 case 0:
93 platform_device_register_simple("ltq_asc", 0,
94 ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
95 break;
96 case 1:
97 platform_device_register_simple("ltq_asc", 1,
98 ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
99 break;
100 default:
101 break;
102 }
103}
104
105#ifdef CONFIG_PCI
106/* pci */
107static struct platform_device ltq_pci = {
108 .name = "ltq_pci",
109 .num_resources = 0,
110};
111
112void __init ltq_register_pci(struct ltq_pci_data *data)
113{
114 ltq_pci.dev.platform_data = data;
115 platform_device_register(&ltq_pci);
116}
117#else
118void __init ltq_register_pci(struct ltq_pci_data *data)
119{
120 pr_err("kernel is compiled without PCI support\n");
121}
122#endif
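
A sketch of how a machine file would consume these registration helpers; the
board_* names and the empty flash partition table are hypothetical.

	/* Illustrative only -- a hypothetical board file wiring up the helpers. */
	#include <linux/init.h>
	#include <linux/mtd/physmap.h>

	#include "devices.h"

	static struct physmap_flash_data board_flash_data = {
		.nr_parts	= 0,		/* no partition table in this sketch */
	};

	static void __init board_init(void)
	{
		ltq_register_asc(1);		/* ASC1, the UART used for the console */
		ltq_register_wdt();
		ltq_register_nor(&board_flash_data);
	}
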
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
new file mode 100644
index 000000000000..2947bb19a528
--- /dev/null
+++ b/arch/mips/lantiq/devices.h
@@ -0,0 +1,23 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_DEVICES_H__
10#define _LTQ_DEVICES_H__
11
12#include <lantiq_platform.h>
13#include <linux/mtd/physmap.h>
14
15#define IRQ_RES(resname, irq) \
16 {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
17
18extern void ltq_register_nor(struct physmap_flash_data *data);
19extern void ltq_register_wdt(void);
20extern void ltq_register_asc(int port);
21extern void ltq_register_pci(struct ltq_pci_data *data);
22
23#endif
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
new file mode 100644
index 000000000000..972e05f87631
--- /dev/null
+++ b/arch/mips/lantiq/early_printk.c
@@ -0,0 +1,33 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/cpu.h>
11
12#include <lantiq.h>
13#include <lantiq_soc.h>
14
15/* ioremap is not available this early, so use a KSEG1 address instead */
16#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
17#define ASC_BUF 1024
18#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048))
19#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020))
20#define TXMASK 0x3F00
21#define TXOFFSET 8
22
23void prom_putchar(char c)
24{
25 unsigned long flags;
26
27 local_irq_save(flags);
28 do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
29 if (c == '\n')
30 ltq_w32('\r', LTQ_ASC_TBUF);
31 ltq_w32(c, LTQ_ASC_TBUF);
32 local_irq_restore(flags);
33}
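
prom_putchar() is the per-platform hook the generic MIPS early-printk code
drives; for illustration only, a trivial helper built on top of it:

	/* Illustrative only: a minimal early puts() on top of prom_putchar(). */
	static void early_puts(const char *s)
	{
		while (*s)
			prom_putchar(*s++);
	}
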
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
new file mode 100644
index 000000000000..fc89795cafdb
--- /dev/null
+++ b/arch/mips/lantiq/irq.c
@@ -0,0 +1,326 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
8 */
9
10#include <linux/interrupt.h>
11#include <linux/ioport.h>
12
13#include <asm/bootinfo.h>
14#include <asm/irq_cpu.h>
15
16#include <lantiq_soc.h>
17#include <irq.h>
18
19/* register definitions */
20#define LTQ_ICU_IM0_ISR 0x0000
21#define LTQ_ICU_IM0_IER 0x0008
22#define LTQ_ICU_IM0_IOSR 0x0010
23#define LTQ_ICU_IM0_IRSR 0x0018
24#define LTQ_ICU_IM0_IMR 0x0020
25#define LTQ_ICU_IM1_ISR 0x0028
26#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
27
28#define LTQ_EIU_EXIN_C 0x0000
29#define LTQ_EIU_EXIN_INIC 0x0004
30#define LTQ_EIU_EXIN_INEN 0x000C
31
32/* irq numbers used by the external interrupt unit (EIU) */
33#define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30)
34#define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31)
35#define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26)
36#define LTQ_EIU_IR3 INT_NUM_IM1_IRL0
37#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
38#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
39#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
40
41#define MAX_EIU 6
42
 43/* irqs generated by devices attached to the EBU need to be acked in
44 * a special manner
45 */
46#define LTQ_ICU_EBU_IRQ 22
47
48#define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y))
49#define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x))
50
51#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
52#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
53
54static unsigned short ltq_eiu_irq[MAX_EIU] = {
55 LTQ_EIU_IR0,
56 LTQ_EIU_IR1,
57 LTQ_EIU_IR2,
58 LTQ_EIU_IR3,
59 LTQ_EIU_IR4,
60 LTQ_EIU_IR5,
61};
62
63static struct resource ltq_icu_resource = {
64 .name = "icu",
65 .start = LTQ_ICU_BASE_ADDR,
66 .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
67 .flags = IORESOURCE_MEM,
68};
69
70static struct resource ltq_eiu_resource = {
71 .name = "eiu",
72 .start = LTQ_EIU_BASE_ADDR,
73 .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
74 .flags = IORESOURCE_MEM,
75};
76
77static void __iomem *ltq_icu_membase;
78static void __iomem *ltq_eiu_membase;
79
80void ltq_disable_irq(struct irq_data *d)
81{
82 u32 ier = LTQ_ICU_IM0_IER;
83 int irq_nr = d->irq - INT_NUM_IRQ0;
84
85 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
86 irq_nr %= INT_NUM_IM_OFFSET;
87 ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
88}
89
90void ltq_mask_and_ack_irq(struct irq_data *d)
91{
92 u32 ier = LTQ_ICU_IM0_IER;
93 u32 isr = LTQ_ICU_IM0_ISR;
94 int irq_nr = d->irq - INT_NUM_IRQ0;
95
96 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
97 isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
98 irq_nr %= INT_NUM_IM_OFFSET;
99 ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
100 ltq_icu_w32((1 << irq_nr), isr);
101}
102
103static void ltq_ack_irq(struct irq_data *d)
104{
105 u32 isr = LTQ_ICU_IM0_ISR;
106 int irq_nr = d->irq - INT_NUM_IRQ0;
107
108 isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
109 irq_nr %= INT_NUM_IM_OFFSET;
110 ltq_icu_w32((1 << irq_nr), isr);
111}
112
113void ltq_enable_irq(struct irq_data *d)
114{
115 u32 ier = LTQ_ICU_IM0_IER;
116 int irq_nr = d->irq - INT_NUM_IRQ0;
117
118 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
119 irq_nr %= INT_NUM_IM_OFFSET;
120 ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
121}
122
123static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
124{
125 int i;
126 int irq_nr = d->irq - INT_NUM_IRQ0;
127
128 ltq_enable_irq(d);
129 for (i = 0; i < MAX_EIU; i++) {
130 if (irq_nr == ltq_eiu_irq[i]) {
131 /* low level - we should really handle set_type */
132 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
133 (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
134 /* clear all pending */
135 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
136 LTQ_EIU_EXIN_INIC);
137 /* enable */
138 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
139 LTQ_EIU_EXIN_INEN);
140 break;
141 }
142 }
143
144 return 0;
145}
146
147static void ltq_shutdown_eiu_irq(struct irq_data *d)
148{
149 int i;
150 int irq_nr = d->irq - INT_NUM_IRQ0;
151
152 ltq_disable_irq(d);
153 for (i = 0; i < MAX_EIU; i++) {
154 if (irq_nr == ltq_eiu_irq[i]) {
155 /* disable */
156 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
157 LTQ_EIU_EXIN_INEN);
158 break;
159 }
160 }
161}
162
163static struct irq_chip ltq_irq_type = {
164	.name = "icu",
165 .irq_enable = ltq_enable_irq,
166 .irq_disable = ltq_disable_irq,
167 .irq_unmask = ltq_enable_irq,
168 .irq_ack = ltq_ack_irq,
169 .irq_mask = ltq_disable_irq,
170 .irq_mask_ack = ltq_mask_and_ack_irq,
171};
172
173static struct irq_chip ltq_eiu_type = {
174	.name = "eiu",
175 .irq_startup = ltq_startup_eiu_irq,
176 .irq_shutdown = ltq_shutdown_eiu_irq,
177 .irq_enable = ltq_enable_irq,
178 .irq_disable = ltq_disable_irq,
179 .irq_unmask = ltq_enable_irq,
180 .irq_ack = ltq_ack_irq,
181 .irq_mask = ltq_disable_irq,
182 .irq_mask_ack = ltq_mask_and_ack_irq,
183};
184
185static void ltq_hw_irqdispatch(int module)
186{
187 u32 irq;
188
189 irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
190 if (irq == 0)
191 return;
192
193	/* a silicon bug causes only the most significant set bit to be
194	 * valid; all other bits might be bogus
195	 */
196 irq = __fls(irq);
197 do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
198
199 /* if this is a EBU irq, we need to ack it or get a deadlock */
200 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
201 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
202 LTQ_EBU_PCC_ISTAT);
203}
204
205#define DEFINE_HWx_IRQDISPATCH(x) \
206 static void ltq_hw ## x ## _irqdispatch(void) \
207 { \
208 ltq_hw_irqdispatch(x); \
209 }
210DEFINE_HWx_IRQDISPATCH(0)
211DEFINE_HWx_IRQDISPATCH(1)
212DEFINE_HWx_IRQDISPATCH(2)
213DEFINE_HWx_IRQDISPATCH(3)
214DEFINE_HWx_IRQDISPATCH(4)
215
216static void ltq_hw5_irqdispatch(void)
217{
218 do_IRQ(MIPS_CPU_TIMER_IRQ);
219}
220
221asmlinkage void plat_irq_dispatch(void)
222{
223 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
224 unsigned int i;
225
226 if (pending & CAUSEF_IP7) {
227 do_IRQ(MIPS_CPU_TIMER_IRQ);
228 goto out;
229 } else {
230 for (i = 0; i < 5; i++) {
231 if (pending & (CAUSEF_IP2 << i)) {
232 ltq_hw_irqdispatch(i);
233 goto out;
234 }
235 }
236 }
237	pr_alert("Spurious IRQ: STATUS=0x%08x\n", read_c0_status());
238
239out:
240 return;
241}
242
243static struct irqaction cascade = {
244 .handler = no_action,
245 .flags = IRQF_DISABLED,
246 .name = "cascade",
247};
248
249void __init arch_init_irq(void)
250{
251 int i;
252
253 if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
254 panic("Failed to insert icu memory\n");
255
256 if (request_mem_region(ltq_icu_resource.start,
257 resource_size(&ltq_icu_resource), "icu") < 0)
258 panic("Failed to request icu memory\n");
259
260 ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
261 resource_size(&ltq_icu_resource));
262 if (!ltq_icu_membase)
263 panic("Failed to remap icu memory\n");
264
265 if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
266 panic("Failed to insert eiu memory\n");
267
268 if (request_mem_region(ltq_eiu_resource.start,
269 resource_size(&ltq_eiu_resource), "eiu") < 0)
270 panic("Failed to request eiu memory\n");
271
272 ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
273 resource_size(&ltq_eiu_resource));
274 if (!ltq_eiu_membase)
275 panic("Failed to remap eiu memory\n");
276
277 /* make sure all irqs are turned off by default */
278	for (i = 0; i < 5; i++) {
279		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
280		/* clear all possibly pending interrupts */
281		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
282	}
283
284 mips_cpu_irq_init();
285
286 for (i = 2; i <= 6; i++)
287 setup_irq(i, &cascade);
288
289 if (cpu_has_vint) {
290 pr_info("Setting up vectored interrupts\n");
291 set_vi_handler(2, ltq_hw0_irqdispatch);
292 set_vi_handler(3, ltq_hw1_irqdispatch);
293 set_vi_handler(4, ltq_hw2_irqdispatch);
294 set_vi_handler(5, ltq_hw3_irqdispatch);
295 set_vi_handler(6, ltq_hw4_irqdispatch);
296 set_vi_handler(7, ltq_hw5_irqdispatch);
297 }
298
299 for (i = INT_NUM_IRQ0;
300 i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
301 if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
302 (i == LTQ_EIU_IR2))
303 irq_set_chip_and_handler(i, &ltq_eiu_type,
304 handle_level_irq);
305 /* EIU3-5 only exist on ar9 and vr9 */
306 else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
307 (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
308 irq_set_chip_and_handler(i, &ltq_eiu_type,
309 handle_level_irq);
310 else
311 irq_set_chip_and_handler(i, &ltq_irq_type,
312 handle_level_irq);
313
314#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
315 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
316 IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
317#else
318 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
319 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
320#endif
321}
322
323unsigned int __cpuinit get_c0_compare_int(void)
324{
325 return CP0_LEGACY_COMPARE_IRQ;
326}
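
The ICU helpers above all map a Linux irq number onto an interrupt module and a bit inside that module: divide by the per-module irq count to pick the register block, take the remainder as the bit position. A standalone sketch of that arithmetic follows; INT_NUM_IM_OFFSET = 32 is an assumption for illustration, the real value comes from the SoC's irq.h.

#include <stdio.h>

#define LTQ_ICU_IM0_IER   0x0008
#define LTQ_ICU_OFFSET    0x0028	/* stride between the IM register blocks */
#define INT_NUM_IM_OFFSET 32		/* assumed irqs per module */

int main(void)
{
	int irq_nr = 70;	/* hypothetical offset relative to INT_NUM_IRQ0 */
	int module = irq_nr / INT_NUM_IM_OFFSET;
	unsigned int ier = LTQ_ICU_IM0_IER + LTQ_ICU_OFFSET * module;

	irq_nr %= INT_NUM_IM_OFFSET;	/* bit inside the selected module */

	printf("module %d: IER at offset 0x%04x, bit %d\n", module, ier, irq_nr);
	return 0;
}
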
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
new file mode 100644
index 000000000000..7e01b8c484eb
--- /dev/null
+++ b/arch/mips/lantiq/machtypes.h
@@ -0,0 +1,20 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LANTIQ_MACH_H__
10#define _LANTIQ_MACH_H__
11
12#include <asm/mips_machine.h>
13
14enum lantiq_mach_type {
15 LTQ_MACH_GENERIC = 0,
16 LTQ_MACH_EASY50712, /* Danube evaluation board */
17 LTQ_MACH_EASY50601, /* Amazon SE evaluation board */
18};
19
20#endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
new file mode 100644
index 000000000000..56ba007bf1e5
--- /dev/null
+++ b/arch/mips/lantiq/prom.c
@@ -0,0 +1,71 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/module.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq.h>
15
16#include "prom.h"
17#include "clk.h"
18
19static struct ltq_soc_info soc_info;
20
21unsigned int ltq_get_cpu_ver(void)
22{
23 return soc_info.rev;
24}
25EXPORT_SYMBOL(ltq_get_cpu_ver);
26
27unsigned int ltq_get_soc_type(void)
28{
29 return soc_info.type;
30}
31EXPORT_SYMBOL(ltq_get_soc_type);
32
33const char *get_system_type(void)
34{
35 return soc_info.sys_type;
36}
37
38void prom_free_prom_memory(void)
39{
40}
41
42static void __init prom_init_cmdline(void)
43{
44 int argc = fw_arg0;
45 char **argv = (char **) KSEG1ADDR(fw_arg1);
46 int i;
47
48 for (i = 0; i < argc; i++) {
49 char *p = (char *) KSEG1ADDR(argv[i]);
50
51 if (p && *p) {
52 strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
53 strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
54 }
55 }
56}
57
58void __init prom_init(void)
59{
60 struct clk *clk;
61
62 ltq_soc_detect(&soc_info);
63 clk_init();
64 clk = clk_get(0, "cpu");
65 snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
66 soc_info.name, soc_info.rev);
67 clk_put(clk);
68 soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
69 pr_info("SoC: %s\n", soc_info.sys_type);
70 prom_init_cmdline();
71}
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
new file mode 100644
index 000000000000..b4229d94280f
--- /dev/null
+++ b/arch/mips/lantiq/prom.h
@@ -0,0 +1,25 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_PROM_H__
10#define _LTQ_PROM_H__
11
12#define LTQ_SYS_TYPE_LEN 0x100
13
14struct ltq_soc_info {
15 unsigned char *name;
16 unsigned int rev;
17 unsigned int partnum;
18 unsigned int type;
19 unsigned char sys_type[LTQ_SYS_TYPE_LEN];
20};
21
22extern void ltq_soc_detect(struct ltq_soc_info *i);
23extern void ltq_soc_setup(void);
24
25#endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
new file mode 100644
index 000000000000..9b8af77ed0f9
--- /dev/null
+++ b/arch/mips/lantiq/setup.c
@@ -0,0 +1,66 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/io.h>
12#include <linux/ioport.h>
13#include <asm/bootinfo.h>
14
15#include <lantiq_soc.h>
16
17#include "machtypes.h"
18#include "devices.h"
19#include "prom.h"
20
21void __init plat_mem_setup(void)
22{
23	/* assume 16M as default in case uboot fails to pass a proper ramsize */
24 unsigned long memsize = 16;
25 char **envp = (char **) KSEG1ADDR(fw_arg2);
26
27 ioport_resource.start = IOPORT_RESOURCE_START;
28 ioport_resource.end = IOPORT_RESOURCE_END;
29 iomem_resource.start = IOMEM_RESOURCE_START;
30 iomem_resource.end = IOMEM_RESOURCE_END;
31
32 set_io_port_base((unsigned long) KSEG1);
33
34 while (*envp) {
35 char *e = (char *)KSEG1ADDR(*envp);
36 if (!strncmp(e, "memsize=", 8)) {
37 e += 8;
38 if (strict_strtoul(e, 0, &memsize))
39 pr_warn("bad memsize specified\n");
40 }
41 envp++;
42 }
43 memsize *= 1024 * 1024;
44 add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
45}
46
47static int __init
48lantiq_setup(void)
49{
50 ltq_soc_setup();
51 mips_machine_setup();
52 return 0;
53}
54
55arch_initcall(lantiq_setup);
56
57static void __init
58lantiq_generic_init(void)
59{
60 /* Nothing to do */
61}
62
63MIPS_MACHINE(LTQ_MACH_GENERIC,
64 "Generic",
65 "Generic Lantiq based board",
66 lantiq_generic_init);
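
plat_mem_setup() above walks the u-boot environment for a "memsize=" entry given in megabytes and falls back to 16 MB. A standalone sketch of that parsing with a made-up environment; the kernel code uses strict_strtoul() instead of strtoul().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *envp[] = { "console=ttyLTQ0", "memsize=32", NULL };
	unsigned long memsize = 16;	/* default if u-boot passes nothing */

	for (int i = 0; envp[i]; i++)
		if (!strncmp(envp[i], "memsize=", 8))
			memsize = strtoul(envp[i] + 8, NULL, 0);

	printf("registering %lu bytes of RAM\n", memsize * 1024 * 1024);
	return 0;
}
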
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
new file mode 100644
index 000000000000..2b857de36620
--- /dev/null
+++ b/arch/mips/lantiq/xway/Kconfig
@@ -0,0 +1,23 @@
1if SOC_XWAY
2
3menu "MIPS Machine"
4
5config LANTIQ_MACH_EASY50712
6 bool "Easy50712 - Danube"
7 default y
8
9endmenu
10
11endif
12
13if SOC_AMAZON_SE
14
15menu "MIPS Machine"
16
17config LANTIQ_MACH_EASY50601
18 bool "Easy50601 - Amazon SE"
19 default y
20
21endmenu
22
23endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
new file mode 100644
index 000000000000..c517f2e77563
--- /dev/null
+++ b/arch/mips/lantiq/xway/Makefile
@@ -0,0 +1,7 @@
1obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o
2
3obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
4obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
5
6obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
7obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
new file mode 100644
index 000000000000..22d823acd536
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk-ase.c
@@ -0,0 +1,48 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/io.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/clk.h>
13
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/div64.h>
17
18#include <lantiq_soc.h>
19
20/* cgu registers */
21#define LTQ_CGU_SYS 0x0010
22
23unsigned int ltq_get_io_region_clock(void)
24{
25 return CLOCK_133M;
26}
27EXPORT_SYMBOL(ltq_get_io_region_clock);
28
29unsigned int ltq_get_fpi_bus_clock(int fpi)
30{
31 return CLOCK_133M;
32}
33EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
34
35unsigned int ltq_get_cpu_hz(void)
36{
37 if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
38 return CLOCK_266M;
39 else
40 return CLOCK_133M;
41}
42EXPORT_SYMBOL(ltq_get_cpu_hz);
43
44unsigned int ltq_get_fpi_hz(void)
45{
46 return CLOCK_133M;
47}
48EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
new file mode 100644
index 000000000000..ddd39593c581
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk-xway.c
@@ -0,0 +1,223 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/io.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/clk.h>
13
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/div64.h>
17
18#include <lantiq_soc.h>
19
20static unsigned int ltq_ram_clocks[] = {
21 CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
22#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
23
24#define BASIC_FREQUENCY_1 35328000
25#define BASIC_FREQUENCY_2 36000000
26#define BASIS_REQUENCY_USB 12000000
27
28#define GET_BITS(x, msb, lsb) \
29 (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
30
31#define LTQ_CGU_PLL0_CFG 0x0004
32#define LTQ_CGU_PLL1_CFG 0x0008
33#define LTQ_CGU_PLL2_CFG 0x000C
34#define LTQ_CGU_SYS 0x0010
35#define LTQ_CGU_UPDATE 0x0014
36#define LTQ_CGU_IF_CLK 0x0018
37#define LTQ_CGU_OSC_CON 0x001C
38#define LTQ_CGU_SMD 0x0020
39#define LTQ_CGU_CT1SR 0x0028
40#define LTQ_CGU_CT2SR 0x002C
41#define LTQ_CGU_PCMCR 0x0030
42#define LTQ_CGU_PCI_CR 0x0034
43#define LTQ_CGU_PD_PC 0x0038
44#define LTQ_CGU_FMR 0x003C
45
46#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
47 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
48#define CGU_PLL0_BYPASS \
49 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
50#define CGU_PLL0_CFG_DSMSEL \
51 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
52#define CGU_PLL0_CFG_FRAC_EN \
53 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
54#define CGU_PLL1_SRC \
55 (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
56#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
57 (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
58#define CGU_SYS_FPI_SEL (1 << 6)
59#define CGU_SYS_DDR_SEL 0x3
60#define CGU_PLL0_SRC (1 << 29)
61
62#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
63#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
64#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
65#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
66#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
67
68static unsigned int ltq_get_pll0_fdiv(void);
69
70static inline unsigned int get_input_clock(int pll)
71{
72 switch (pll) {
73 case 0:
74 if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
75 return BASIS_REQUENCY_USB;
76 else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
77 return BASIC_FREQUENCY_1;
78 else
79 return BASIC_FREQUENCY_2;
80 case 1:
81 if (CGU_PLL1_SRC)
82 return BASIS_REQUENCY_USB;
83 else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
84 return BASIC_FREQUENCY_1;
85 else
86 return BASIC_FREQUENCY_2;
87 case 2:
88 switch (CGU_PLL2_SRC) {
89 case 0:
90 return ltq_get_pll0_fdiv();
91 case 1:
92 return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
93 BASIC_FREQUENCY_1 :
94 BASIC_FREQUENCY_2;
95 case 2:
96 return BASIS_REQUENCY_USB;
97 }
98 default:
99 return 0;
100 }
101}
102
103static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
104{
105 u64 res, clock = get_input_clock(pll);
106
107 res = num * clock;
108 do_div(res, den);
109 return res;
110}
111
112static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
113 unsigned int K)
114{
115 unsigned int num = ((N + 1) << 10) + K;
116 unsigned int den = (M + 1) << 10;
117
118 return cal_dsm(pll, num, den);
119}
120
121static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
122 unsigned int K)
123{
124 unsigned int num = ((N + 1) << 11) + K + 512;
125 unsigned int den = (M + 1) << 11;
126
127 return cal_dsm(pll, num, den);
128}
129
130static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
131 unsigned int K)
132{
133 unsigned int num = K >= 512 ?
134 ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
135 unsigned int den = (M + 1) << 12;
136
137 return cal_dsm(pll, num, den);
138}
139
140static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
141 unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
142{
143 if (!dsmsel)
144 return mash_dsm(pll, M, N, K);
145 else if (!phase_div_en)
146 return mash_dsm(pll, M, N, K);
147 else
148 return ssff_dsm_2(pll, M, N, K);
149}
150
151static inline unsigned int ltq_get_pll0_fosc(void)
152{
153 if (CGU_PLL0_BYPASS)
154 return get_input_clock(0);
155 else
156 return !CGU_PLL0_CFG_FRAC_EN
157 ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
158 CGU_PLL0_CFG_DSMSEL,
159 CGU_PLL0_PHASE_DIVIDER_ENABLE)
160 : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
161 CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
162 CGU_PLL0_PHASE_DIVIDER_ENABLE);
163}
164
165static unsigned int ltq_get_pll0_fdiv(void)
166{
167 unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
168
169 return (ltq_get_pll0_fosc() + (div >> 1)) / div;
170}
171
172unsigned int ltq_get_io_region_clock(void)
173{
174 unsigned int ret = ltq_get_pll0_fosc();
175
176 switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
177 default:
178 case 0:
179 return (ret + 1) / 2;
180 case 1:
181 return (ret * 2 + 2) / 5;
182 case 2:
183 return (ret + 1) / 3;
184 case 3:
185 return (ret + 2) / 4;
186 }
187}
188EXPORT_SYMBOL(ltq_get_io_region_clock);
189
190unsigned int ltq_get_fpi_bus_clock(int fpi)
191{
192 unsigned int ret = ltq_get_io_region_clock();
193
194 if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
195 ret >>= 1;
196 return ret;
197}
198EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
199
200unsigned int ltq_get_cpu_hz(void)
201{
202 switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
203 case 0:
204 return CLOCK_333M;
205 case 4:
206 return DDR_HZ;
207 case 8:
208 return DDR_HZ << 1;
209 default:
210 return DDR_HZ >> 1;
211 }
212}
213EXPORT_SYMBOL(ltq_get_cpu_hz);
214
215unsigned int ltq_get_fpi_hz(void)
216{
217 unsigned int ddr_clock = DDR_HZ;
218
219 if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
220 return ddr_clock >> 1;
221 return ddr_clock;
222}
223EXPORT_SYMBOL(ltq_get_fpi_hz);
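
ltq_get_pll0_fosc() above derives the oscillator frequency from the PLL's M/N/K divider fields; in the MASH-DSM case the formula reduces to fin * (((N + 1) << 10) + K) / ((M + 1) << 10). A standalone sketch of that calculation with hypothetical divider values:

#include <stdio.h>
#include <stdint.h>

#define BASIC_FREQUENCY_1 35328000ULL	/* one of the input clocks above */

static uint64_t mash_dsm(uint64_t fin, unsigned int M, unsigned int N,
			 unsigned int K)
{
	uint64_t num = ((uint64_t)(N + 1) << 10) + K;
	uint64_t den = (uint64_t)(M + 1) << 10;

	return fin * num / den;	/* the kernel uses do_div() for the divide */
}

int main(void)
{
	unsigned int M = 0, N = 8, K = 0;	/* hypothetical divider settings */

	printf("PLL0 fosc ~= %llu Hz\n",
	       (unsigned long long)mash_dsm(BASIC_FREQUENCY_1, M, N, K));
	return 0;
}
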
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
new file mode 100644
index 000000000000..e09e789dfc27
--- /dev/null
+++ b/arch/mips/lantiq/xway/devices.c
@@ -0,0 +1,121 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/mtd/physmap.h>
14#include <linux/kernel.h>
15#include <linux/reboot.h>
16#include <linux/platform_device.h>
17#include <linux/leds.h>
18#include <linux/etherdevice.h>
19#include <linux/reboot.h>
20#include <linux/time.h>
21#include <linux/io.h>
22#include <linux/gpio.h>
23#include <linux/leds.h>
24
25#include <asm/bootinfo.h>
26#include <asm/irq.h>
27
28#include <lantiq_soc.h>
29#include <lantiq_irq.h>
30#include <lantiq_platform.h>
31
32#include "devices.h"
33
34/* gpio */
35static struct resource ltq_gpio_resource[] = {
36 {
37 .name = "gpio0",
38 .start = LTQ_GPIO0_BASE_ADDR,
39 .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
40 .flags = IORESOURCE_MEM,
41 }, {
42 .name = "gpio1",
43 .start = LTQ_GPIO1_BASE_ADDR,
44 .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
45 .flags = IORESOURCE_MEM,
46 }, {
47 .name = "gpio2",
48 .start = LTQ_GPIO2_BASE_ADDR,
49 .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
50 .flags = IORESOURCE_MEM,
51 }
52};
53
54void __init ltq_register_gpio(void)
55{
56 platform_device_register_simple("ltq_gpio", 0,
57 &ltq_gpio_resource[0], 1);
58 platform_device_register_simple("ltq_gpio", 1,
59 &ltq_gpio_resource[1], 1);
60
61 /* AR9 and VR9 have an extra gpio block */
62 if (ltq_is_ar9() || ltq_is_vr9()) {
63 platform_device_register_simple("ltq_gpio", 2,
64 &ltq_gpio_resource[2], 1);
65 }
66}
67
68/* serial to parallel conversion */
69static struct resource ltq_stp_resource = {
70 .name = "stp",
71 .start = LTQ_STP_BASE_ADDR,
72 .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
73 .flags = IORESOURCE_MEM,
74};
75
76void __init ltq_register_gpio_stp(void)
77{
78 platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
79}
80
81/* asc ports - amazon se has its own serial mapping */
82static struct resource ltq_ase_asc_resources[] = {
83 {
84 .name = "asc0",
85 .start = LTQ_ASC1_BASE_ADDR,
86 .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
87 .flags = IORESOURCE_MEM,
88 },
89 IRQ_RES(tx, LTQ_ASC_ASE_TIR),
90 IRQ_RES(rx, LTQ_ASC_ASE_RIR),
91 IRQ_RES(err, LTQ_ASC_ASE_EIR),
92};
93
94void __init ltq_register_ase_asc(void)
95{
96 platform_device_register_simple("ltq_asc", 0,
97 ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
98}
99
100/* ethernet */
101static struct resource ltq_etop_resources = {
102 .name = "etop",
103 .start = LTQ_ETOP_BASE_ADDR,
104 .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
105 .flags = IORESOURCE_MEM,
106};
107
108static struct platform_device ltq_etop = {
109 .name = "ltq_etop",
110 .resource = &ltq_etop_resources,
111 .num_resources = 1,
112};
113
114void __init
115ltq_register_etop(struct ltq_eth_data *eth)
116{
117 if (eth) {
118 ltq_etop.dev.platform_data = eth;
119 platform_device_register(&ltq_etop);
120 }
121}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
new file mode 100644
index 000000000000..e90493471bc1
--- /dev/null
+++ b/arch/mips/lantiq/xway/devices.h
@@ -0,0 +1,20 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_DEVICES_XWAY_H__
10#define _LTQ_DEVICES_XWAY_H__
11
12#include "../devices.h"
13#include <linux/phy.h>
14
15extern void ltq_register_gpio(void);
16extern void ltq_register_gpio_stp(void);
17extern void ltq_register_ase_asc(void);
18extern void ltq_register_etop(struct ltq_eth_data *eth);
19
20#endif
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
new file mode 100644
index 000000000000..4278a459d6c4
--- /dev/null
+++ b/arch/mips/lantiq/xway/dma.c
@@ -0,0 +1,253 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
16 */
17
18#include <linux/init.h>
19#include <linux/platform_device.h>
20#include <linux/io.h>
21#include <linux/dma-mapping.h>
22
23#include <lantiq_soc.h>
24#include <xway_dma.h>
25
26#define LTQ_DMA_CTRL 0x10
27#define LTQ_DMA_CPOLL 0x14
28#define LTQ_DMA_CS 0x18
29#define LTQ_DMA_CCTRL 0x1C
30#define LTQ_DMA_CDBA 0x20
31#define LTQ_DMA_CDLEN 0x24
32#define LTQ_DMA_CIS 0x28
33#define LTQ_DMA_CIE 0x2C
34#define LTQ_DMA_PS 0x40
35#define LTQ_DMA_PCTRL 0x44
36#define LTQ_DMA_IRNEN 0xf4
37
38#define DMA_DESCPT BIT(3) /* descriptor complete irq */
39#define DMA_TX BIT(8) /* TX channel direction */
40#define DMA_CHAN_ON BIT(0) /* channel on / off bit */
41#define DMA_PDEN BIT(6) /* enable packet drop */
 42#define DMA_CHAN_RST BIT(1) /* channel reset bit */
 43#define DMA_RESET BIT(0) /* dma engine reset bit */
44#define DMA_IRQ_ACK 0x7e /* IRQ status register */
45#define DMA_POLL BIT(31) /* turn on channel polling */
46#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
47#define DMA_2W_BURST BIT(1) /* 2 word burst length */
48#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */
 49#define DMA_ETOP_ENDIANESS (0xf << 8) /* endianness swap for etop channels */
 50#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */
51
52#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
53#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y))
54#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
55 ltq_dma_membase + (z))
56
57static struct resource ltq_dma_resource = {
58 .name = "dma",
59 .start = LTQ_DMA_BASE_ADDR,
60 .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
61 .flags = IORESOURCE_MEM,
62};
63
64static void __iomem *ltq_dma_membase;
65
66void
67ltq_dma_enable_irq(struct ltq_dma_channel *ch)
68{
69 unsigned long flags;
70
71 local_irq_save(flags);
72 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
73 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
74 local_irq_restore(flags);
75}
76EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
77
78void
79ltq_dma_disable_irq(struct ltq_dma_channel *ch)
80{
81 unsigned long flags;
82
83 local_irq_save(flags);
84 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
85 ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
86 local_irq_restore(flags);
87}
88EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
89
90void
91ltq_dma_ack_irq(struct ltq_dma_channel *ch)
92{
93 unsigned long flags;
94
95 local_irq_save(flags);
96 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
97 ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
98 local_irq_restore(flags);
99}
100EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
101
102void
103ltq_dma_open(struct ltq_dma_channel *ch)
104{
105 unsigned long flag;
106
107 local_irq_save(flag);
108 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
109 ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
110 ltq_dma_enable_irq(ch);
111 local_irq_restore(flag);
112}
113EXPORT_SYMBOL_GPL(ltq_dma_open);
114
115void
116ltq_dma_close(struct ltq_dma_channel *ch)
117{
118 unsigned long flag;
119
120 local_irq_save(flag);
121 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
122 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
123 ltq_dma_disable_irq(ch);
124 local_irq_restore(flag);
125}
126EXPORT_SYMBOL_GPL(ltq_dma_close);
127
128static void
129ltq_dma_alloc(struct ltq_dma_channel *ch)
130{
131 unsigned long flags;
132
133 ch->desc = 0;
134 ch->desc_base = dma_alloc_coherent(NULL,
135 LTQ_DESC_NUM * LTQ_DESC_SIZE,
136 &ch->phys, GFP_ATOMIC);
137 memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
138
139 local_irq_save(flags);
140 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
141 ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
142 ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
143 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
144 wmb();
145 ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
146 while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
147 ;
148 local_irq_restore(flags);
149}
150
151void
152ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
153{
154 unsigned long flags;
155
156 ltq_dma_alloc(ch);
157
158 local_irq_save(flags);
159 ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
160 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
161 ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
162 local_irq_restore(flags);
163}
164EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
165
166void
167ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
168{
169 unsigned long flags;
170
171 ltq_dma_alloc(ch);
172
173 local_irq_save(flags);
174 ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
175 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
176 ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
177 local_irq_restore(flags);
178}
179EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
180
181void
182ltq_dma_free(struct ltq_dma_channel *ch)
183{
184 if (!ch->desc_base)
185 return;
186 ltq_dma_close(ch);
187 dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
188 ch->desc_base, ch->phys);
189}
190EXPORT_SYMBOL_GPL(ltq_dma_free);
191
192void
193ltq_dma_init_port(int p)
194{
195 ltq_dma_w32(p, LTQ_DMA_PS);
196 switch (p) {
197 case DMA_PORT_ETOP:
198 /*
199	 * Tell the DMA engine to swap the endianness of data frames and
200 * drop packets if the channel arbitration fails.
201 */
202 ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN,
203 LTQ_DMA_PCTRL);
204 break;
205
206 case DMA_PORT_DEU:
207 ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2),
208 LTQ_DMA_PCTRL);
209 break;
210
211 default:
212 break;
213 }
214}
215EXPORT_SYMBOL_GPL(ltq_dma_init_port);
216
217int __init
218ltq_dma_init(void)
219{
220 int i;
221
222 /* insert and request the memory region */
223 if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
224 panic("Failed to insert dma memory\n");
225
226 if (request_mem_region(ltq_dma_resource.start,
227 resource_size(&ltq_dma_resource), "dma") < 0)
228 panic("Failed to request dma memory\n");
229
230 /* remap dma register range */
231 ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
232 resource_size(&ltq_dma_resource));
233 if (!ltq_dma_membase)
234 panic("Failed to remap dma memory\n");
235
236 /* power up and reset the dma engine */
237 ltq_pmu_enable(PMU_DMA);
238 ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
239
240 /* disable all interrupts */
241 ltq_dma_w32(0, LTQ_DMA_IRNEN);
242
243 /* reset/configure each channel */
244 for (i = 0; i < DMA_MAX_CHANNEL; i++) {
245 ltq_dma_w32(i, LTQ_DMA_CS);
246 ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
247 ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
248 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
249 }
250 return 0;
251}
252
253postcore_initcall(ltq_dma_init);
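
The irq helpers above go through ltq_dma_w32_mask(), which wraps ltq_w32_mask() from the SoC headers. Judging by how it is called here, and by the ltq_stp_w32_mask() definition later in this patch, it is a read-modify-write that clears one mask and then sets another. A standalone sketch of that pattern against a simulated IRNEN register:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_irnen;	/* stands in for LTQ_DMA_IRNEN */

/* assumed semantics: clear the first mask, then set the second */
static void w32_mask(uint32_t clear, uint32_t set, uint32_t *reg)
{
	*reg = (*reg & ~clear) | set;
}

int main(void)
{
	int ch = 3;	/* hypothetical channel number */

	w32_mask(0, 1u << ch, &fake_irnen);	/* as in ltq_dma_enable_irq() */
	printf("after enable : IRNEN = 0x%08x\n", (unsigned int)fake_irnen);

	w32_mask(1u << ch, 0, &fake_irnen);	/* as in ltq_dma_disable_irq() */
	printf("after disable: IRNEN = 0x%08x\n", (unsigned int)fake_irnen);
	return 0;
}
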
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
new file mode 100644
index 000000000000..66eb52fa50a1
--- /dev/null
+++ b/arch/mips/lantiq/xway/ebu.c
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * EBU - the external bus unit attaches PCI, NOR and NAND
7 *
8 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/version.h>
14#include <linux/ioport.h>
15
16#include <lantiq_soc.h>
17
18/* all access to the ebu must be locked */
19DEFINE_SPINLOCK(ebu_lock);
20EXPORT_SYMBOL_GPL(ebu_lock);
21
22static struct resource ltq_ebu_resource = {
23 .name = "ebu",
24 .start = LTQ_EBU_BASE_ADDR,
25 .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
26 .flags = IORESOURCE_MEM,
27};
28
 29/* remapped base addr of the external bus unit */
30void __iomem *ltq_ebu_membase;
31
32static int __init lantiq_ebu_init(void)
33{
34 /* insert and request the memory region */
35 if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
36 panic("Failed to insert ebu memory\n");
37
38 if (request_mem_region(ltq_ebu_resource.start,
39 resource_size(&ltq_ebu_resource), "ebu") < 0)
40 panic("Failed to request ebu memory\n");
41
42 /* remap ebu register range */
43 ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
44 resource_size(&ltq_ebu_resource));
45 if (!ltq_ebu_membase)
46 panic("Failed to remap ebu memory\n");
47
48 /* make sure to unprotect the memory region where flash is located */
49 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
50 return 0;
51}
52
53postcore_initcall(lantiq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
new file mode 100644
index 000000000000..a321451a5455
--- /dev/null
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -0,0 +1,195 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/slab.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/gpio.h>
13#include <linux/ioport.h>
14#include <linux/io.h>
15
16#include <lantiq_soc.h>
17
18#define LTQ_GPIO_OUT 0x00
19#define LTQ_GPIO_IN 0x04
20#define LTQ_GPIO_DIR 0x08
21#define LTQ_GPIO_ALTSEL0 0x0C
22#define LTQ_GPIO_ALTSEL1 0x10
23#define LTQ_GPIO_OD 0x14
24
25#define PINS_PER_PORT 16
26#define MAX_PORTS 3
27
28#define ltq_gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p)))
29#define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << p), m + r)
30#define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << p), 0, m + r)
31
32struct ltq_gpio {
33 void __iomem *membase;
34 struct gpio_chip chip;
35};
36
37static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
38
39int gpio_to_irq(unsigned int gpio)
40{
41 return -EINVAL;
42}
43EXPORT_SYMBOL(gpio_to_irq);
44
45int irq_to_gpio(unsigned int gpio)
46{
47 return -EINVAL;
48}
49EXPORT_SYMBOL(irq_to_gpio);
50
51int ltq_gpio_request(unsigned int pin, unsigned int alt0,
52 unsigned int alt1, unsigned int dir, const char *name)
53{
54 int id = 0;
55
56 if (pin >= (MAX_PORTS * PINS_PER_PORT))
57 return -EINVAL;
58 if (gpio_request(pin, name)) {
59 pr_err("failed to setup lantiq gpio: %s\n", name);
60 return -EBUSY;
61 }
62 if (dir)
63 gpio_direction_output(pin, 1);
64 else
65 gpio_direction_input(pin);
66 while (pin >= PINS_PER_PORT) {
67 pin -= PINS_PER_PORT;
68 id++;
69 }
70 if (alt0)
71 ltq_gpio_setbit(ltq_gpio_port[id].membase,
72 LTQ_GPIO_ALTSEL0, pin);
73 else
74 ltq_gpio_clearbit(ltq_gpio_port[id].membase,
75 LTQ_GPIO_ALTSEL0, pin);
76 if (alt1)
77 ltq_gpio_setbit(ltq_gpio_port[id].membase,
78 LTQ_GPIO_ALTSEL1, pin);
79 else
80 ltq_gpio_clearbit(ltq_gpio_port[id].membase,
81 LTQ_GPIO_ALTSEL1, pin);
82 return 0;
83}
84EXPORT_SYMBOL(ltq_gpio_request);
85
86static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
87{
88 struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
89
90 if (value)
91 ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset);
92 else
93 ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset);
94}
95
96static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset)
97{
98 struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
99
100 return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset);
101}
102
103static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
104{
105 struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
106
107 ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset);
108 ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset);
109
110 return 0;
111}
112
113static int ltq_gpio_direction_output(struct gpio_chip *chip,
114 unsigned int offset, int value)
115{
116 struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
117
118 ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset);
119 ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset);
120 ltq_gpio_set(chip, offset, value);
121
122 return 0;
123}
124
125static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset)
126{
127 struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip);
128
129 ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset);
130 ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset);
131 return 0;
132}
133
134static int ltq_gpio_probe(struct platform_device *pdev)
135{
136 struct resource *res;
137
138 if (pdev->id >= MAX_PORTS) {
139 dev_err(&pdev->dev, "invalid gpio port %d\n",
140 pdev->id);
141 return -EINVAL;
142 }
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_err(&pdev->dev, "failed to get memory for gpio port %d\n",
146 pdev->id);
147 return -ENOENT;
148 }
149 res = devm_request_mem_region(&pdev->dev, res->start,
150 resource_size(res), dev_name(&pdev->dev));
151 if (!res) {
152 dev_err(&pdev->dev,
153 "failed to request memory for gpio port %d\n",
154 pdev->id);
155 return -EBUSY;
156 }
157 ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev,
158 res->start, resource_size(res));
159 if (!ltq_gpio_port[pdev->id].membase) {
160 dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n",
161 pdev->id);
162 return -ENOMEM;
163 }
164 ltq_gpio_port[pdev->id].chip.label = "ltq_gpio";
165 ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input;
166 ltq_gpio_port[pdev->id].chip.direction_output =
167 ltq_gpio_direction_output;
168 ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get;
169 ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set;
170 ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req;
171 ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id;
172 ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT;
173 platform_set_drvdata(pdev, &ltq_gpio_port[pdev->id]);
174 return gpiochip_add(&ltq_gpio_port[pdev->id].chip);
175}
176
177static struct platform_driver
178ltq_gpio_driver = {
179 .probe = ltq_gpio_probe,
180 .driver = {
181 .name = "ltq_gpio",
182 .owner = THIS_MODULE,
183 },
184};
185
186int __init ltq_gpio_init(void)
187{
188 int ret = platform_driver_register(&ltq_gpio_driver);
189
190 if (ret)
191		pr_info("ltq_gpio: Error registering platform driver!\n");
192 return ret;
193}
194
195postcore_initcall(ltq_gpio_init);
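
ltq_gpio_request() above splits a global gpio number into a port id and a bit offset, with 16 pins per port and up to three ports. A standalone sketch of that split for a hypothetical pin number:

#include <stdio.h>

#define PINS_PER_PORT 16
#define MAX_PORTS 3

int main(void)
{
	unsigned int pin = 37;	/* hypothetical global gpio number */
	int id = 0;

	if (pin >= MAX_PORTS * PINS_PER_PORT) {
		printf("invalid pin\n");
		return 1;
	}
	while (pin >= PINS_PER_PORT) {
		pin -= PINS_PER_PORT;
		id++;
	}
	printf("gpio 37 lives on port %d, bit %u\n", id, pin);
	return 0;
}
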
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
new file mode 100644
index 000000000000..a479355abdb9
--- /dev/null
+++ b/arch/mips/lantiq/xway/gpio_ebu.c
@@ -0,0 +1,126 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/platform_device.h>
13#include <linux/mutex.h>
14#include <linux/gpio.h>
15#include <linux/io.h>
16
17#include <lantiq_soc.h>
18
19/*
 20 * By attaching hardware latches to the EBU it is possible to create
 21 * output-only gpios. This driver configures a special memory address;
 22 * writing to it drives 16 bits out to the latches.
23 */
24
25#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
26#define LTQ_EBU_WP 0x80000000 /* write protect bit */
27
28/* we keep a shadow value of the last value written to the ebu */
29static int ltq_ebu_gpio_shadow = 0x0;
30static void __iomem *ltq_ebu_gpio_membase;
31
32static void ltq_ebu_apply(void)
33{
34 unsigned long flags;
35
36 spin_lock_irqsave(&ebu_lock, flags);
37 ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
38 *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
39 ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
40 spin_unlock_irqrestore(&ebu_lock, flags);
41}
42
43static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
44{
45 if (value)
46 ltq_ebu_gpio_shadow |= (1 << offset);
47 else
48 ltq_ebu_gpio_shadow &= ~(1 << offset);
49 ltq_ebu_apply();
50}
51
52static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
53 int value)
54{
55 ltq_ebu_set(chip, offset, value);
56
57 return 0;
58}
59
60static struct gpio_chip ltq_ebu_chip = {
61 .label = "ltq_ebu",
62 .direction_output = ltq_ebu_direction_output,
63 .set = ltq_ebu_set,
64 .base = 72,
65 .ngpio = 16,
66 .can_sleep = 1,
67 .owner = THIS_MODULE,
68};
69
70static int ltq_ebu_probe(struct platform_device *pdev)
71{
72 int ret = 0;
73 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
74
75 if (!res) {
76 dev_err(&pdev->dev, "failed to get memory resource\n");
77 return -ENOENT;
78 }
79
80 res = devm_request_mem_region(&pdev->dev, res->start,
81 resource_size(res), dev_name(&pdev->dev));
82 if (!res) {
83 dev_err(&pdev->dev, "failed to request memory resource\n");
84 return -EBUSY;
85 }
86
87 ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
88 resource_size(res));
89 if (!ltq_ebu_gpio_membase) {
90 dev_err(&pdev->dev, "Failed to ioremap mem region\n");
91 return -ENOMEM;
92 }
93
 94	/* grab the default shadow value passed from the platform code */
95 ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
96
97 /* tell the ebu controller which memory address we will be using */
98 ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
99
100 /* write protect the region */
101 ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
102
103 ret = gpiochip_add(&ltq_ebu_chip);
104 if (!ret)
105 ltq_ebu_apply();
106 return ret;
107}
108
109static struct platform_driver ltq_ebu_driver = {
110 .probe = ltq_ebu_probe,
111 .driver = {
112 .name = "ltq_ebu",
113 .owner = THIS_MODULE,
114 },
115};
116
117static int __init ltq_ebu_init(void)
118{
119 int ret = platform_driver_register(&ltq_ebu_driver);
120
121 if (ret)
122		pr_info("ltq_ebu: Error registering platform driver!\n");
123 return ret;
124}
125
126postcore_initcall(ltq_ebu_init);
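
The driver above never reads the latches back; it keeps a 16-bit shadow word, flips one bit per set() call and rewrites the whole word through the EBU-mapped address. A standalone sketch of that shadow scheme, with the latch replaced by a plain variable:

#include <stdio.h>
#include <stdint.h>

static uint16_t latch;		/* stands in for the EBU-mapped latches */
static int shadow = 0x0;	/* last value written out */

static void ebu_gpio_set(unsigned int offset, int value)
{
	if (value)
		shadow |= 1 << offset;
	else
		shadow &= ~(1 << offset);
	latch = shadow;		/* ltq_ebu_apply() does the real 16-bit write */
}

int main(void)
{
	ebu_gpio_set(2, 1);
	ebu_gpio_set(5, 1);
	ebu_gpio_set(2, 0);
	printf("latch now drives 0x%04x\n", (unsigned int)latch);
	return 0;
}
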
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
new file mode 100644
index 000000000000..67d59d690340
--- /dev/null
+++ b/arch/mips/lantiq/xway/gpio_stp.c
@@ -0,0 +1,157 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
7 *
8 */
9
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/platform_device.h>
15#include <linux/mutex.h>
16#include <linux/io.h>
17#include <linux/gpio.h>
18
19#include <lantiq_soc.h>
20
21#define LTQ_STP_CON0 0x00
22#define LTQ_STP_CON1 0x04
23#define LTQ_STP_CPU0 0x08
24#define LTQ_STP_CPU1 0x0C
25#define LTQ_STP_AR 0x10
26
27#define LTQ_STP_CON_SWU (1 << 31)
28#define LTQ_STP_2HZ 0
29#define LTQ_STP_4HZ (1 << 23)
30#define LTQ_STP_8HZ (2 << 23)
31#define LTQ_STP_10HZ (3 << 23)
32#define LTQ_STP_SPEED_MASK (0xf << 23)
33#define LTQ_STP_UPD_FPI (1 << 31)
34#define LTQ_STP_UPD_MASK (3 << 30)
35#define LTQ_STP_ADSL_SRC (3 << 24)
36
37#define LTQ_STP_GROUP0 (1 << 0)
38
39#define LTQ_STP_RISING 0
40#define LTQ_STP_FALLING (1 << 26)
41#define LTQ_STP_EDGE_MASK (1 << 26)
42
43#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg)
44#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg)
45#define ltq_stp_w32_mask(clear, set, reg) \
46 ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
47 ltq_stp_membase + (reg))
48
49static int ltq_stp_shadow = 0xffff;
50static void __iomem *ltq_stp_membase;
51
52static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
53{
54 if (value)
55 ltq_stp_shadow |= (1 << offset);
56 else
57 ltq_stp_shadow &= ~(1 << offset);
58 ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
59}
60
61static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
62 int value)
63{
64 ltq_stp_set(chip, offset, value);
65
66 return 0;
67}
68
69static struct gpio_chip ltq_stp_chip = {
70 .label = "ltq_stp",
71 .direction_output = ltq_stp_direction_output,
72 .set = ltq_stp_set,
73 .base = 48,
74 .ngpio = 24,
75 .can_sleep = 1,
76 .owner = THIS_MODULE,
77};
78
79static int ltq_stp_hw_init(void)
80{
81 /* the 3 pins used to control the external stp */
82 ltq_gpio_request(4, 1, 0, 1, "stp-st");
83 ltq_gpio_request(5, 1, 0, 1, "stp-d");
84 ltq_gpio_request(6, 1, 0, 1, "stp-sh");
85
86 /* sane defaults */
87 ltq_stp_w32(0, LTQ_STP_AR);
88 ltq_stp_w32(0, LTQ_STP_CPU0);
89 ltq_stp_w32(0, LTQ_STP_CPU1);
90 ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
91 ltq_stp_w32(0, LTQ_STP_CON1);
92
93 /* rising or falling edge */
94 ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
95
 96	/* by default stp 15-0 are set */
97 ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
98
 99	/* the stp outputs are updated periodically by the FPI bus */
100 ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
101
102 /* set stp update speed */
103 ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
104
105	/* tell the hardware that pins (leds) 0 and 1 are controlled
106	 * by the dsl arc
107 */
108 ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
109
110 ltq_pmu_enable(PMU_LED);
111 return 0;
112}
113
114static int __devinit ltq_stp_probe(struct platform_device *pdev)
115{
116 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
117 int ret = 0;
118
119 if (!res)
120 return -ENOENT;
121 res = devm_request_mem_region(&pdev->dev, res->start,
122 resource_size(res), dev_name(&pdev->dev));
123 if (!res) {
124 dev_err(&pdev->dev, "failed to request STP memory\n");
125 return -EBUSY;
126 }
127 ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
128 resource_size(res));
129 if (!ltq_stp_membase) {
130 dev_err(&pdev->dev, "failed to remap STP memory\n");
131 return -ENOMEM;
132 }
133 ret = gpiochip_add(&ltq_stp_chip);
134 if (!ret)
135 ret = ltq_stp_hw_init();
136
137 return ret;
138}
139
140static struct platform_driver ltq_stp_driver = {
141 .probe = ltq_stp_probe,
142 .driver = {
143 .name = "ltq_stp",
144 .owner = THIS_MODULE,
145 },
146};
147
148int __init ltq_stp_init(void)
149{
150 int ret = platform_driver_register(&ltq_stp_driver);
151
152 if (ret)
153		pr_info("ltq_stp: error registering platform driver\n");
154 return ret;
155}
156
157postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
new file mode 100644
index 000000000000..d5aaf637ab19
--- /dev/null
+++ b/arch/mips/lantiq/xway/mach-easy50601.c
@@ -0,0 +1,57 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h>
13#include <linux/mtd/physmap.h>
14#include <linux/input.h>
15
16#include <lantiq.h>
17
18#include "../machtypes.h"
19#include "devices.h"
20
21static struct mtd_partition easy50601_partitions[] = {
22 {
23 .name = "uboot",
24 .offset = 0x0,
25 .size = 0x10000,
26 },
27 {
28 .name = "uboot_env",
29 .offset = 0x10000,
30 .size = 0x10000,
31 },
32 {
33 .name = "linux",
34 .offset = 0x20000,
35 .size = 0xE0000,
36 },
37 {
38 .name = "rootfs",
39 .offset = 0x100000,
40 .size = 0x300000,
41 },
42};
43
44static struct physmap_flash_data easy50601_flash_data = {
45 .nr_parts = ARRAY_SIZE(easy50601_partitions),
46 .parts = easy50601_partitions,
47};
48
49static void __init easy50601_init(void)
50{
51 ltq_register_nor(&easy50601_flash_data);
52}
53
54MIPS_MACHINE(LTQ_MACH_EASY50601,
55 "EASY50601",
56 "EASY50601 Eval Board",
57 easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
new file mode 100644
index 000000000000..ea5027b3239d
--- /dev/null
+++ b/arch/mips/lantiq/xway/mach-easy50712.c
@@ -0,0 +1,74 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h>
13#include <linux/mtd/physmap.h>
14#include <linux/input.h>
15#include <linux/phy.h>
16
17#include <lantiq_soc.h>
18#include <irq.h>
19
20#include "../machtypes.h"
21#include "devices.h"
22
23static struct mtd_partition easy50712_partitions[] = {
24 {
25 .name = "uboot",
26 .offset = 0x0,
27 .size = 0x10000,
28 },
29 {
30 .name = "uboot_env",
31 .offset = 0x10000,
32 .size = 0x10000,
33 },
34 {
35 .name = "linux",
36 .offset = 0x20000,
37 .size = 0xe0000,
38 },
39 {
40 .name = "rootfs",
41 .offset = 0x100000,
42 .size = 0x300000,
43 },
44};
45
46static struct physmap_flash_data easy50712_flash_data = {
47 .nr_parts = ARRAY_SIZE(easy50712_partitions),
48 .parts = easy50712_partitions,
49};
50
51static struct ltq_pci_data ltq_pci_data = {
52 .clock = PCI_CLOCK_INT,
53 .gpio = PCI_GNT1 | PCI_REQ1,
54 .irq = {
55 [14] = INT_NUM_IM0_IRL0 + 22,
56 },
57};
58
59static struct ltq_eth_data ltq_eth_data = {
60 .mii_mode = PHY_INTERFACE_MODE_MII,
61};
62
63static void __init easy50712_init(void)
64{
65 ltq_register_gpio_stp();
66 ltq_register_nor(&easy50712_flash_data);
67 ltq_register_pci(&ltq_pci_data);
68 ltq_register_etop(&ltq_eth_data);
69}
70
71MIPS_MACHINE(LTQ_MACH_EASY50712,
72 "EASY50712",
73 "EASY50712 Eval Board",
74 easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
new file mode 100644
index 000000000000..9d69f01e352b
--- /dev/null
+++ b/arch/mips/lantiq/xway/pmu.c
@@ -0,0 +1,70 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/version.h>
12#include <linux/ioport.h>
13
14#include <lantiq_soc.h>
15
 16/* PMU - the power management unit allows us to turn parts of the core
17 * on and off
18 */
19
20/* the enable / disable registers */
21#define LTQ_PMU_PWDCR 0x1C
22#define LTQ_PMU_PWDSR 0x20
23
24#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y))
25#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x))
26
27static struct resource ltq_pmu_resource = {
28 .name = "pmu",
29 .start = LTQ_PMU_BASE_ADDR,
30 .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
31 .flags = IORESOURCE_MEM,
32};
33
34static void __iomem *ltq_pmu_membase;
35
36void ltq_pmu_enable(unsigned int module)
37{
38 int err = 1000000;
39
40 ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
41 do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
42
43 if (!err)
44 panic("activating PMU module failed!\n");
45}
46EXPORT_SYMBOL(ltq_pmu_enable);
47
48void ltq_pmu_disable(unsigned int module)
49{
50 ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
51}
52EXPORT_SYMBOL(ltq_pmu_disable);
53
54int __init ltq_pmu_init(void)
55{
56 if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
57 panic("Failed to insert pmu memory\n");
58
59 if (request_mem_region(ltq_pmu_resource.start,
60 resource_size(&ltq_pmu_resource), "pmu") < 0)
61 panic("Failed to request pmu memory\n");
62
63 ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
64 resource_size(&ltq_pmu_resource));
65 if (!ltq_pmu_membase)
66 panic("Failed to remap pmu memory\n");
67 return 0;
68}
69
70core_initcall(ltq_pmu_init);
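
ltq_pmu_enable() above clears the module's bit in PWDCR and then polls PWDSR until the hardware catches up, while ltq_pmu_disable() sets the bit again, which suggests a set bit means "powered down". A standalone sketch of that bit handling with a simulated register and a hypothetical module bit:

#include <stdio.h>
#include <stdint.h>

static uint32_t pwdcr = 0xffffffff;	/* assume everything starts powered down */

static void pmu_enable(uint32_t module)
{
	pwdcr &= ~module;	/* clear the power-down bit */
	/* the real code now polls PWDSR until the module reports ready */
}

static void pmu_disable(uint32_t module)
{
	pwdcr |= module;	/* set the power-down bit again */
}

int main(void)
{
	uint32_t pmu_dma = 1 << 5;	/* hypothetical bit for PMU_DMA */

	pmu_enable(pmu_dma);
	printf("PWDCR after enable : 0x%08x\n", (unsigned int)pwdcr);
	pmu_disable(pmu_dma);
	printf("PWDCR after disable: 0x%08x\n", (unsigned int)pwdcr);
	return 0;
}
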
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
new file mode 100644
index 000000000000..abe49f4db57f
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom-ase.c
@@ -0,0 +1,39 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/module.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq_soc.h>
15
16#include "../prom.h"
17
18#define SOC_AMAZON_SE "Amazon_SE"
19
20#define PART_SHIFT 12
21#define PART_MASK 0x0FFFFFFF
22#define REV_SHIFT 28
23#define REV_MASK 0xF0000000
24
25void __init ltq_soc_detect(struct ltq_soc_info *i)
26{
27 i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
28 i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
29 switch (i->partnum) {
30 case SOC_ID_AMAZON_SE:
31 i->name = SOC_AMAZON_SE;
32 i->type = SOC_TYPE_AMAZON_SE;
33 break;
34
35 default:
36 unreachable();
37 break;
38 }
39}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
new file mode 100644
index 000000000000..1686692ac24d
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom-xway.c
@@ -0,0 +1,54 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/module.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq_soc.h>
15
16#include "../prom.h"
17
18#define SOC_DANUBE "Danube"
19#define SOC_TWINPASS "Twinpass"
20#define SOC_AR9 "AR9"
21
22#define PART_SHIFT 12
23#define PART_MASK 0x0FFFFFFF
24#define REV_SHIFT 28
25#define REV_MASK 0xF0000000
26
27void __init ltq_soc_detect(struct ltq_soc_info *i)
28{
29 i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
30 i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
31 switch (i->partnum) {
32 case SOC_ID_DANUBE1:
33 case SOC_ID_DANUBE2:
34 i->name = SOC_DANUBE;
35 i->type = SOC_TYPE_DANUBE;
36 break;
37
38 case SOC_ID_TWINPASS:
39 i->name = SOC_TWINPASS;
40 i->type = SOC_TYPE_DANUBE;
41 break;
42
43 case SOC_ID_ARX188:
44 case SOC_ID_ARX168:
45 case SOC_ID_ARX182:
46 i->name = SOC_AR9;
47 i->type = SOC_TYPE_AR9;
48 break;
49
50 default:
51 unreachable();
52 break;
53 }
54}
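
Both ltq_soc_detect() variants decode LTQ_MPS_CHIPID the same way: the part number sits in bits 27:12 and the revision in bits 31:28. A standalone sketch of that decode with a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define PART_SHIFT 12
#define PART_MASK  0x0FFFFFFF
#define REV_SHIFT  28
#define REV_MASK   0xF0000000

int main(void)
{
	uint32_t chipid = 0x1016c083;	/* hypothetical LTQ_MPS_CHIPID value */
	unsigned int partnum = (chipid & PART_MASK) >> PART_SHIFT;
	unsigned int rev = (chipid & REV_MASK) >> REV_SHIFT;

	printf("partnum 0x%05x, rev 1.%u\n", partnum, rev);
	return 0;
}
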
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
new file mode 100644
index 000000000000..a1be36d0e490
--- /dev/null
+++ b/arch/mips/lantiq/xway/reset.c
@@ -0,0 +1,91 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/io.h>
11#include <linux/ioport.h>
12#include <linux/pm.h>
13#include <linux/module.h>
14#include <asm/reboot.h>
15
16#include <lantiq_soc.h>
17
18#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y))
19#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x))
20
21/* register definitions */
22#define LTQ_RCU_RST 0x0010
23#define LTQ_RCU_RST_ALL 0x40000000
24
25#define LTQ_RCU_RST_STAT 0x0014
26#define LTQ_RCU_STAT_SHIFT 26
27
28static struct resource ltq_rcu_resource = {
29 .name = "rcu",
30 .start = LTQ_RCU_BASE_ADDR,
31 .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1,
32 .flags = IORESOURCE_MEM,
33};
34
35/* remapped base addr of the reset control unit */
36static void __iomem *ltq_rcu_membase;
37
38/* This function is used by the watchdog driver */
39int ltq_reset_cause(void)
40{
41 u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT);
42 return val >> LTQ_RCU_STAT_SHIFT;
43}
44EXPORT_SYMBOL_GPL(ltq_reset_cause);
45
46static void ltq_machine_restart(char *command)
47{
48 pr_notice("System restart\n");
49 local_irq_disable();
50 ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST);
51 unreachable();
52}
53
54static void ltq_machine_halt(void)
55{
56 pr_notice("System halted.\n");
57 local_irq_disable();
58 unreachable();
59}
60
61static void ltq_machine_power_off(void)
62{
63 pr_notice("Please turn off the power now.\n");
64 local_irq_disable();
65 unreachable();
66}
67
68static int __init mips_reboot_setup(void)
69{
70 /* insert and request the memory region */
71 if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
72 panic("Failed to insert rcu memory\n");
73
74 if (request_mem_region(ltq_rcu_resource.start,
75 resource_size(&ltq_rcu_resource), "rcu") < 0)
76 panic("Failed to request rcu memory\n");
77
78 /* remap rcu register range */
79 ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
80 resource_size(&ltq_rcu_resource));
81 if (!ltq_rcu_membase)
82 panic("Failed to remap rcu memory\n");
83
84 _machine_restart = ltq_machine_restart;
85 _machine_halt = ltq_machine_halt;
86 pm_power_off = ltq_machine_power_off;
87
88 return 0;
89}
90
91arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
new file mode 100644
index 000000000000..f6f326798a39
--- /dev/null
+++ b/arch/mips/lantiq/xway/setup-ase.c
@@ -0,0 +1,19 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <lantiq_soc.h>
10
11#include "../prom.h"
12#include "devices.h"
13
14void __init ltq_soc_setup(void)
15{
16 ltq_register_ase_asc();
17 ltq_register_gpio();
18 ltq_register_wdt();
19}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
new file mode 100644
index 000000000000..c292f643a858
--- /dev/null
+++ b/arch/mips/lantiq/xway/setup-xway.c
@@ -0,0 +1,20 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <lantiq_soc.h>
10
11#include "../prom.h"
12#include "devices.h"
13
14void __init ltq_soc_setup(void)
15{
16 ltq_register_asc(0);
17 ltq_register_asc(1);
18 ltq_register_gpio();
19 ltq_register_wdt();
20}
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 2adead5a8a37..b2cad4fd5fc4 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
28obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o 28obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o
29obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o 29obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o
30obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o 30obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o
31obj-$(CONFIG_CPU_XLR) += dump_tlb.o
31 32
32# libgcc-style stuff needed in the kernel 33# libgcc-style stuff needed in the kernel
33obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o 34obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 11b193f848f8..d93830ad6113 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -29,9 +29,10 @@ unsigned long memsize, highmemsize;
29 29
30#define parse_even_earlier(res, option, p) \ 30#define parse_even_earlier(res, option, p) \
31do { \ 31do { \
32 int ret; \ 32 unsigned int tmp __maybe_unused; \
33 \
33 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 34 if (strncmp(option, (char *)p, strlen(option)) == 0) \
34 ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \
35} while (0) 36} while (0)
36 37
37void __init prom_init_env(void) 38void __init prom_init_env(void)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index d679c772d082..4d8c1623eee2 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,7 +3,8 @@
3# 3#
4 4
5obj-y += cache.o dma-default.o extable.o fault.o \ 5obj-y += cache.o dma-default.o extable.o fault.o \
6 init.o tlbex.o tlbex-fault.o uasm.o page.o 6 init.o mmap.o tlbex.o tlbex-fault.o uasm.o \
7 page.o
7 8
8obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
9obj-$(CONFIG_64BIT) += pgtable-64.o 10obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
29obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o 30obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o
30obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o 31obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o
31obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o 32obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
33obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o
32 34
33obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o 35obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
34obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o 36obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index b4923a75cb4b..d9bc5d3593b6 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1006,6 +1006,7 @@ static void __cpuinit probe_pcache(void)
1006 case CPU_25KF: 1006 case CPU_25KF:
1007 case CPU_SB1: 1007 case CPU_SB1:
1008 case CPU_SB1A: 1008 case CPU_SB1A:
1009 case CPU_XLR:
1009 c->dcache.flags |= MIPS_CACHE_PINDEX; 1010 c->dcache.flags |= MIPS_CACHE_PINDEX;
1010 break; 1011 break;
1011 1012
@@ -1075,7 +1076,6 @@ static int __cpuinit probe_scache(void)
1075 unsigned long flags, addr, begin, end, pow2; 1076 unsigned long flags, addr, begin, end, pow2;
1076 unsigned int config = read_c0_config(); 1077 unsigned int config = read_c0_config();
1077 struct cpuinfo_mips *c = &current_cpu_data; 1078 struct cpuinfo_mips *c = &current_cpu_data;
1078 int tmp;
1079 1079
1080 if (config & CONF_SC) 1080 if (config & CONF_SC)
1081 return 0; 1081 return 0;
@@ -1108,7 +1108,6 @@ static int __cpuinit probe_scache(void)
1108 1108
1109 /* Now search for the wrap around point. */ 1109 /* Now search for the wrap around point. */
1110 pow2 = (128 * 1024); 1110 pow2 = (128 * 1024);
1111 tmp = 0;
1112 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { 1111 for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1113 cache_op(Index_Load_Tag_SD, addr); 1112 cache_op(Index_Load_Tag_SD, addr);
1114 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ 1113 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
new file mode 100644
index 000000000000..ae3c20a9556e
--- /dev/null
+++ b/arch/mips/mm/mmap.c
@@ -0,0 +1,122 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 Wind River Systems,
7 * written by Ralf Baechle <ralf@linux-mips.org>
8 */
9#include <linux/errno.h>
10#include <linux/mm.h>
11#include <linux/mman.h>
12#include <linux/module.h>
13#include <linux/random.h>
14#include <linux/sched.h>
15
16unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
17
18EXPORT_SYMBOL(shm_align_mask);
19
20#define COLOUR_ALIGN(addr,pgoff) \
21 ((((addr) + shm_align_mask) & ~shm_align_mask) + \
22 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
23
24unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
25 unsigned long len, unsigned long pgoff, unsigned long flags)
26{
27 struct vm_area_struct * vmm;
28 int do_color_align;
29
30 if (len > TASK_SIZE)
31 return -ENOMEM;
32
33 if (flags & MAP_FIXED) {
34 /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
35 if (TASK_SIZE - len < addr)
36 return -EINVAL;
37
38 /*
39 * We do not accept a shared mapping if it would violate
40 * cache aliasing constraints.
41 */
42 if ((flags & MAP_SHARED) &&
43 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
44 return -EINVAL;
45 return addr;
46 }
47
48 do_color_align = 0;
49 if (filp || (flags & MAP_SHARED))
50 do_color_align = 1;
51 if (addr) {
52 if (do_color_align)
53 addr = COLOUR_ALIGN(addr, pgoff);
54 else
55 addr = PAGE_ALIGN(addr);
56 vmm = find_vma(current->mm, addr);
57 if (TASK_SIZE - len >= addr &&
58 (!vmm || addr + len <= vmm->vm_start))
59 return addr;
60 }
61 addr = current->mm->mmap_base;
62 if (do_color_align)
63 addr = COLOUR_ALIGN(addr, pgoff);
64 else
65 addr = PAGE_ALIGN(addr);
66
67 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
68 /* At this point: (!vmm || addr < vmm->vm_end). */
69 if (TASK_SIZE - len < addr)
70 return -ENOMEM;
71 if (!vmm || addr + len <= vmm->vm_start)
72 return addr;
73 addr = vmm->vm_end;
74 if (do_color_align)
75 addr = COLOUR_ALIGN(addr, pgoff);
76 }
77}
78
79void arch_pick_mmap_layout(struct mm_struct *mm)
80{
81 unsigned long random_factor = 0UL;
82
83 if (current->flags & PF_RANDOMIZE) {
84 random_factor = get_random_int();
85 random_factor = random_factor << PAGE_SHIFT;
86 if (TASK_IS_32BIT_ADDR)
87 random_factor &= 0xfffffful;
88 else
89 random_factor &= 0xffffffful;
90 }
91
92 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
93 mm->get_unmapped_area = arch_get_unmapped_area;
94 mm->unmap_area = arch_unmap_area;
95}
96
97static inline unsigned long brk_rnd(void)
98{
99 unsigned long rnd = get_random_int();
100
101 rnd = rnd << PAGE_SHIFT;
102 /* 8MB for 32bit, 256MB for 64bit */
103 if (TASK_IS_32BIT_ADDR)
104 rnd = rnd & 0x7ffffful;
105 else
106 rnd = rnd & 0xffffffful;
107
108 return rnd;
109}
110
111unsigned long arch_randomize_brk(struct mm_struct *mm)
112{
113 unsigned long base = mm->brk;
114 unsigned long ret;
115
116 ret = PAGE_ALIGN(base + brk_rnd());
117
118 if (ret < mm->brk)
119 return mm->brk;
120
121 return ret;
122}
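Annotation: the COLOUR_ALIGN() arithmetic above is the heart of the cache-aliasing avoidance: it rounds an address up to an alias boundary and adds the file offset's colour so that (addr & shm_align_mask) ends up equal to ((pgoff << PAGE_SHIFT) & shm_align_mask). A standalone sketch of just that calculation, assuming 4 KB pages and a 16 KB alias span purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KB pages */
static unsigned long shm_align_mask = 0x3fff;	/* assumed 16 KB alias span */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

int main(void)
{
	unsigned long addr = 0x2aaab123;	/* arbitrary candidate address */
	unsigned long pgoff = 5;		/* file offset in pages */
	unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

	/* the coloured address shares its low alias bits with the file offset */
	printf("aligned = %#lx, colour ok = %d\n", aligned,
	       (aligned & shm_align_mask) ==
	       ((pgoff << PAGE_SHIFT) & shm_align_mask));
	return 0;
}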
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5ef294fbb6e7..424ed4b92e6d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -404,6 +404,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
404 case CPU_5KC: 404 case CPU_5KC:
405 case CPU_TX49XX: 405 case CPU_TX49XX:
406 case CPU_PR4450: 406 case CPU_PR4450:
407 case CPU_XLR:
407 uasm_i_nop(p); 408 uasm_i_nop(p);
408 tlbw(p); 409 tlbw(p);
409 break; 410 break;
@@ -1151,8 +1152,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
1151 struct uasm_reloc *r = relocs; 1152 struct uasm_reloc *r = relocs;
1152 u32 *f; 1153 u32 *f;
1153 unsigned int final_len; 1154 unsigned int final_len;
1154 struct mips_huge_tlb_info htlb_info; 1155 struct mips_huge_tlb_info htlb_info __maybe_unused;
1155 enum vmalloc64_mode vmalloc_mode; 1156 enum vmalloc64_mode vmalloc_mode __maybe_unused;
1156 1157
1157 memset(tlb_handler, 0, sizeof(tlb_handler)); 1158 memset(tlb_handler, 0, sizeof(tlb_handler));
1158 memset(labels, 0, sizeof(labels)); 1159 memset(labels, 0, sizeof(labels));
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 414f0c99b196..31180c321a1a 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
193 193
194void __init prom_init(void) 194void __init prom_init(void)
195{ 195{
196 int result;
197
198 prom_argc = fw_arg0; 196 prom_argc = fw_arg0;
199 _prom_argv = (int *) fw_arg1; 197 _prom_argv = (int *) fw_arg1;
200 _prom_envp = (int *) fw_arg2; 198 _prom_envp = (int *) fw_arg2;
@@ -360,20 +358,14 @@ void __init prom_init(void)
360#ifdef CONFIG_SERIAL_8250_CONSOLE 358#ifdef CONFIG_SERIAL_8250_CONSOLE
361 console_config(); 359 console_config();
362#endif 360#endif
363 /* Early detection of CMP support */
364 result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
365
366#ifdef CONFIG_MIPS_CMP 361#ifdef CONFIG_MIPS_CMP
367 if (result) 362 /* Early detection of CMP support */
363 if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
368 register_smp_ops(&cmp_smp_ops); 364 register_smp_ops(&cmp_smp_ops);
365 else
369#endif 366#endif
370#ifdef CONFIG_MIPS_MT_SMP 367#ifdef CONFIG_MIPS_MT_SMP
371#ifdef CONFIG_MIPS_CMP
372 if (!result)
373 register_smp_ops(&vsmp_smp_ops); 368 register_smp_ops(&vsmp_smp_ops);
374#else
375 register_smp_ops(&vsmp_smp_ops);
376#endif
377#endif 369#endif
378#ifdef CONFIG_MIPS_MT_SMTC 370#ifdef CONFIG_MIPS_MT_SMTC
379 register_smp_ops(&msmtc_smp_ops); 371 register_smp_ops(&msmtc_smp_ops);
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 7d93e6fbfa5a..1d36c511a7a5 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock);
56static inline int mips_pcibios_iack(void) 56static inline int mips_pcibios_iack(void)
57{ 57{
58 int irq; 58 int irq;
59 u32 dummy;
60 59
61 /* 60 /*
62 * Determine highest priority pending interrupt by performing 61 * Determine highest priority pending interrupt by performing
@@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void)
83 BONITO_PCIMAP_CFG = 0x20000; 82 BONITO_PCIMAP_CFG = 0x20000;
84 83
85 /* Flush Bonito register block */ 84 /* Flush Bonito register block */
86 dummy = BONITO_PCIMAP_CFG; 85 (void) BONITO_PCIMAP_CFG;
87 iob(); /* sync */ 86 iob(); /* sync */
88 87
89 irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); 88 irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
new file mode 100644
index 000000000000..a5ca743613f2
--- /dev/null
+++ b/arch/mips/netlogic/Kconfig
@@ -0,0 +1,5 @@
1config NLM_COMMON
2 bool
3
4config NLM_XLR
5 bool
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
new file mode 100644
index 000000000000..9bd3f731f62e
--- /dev/null
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -0,0 +1,5 @@
1obj-y += setup.o platform.o irq.o time.o
2obj-$(CONFIG_SMP) += smp.o smpboot.o
3obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
4
5EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/netlogic/xlr/irq.c b/arch/mips/netlogic/xlr/irq.c
new file mode 100644
index 000000000000..1446d58e364c
--- /dev/null
+++ b/arch/mips/netlogic/xlr/irq.c
@@ -0,0 +1,300 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <linux/linkage.h>
38#include <linux/interrupt.h>
39#include <linux/spinlock.h>
40#include <linux/mm.h>
41
42#include <asm/mipsregs.h>
43
44#include <asm/netlogic/xlr/iomap.h>
45#include <asm/netlogic/xlr/pic.h>
46#include <asm/netlogic/xlr/xlr.h>
47
48#include <asm/netlogic/interrupt.h>
49#include <asm/netlogic/mips-extns.h>
50
51static u64 nlm_irq_mask;
52static DEFINE_SPINLOCK(nlm_pic_lock);
53
54static void xlr_pic_enable(struct irq_data *d)
55{
56 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
57 unsigned long flags;
58 nlm_reg_t reg;
59 int irq = d->irq;
60
61 WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq);
62
63 spin_lock_irqsave(&nlm_pic_lock, flags);
64 reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE);
65 netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE,
66 reg | (1 << 6) | (1 << 30) | (1 << 31));
67 spin_unlock_irqrestore(&nlm_pic_lock, flags);
68}
69
70static void xlr_pic_mask(struct irq_data *d)
71{
72 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
73 unsigned long flags;
74 nlm_reg_t reg;
75 int irq = d->irq;
76
77 WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq);
78
79 spin_lock_irqsave(&nlm_pic_lock, flags);
80 reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE);
81 netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE,
82 reg | (1 << 6) | (1 << 30) | (0 << 31));
83 spin_unlock_irqrestore(&nlm_pic_lock, flags);
84}
85
86#ifdef CONFIG_PCI
87/* Extra ACK needed for XLR on chip PCI controller */
88static void xlr_pci_ack(struct irq_data *d)
89{
90 nlm_reg_t *pci_mmio = netlogic_io_mmio(NETLOGIC_IO_PCIX_OFFSET);
91
92 netlogic_read_reg(pci_mmio, (0x140 >> 2));
93}
94
95/* Extra ACK needed for XLS on chip PCIe controller */
96static void xls_pcie_ack(struct irq_data *d)
97{
98 nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET);
99
100 switch (d->irq) {
101 case PIC_PCIE_LINK0_IRQ:
102 netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff);
103 break;
104 case PIC_PCIE_LINK1_IRQ:
105 netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff);
106 break;
107 case PIC_PCIE_LINK2_IRQ:
108 netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff);
109 break;
110 case PIC_PCIE_LINK3_IRQ:
111 netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff);
112 break;
113 }
114}
115
116/* For XLS B silicon, the 3,4 PCI interrupts are different */
117static void xls_pcie_ack_b(struct irq_data *d)
118{
119 nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET);
120
121 switch (d->irq) {
122 case PIC_PCIE_LINK0_IRQ:
123 netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff);
124 break;
125 case PIC_PCIE_LINK1_IRQ:
126 netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff);
127 break;
128 case PIC_PCIE_XLSB0_LINK2_IRQ:
129 netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff);
130 break;
131 case PIC_PCIE_XLSB0_LINK3_IRQ:
132 netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff);
133 break;
134 }
135}
136#endif
137
138static void xlr_pic_ack(struct irq_data *d)
139{
140 unsigned long flags;
141 nlm_reg_t *mmio;
142 int irq = d->irq;
143 void *hd = irq_data_get_irq_handler_data(d);
144
145 WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq);
146
147 if (hd) {
148		void (*extra_ack)(struct irq_data *) = hd;
149 extra_ack(d);
150 }
151 mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
152 spin_lock_irqsave(&nlm_pic_lock, flags);
153 netlogic_write_reg(mmio, PIC_INT_ACK, (1 << (irq - PIC_IRQ_BASE)));
154 spin_unlock_irqrestore(&nlm_pic_lock, flags);
155}
156
157/*
158 * This chip definition handles interrupts routed through the XLR
159 * hardware PIC; currently IRQs 8-39 are mapped to hardware interrupts
160 * 0-31 wired to the XLR PIC
161 */
162static struct irq_chip xlr_pic = {
163 .name = "XLR-PIC",
164 .irq_enable = xlr_pic_enable,
165 .irq_mask = xlr_pic_mask,
166 .irq_ack = xlr_pic_ack,
167};
168
169static void rsvd_irq_handler(struct irq_data *d)
170{
171 WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq);
172}
173
174/*
175 * Chip definition for CPU originated interrupts(timer, msg) and
176 * IPIs
177 */
178struct irq_chip nlm_cpu_intr = {
179 .name = "XLR-CPU-INTR",
180 .irq_enable = rsvd_irq_handler,
181 .irq_mask = rsvd_irq_handler,
182 .irq_ack = rsvd_irq_handler,
183};
184
185void __init init_xlr_irqs(void)
186{
187 nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);
188 uint32_t thread_mask = 1;
189 int level, i;
190
191 pr_info("Interrupt thread mask [%x]\n", thread_mask);
192 for (i = 0; i < PIC_NUM_IRTS; i++) {
193 level = PIC_IRQ_IS_EDGE_TRIGGERED(i);
194
195 /* Bind all PIC irqs to boot cpu */
196 netlogic_write_reg(mmio, PIC_IRT_0_BASE + i, thread_mask);
197
198 /*
199 * Use local scheduling and high polarity for all IRTs
200 * Invalidate all IRTs, by default
201 */
202 netlogic_write_reg(mmio, PIC_IRT_1_BASE + i,
203 (level << 30) | (1 << 6) | (PIC_IRQ_BASE + i));
204 }
205
206 /* Make all IRQs as level triggered by default */
207 for (i = 0; i < NR_IRQS; i++) {
208 if (PIC_IRQ_IS_IRT(i))
209 irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq);
210 else
211 irq_set_chip_and_handler(i, &nlm_cpu_intr,
212 handle_level_irq);
213 }
214#ifdef CONFIG_SMP
215 irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
216 nlm_smp_function_ipi_handler);
217 irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
218 nlm_smp_resched_ipi_handler);
219 nlm_irq_mask |=
220 ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
221#endif
222
223#ifdef CONFIG_PCI
224 /*
225 * For PCI interrupts, we need to ack the PIC controller too, overload
226 * irq handler data to do this
227 */
228 if (nlm_chip_is_xls()) {
229 if (nlm_chip_is_xls_b()) {
230 irq_set_handler_data(PIC_PCIE_LINK0_IRQ,
231 xls_pcie_ack_b);
232 irq_set_handler_data(PIC_PCIE_LINK1_IRQ,
233 xls_pcie_ack_b);
234 irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ,
235 xls_pcie_ack_b);
236 irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ,
237 xls_pcie_ack_b);
238 } else {
239 irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack);
240 irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack);
241 irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack);
242 irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack);
243 }
244 } else {
245 /* XLR PCI controller ACK */
246 irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
247 }
248#endif
249	/* unmask all PIC-related interrupts. If no handler is installed by a
250	 * driver, the interrupt is simply acked and ignored
251 */
252 for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++)
253 nlm_irq_mask |= (1ULL << i);
254
255 nlm_irq_mask |= (1ULL << IRQ_TIMER);
256}
257
258void __init arch_init_irq(void)
259{
260 /* Initialize the irq descriptors */
261 init_xlr_irqs();
262 write_c0_eimr(nlm_irq_mask);
263}
264
265void __cpuinit nlm_smp_irq_init(void)
266{
267 /* set interrupt mask for non-zero cpus */
268 write_c0_eimr(nlm_irq_mask);
269}
270
271asmlinkage void plat_irq_dispatch(void)
272{
273 uint64_t eirr;
274 int i;
275
276 eirr = read_c0_eirr() & read_c0_eimr();
277 if (!eirr)
278 return;
279
280	/* no need to ack EIRR here; writing compare clears the interrupt */
281 if (eirr & (1 << IRQ_TIMER)) {
282 do_IRQ(IRQ_TIMER);
283 return;
284 }
285
286	/* use dclz (count leading zeros) to optimize the code below */
287 for (i = 63; i != -1; i--) {
288 if (eirr & (1ULL << i))
289 break;
290 }
291 if (i == -1) {
292 pr_err("no interrupt !!\n");
293 return;
294 }
295
296 /* Ack eirr */
297 write_c0_eirr(1ULL << i);
298
299 do_IRQ(i);
300}
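Annotation: plat_irq_dispatch() above scans EIRR from bit 63 downwards to find the highest-priority pending interrupt, and its comment hints that a count-leading-zeros instruction could do the same job. A minimal sketch of that equivalence in plain C, using GCC's __builtin_clzll in place of the MIPS dclz instruction; the pending mask is fabricated:

#include <assert.h>
#include <stdint.h>

/* Highest set bit by linear scan, as in plat_irq_dispatch() above. */
static int highest_bit_scan(uint64_t eirr)
{
	int i;

	for (i = 63; i != -1; i--)
		if (eirr & (1ULL << i))
			break;
	return i;	/* -1 when no bit is set */
}

/* Same result via count-leading-zeros; only valid for a non-zero word. */
static int highest_bit_clz(uint64_t eirr)
{
	return 63 - __builtin_clzll(eirr);
}

int main(void)
{
	uint64_t eirr = (1ULL << 8) | (1ULL << 41);	/* fabricated pending mask */

	assert(highest_bit_scan(eirr) == 41);
	assert(highest_bit_clz(eirr) == 41);
	return 0;
}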
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
new file mode 100644
index 000000000000..609ec2534642
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2011, Netlogic Microsystems.
3 * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 */
9
10#include <linux/device.h>
11#include <linux/platform_device.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/resource.h>
15#include <linux/serial_8250.h>
16#include <linux/serial_reg.h>
17
18#include <asm/netlogic/xlr/iomap.h>
19#include <asm/netlogic/xlr/pic.h>
20#include <asm/netlogic/xlr/xlr.h>
21
22unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
23{
24 nlm_reg_t *mmio;
25 unsigned int value;
26
27 /* XLR uart does not need any mapping of regs */
28 mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift));
29 value = netlogic_read_reg(mmio, 0);
30
31 /* See XLR/XLS errata */
32 if (offset == UART_MSR)
33 value ^= 0xF0;
34 else if (offset == UART_MCR)
35 value ^= 0x3;
36
37 return value;
38}
39
40void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
41{
42 nlm_reg_t *mmio;
43
44 /* XLR uart does not need any mapping of regs */
45 mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift));
46
47 /* See XLR/XLS errata */
48 if (offset == UART_MSR)
49 value ^= 0xF0;
50 else if (offset == UART_MCR)
51 value ^= 0x3;
52
53 netlogic_write_reg(mmio, 0, value);
54}
55
56#define PORT(_irq) \
57 { \
58 .irq = _irq, \
59 .regshift = 2, \
60 .iotype = UPIO_MEM32, \
61 .flags = (UPF_SKIP_TEST | \
62 UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
63 .uartclk = PIC_CLKS_PER_SEC, \
64 .type = PORT_16550A, \
65 .serial_in = nlm_xlr_uart_in, \
66 .serial_out = nlm_xlr_uart_out, \
67 }
68
69static struct plat_serial8250_port xlr_uart_data[] = {
70 PORT(PIC_UART_0_IRQ),
71 PORT(PIC_UART_1_IRQ),
72 {},
73};
74
75static struct platform_device uart_device = {
76 .name = "serial8250",
77 .id = PLAT8250_DEV_PLATFORM,
78 .dev = {
79 .platform_data = xlr_uart_data,
80 },
81};
82
83static int __init nlm_uart_init(void)
84{
85 nlm_reg_t *mmio;
86
87 mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);
88 xlr_uart_data[0].membase = (void __iomem *)mmio;
89 xlr_uart_data[0].mapbase = CPHYSADDR((unsigned long)mmio);
90
91 mmio = netlogic_io_mmio(NETLOGIC_IO_UART_1_OFFSET);
92 xlr_uart_data[1].membase = (void __iomem *)mmio;
93 xlr_uart_data[1].mapbase = CPHYSADDR((unsigned long)mmio);
94
95 return platform_device_register(&uart_device);
96}
97
98arch_initcall(nlm_uart_init);
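Annotation: the serial_in/serial_out hooks above flip bits in UART_MSR and UART_MCR to work around an XLR/XLS UART erratum that the patch only references ("See XLR/XLS errata"). The sketch below shows only the arithmetic effect of the fixup; the register values are fabricated, and the upper-nibble meaning (CTS/DSR/RI/DCD) is the standard 16550 layout:

#include <assert.h>

#define MSR_FIXUP	0xF0	/* invert CTS/DSR/RI/DCD, as in nlm_xlr_uart_in() */
#define MCR_FIXUP	0x03	/* invert DTR/RTS, as in nlm_xlr_uart_out() */

static unsigned int fix_msr(unsigned int raw)
{
	return raw ^ MSR_FIXUP;
}

int main(void)
{
	/* fabricated raw value: hardware reports 0x60, the driver sees 0x90 */
	assert(fix_msr(0x60) == 0x90);
	/* applying the fixup twice is the identity, so reads and writes of the
	 * same register stay consistent with each other */
	assert(fix_msr(fix_msr(0x5a)) == 0x5a);
	return 0;
}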
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
new file mode 100644
index 000000000000..482802569e74
--- /dev/null
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -0,0 +1,188 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/serial_8250.h>
37#include <linux/pm.h>
38
39#include <asm/reboot.h>
40#include <asm/time.h>
41#include <asm/bootinfo.h>
42#include <asm/smp-ops.h>
43
44#include <asm/netlogic/interrupt.h>
45#include <asm/netlogic/psb-bootinfo.h>
46
47#include <asm/netlogic/xlr/xlr.h>
48#include <asm/netlogic/xlr/iomap.h>
49#include <asm/netlogic/xlr/pic.h>
50#include <asm/netlogic/xlr/gpio.h>
51
52unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE);
53unsigned long nlm_common_ebase = 0x0;
54struct psb_info nlm_prom_info;
55
56static void nlm_early_serial_setup(void)
57{
58 struct uart_port s;
59 nlm_reg_t *uart_base;
60
61 uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);
62 memset(&s, 0, sizeof(s));
63 s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
64 s.iotype = UPIO_MEM32;
65 s.regshift = 2;
66 s.irq = PIC_UART_0_IRQ;
67 s.uartclk = PIC_CLKS_PER_SEC;
68 s.serial_in = nlm_xlr_uart_in;
69 s.serial_out = nlm_xlr_uart_out;
70 s.mapbase = (unsigned long)uart_base;
71 s.membase = (unsigned char __iomem *)uart_base;
72 early_serial_setup(&s);
73}
74
75static void nlm_linux_exit(void)
76{
77 nlm_reg_t *mmio;
78
79 mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);
80 /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
81 netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1);
82 for ( ; ; )
83 cpu_wait();
84}
85
86void __init plat_mem_setup(void)
87{
88 panic_timeout = 5;
89 _machine_restart = (void (*)(char *))nlm_linux_exit;
90 _machine_halt = nlm_linux_exit;
91 pm_power_off = nlm_linux_exit;
92}
93
94const char *get_system_type(void)
95{
96 return "Netlogic XLR/XLS Series";
97}
98
99void __init prom_free_prom_memory(void)
100{
101 /* Nothing yet */
102}
103
104static void build_arcs_cmdline(int *argv)
105{
106 int i, remain, len;
107 char *arg;
108
109 remain = sizeof(arcs_cmdline) - 1;
110 arcs_cmdline[0] = '\0';
111 for (i = 0; argv[i] != 0; i++) {
112 arg = (char *)(long)argv[i];
113 len = strlen(arg);
114 if (len + 1 > remain)
115 break;
116 strcat(arcs_cmdline, arg);
117 strcat(arcs_cmdline, " ");
118 remain -= len + 1;
119 }
120
121 /* Add the default options here */
122 if ((strstr(arcs_cmdline, "console=")) == NULL) {
123 arg = "console=ttyS0,38400 ";
124 len = strlen(arg);
125 if (len > remain)
126 goto fail;
127 strcat(arcs_cmdline, arg);
128 remain -= len;
129 }
130#ifdef CONFIG_BLK_DEV_INITRD
131 if ((strstr(arcs_cmdline, "rdinit=")) == NULL) {
132 arg = "rdinit=/sbin/init ";
133 len = strlen(arg);
134 if (len > remain)
135 goto fail;
136 strcat(arcs_cmdline, arg);
137 remain -= len;
138 }
139#endif
140 return;
141fail:
142 panic("Cannot add %s, command line too big!", arg);
143}
144
145static void prom_add_memory(void)
146{
147 struct nlm_boot_mem_map *bootm;
148 u64 start, size;
149	u64 pref_backup = 512;	/* avoid prefetch walking beyond the end */
150 int i;
151
152 bootm = (void *)(long)nlm_prom_info.psb_mem_map;
153 for (i = 0; i < bootm->nr_map; i++) {
154 if (bootm->map[i].type != BOOT_MEM_RAM)
155 continue;
156 start = bootm->map[i].addr;
157 size = bootm->map[i].size;
158
159		/* Workaround for using bootloader memory */
160 if (i == 0 && start == 0 && size == 0x0c000000)
161 size = 0x0ff00000;
162
163 add_memory_region(start, size - pref_backup, BOOT_MEM_RAM);
164 }
165}
166
167void __init prom_init(void)
168{
169 int *argv, *envp; /* passed as 32 bit ptrs */
170 struct psb_info *prom_infop;
171
172 /* truncate to 32 bit and sign extend all args */
173 argv = (int *)(long)(int)fw_arg1;
174 envp = (int *)(long)(int)fw_arg2;
175 prom_infop = (struct psb_info *)(long)(int)fw_arg3;
176
177 nlm_prom_info = *prom_infop;
178
179 nlm_early_serial_setup();
180 build_arcs_cmdline(argv);
181 nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
182 prom_add_memory();
183
184#ifdef CONFIG_SMP
185 nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map);
186 register_smp_ops(&nlm_smp_ops);
187#endif
188}
diff --git a/arch/mips/netlogic/xlr/smp.c b/arch/mips/netlogic/xlr/smp.c
new file mode 100644
index 000000000000..b495a7f1433b
--- /dev/null
+++ b/arch/mips/netlogic/xlr/smp.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/delay.h>
37#include <linux/init.h>
38#include <linux/smp.h>
39#include <linux/irq.h>
40
41#include <asm/mmu_context.h>
42
43#include <asm/netlogic/interrupt.h>
44#include <asm/netlogic/mips-extns.h>
45
46#include <asm/netlogic/xlr/iomap.h>
47#include <asm/netlogic/xlr/pic.h>
48#include <asm/netlogic/xlr/xlr.h>
49
50void core_send_ipi(int logical_cpu, unsigned int action)
51{
52 int cpu = cpu_logical_map(logical_cpu);
53 u32 tid = cpu & 0x3;
54 u32 pid = (cpu >> 2) & 0x07;
55 u32 ipi = (tid << 16) | (pid << 20);
56
57 if (action & SMP_CALL_FUNCTION)
58 ipi |= IRQ_IPI_SMP_FUNCTION;
59 else if (action & SMP_RESCHEDULE_YOURSELF)
60 ipi |= IRQ_IPI_SMP_RESCHEDULE;
61 else
62 return;
63
64 pic_send_ipi(ipi);
65}
66
67void nlm_send_ipi_single(int cpu, unsigned int action)
68{
69 core_send_ipi(cpu, action);
70}
71
72void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
73{
74 int cpu;
75
76 for_each_cpu(cpu, mask) {
77 core_send_ipi(cpu, action);
78 }
79}
80
81/* IRQ_IPI_SMP_FUNCTION Handler */
82void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
83{
84 smp_call_function_interrupt();
85}
86
87/* IRQ_IPI_SMP_RESCHEDULE handler */
88void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
89{
90 set_need_resched();
91}
92
93void nlm_common_ipi_handler(int irq, struct pt_regs *regs)
94{
95 if (irq == IRQ_IPI_SMP_FUNCTION) {
96 smp_call_function_interrupt();
97 } else {
98		/* Announce that we are here for rescheduling */
99 set_need_resched();
100 }
101}
102
103/*
104 * Called before going into mips code, early cpu init
105 */
106void nlm_early_init_secondary(void)
107{
108 write_c0_ebase((uint32_t)nlm_common_ebase);
109 /* TLB partition here later */
110}
111
112/*
113 * Code to run on secondary just after probing the CPU
114 */
115static void __cpuinit nlm_init_secondary(void)
116{
117 nlm_smp_irq_init();
118}
119
120void nlm_smp_finish(void)
121{
122#ifdef notyet
123 nlm_common_msgring_cpu_init();
124#endif
125}
126
127void nlm_cpus_done(void)
128{
129}
130
131/*
132 * Boot all other cpus in the system, initialize them, and bring them into
133 * the boot function
134 */
135int nlm_cpu_unblock[NR_CPUS];
136int nlm_cpu_ready[NR_CPUS];
137unsigned long nlm_next_gp;
138unsigned long nlm_next_sp;
139cpumask_t phys_cpu_present_map;
140
141void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
142{
143 unsigned long gp = (unsigned long)task_thread_info(idle);
144 unsigned long sp = (unsigned long)__KSTK_TOS(idle);
145 int cpu = cpu_logical_map(logical_cpu);
146
147 nlm_next_sp = sp;
148 nlm_next_gp = gp;
149
150 /* barrier */
151 __sync();
152 nlm_cpu_unblock[cpu] = 1;
153}
154
155void __init nlm_smp_setup(void)
156{
157 unsigned int boot_cpu;
158 int num_cpus, i;
159
160 boot_cpu = hard_smp_processor_id();
161 cpus_clear(phys_cpu_present_map);
162
163 cpu_set(boot_cpu, phys_cpu_present_map);
164 __cpu_number_map[boot_cpu] = 0;
165 __cpu_logical_map[0] = boot_cpu;
166 cpu_set(0, cpu_possible_map);
167
168 num_cpus = 1;
169 for (i = 0; i < NR_CPUS; i++) {
170 if (nlm_cpu_ready[i]) {
171 cpu_set(i, phys_cpu_present_map);
172 __cpu_number_map[i] = num_cpus;
173 __cpu_logical_map[num_cpus] = i;
174 cpu_set(num_cpus, cpu_possible_map);
175 ++num_cpus;
176 }
177 }
178
179 pr_info("Phys CPU present map: %lx, possible map %lx\n",
180 (unsigned long)phys_cpu_present_map.bits[0],
181 (unsigned long)cpu_possible_map.bits[0]);
182
183 pr_info("Detected %i Slave CPU(s)\n", num_cpus);
184}
185
186void nlm_prepare_cpus(unsigned int max_cpus)
187{
188}
189
190struct plat_smp_ops nlm_smp_ops = {
191 .send_ipi_single = nlm_send_ipi_single,
192 .send_ipi_mask = nlm_send_ipi_mask,
193 .init_secondary = nlm_init_secondary,
194 .smp_finish = nlm_smp_finish,
195 .cpus_done = nlm_cpus_done,
196 .boot_secondary = nlm_boot_secondary,
197 .smp_setup = nlm_smp_setup,
198 .prepare_cpus = nlm_prepare_cpus,
199};
200
201unsigned long secondary_entry_point;
202
203int nlm_wakeup_secondary_cpus(u32 wakeup_mask)
204{
205 unsigned int tid, pid, ipi, i, boot_cpu;
206 void *reset_vec;
207
208 secondary_entry_point = (unsigned long)prom_pre_boot_secondary_cpus;
209 reset_vec = (void *)CKSEG1ADDR(0x1fc00000);
210 memcpy(reset_vec, nlm_boot_smp_nmi, 0x80);
211 boot_cpu = hard_smp_processor_id();
212
213 for (i = 0; i < NR_CPUS; i++) {
214 if (i == boot_cpu)
215 continue;
216 if (wakeup_mask & (1u << i)) {
217 tid = i & 0x3;
218 pid = (i >> 2) & 0x7;
219 ipi = (tid << 16) | (pid << 20) | (1 << 8);
220 pic_send_ipi(ipi);
221 }
222 }
223
224 return 0;
225}
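Annotation: core_send_ipi() and nlm_wakeup_secondary_cpus() above both pack the physical CPU id into the PIC IPI word, with the thread-within-core in bits 17:16 and the core id in bits 22:20 (the wakeup path sets bit 8 as well). A standalone sketch of just that encoding, with a fabricated CPU number:

#include <assert.h>
#include <stdint.h>

/* Encode a physical XLR cpu id (4 threads per core) into the PIC IPI word,
 * mirroring core_send_ipi(): thread id in bits 17:16, core id in bits 22:20.
 * Any vector or NMI bits in the low bits are left to the caller.
 */
static uint32_t xlr_ipi_word(int cpu)
{
	uint32_t tid = cpu & 0x3;		/* thread within the core */
	uint32_t pid = (cpu >> 2) & 0x7;	/* core id */

	return (tid << 16) | (pid << 20);
}

int main(void)
{
	/* fabricated example: physical cpu 13 is core 3, thread 1 */
	assert(xlr_ipi_word(13) == ((1u << 16) | (3u << 20)));
	return 0;
}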
diff --git a/arch/mips/netlogic/xlr/smpboot.S b/arch/mips/netlogic/xlr/smpboot.S
new file mode 100644
index 000000000000..b8e074402c99
--- /dev/null
+++ b/arch/mips/netlogic/xlr/smpboot.S
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <asm/asm.h>
36#include <asm/asm-offsets.h>
37#include <asm/regdef.h>
38#include <asm/mipsregs.h>
39
40
41/* Don't jump to a Linux function from the bootloader stack; switch the
42 * stack here. The kernel might allocate bootloader memory before all the
43 * CPUs are brought up (e.g. the inode cache region), and we must not
44 * overwrite that memory
45 */
46NESTED(prom_pre_boot_secondary_cpus, 16, sp)
47 .set mips64
48 mfc0 t0, $15, 1 # read ebase
49 andi t0, 0x1f # t0 has the processor_id()
50 sll t0, 2 # offset in cpu array
51
52 PTR_LA t1, nlm_cpu_ready # mark CPU ready
53 PTR_ADDU t1, t0
54 li t2, 1
55 sw t2, 0(t1)
56
57 PTR_LA t1, nlm_cpu_unblock
58 PTR_ADDU t1, t0
591: lw t2, 0(t1) # wait till unblocked
60 beqz t2, 1b
61 nop
62
63 PTR_LA t1, nlm_next_sp
64 PTR_L sp, 0(t1)
65 PTR_LA t1, nlm_next_gp
66 PTR_L gp, 0(t1)
67
68 PTR_LA t0, nlm_early_init_secondary
69 jalr t0
70 nop
71
72 PTR_LA t0, smp_bootstrap
73 jr t0
74 nop
75END(prom_pre_boot_secondary_cpus)
76
77NESTED(nlm_boot_smp_nmi, 0, sp)
78 .set push
79 .set noat
80 .set mips64
81 .set noreorder
82
83 /* Clear the NMI and BEV bits */
84 MFC0 k0, CP0_STATUS
85 li k1, 0xffb7ffff
86 and k0, k0, k1
87 MTC0 k0, CP0_STATUS
88
89 PTR_LA k1, secondary_entry_point
90 PTR_L k0, 0(k1)
91 jr k0
92 nop
93 .set pop
94END(nlm_boot_smp_nmi)
diff --git a/arch/mips/netlogic/xlr/time.c b/arch/mips/netlogic/xlr/time.c
new file mode 100644
index 000000000000..0d81b262593c
--- /dev/null
+++ b/arch/mips/netlogic/xlr/time.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/init.h>
36
37#include <asm/time.h>
38#include <asm/netlogic/interrupt.h>
39#include <asm/netlogic/psb-bootinfo.h>
40
41unsigned int __cpuinit get_c0_compare_int(void)
42{
43 return IRQ_TIMER;
44}
45
46void __init plat_time_init(void)
47{
48 mips_hpt_frequency = nlm_prom_info.cpu_frequency;
49 pr_info("MIPS counter frequency [%ld]\n",
50 (unsigned long)mips_hpt_frequency);
51}
diff --git a/arch/mips/netlogic/xlr/xlr_console.c b/arch/mips/netlogic/xlr/xlr_console.c
new file mode 100644
index 000000000000..759df0692201
--- /dev/null
+++ b/arch/mips/netlogic/xlr/xlr_console.c
@@ -0,0 +1,46 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/types.h>
36#include <asm/netlogic/xlr/iomap.h>
37
38void prom_putchar(char c)
39{
40 nlm_reg_t *mmio;
41
42 mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);
43 while (netlogic_read_reg(mmio, 0x5) == 0)
44 ;
45 netlogic_write_reg(mmio, 0x0, c);
46}
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c9209ca6c8e7..4df879937446 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
41obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o 41obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
42obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o 42obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
43obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o 43obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
44obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o
44obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o 45obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
45obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o 46obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
46obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o 47obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
55obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o 56obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o
56obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o 57obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
57obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o 58obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
59obj-$(CONFIG_NLM_XLR) += pci-xlr.o
58 60
59ifdef CONFIG_PCI_MSI 61ifdef CONFIG_PCI_MSI
60obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o 62obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
new file mode 100644
index 000000000000..1f2afb55cc71
--- /dev/null
+++ b/arch/mips/pci/ops-lantiq.c
@@ -0,0 +1,116 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/mm.h>
15#include <asm/addrspace.h>
16#include <linux/vmalloc.h>
17
18#include <lantiq_soc.h>
19
20#include "pci-lantiq.h"
21
22#define LTQ_PCI_CFG_BUSNUM_SHF 16
23#define LTQ_PCI_CFG_DEVNUM_SHF 11
24#define LTQ_PCI_CFG_FUNNUM_SHF 8
25
26#define PCI_ACCESS_READ 0
27#define PCI_ACCESS_WRITE 1
28
29static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
30 unsigned int devfn, unsigned int where, u32 *data)
31{
32 unsigned long cfg_base;
33 unsigned long flags;
34 u32 temp;
35
36	/* we support slots 0 to 15; devfn 0x68 (AD29) is the
37	   SoC itself */
38 if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
39 || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
40 return 1;
41
42 spin_lock_irqsave(&ebu_lock, flags);
43
44 cfg_base = (unsigned long) ltq_pci_mapped_cfg;
45 cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn <<
46 LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);
47
48 /* Perform access */
49 if (access_type == PCI_ACCESS_WRITE) {
50 ltq_w32(swab32(*data), ((u32 *)cfg_base));
51 } else {
52 *data = ltq_r32(((u32 *)(cfg_base)));
53 *data = swab32(*data);
54 }
55 wmb();
56
57 /* clean possible Master abort */
58 cfg_base = (unsigned long) ltq_pci_mapped_cfg;
59 cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
60 temp = ltq_r32(((u32 *)(cfg_base)));
61 temp = swab32(temp);
62 cfg_base = (unsigned long) ltq_pci_mapped_cfg;
63 cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
64 ltq_w32(temp, ((u32 *)cfg_base));
65
66 spin_unlock_irqrestore(&ebu_lock, flags);
67
68 if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
69 return 1;
70
71 return 0;
72}
73
74int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn,
75 int where, int size, u32 *val)
76{
77 u32 data = 0;
78
79 if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
80 return PCIBIOS_DEVICE_NOT_FOUND;
81
82 if (size == 1)
83 *val = (data >> ((where & 3) << 3)) & 0xff;
84 else if (size == 2)
85 *val = (data >> ((where & 3) << 3)) & 0xffff;
86 else
87 *val = data;
88
89 return PCIBIOS_SUCCESSFUL;
90}
91
92int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn,
93 int where, int size, u32 val)
94{
95 u32 data = 0;
96
97 if (size == 4) {
98 data = val;
99 } else {
100 if (ltq_pci_config_access(PCI_ACCESS_READ, bus,
101 devfn, where, &data))
102 return PCIBIOS_DEVICE_NOT_FOUND;
103
104 if (size == 1)
105 data = (data & ~(0xff << ((where & 3) << 3))) |
106 (val << ((where & 3) << 3));
107 else if (size == 2)
108 data = (data & ~(0xffff << ((where & 3) << 3))) |
109 (val << ((where & 3) << 3));
110 }
111
112 if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
113 return PCIBIOS_DEVICE_NOT_FOUND;
114
115 return PCIBIOS_SUCCESSFUL;
116}
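Annotation: ltq_pci_read_config_dword() above always fetches the whole aligned dword and then extracts the requested byte or word with (data >> ((where & 3) << 3)) & mask. A standalone sketch of that extraction with a fabricated config dword; the offsets follow the standard PCI config-space layout (0x0c cacheline size, 0x0d latency timer, 0x0e header type, 0x0f BIST):

#include <assert.h>
#include <stdint.h>

/* Extract a size-byte field at config offset 'where' from the aligned
 * 32-bit dword that contains it, as the Lantiq read path does.
 */
static uint32_t cfg_extract(uint32_t dword, int where, int size)
{
	uint32_t shift = (where & 3) << 3;	/* byte lane -> bit offset */

	if (size == 1)
		return (dword >> shift) & 0xff;
	if (size == 2)
		return (dword >> shift) & 0xffff;
	return dword;
}

int main(void)
{
	/* fabricated dword at offset 0x0c: cacheline=0x08, latency=0x20,
	 * header type=0x00, BIST=0x00 */
	uint32_t dword_0c = 0x00002008;

	assert(cfg_extract(dword_0c, 0x0c, 1) == 0x08);		/* cacheline size */
	assert(cfg_extract(dword_0c, 0x0e, 1) == 0x00);		/* header type */
	assert(cfg_extract(dword_0c, 0x0c, 2) == 0x2008);
	return 0;
}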
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
new file mode 100644
index 000000000000..603d7493e966
--- /dev/null
+++ b/arch/mips/pci/pci-lantiq.c
@@ -0,0 +1,297 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/mm.h>
15#include <linux/vmalloc.h>
16#include <linux/platform_device.h>
17
18#include <asm/pci.h>
19#include <asm/gpio.h>
20#include <asm/addrspace.h>
21
22#include <lantiq_soc.h>
23#include <lantiq_irq.h>
24#include <lantiq_platform.h>
25
26#include "pci-lantiq.h"
27
28#define LTQ_PCI_CFG_BASE 0x17000000
29#define LTQ_PCI_CFG_SIZE 0x00008000
30#define LTQ_PCI_MEM_BASE 0x18000000
31#define LTQ_PCI_MEM_SIZE 0x02000000
32#define LTQ_PCI_IO_BASE 0x1AE00000
33#define LTQ_PCI_IO_SIZE 0x00200000
34
35#define PCI_CR_FCI_ADDR_MAP0 0x00C0
36#define PCI_CR_FCI_ADDR_MAP1 0x00C4
37#define PCI_CR_FCI_ADDR_MAP2 0x00C8
38#define PCI_CR_FCI_ADDR_MAP3 0x00CC
39#define PCI_CR_FCI_ADDR_MAP4 0x00D0
40#define PCI_CR_FCI_ADDR_MAP5 0x00D4
41#define PCI_CR_FCI_ADDR_MAP6 0x00D8
42#define PCI_CR_FCI_ADDR_MAP7 0x00DC
43#define PCI_CR_CLK_CTRL 0x0000
44#define PCI_CR_PCI_MOD 0x0030
45#define PCI_CR_PC_ARB 0x0080
46#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4
47#define PCI_CR_BAR11MASK 0x0044
48#define PCI_CR_BAR12MASK 0x0048
49#define PCI_CR_BAR13MASK 0x004C
50#define PCI_CS_BASE_ADDR1 0x0010
51#define PCI_CR_PCI_ADDR_MAP11 0x0064
52#define PCI_CR_FCI_BURST_LENGTH 0x00E8
53#define PCI_CR_PCI_EOI 0x002C
54#define PCI_CS_STS_CMD 0x0004
55
56#define PCI_MASTER0_REQ_MASK_2BITS 8
57#define PCI_MASTER1_REQ_MASK_2BITS 10
58#define PCI_MASTER2_REQ_MASK_2BITS 12
59#define INTERNAL_ARB_ENABLE_BIT 0
60
61#define LTQ_CGU_IFCCR 0x0018
62#define LTQ_CGU_PCICR 0x0034
63
64#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y))
65#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x))
66
67#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
68#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
69
70struct ltq_pci_gpio_map {
71 int pin;
72 int alt0;
73 int alt1;
74 int dir;
75 char *name;
76};
77
78/* the pci core can make use of the following gpios */
79static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
80 { 0, 1, 0, 0, "pci-exin0" },
81 { 1, 1, 0, 0, "pci-exin1" },
82 { 2, 1, 0, 0, "pci-exin2" },
83 { 39, 1, 0, 0, "pci-exin3" },
84 { 10, 1, 0, 0, "pci-exin4" },
85 { 9, 1, 0, 0, "pci-exin5" },
86 { 30, 1, 0, 1, "pci-gnt1" },
87 { 23, 1, 0, 1, "pci-gnt2" },
88 { 19, 1, 0, 1, "pci-gnt3" },
89 { 38, 1, 0, 1, "pci-gnt4" },
90 { 29, 1, 0, 0, "pci-req1" },
91 { 31, 1, 0, 0, "pci-req2" },
92 { 3, 1, 0, 0, "pci-req3" },
93 { 37, 1, 0, 0, "pci-req4" },
94};
95
96__iomem void *ltq_pci_mapped_cfg;
97static __iomem void *ltq_pci_membase;
98
99int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
100
101/* Since the PCI REQ pins can be reused for other functionality, make it
102 possible to exclude those from interpretation by the PCI controller */
103static int ltq_pci_req_mask = 0xf;
104
105static int *ltq_pci_irq_map;
106
107struct pci_ops ltq_pci_ops = {
108 .read = ltq_pci_read_config_dword,
109 .write = ltq_pci_write_config_dword
110};
111
112static struct resource pci_io_resource = {
113 .name = "pci io space",
114 .start = LTQ_PCI_IO_BASE,
115 .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
116 .flags = IORESOURCE_IO
117};
118
119static struct resource pci_mem_resource = {
120 .name = "pci memory space",
121 .start = LTQ_PCI_MEM_BASE,
122 .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
123 .flags = IORESOURCE_MEM
124};
125
126static struct pci_controller ltq_pci_controller = {
127 .pci_ops = &ltq_pci_ops,
128 .mem_resource = &pci_mem_resource,
129 .mem_offset = 0x00000000UL,
130 .io_resource = &pci_io_resource,
131 .io_offset = 0x00000000UL,
132};
133
134int pcibios_plat_dev_init(struct pci_dev *dev)
135{
136 if (ltqpci_plat_dev_init)
137 return ltqpci_plat_dev_init(dev);
138
139 return 0;
140}
141
142static u32 ltq_calc_bar11mask(void)
143{
144 u32 mem, bar11mask;
145
146 /* BAR11MASK value depends on available memory on system. */
147 mem = num_physpages * PAGE_SIZE;
148 bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8;
149
150 return bar11mask;
151}
152
153static void ltq_pci_setup_gpio(int gpio)
154{
155 int i;
156 for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
157 if (gpio & (1 << i)) {
158 ltq_gpio_request(ltq_pci_gpio_map[i].pin,
159 ltq_pci_gpio_map[i].alt0,
160 ltq_pci_gpio_map[i].alt1,
161 ltq_pci_gpio_map[i].dir,
162 ltq_pci_gpio_map[i].name);
163 }
164 }
165 ltq_gpio_request(21, 0, 0, 1, "pci-reset");
166 ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
167}
168
169static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
170{
171 u32 temp_buffer;
172
173 /* set clock to 33 MHz */
174 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
175 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
176
177 /* external or internal clock? */
178 if (conf->clock) {
179 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16),
180 LTQ_CGU_IFCCR);
181 ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR);
182 } else {
183 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
184 LTQ_CGU_IFCCR);
185 ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
186 }
187
188 /* set up the PCI clock and GPIOs used by PCI */
189 ltq_pci_setup_gpio(conf->gpio);
190
191 /* enable auto-switching between PCI and EBU */
192 ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
193
194 /* busy, i.e. configuration is not done, PCI access has to be retried */
195 ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
196 wmb();
197 /* BUS Master/IO/MEM access */
198 ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);
199
200 /* enable the 2 external PCI masters */
201 temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
202 temp_buffer &= (~(ltq_pci_req_mask << 16));
203 /* enable internal arbiter */
204 temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
205 /* enable internal PCI master request */
206 temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));
207
208 /* enable EBU request */
209 temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));
210
211 /* enable all external masters request */
212 temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
213 ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB);
214 wmb();
215
216 /* setup BAR memory regions */
217 ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0);
218 ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1);
219 ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2);
220 ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
221 ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4);
222 ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5);
223 ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6);
224 ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
225 ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg);
226 ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK);
227 ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11);
228 ltq_pci_w32(0, PCI_CS_BASE_ADDR1);
229 /* both TX and RX endian swap are enabled */
230 ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI);
231 wmb();
232 ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000,
233 PCI_CR_BAR12MASK);
234 ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000,
235 PCI_CR_BAR13MASK);
236 /* use 8 DW burst length */
237 ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH);
238 ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
239 wmb();
240
241 /* setup irq line */
242 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON);
243 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
244
245 /* toggle reset pin */
246 __gpio_set_value(21, 0);
247 wmb();
248 mdelay(1);
249 __gpio_set_value(21, 1);
250 return 0;
251}
252
253int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
254{
255 if (ltq_pci_irq_map[slot])
256 return ltq_pci_irq_map[slot];
257 printk(KERN_ERR "ltq_pci: trying to map irq for unknown slot %d\n",
258 slot);
259
260 return 0;
261}
262
263static int __devinit ltq_pci_probe(struct platform_device *pdev)
264{
265 struct ltq_pci_data *ltq_pci_data =
266 (struct ltq_pci_data *) pdev->dev.platform_data;
267 pci_probe_only = 0;
268 ltq_pci_irq_map = ltq_pci_data->irq;
269 ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
270 ltq_pci_mapped_cfg =
271 ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_SIZE);
272 ltq_pci_controller.io_map_base =
273 (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE);
274 ltq_pci_startup(ltq_pci_data);
275 register_pci_controller(&ltq_pci_controller);
276
277 return 0;
278}
279
280static struct platform_driver
281ltq_pci_driver = {
282 .probe = ltq_pci_probe,
283 .driver = {
284 .name = "ltq_pci",
285 .owner = THIS_MODULE,
286 },
287};
288
289int __init pcibios_init(void)
290{
291 int ret = platform_driver_register(&ltq_pci_driver);
292 if (ret)
293 printk(KERN_ERR "ltq_pci: Error registering platform driver!\n");
294 return ret;
295}
296
297arch_initcall(pcibios_init);
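For orientation, the driver above follows the usual MIPS PCI host-controller skeleton: config-space accessors behind a struct pci_ops, the memory and I/O apertures described as resources, and a register_pci_controller() call once the hardware is set up. Below is a condensed sketch of that skeleton; the demo_* names and the dummy accessors are illustrative only and are not part of the Lantiq driver.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>

/* dummy accessors: a real host driver would drive its config-space window here */
static int demo_cfg_read(struct pci_bus *bus, unsigned int devfn,
			 int where, int size, u32 *val)
{
	*val = 0xffffffff;		/* "no device present" pattern */
	return PCIBIOS_SUCCESSFUL;
}

static int demo_cfg_write(struct pci_bus *bus, unsigned int devfn,
			  int where, int size, u32 val)
{
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops demo_pci_ops = {
	.read	= demo_cfg_read,
	.write	= demo_cfg_write,
};

static struct resource demo_mem_resource = {
	.name	= "demo pci memory space",
	.start	= 0x18000000,
	.end	= 0x19ffffff,
	.flags	= IORESOURCE_MEM,
};

static struct resource demo_io_resource = {
	.name	= "demo pci io space",
	.start	= 0x1ae00000,
	.end	= 0x1affffff,
	.flags	= IORESOURCE_IO,
};

static struct pci_controller demo_controller = {
	.pci_ops	= &demo_pci_ops,
	.mem_resource	= &demo_mem_resource,
	.io_resource	= &demo_io_resource,
};

static int __init demo_pcibios_init(void)
{
	register_pci_controller(&demo_controller);
	return 0;
}
arch_initcall(demo_pcibios_init);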
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h
new file mode 100644
index 000000000000..66bf6cd6be3c
--- /dev/null
+++ b/arch/mips/pci/pci-lantiq.h
@@ -0,0 +1,18 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_PCI_H__
10#define _LTQ_PCI_H__
11
12extern __iomem void *ltq_pci_mapped_cfg;
13extern int ltq_pci_read_config_dword(struct pci_bus *bus,
14 unsigned int devfn, int where, int size, u32 *val);
15extern int ltq_pci_write_config_dword(struct pci_bus *bus,
16 unsigned int devfn, int where, int size, u32 val);
17
18#endif
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
new file mode 100644
index 000000000000..38fece16c435
--- /dev/null
+++ b/arch/mips/pci/pci-xlr.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
3 * reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the NetLogic
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/types.h>
36#include <linux/pci.h>
37#include <linux/kernel.h>
38#include <linux/init.h>
39#include <linux/mm.h>
40#include <linux/console.h>
41
42#include <asm/io.h>
43
44#include <asm/netlogic/interrupt.h>
45#include <asm/netlogic/xlr/iomap.h>
46#include <asm/netlogic/xlr/pic.h>
47#include <asm/netlogic/xlr/xlr.h>
48
49static void *pci_config_base;
50
51#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
52
53/* PCI ops */
54static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
55 int where)
56{
57 u32 data;
58 u32 *cfgaddr;
59
60 cfgaddr = (u32 *)(pci_config_base +
61 pci_cfg_addr(bus->number, devfn, where & ~3));
62 data = *cfgaddr;
63 return cpu_to_le32(data);
64}
65
66static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
67 int where, u32 data)
68{
69 u32 *cfgaddr;
70
71 cfgaddr = (u32 *)(pci_config_base +
72 pci_cfg_addr(bus->number, devfn, where & ~3));
73 *cfgaddr = cpu_to_le32(data);
74}
75
76static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
77 int where, int size, u32 *val)
78{
79 u32 data;
80
81 if ((size == 2) && (where & 1))
82 return PCIBIOS_BAD_REGISTER_NUMBER;
83 else if ((size == 4) && (where & 3))
84 return PCIBIOS_BAD_REGISTER_NUMBER;
85
86 data = pci_cfg_read_32bit(bus, devfn, where);
87
88 if (size == 1)
89 *val = (data >> ((where & 3) << 3)) & 0xff;
90 else if (size == 2)
91 *val = (data >> ((where & 3) << 3)) & 0xffff;
92 else
93 *val = data;
94
95 return PCIBIOS_SUCCESSFUL;
96}
97
98
99static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
100 int where, int size, u32 val)
101{
102 u32 data;
103
104 if ((size == 2) && (where & 1))
105 return PCIBIOS_BAD_REGISTER_NUMBER;
106 else if ((size == 4) && (where & 3))
107 return PCIBIOS_BAD_REGISTER_NUMBER;
108
109 data = pci_cfg_read_32bit(bus, devfn, where);
110
111 if (size == 1)
112 data = (data & ~(0xff << ((where & 3) << 3))) |
113 (val << ((where & 3) << 3));
114 else if (size == 2)
115 data = (data & ~(0xffff << ((where & 3) << 3))) |
116 (val << ((where & 3) << 3));
117 else
118 data = val;
119
120 pci_cfg_write_32bit(bus, devfn, where, data);
121
122 return PCIBIOS_SUCCESSFUL;
123}
124
125struct pci_ops nlm_pci_ops = {
126 .read = nlm_pcibios_read,
127 .write = nlm_pcibios_write
128};
129
130static struct resource nlm_pci_mem_resource = {
131 .name = "XLR PCI MEM",
132 .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
133 .end = 0xdfffffffUL,
134 .flags = IORESOURCE_MEM,
135};
136
137static struct resource nlm_pci_io_resource = {
138 .name = "XLR IO MEM",
139 .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
140 .end = 0x100fffffUL,
141 .flags = IORESOURCE_IO,
142};
143
144struct pci_controller nlm_pci_controller = {
145 .index = 0,
146 .pci_ops = &nlm_pci_ops,
147 .mem_resource = &nlm_pci_mem_resource,
148 .mem_offset = 0x00000000UL,
149 .io_resource = &nlm_pci_io_resource,
150 .io_offset = 0x00000000UL,
151};
152
153int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
154{
155 if (!nlm_chip_is_xls())
156 return PIC_PCIX_IRQ; /* for XLR just one IRQ*/
157
158 /*
159 * For XLS PCIe, there is an IRQ per Link, find out which
160 * link the device is on to assign interrupts
161 */
162 if (dev->bus->self == NULL)
163 return 0;
164
165 switch (dev->bus->self->devfn) {
166 case 0x0:
167 return PIC_PCIE_LINK0_IRQ;
168 case 0x8:
169 return PIC_PCIE_LINK1_IRQ;
170 case 0x10:
171 if (nlm_chip_is_xls_b())
172 return PIC_PCIE_XLSB0_LINK2_IRQ;
173 else
174 return PIC_PCIE_LINK2_IRQ;
175 case 0x18:
176 if (nlm_chip_is_xls_b())
177 return PIC_PCIE_XLSB0_LINK3_IRQ;
178 else
179 return PIC_PCIE_LINK3_IRQ;
180 }
181 WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
182 return 0;
183}
184
185/* Do platform specific device initialization at pci_enable_device() time */
186int pcibios_plat_dev_init(struct pci_dev *dev)
187{
188 return 0;
189}
190
191static int __init pcibios_init(void)
192{
193 /* PSB assigns PCI resources */
194 pci_probe_only = 1;
195 pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
196
197 /* Extend IO port for memory mapped io */
198 ioport_resource.start = 0;
199 ioport_resource.end = ~0;
200
201 set_io_port_base(CKSEG1);
202 nlm_pci_controller.io_map_base = CKSEG1;
203
204 pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n");
205 register_pci_controller(&nlm_pci_controller);
206
207 return 0;
208}
209
210arch_initcall(pcibios_init);
211
212struct pci_fixup pcibios_fixups[] = {
213 {0}
214};
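A note on the sub-word config accesses above: nlm_pcibios_write() works by read-modify-write on the aligned 32-bit word, where the byte offset (where & 3) selects a shift of 0, 8, 16 or 24 bits and the affected lanes are masked out before the new value is inserted. The following stand-alone C sketch reproduces just that arithmetic; the helper name is made up.

#include <stdint.h>
#include <stdio.h>

/* merge a 1-, 2- or 4-byte value into the aligned 32-bit word at offset (where & 3) */
static uint32_t insert_lanes(uint32_t word, int where, int size, uint32_t val)
{
	int shift = (where & 3) << 3;			/* 0, 8, 16 or 24 bits */
	uint32_t mask = (size == 1) ? 0xff : 0xffff;

	if (size == 4)
		return val;
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	/* writing the byte 0x5a at offset 2 of 0x11223344 yields 0x115a3344 */
	printf("0x%08x\n", (unsigned int)insert_lanes(0x11223344, 2, 1, 0x5a));
	return 0;
}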
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
index f9b9dcdfa9dd..98fd0099d964 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
@@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d,
97 97
98static struct irq_chip msp_per_irq_controller = { 98static struct irq_chip msp_per_irq_controller = {
99 .name = "MSP_PER", 99 .name = "MSP_PER",
100 .irq_enable = unmask_per_irq. 100 .irq_enable = unmask_per_irq,
101 .irq_disable = mask_per_irq, 101 .irq_disable = mask_per_irq,
102 .irq_ack = msp_per_irq_ack, 102 .irq_ack = msp_per_irq_ack,
103#ifdef CONFIG_SMP 103#ifdef CONFIG_SMP
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index dbb5c7b4b70f..f8a751c03282 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume)
350: 350:
36 PTR_L t1, PBE_ADDRESS(t0) /* source */ 36 PTR_L t1, PBE_ADDRESS(t0) /* source */
37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ 37 PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
38 PTR_ADDIU t3, t1, PAGE_SIZE 38 PTR_ADDU t3, t1, PAGE_SIZE
391: 391:
40 REG_L t8, (t1) 40 REG_L t8, (t1)
41 REG_S t8, (t2) 41 REG_S t8, (t2)
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 37de05d595e7..6c47dfeb7be3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void)
185 struct resource *r; 185 struct resource *r;
186 186
187 r = rb532_gpio_reg0_res; 187 r = rb532_gpio_reg0_res;
188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); 188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));
189 189
190 if (!rb532_gpio_chip->regbase) { 190 if (!rb532_gpio_chip->regbase) {
191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); 191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index deddbf0ebe5c..698904daf901 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -132,7 +132,7 @@ static struct platform_device eth1_device = {
132 */ 132 */
133static int __init sgiseeq_devinit(void) 133static int __init sgiseeq_devinit(void)
134{ 134{
135 unsigned int tmp; 135 unsigned int pbdma __maybe_unused;
136 int res, i; 136 int res, i;
137 137
138 eth0_pd.hpc = hpc3c0; 138 eth0_pd.hpc = hpc3c0;
@@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void)
151 151
152 /* Second HPC is missing? */ 152 /* Second HPC is missing? */
153 if (ip22_is_fullhouse() || 153 if (ip22_is_fullhouse() ||
154 get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) 154 get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
155 return 0; 155 return 0;
156 156
157 sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | 157 sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index 603fc91c1030..1a94c9894188 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -32,7 +32,7 @@
32static unsigned long dosample(void) 32static unsigned long dosample(void)
33{ 33{
34 u32 ct0, ct1; 34 u32 ct0, ct1;
35 u8 msb, lsb; 35 u8 msb;
36 36
37 /* Start the counter. */ 37 /* Start the counter. */
38 sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | 38 sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
@@ -46,7 +46,7 @@ static unsigned long dosample(void)
46 /* Latch and spin until top byte of counter2 is zero */ 46 /* Latch and spin until top byte of counter2 is zero */
47 do { 47 do {
48 writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); 48 writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword);
49 lsb = readb(&sgint->tcnt2); 49 (void) readb(&sgint->tcnt2);
50 msb = readb(&sgint->tcnt2); 50 msb = readb(&sgint->tcnt2);
51 ct1 = read_c0_count(); 51 ct1 = read_c0_count();
52 } while (msb); 52 } while (msb);
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index a1fa4abb3f6a..cd0d5b06cd83 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
29 unsigned long xtalk_addr, size_t size) 29 unsigned long xtalk_addr, size_t size)
30{ 30{
31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 31 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
32 volatile hubreg_t junk;
33 unsigned i; 32 unsigned i;
34 33
35 /* use small-window mapping if possible */ 34 /* use small-window mapping if possible */
@@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
64 * after we write it. 63 * after we write it.
65 */ 64 */
66 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); 65 IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
67 junk = HUB_L(IIO_ITTE_GET(nasid, i)); 66 (void) HUB_L(IIO_ITTE_GET(nasid, i));
68 67
69 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); 68 return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
70 } 69 }
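The hub_pio_map() change above, like the ip22-time change earlier and the sni/time one further down, replaces a write-only dummy variable with a (void) cast: the read is still issued, because it is what flushes the preceding posted write or advances the counter latch, but no unused value is left around for the compiler to warn about. A minimal sketch of the idiom, with invented register offsets:

#include <linux/io.h>

static void __iomem *demo_regs;		/* assumed to have been ioremap()ed elsewhere */

#define DEMO_CTRL	0x00
#define DEMO_STATUS	0x04

static void demo_kick(void)
{
	writel(0x1, demo_regs + DEMO_CTRL);
	/* read back purely for its side effect: the posted write reaches the device */
	(void) readl(demo_regs + DEMO_STATUS);
}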
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index c3d30a88daf3..1d1919a44e88 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -54,11 +54,8 @@ void __init setup_replication_mask(void)
54 54
55static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) 55static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
56{ 56{
57 cnodeid_t client_cnode;
58 kern_vars_t *kvp; 57 kern_vars_t *kvp;
59 58
60 client_cnode = NASID_TO_COMPACT_NODEID(client_nasid);
61
62 kvp = &hub_data(client_nasid)->kern_vars; 59 kvp = &hub_data(client_nasid)->kern_vars;
63 60
64 KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; 61 KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index a152538d3c97..3f810c9cbf83 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -66,18 +66,7 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
66static void rt_set_mode(enum clock_event_mode mode, 66static void rt_set_mode(enum clock_event_mode mode,
67 struct clock_event_device *evt) 67 struct clock_event_device *evt)
68{ 68{
69 switch (mode) { 69 /* Nothing to do ... */
70 case CLOCK_EVT_MODE_ONESHOT:
71 /* The only mode supported */
72 break;
73
74 case CLOCK_EVT_MODE_PERIODIC:
75 case CLOCK_EVT_MODE_UNUSED:
76 case CLOCK_EVT_MODE_SHUTDOWN:
77 case CLOCK_EVT_MODE_RESUME:
78 /* Nothing to do */
79 break;
80 }
81} 70}
82 71
83int rt_timer_irq; 72int rt_timer_irq;
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index c76151b56568..0904d4d30cb3 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void)
95static __init unsigned long dosample(void) 95static __init unsigned long dosample(void)
96{ 96{
97 u32 ct0, ct1; 97 u32 ct0, ct1;
98 volatile u8 msb, lsb; 98 volatile u8 msb;
99 99
100 /* Start the counter. */ 100 /* Start the counter. */
101 outb_p(0x34, 0x43); 101 outb_p(0x34, 0x43);
@@ -108,7 +108,7 @@ static __init unsigned long dosample(void)
108 /* Latch and spin until top byte of counter0 is zero */ 108 /* Latch and spin until top byte of counter0 is zero */
109 do { 109 do {
110 outb(0x00, 0x43); 110 outb(0x00, 0x43);
111 lsb = inb(0x40); 111 (void) inb(0x40);
112 msb = inb(0x40); 112 msb = inb(0x40);
113 ct1 = read_c0_count(); 113 ct1 = read_c0_count();
114 } while (msb); 114 } while (msb);
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 7005ee0b074d..49baddcdd14e 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -3,7 +3,6 @@
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/irq.h> 5#include <linux/irq.h>
6#include <linux/sysdev.h>
7#include <asm/dcr.h> 6#include <asm/dcr.h>
8#include <asm/msi_bitmap.h> 7#include <asm/msi_bitmap.h>
9 8
@@ -320,8 +319,6 @@ struct mpic
320 /* link */ 319 /* link */
321 struct mpic *next; 320 struct mpic *next;
322 321
323 struct sys_device sysdev;
324
325#ifdef CONFIG_PM 322#ifdef CONFIG_PM
326 struct mpic_irq_save *save_data; 323 struct mpic_irq_save *save_data;
327#endif 324#endif
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 188272934cfb..104faa8aa23c 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
318 .end = mpc83xx_suspend_end, 318 .end = mpc83xx_suspend_end,
319}; 319};
320 320
321static struct of_device_id pmc_match[];
321static int pmc_probe(struct platform_device *ofdev) 322static int pmc_probe(struct platform_device *ofdev)
322{ 323{
324 const struct of_device_id *match;
323 struct device_node *np = ofdev->dev.of_node; 325 struct device_node *np = ofdev->dev.of_node;
324 struct resource res; 326 struct resource res;
325 struct pmc_type *type; 327 struct pmc_type *type;
326 int ret = 0; 328 int ret = 0;
327 329
328 if (!ofdev->dev.of_match) 330 match = of_match_device(pmc_match, &ofdev->dev);
331 if (!match)
329 return -EINVAL; 332 return -EINVAL;
330 333
331 type = ofdev->dev.of_match->data; 334 type = match->data;
332 335
333 if (!of_device_is_available(np)) 336 if (!of_device_is_available(np))
334 return -ENODEV; 337 return -ENODEV;
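The hunk above is one instance of a conversion repeated in this series (fsl_msi, sabre, schizo): the removed ofdev->dev.of_match pointer is replaced by an explicit of_match_device() lookup against a forward-declared match table, and the per-variant data is taken from the returned entry. A self-contained sketch of the pattern, using invented names and compatible strings:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_cfg {
	int variant;
};

static const struct demo_cfg demo_a_cfg = { .variant = 1 };
static const struct demo_cfg demo_b_cfg = { .variant = 2 };

/* forward declaration so the table can be referenced from probe() */
static const struct of_device_id demo_match[];

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct demo_cfg *cfg;

	match = of_match_device(demo_match, &pdev->dev);
	if (!match)
		return -EINVAL;

	cfg = match->data;		/* per-variant data hangs off the match entry */
	dev_info(&pdev->dev, "probed variant %d\n", cfg->variant);
	return 0;
}

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-a", .data = &demo_a_cfg },
	{ .compatible = "vendor,demo-b", .data = &demo_b_cfg },
	{ }
};

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name		= "demo",
		.owner		= THIS_MODULE,
		.of_match_table	= demo_match,
	},
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);
}
module_init(demo_init);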
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index acfaccea5f4f..3675da73623f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -32,6 +32,7 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/linux_logo.h> 34#include <linux/linux_logo.h>
35#include <linux/syscore_ops.h>
35#include <asm/spu.h> 36#include <asm/spu.h>
36#include <asm/spu_priv1.h> 37#include <asm/spu_priv1.h>
37#include <asm/spu_csa.h> 38#include <asm/spu_csa.h>
@@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu)
521} 522}
522EXPORT_SYMBOL_GPL(spu_init_channels); 523EXPORT_SYMBOL_GPL(spu_init_channels);
523 524
524static int spu_shutdown(struct sys_device *sysdev)
525{
526 struct spu *spu = container_of(sysdev, struct spu, sysdev);
527
528 spu_free_irqs(spu);
529 spu_destroy_spu(spu);
530 return 0;
531}
532
533static struct sysdev_class spu_sysdev_class = { 525static struct sysdev_class spu_sysdev_class = {
534 .name = "spu", 526 .name = "spu",
535 .shutdown = spu_shutdown,
536}; 527};
537 528
538int spu_add_sysdev_attr(struct sysdev_attribute *attr) 529int spu_add_sysdev_attr(struct sysdev_attribute *attr)
@@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list)
797} 788}
798#endif 789#endif
799 790
791static void spu_shutdown(void)
792{
793 struct spu *spu;
794
795 mutex_lock(&spu_full_list_mutex);
796 list_for_each_entry(spu, &spu_full_list, full_list) {
797 spu_free_irqs(spu);
798 spu_destroy_spu(spu);
799 }
800 mutex_unlock(&spu_full_list_mutex);
801}
802
803static struct syscore_ops spu_syscore_ops = {
804 .shutdown = spu_shutdown,
805};
806
800static int __init init_spu_base(void) 807static int __init init_spu_base(void)
801{ 808{
802 int i, ret = 0; 809 int i, ret = 0;
@@ -830,6 +837,7 @@ static int __init init_spu_base(void)
830 crash_register_spus(&spu_full_list); 837 crash_register_spus(&spu_full_list);
831 mutex_unlock(&spu_full_list_mutex); 838 mutex_unlock(&spu_full_list_mutex);
832 spu_add_sysdev_attr(&attr_stat); 839 spu_add_sysdev_attr(&attr_stat);
840 register_syscore_ops(&spu_syscore_ops);
833 841
834 spu_init_affinity(); 842 spu_init_affinity();
835 843
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 023f24086a0a..7c18a1607d1c 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -21,7 +21,7 @@
21#include <linux/signal.h> 21#include <linux/signal.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/sysdev.h> 24#include <linux/syscore_ops.h>
25#include <linux/adb.h> 25#include <linux/adb.h>
26#include <linux/pmu.h> 26#include <linux/pmu.h>
27#include <linux/module.h> 27#include <linux/module.h>
@@ -677,7 +677,7 @@ not_found:
677 return viaint; 677 return viaint;
678} 678}
679 679
680static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) 680static int pmacpic_suspend(void)
681{ 681{
682 int viaint = pmacpic_find_viaint(); 682 int viaint = pmacpic_find_viaint();
683 683
@@ -698,7 +698,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
698 return 0; 698 return 0;
699} 699}
700 700
701static int pmacpic_resume(struct sys_device *sysdev) 701static void pmacpic_resume(void)
702{ 702{
703 int i; 703 int i;
704 704
@@ -709,39 +709,19 @@ static int pmacpic_resume(struct sys_device *sysdev)
709 for (i = 0; i < max_real_irqs; ++i) 709 for (i = 0; i < max_real_irqs; ++i)
710 if (test_bit(i, sleep_save_mask)) 710 if (test_bit(i, sleep_save_mask))
711 pmac_unmask_irq(irq_get_irq_data(i)); 711 pmac_unmask_irq(irq_get_irq_data(i));
712
713 return 0;
714} 712}
715 713
716#endif /* CONFIG_PM && CONFIG_PPC32 */ 714static struct syscore_ops pmacpic_syscore_ops = {
717 715 .suspend = pmacpic_suspend,
718static struct sysdev_class pmacpic_sysclass = { 716 .resume = pmacpic_resume,
719 .name = "pmac_pic",
720}; 717};
721 718
722static struct sys_device device_pmacpic = { 719static int __init init_pmacpic_syscore(void)
723 .id = 0,
724 .cls = &pmacpic_sysclass,
725};
726
727static struct sysdev_driver driver_pmacpic = {
728#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
729 .suspend = &pmacpic_suspend,
730 .resume = &pmacpic_resume,
731#endif /* CONFIG_PM && CONFIG_PPC32 */
732};
733
734static int __init init_pmacpic_sysfs(void)
735{ 720{
736#ifdef CONFIG_PPC32 721 register_syscore_ops(&pmacpic_syscore_ops);
737 if (max_irqs == 0)
738 return -ENODEV;
739#endif
740 printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
741 sysdev_class_register(&pmacpic_sysclass);
742 sysdev_register(&device_pmacpic);
743 sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
744 return 0; 722 return 0;
745} 723}
746machine_subsys_initcall(powermac, init_pmacpic_sysfs);
747 724
725machine_subsys_initcall(powermac, init_pmacpic_syscore);
726
727#endif /* CONFIG_PM && CONFIG_PPC32 */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index d5679dc1e20f..01cd2f089512 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
304 return 0; 304 return 0;
305} 305}
306 306
307static const struct of_device_id fsl_of_msi_ids[];
307static int __devinit fsl_of_msi_probe(struct platform_device *dev) 308static int __devinit fsl_of_msi_probe(struct platform_device *dev)
308{ 309{
310 const struct of_device_id *match;
309 struct fsl_msi *msi; 311 struct fsl_msi *msi;
310 struct resource res; 312 struct resource res;
311 int err, i, j, irq_index, count; 313 int err, i, j, irq_index, count;
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
316 u32 offset; 318 u32 offset;
317 static const u32 all_avail[] = { 0, NR_MSI_IRQS }; 319 static const u32 all_avail[] = { 0, NR_MSI_IRQS };
318 320
319 if (!dev->dev.of_match) 321 match = of_match_device(fsl_of_msi_ids, &dev->dev);
322 if (!match)
320 return -EINVAL; 323 return -EINVAL;
321 features = dev->dev.of_match->data; 324 features = match->data;
322 325
323 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 326 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
324 327
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index fa438be962b7..596554a8725e 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -18,7 +18,7 @@
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/sysdev.h> 21#include <linux/syscore_ops.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
@@ -902,7 +902,7 @@ static struct {
902 u32 sercr; 902 u32 sercr;
903} ipic_saved_state; 903} ipic_saved_state;
904 904
905static int ipic_suspend(struct sys_device *sdev, pm_message_t state) 905static int ipic_suspend(void)
906{ 906{
907 struct ipic *ipic = primary_ipic; 907 struct ipic *ipic = primary_ipic;
908 908
@@ -933,7 +933,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state)
933 return 0; 933 return 0;
934} 934}
935 935
936static int ipic_resume(struct sys_device *sdev) 936static void ipic_resume(void)
937{ 937{
938 struct ipic *ipic = primary_ipic; 938 struct ipic *ipic = primary_ipic;
939 939
@@ -949,44 +949,26 @@ static int ipic_resume(struct sys_device *sdev)
949 ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); 949 ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
950 ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); 950 ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
951 ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); 951 ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
952
953 return 0;
954} 952}
955#else 953#else
956#define ipic_suspend NULL 954#define ipic_suspend NULL
957#define ipic_resume NULL 955#define ipic_resume NULL
958#endif 956#endif
959 957
960static struct sysdev_class ipic_sysclass = { 958static struct syscore_ops ipic_syscore_ops = {
961 .name = "ipic",
962 .suspend = ipic_suspend, 959 .suspend = ipic_suspend,
963 .resume = ipic_resume, 960 .resume = ipic_resume,
964}; 961};
965 962
966static struct sys_device device_ipic = { 963static int __init init_ipic_syscore(void)
967 .id = 0,
968 .cls = &ipic_sysclass,
969};
970
971static int __init init_ipic_sysfs(void)
972{ 964{
973 int rc;
974
975 if (!primary_ipic || !primary_ipic->regs) 965 if (!primary_ipic || !primary_ipic->regs)
976 return -ENODEV; 966 return -ENODEV;
977 printk(KERN_DEBUG "Registering ipic with sysfs...\n");
978 967
979 rc = sysdev_class_register(&ipic_sysclass); 968 printk(KERN_DEBUG "Registering ipic system core operations\n");
980 if (rc) { 969 register_syscore_ops(&ipic_syscore_ops);
981 printk(KERN_ERR "Failed registering ipic sys class\n"); 970
982 return -ENODEV;
983 }
984 rc = sysdev_register(&device_ipic);
985 if (rc) {
986 printk(KERN_ERR "Failed registering ipic sys device\n");
987 return -ENODEV;
988 }
989 return 0; 971 return 0;
990} 972}
991 973
992subsys_initcall(init_ipic_sysfs); 974subsys_initcall(init_ipic_syscore);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index f91c065bed5a..7e5dc8f4984a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -27,6 +27,7 @@
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/syscore_ops.h>
30 31
31#include <asm/ptrace.h> 32#include <asm/ptrace.h>
32#include <asm/signal.h> 33#include <asm/signal.h>
@@ -1702,9 +1703,8 @@ void mpic_reset_core(int cpu)
1702#endif /* CONFIG_SMP */ 1703#endif /* CONFIG_SMP */
1703 1704
1704#ifdef CONFIG_PM 1705#ifdef CONFIG_PM
1705static int mpic_suspend(struct sys_device *dev, pm_message_t state) 1706static void mpic_suspend_one(struct mpic *mpic)
1706{ 1707{
1707 struct mpic *mpic = container_of(dev, struct mpic, sysdev);
1708 int i; 1708 int i;
1709 1709
1710 for (i = 0; i < mpic->num_sources; i++) { 1710 for (i = 0; i < mpic->num_sources; i++) {
@@ -1713,13 +1713,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state)
1713 mpic->save_data[i].dest = 1713 mpic->save_data[i].dest =
1714 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); 1714 mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
1715 } 1715 }
1716}
1717
1718static int mpic_suspend(void)
1719{
1720 struct mpic *mpic = mpics;
1721
1722 while (mpic) {
1723 mpic_suspend_one(mpic);
1724 mpic = mpic->next;
1725 }
1716 1726
1717 return 0; 1727 return 0;
1718} 1728}
1719 1729
1720static int mpic_resume(struct sys_device *dev) 1730static void mpic_resume_one(struct mpic *mpic)
1721{ 1731{
1722 struct mpic *mpic = container_of(dev, struct mpic, sysdev);
1723 int i; 1732 int i;
1724 1733
1725 for (i = 0; i < mpic->num_sources; i++) { 1734 for (i = 0; i < mpic->num_sources; i++) {
@@ -1746,33 +1755,28 @@ static int mpic_resume(struct sys_device *dev)
1746 } 1755 }
1747#endif 1756#endif
1748 } /* end for loop */ 1757 } /* end for loop */
1758}
1749 1759
1750 return 0; 1760static void mpic_resume(void)
1761{
1762 struct mpic *mpic = mpics;
1763
1764 while (mpic) {
1765 mpic_resume_one(mpic);
1766 mpic = mpic->next;
1767 }
1751} 1768}
1752#endif
1753 1769
1754static struct sysdev_class mpic_sysclass = { 1770static struct syscore_ops mpic_syscore_ops = {
1755#ifdef CONFIG_PM
1756 .resume = mpic_resume, 1771 .resume = mpic_resume,
1757 .suspend = mpic_suspend, 1772 .suspend = mpic_suspend,
1758#endif
1759 .name = "mpic",
1760}; 1773};
1761 1774
1762static int mpic_init_sys(void) 1775static int mpic_init_sys(void)
1763{ 1776{
1764 struct mpic *mpic = mpics; 1777 register_syscore_ops(&mpic_syscore_ops);
1765 int error, id = 0; 1778 return 0;
1766
1767 error = sysdev_class_register(&mpic_sysclass);
1768
1769 while (mpic && !error) {
1770 mpic->sysdev.cls = &mpic_sysclass;
1771 mpic->sysdev.id = id++;
1772 error = sysdev_register(&mpic->sysdev);
1773 mpic = mpic->next;
1774 }
1775 return error;
1776} 1779}
1777 1780
1778device_initcall(mpic_init_sys); 1781device_initcall(mpic_init_sys);
1782#endif
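The MPIC conversion above, like the IPIC and PowerMac PIC ones before it, follows one recipe: drop the per-instance sys_device, keep the actual save/restore code, and register a single global struct syscore_ops whose callbacks walk the driver's own list of instances. A condensed sketch of that recipe; the demo_* names are illustrative and the register accesses are stubbed with plain fields.

#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/types.h>

struct demo_pic {
	struct demo_pic *next;		/* driver-private list of instances */
	u32 hw_mask;			/* stands in for a real mask register */
	u32 saved_mask;
};

static struct demo_pic *demo_pics;

static int demo_pic_suspend(void)
{
	struct demo_pic *pic;

	for (pic = demo_pics; pic; pic = pic->next)
		pic->saved_mask = pic->hw_mask;		/* register read in real life */
	return 0;				/* may return -errno to abort suspend */
}

static void demo_pic_resume(void)
{
	struct demo_pic *pic;

	for (pic = demo_pics; pic; pic = pic->next)
		pic->hw_mask = pic->saved_mask;		/* register write in real life */
}

static struct syscore_ops demo_pic_syscore_ops = {
	.suspend	= demo_pic_suspend,
	.resume		= demo_pic_resume,
};

static int __init demo_pic_init_syscore(void)
{
	register_syscore_ops(&demo_pic_syscore_ops);
	return 0;
}
device_initcall(demo_pic_init_syscore);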
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2508a6f31588..4a7f14079e03 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -88,6 +88,7 @@ config S390
88 select HAVE_KERNEL_XZ 88 select HAVE_KERNEL_XZ
89 select HAVE_GET_USER_PAGES_FAST 89 select HAVE_GET_USER_PAGES_FAST
90 select HAVE_ARCH_MUTEX_CPU_RELAX 90 select HAVE_ARCH_MUTEX_CPU_RELAX
91 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
91 select ARCH_INLINE_SPIN_TRYLOCK 92 select ARCH_INLINE_SPIN_TRYLOCK
92 select ARCH_INLINE_SPIN_TRYLOCK_BH 93 select ARCH_INLINE_SPIN_TRYLOCK_BH
93 select ARCH_INLINE_SPIN_LOCK 94 select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 43a5c78046db..3e20383d0921 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable);
11int set_memory_ro(unsigned long addr, int numpages); 11int set_memory_ro(unsigned long addr, int numpages);
12int set_memory_rw(unsigned long addr, int numpages); 12int set_memory_rw(unsigned long addr, int numpages);
13int set_memory_nx(unsigned long addr, int numpages); 13int set_memory_nx(unsigned long addr, int numpages);
14int set_memory_x(unsigned long addr, int numpages);
14 15
15#endif /* _S390_CACHEFLUSH_H */ 16#endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 72b2e2f2d32d..7e91c58072e2 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -9,9 +9,22 @@
9#define _ASM_S390_DIAG_H 9#define _ASM_S390_DIAG_H
10 10
11/* 11/*
12 * Diagnose 10: Release pages 12 * Diagnose 10: Release page range
13 */ 13 */
14extern void diag10(unsigned long addr); 14static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
15{
16 unsigned long start_addr, end_addr;
17
18 start_addr = start_pfn << PAGE_SHIFT;
19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
20
21 asm volatile(
22 "0: diag %0,%1,0x10\n"
23 "1:\n"
24 EX_TABLE(0b, 1b)
25 EX_TABLE(1b, 1b)
26 : : "a" (start_addr), "a" (end_addr));
27}
15 28
16/* 29/*
17 * Diagnose 14: Input spool file manipulation 30 * Diagnose 14: Input spool file manipulation
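Since diag10_range() now takes a starting page frame number and a page count instead of a raw address, callers convert with PAGE_SHIFT; the cmm change later in this patch releases a single page as diag10_range(addr >> PAGE_SHIFT, 1). A tiny stand-alone C example of the same address arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long addr = 0x12345000UL;		/* page-aligned address to release */
	unsigned long start_pfn = addr >> PAGE_SHIFT;	/* 0x12345 */
	unsigned long num_pfn = 1;

	/* what the inline helper computes: the first and last page addresses */
	unsigned long start_addr = start_pfn << PAGE_SHIFT;
	unsigned long end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;

	printf("DIAG 0x10 range: 0x%lx .. 0x%lx (%lu page(s))\n",
	       start_addr, end_addr, num_pfn);
	return 0;
}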
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3c29be4836ed..b7931faaef6d 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -11,15 +11,13 @@ struct dyn_arch_ftrace { };
11 11
12#ifdef CONFIG_64BIT 12#ifdef CONFIG_64BIT
13#define MCOUNT_INSN_SIZE 12 13#define MCOUNT_INSN_SIZE 12
14#define MCOUNT_OFFSET 8
15#else 14#else
16#define MCOUNT_INSN_SIZE 20 15#define MCOUNT_INSN_SIZE 20
17#define MCOUNT_OFFSET 4
18#endif 16#endif
19 17
20static inline unsigned long ftrace_call_adjust(unsigned long addr) 18static inline unsigned long ftrace_call_adjust(unsigned long addr)
21{ 19{
22 return addr - MCOUNT_OFFSET; 20 return addr;
23} 21}
24 22
25#endif /* __ASSEMBLY__ */ 23#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
new file mode 100644
index 000000000000..95a6cf2b5b67
--- /dev/null
+++ b/arch/s390/include/asm/jump_label.h
@@ -0,0 +1,37 @@
1#ifndef _ASM_S390_JUMP_LABEL_H
2#define _ASM_S390_JUMP_LABEL_H
3
4#include <linux/types.h>
5
6#define JUMP_LABEL_NOP_SIZE 6
7
8#ifdef CONFIG_64BIT
9#define ASM_PTR ".quad"
10#define ASM_ALIGN ".balign 8"
11#else
12#define ASM_PTR ".long"
13#define ASM_ALIGN ".balign 4"
14#endif
15
16static __always_inline bool arch_static_branch(struct jump_label_key *key)
17{
18 asm goto("0: brcl 0,0\n"
19 ".pushsection __jump_table, \"aw\"\n"
20 ASM_ALIGN "\n"
21 ASM_PTR " 0b, %l[label], %0\n"
22 ".popsection\n"
23 : : "X" (key) : : label);
24 return false;
25label:
26 return true;
27}
28
29typedef unsigned long jump_label_t;
30
31struct jump_entry {
32 jump_label_t code;
33 jump_label_t target;
34 jump_label_t key;
35};
36
37#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a6f0e7cc9cde..8c277caa8d3a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
23#ifdef CONFIG_64BIT 23#ifdef CONFIG_64BIT
24 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 24 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
25#endif 25#endif
26 if (current->mm->context.alloc_pgste) { 26 if (current->mm && current->mm->context.alloc_pgste) {
27 /* 27 /*
28 * alloc_pgste indicates, that any NEW context will be created 28 * alloc_pgste indicates, that any NEW context will be created
29 * with extended page tables. The old context is unchanged. The 29 * with extended page tables. The old context is unchanged. The
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 64230bc392fa..5ff15dacb571 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
25 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 25 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
26 vdso.o vtime.o sysinfo.o nmi.o sclp.o 26 vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index c032d11da8a1..8237fc07ac79 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -9,27 +9,6 @@
9#include <asm/diag.h> 9#include <asm/diag.h>
10 10
11/* 11/*
12 * Diagnose 10: Release pages
13 */
14void diag10(unsigned long addr)
15{
16 if (addr >= 0x7ff00000)
17 return;
18 asm volatile(
19#ifdef CONFIG_64BIT
20 " sam31\n"
21 " diag %0,%0,0x10\n"
22 "0: sam64\n"
23#else
24 " diag %0,%0,0x10\n"
25 "0:\n"
26#endif
27 EX_TABLE(0b, 0b)
28 : : "a" (addr));
29}
30EXPORT_SYMBOL(diag10);
31
32/*
33 * Diagnose 14: Input spool file manipulation 12 * Diagnose 14: Input spool file manipulation
34 */ 13 */
35int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) 14int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c83726c9fe03..3d4a78fc1adc 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = {
672 { "rp", 0x77, INSTR_S_RD }, 672 { "rp", 0x77, INSTR_S_RD },
673 { "stcke", 0x78, INSTR_S_RD }, 673 { "stcke", 0x78, INSTR_S_RD },
674 { "sacf", 0x79, INSTR_S_RD }, 674 { "sacf", 0x79, INSTR_S_RD },
675 { "spp", 0x80, INSTR_S_RD },
675 { "stsi", 0x7d, INSTR_S_RD }, 676 { "stsi", 0x7d, INSTR_S_RD },
676 { "srnm", 0x99, INSTR_S_RD }, 677 { "srnm", 0x99, INSTR_S_RD },
677 { "stfpc", 0x9c, INSTR_S_RD }, 678 { "stfpc", 0x9c, INSTR_S_RD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 648f64239a9d..1b67fc6ebdc2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -836,7 +836,7 @@ restart_base:
836 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 836 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
837 basr %r14,0 837 basr %r14,0
838 l %r14,restart_addr-.(%r14) 838 l %r14,restart_addr-.(%r14)
839 br %r14 # branch to start_secondary 839 basr %r14,%r14 # branch to start_secondary
840restart_addr: 840restart_addr:
841 .long start_secondary 841 .long start_secondary
842 .align 8 842 .align 8
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9d3603d6c511..9fd864563499 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -841,7 +841,7 @@ restart_base:
841 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 841 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
842 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 842 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
843 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 843 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
844 jg start_secondary 844 brasl %r14,start_secondary
845 .align 8 845 .align 8
846restart_vtime: 846restart_vtime:
847 .long 0x7fffffff,0xffffffff 847 .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
new file mode 100644
index 000000000000..44cc06bedf77
--- /dev/null
+++ b/arch/s390/kernel/jump_label.c
@@ -0,0 +1,59 @@
1/*
2 * Jump label s390 support
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7#include <linux/module.h>
8#include <linux/uaccess.h>
9#include <linux/stop_machine.h>
10#include <linux/jump_label.h>
11#include <asm/ipl.h>
12
13#ifdef HAVE_JUMP_LABEL
14
15struct insn {
16 u16 opcode;
17 s32 offset;
18} __packed;
19
20struct insn_args {
21 unsigned long *target;
22 struct insn *insn;
23 ssize_t size;
24};
25
26static int __arch_jump_label_transform(void *data)
27{
28 struct insn_args *args = data;
29 int rc;
30
31 rc = probe_kernel_write(args->target, args->insn, args->size);
32 WARN_ON_ONCE(rc < 0);
33 return 0;
34}
35
36void arch_jump_label_transform(struct jump_entry *entry,
37 enum jump_label_type type)
38{
39 struct insn_args args;
40 struct insn insn;
41
42 if (type == JUMP_LABEL_ENABLE) {
43 /* brcl 15,offset */
44 insn.opcode = 0xc0f4;
45 insn.offset = (entry->target - entry->code) >> 1;
46 } else {
47 /* brcl 0,0 */
48 insn.opcode = 0xc004;
49 insn.offset = 0;
50 }
51
52 args.target = (void *) entry->code;
53 args.insn = &insn;
54 args.size = JUMP_LABEL_NOP_SIZE;
55
56 stop_machine(__arch_jump_label_transform, &args, NULL);
57}
58
59#endif
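To put the new s390 jump label support in context: a typical user declares a jump_label_key, tests it with static_branch() on a hot path (which compiles to the 6-byte brcl 0,0 nop above), and flips it with jump_label_inc()/jump_label_dec(), at which point arch_jump_label_transform() patches the nop into brcl 15,<offset>. A hedged usage sketch, assuming that generic interface of this kernel generation; the demo_* names are made up.

#include <linux/jump_label.h>
#include <linux/kernel.h>

/* hypothetical tracing switch guarded by a jump label */
static struct jump_label_key demo_trace_key;

static inline void demo_trace_hit(const char *what)
{
	/* costs only a nop on s390 until the key is enabled */
	if (static_branch(&demo_trace_key))
		pr_info("demo trace: %s\n", what);
}

void demo_trace_enable(void)
{
	jump_label_inc(&demo_trace_key);	/* patches the nop to a taken branch */
}

void demo_trace_disable(void)
{
	jump_label_dec(&demo_trace_key);
}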
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index c66ffd8dbbb7..1f1dba9dcf58 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
91 } else 91 } else
92 free_page((unsigned long) npa); 92 free_page((unsigned long) npa);
93 } 93 }
94 diag10(addr); 94 diag10_range(addr >> PAGE_SHIFT, 1);
95 pa->pages[pa->index++] = addr; 95 pa->pages[pa->index++] = addr;
96 (*counter)++; 96 (*counter)++;
97 spin_unlock(&cmm_lock); 97 spin_unlock(&cmm_lock);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 0607e4b14b27..f05edcc3beff 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -54,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages)
54 return 0; 54 return 0;
55} 55}
56EXPORT_SYMBOL_GPL(set_memory_nx); 56EXPORT_SYMBOL_GPL(set_memory_nx);
57
58int set_memory_x(unsigned long addr, int numpages)
59{
60 return 0;
61}
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 4952872d6f0a..33cbd373cce4 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1021,20 +1021,14 @@ deallocate_exit:
1021 return rc; 1021 return rc;
1022} 1022}
1023 1023
1024long hwsampler_query_min_interval(void) 1024unsigned long hwsampler_query_min_interval(void)
1025{ 1025{
1026 if (min_sampler_rate) 1026 return min_sampler_rate;
1027 return min_sampler_rate;
1028 else
1029 return -EINVAL;
1030} 1027}
1031 1028
1032long hwsampler_query_max_interval(void) 1029unsigned long hwsampler_query_max_interval(void)
1033{ 1030{
1034 if (max_sampler_rate) 1031 return max_sampler_rate;
1035 return max_sampler_rate;
1036 else
1037 return -EINVAL;
1038} 1032}
1039 1033
1040unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) 1034unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 8c72b59316b5..1912f3bb190c 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -102,8 +102,8 @@ int hwsampler_setup(void);
102int hwsampler_shutdown(void); 102int hwsampler_shutdown(void);
103int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); 103int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
104int hwsampler_deallocate(void); 104int hwsampler_deallocate(void);
105long hwsampler_query_min_interval(void); 105unsigned long hwsampler_query_min_interval(void);
106long hwsampler_query_max_interval(void); 106unsigned long hwsampler_query_max_interval(void);
107int hwsampler_start_all(unsigned long interval); 107int hwsampler_start_all(unsigned long interval);
108int hwsampler_stop_all(void); 108int hwsampler_stop_all(void);
109int hwsampler_deactivate(unsigned int cpu); 109int hwsampler_deactivate(unsigned int cpu);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index c63d7e58352b..5995e9bc72d9 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
145 * create hwsampler files only if hwsampler_setup() succeeds. 145 * create hwsampler files only if hwsampler_setup() succeeds.
146 */ 146 */
147 oprofile_min_interval = hwsampler_query_min_interval(); 147 oprofile_min_interval = hwsampler_query_min_interval();
148 if (oprofile_min_interval < 0) { 148 if (oprofile_min_interval == 0)
149 oprofile_min_interval = 0;
150 return -ENODEV; 149 return -ENODEV;
151 }
152 oprofile_max_interval = hwsampler_query_max_interval(); 150 oprofile_max_interval = hwsampler_query_max_interval();
153 if (oprofile_max_interval < 0) { 151 if (oprofile_max_interval == 0)
154 oprofile_max_interval = 0;
155 return -ENODEV; 152 return -ENODEV;
156 }
157 153
158 if (oprofile_timer_init(ops)) 154 if (oprofile_timer_init(ops))
159 return -ENODEV; 155 return -ENODEV;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4b89da248d17..bc439de48cd1 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -24,7 +24,6 @@ config SUPERH
24 select RTC_LIB 24 select RTC_LIB
25 select GENERIC_ATOMIC64 25 select GENERIC_ATOMIC64
26 select GENERIC_IRQ_SHOW 26 select GENERIC_IRQ_SHOW
27 select ARCH_NO_SYSDEV_OPS
28 help 27 help
29 The SuperH is a RISC processor targeted for use in embedded systems 28 The SuperH is a RISC processor targeted for use in embedded systems
30 and consumer electronics; it was also used in the Sega Dreamcast 29 and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index e71a531f1e31..77ec0e7b8ddf 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -48,7 +48,6 @@ CONFIG_PREEMPT=y
48CONFIG_BINFMT_MISC=y 48CONFIG_BINFMT_MISC=y
49CONFIG_PM=y 49CONFIG_PM=y
50CONFIG_PM_DEBUG=y 50CONFIG_PM_DEBUG=y
51CONFIG_PM_VERBOSE=y
52CONFIG_PM_RUNTIME=y 51CONFIG_PM_RUNTIME=y
53CONFIG_CPU_IDLE=y 52CONFIG_CPU_IDLE=y
54CONFIG_NET=y 53CONFIG_NET=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index dc4a2eb6a616..c41650572d79 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -83,7 +83,6 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
83CONFIG_BINFMT_MISC=y 83CONFIG_BINFMT_MISC=y
84CONFIG_PM=y 84CONFIG_PM=y
85CONFIG_PM_DEBUG=y 85CONFIG_PM_DEBUG=y
86CONFIG_PM_VERBOSE=y
87CONFIG_PM_RUNTIME=y 86CONFIG_PM_RUNTIME=y
88CONFIG_CPU_IDLE=y 87CONFIG_CPU_IDLE=y
89CONFIG_NET=y 88CONFIG_NET=y
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 6dcb8166a64d..22db127afa7b 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void)
139 queue_work(pm_wq, &hwblk_work); 139 queue_work(pm_wq, &hwblk_work);
140} 140}
141 141
142int platform_pm_runtime_suspend(struct device *dev) 142static int default_platform_runtime_suspend(struct device *dev)
143{ 143{
144 struct platform_device *pdev = to_platform_device(dev); 144 struct platform_device *pdev = to_platform_device(dev);
145 struct pdev_archdata *ad = &pdev->archdata; 145 struct pdev_archdata *ad = &pdev->archdata;
@@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev)
147 int hwblk = ad->hwblk_id; 147 int hwblk = ad->hwblk_id;
148 int ret = 0; 148 int ret = 0;
149 149
150 dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); 150 dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
151 151
152 /* ignore off-chip platform devices */ 152 /* ignore off-chip platform devices */
153 if (!hwblk) 153 if (!hwblk)
@@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev)
183 mutex_unlock(&ad->mutex); 183 mutex_unlock(&ad->mutex);
184 184
185out: 185out:
186 dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", 186 dev_dbg(dev, "%s() [%d] returns %d\n",
187 hwblk, ret); 187 __func__, hwblk, ret);
188 188
189 return ret; 189 return ret;
190} 190}
191 191
192int platform_pm_runtime_resume(struct device *dev) 192static int default_platform_runtime_resume(struct device *dev)
193{ 193{
194 struct platform_device *pdev = to_platform_device(dev); 194 struct platform_device *pdev = to_platform_device(dev);
195 struct pdev_archdata *ad = &pdev->archdata; 195 struct pdev_archdata *ad = &pdev->archdata;
196 int hwblk = ad->hwblk_id; 196 int hwblk = ad->hwblk_id;
197 int ret = 0; 197 int ret = 0;
198 198
199 dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); 199 dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
200 200
201 /* ignore off-chip platform devices */ 201 /* ignore off-chip platform devices */
202 if (!hwblk) 202 if (!hwblk)
@@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev)
228 */ 228 */
229 mutex_unlock(&ad->mutex); 229 mutex_unlock(&ad->mutex);
230out: 230out:
231 dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", 231 dev_dbg(dev, "%s() [%d] returns %d\n",
232 hwblk, ret); 232 __func__, hwblk, ret);
233 233
234 return ret; 234 return ret;
235} 235}
236 236
237int platform_pm_runtime_idle(struct device *dev) 237static int default_platform_runtime_idle(struct device *dev)
238{ 238{
239 struct platform_device *pdev = to_platform_device(dev); 239 struct platform_device *pdev = to_platform_device(dev);
240 int hwblk = pdev->archdata.hwblk_id; 240 int hwblk = pdev->archdata.hwblk_id;
241 int ret = 0; 241 int ret = 0;
242 242
243 dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); 243 dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
244 244
245 /* ignore off-chip platform devices */ 245 /* ignore off-chip platform devices */
246 if (!hwblk) 246 if (!hwblk)
@@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev)
252 /* suspend synchronously to disable clocks immediately */ 252 /* suspend synchronously to disable clocks immediately */
253 ret = pm_runtime_suspend(dev); 253 ret = pm_runtime_suspend(dev);
254out: 254out:
255 dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); 255 dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk);
256 return ret; 256 return ret;
257} 257}
258 258
259static struct dev_power_domain default_power_domain = {
260 .ops = {
261 .runtime_suspend = default_platform_runtime_suspend,
262 .runtime_resume = default_platform_runtime_resume,
263 .runtime_idle = default_platform_runtime_idle,
264 USE_PLATFORM_PM_SLEEP_OPS
265 },
266};
267
259static int platform_bus_notify(struct notifier_block *nb, 268static int platform_bus_notify(struct notifier_block *nb,
260 unsigned long action, void *data) 269 unsigned long action, void *data)
261{ 270{
@@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
276 hwblk_disable(hwblk_info, hwblk); 285 hwblk_disable(hwblk_info, hwblk);
277 /* make sure driver re-inits itself once */ 286 /* make sure driver re-inits itself once */
278 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 287 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
288 dev->pwr_domain = &default_power_domain;
279 break; 289 break;
280 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ 290 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
281 case BUS_NOTIFY_BOUND_DRIVER: 291 case BUS_NOTIFY_BOUND_DRIVER:
@@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
289 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 299 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
290 break; 300 break;
291 case BUS_NOTIFY_DEL_DEVICE: 301 case BUS_NOTIFY_DEL_DEVICE:
302 dev->pwr_domain = NULL;
292 break; 303 break;
293 } 304 }
294 return 0; 305 return 0;
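The SH change above converts the bus-wide platform_pm_runtime_* hooks into a default dev_power_domain whose dev_pm_ops are attached to each platform device from a bus notifier. A minimal sketch of that wiring, with illustrative demo_* names and empty callbacks standing in for real clock gating:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;			/* gate clocks / cut power here */
}

static int demo_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;			/* ungate clocks / restore power here */
}

static struct dev_power_domain demo_power_domain = {
	.ops = {
		.runtime_suspend = demo_runtime_suspend,
		.runtime_resume	 = demo_runtime_resume,
		USE_PLATFORM_PM_SLEEP_OPS
	},
};

static int demo_bus_notify(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev->pwr_domain = &demo_power_domain;
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		dev->pwr_domain = NULL;
		break;
	}
	return 0;
}

static struct notifier_block demo_bus_notifier = {
	.notifier_call = demo_bus_notify,
};

static int __init demo_pm_runtime_init(void)
{
	bus_register_notifier(&platform_bus_type, &demo_bus_notifier);
	return 0;
}
core_initcall(demo_pm_runtime_init);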
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 427d4684e0d2..fc73a82366f8 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,17 +7,20 @@
7 7
8#define JUMP_LABEL_NOP_SIZE 4 8#define JUMP_LABEL_NOP_SIZE 4
9 9
10#define JUMP_LABEL(key, label) \ 10static __always_inline bool arch_static_branch(struct jump_label_key *key)
11 do { \ 11{
12 asm goto("1:\n\t" \ 12 asm goto("1:\n\t"
13 "nop\n\t" \ 13 "nop\n\t"
14 "nop\n\t" \ 14 "nop\n\t"
15 ".pushsection __jump_table, \"a\"\n\t"\ 15 ".pushsection __jump_table, \"aw\"\n\t"
16 ".align 4\n\t" \ 16 ".align 4\n\t"
17 ".word 1b, %l[" #label "], %c0\n\t" \ 17 ".word 1b, %l[l_yes], %c0\n\t"
18 ".popsection \n\t" \ 18 ".popsection \n\t"
19 : : "i" (key) : : label);\ 19 : : "i" (key) : : l_yes);
20 } while (0) 20 return false;
21l_yes:
22 return true;
23}
21 24
22#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
23 26
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index f679c57644d5..1e34f29e58bb 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
165 return 0; 165 return 0;
166} 166}
167 167
168static struct of_device_id __initdata apc_match[] = { 168static struct of_device_id apc_match[] = {
169 { 169 {
170 .name = APC_OBPNAME, 170 .name = APC_OBPNAME,
171 }, 171 },
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 948068a083fc..d1840dbdaa2f 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
452 sabre_scan_bus(pbm, &op->dev); 452 sabre_scan_bus(pbm, &op->dev);
453} 453}
454 454
455static const struct of_device_id sabre_match[];
455static int __devinit sabre_probe(struct platform_device *op) 456static int __devinit sabre_probe(struct platform_device *op)
456{ 457{
458 const struct of_device_id *match;
457 const struct linux_prom64_registers *pr_regs; 459 const struct linux_prom64_registers *pr_regs;
458 struct device_node *dp = op->dev.of_node; 460 struct device_node *dp = op->dev.of_node;
459 struct pci_pbm_info *pbm; 461 struct pci_pbm_info *pbm;
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op)
463 const u32 *vdma; 465 const u32 *vdma;
464 u64 clear_irq; 466 u64 clear_irq;
465 467
466 hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); 468 match = of_match_device(sabre_match, &op->dev);
469 hummingbird_p = match && (match->data != NULL);
467 if (!hummingbird_p) { 470 if (!hummingbird_p) {
468 struct device_node *cpu_dp; 471 struct device_node *cpu_dp;
469 472
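
The sabre conversion above (and the schizo one two hunks below) replaces the removed op->dev.of_match pointer with an explicit of_match_device() lookup; forward-declaring the match table lets the probe stay above the table definition. A condensed sketch of the resulting pattern, with hypothetical names and data (foo_init() is assumed, not from this patch):

static const struct of_device_id foo_match[];	/* defined below the probe       */

static int __devinit foo_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	match = of_match_device(foo_match, &op->dev);
	if (!match)
		return -EINVAL;			/* device node matched no entry  */

	return foo_init(op, (unsigned long)match->data);	/* per-model data */
}

static const struct of_device_id foo_match[] = {
	{ .name = "foo", .data = (void *)1UL },
	{},
};
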
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index fecfcb2063c8..283fbc329a43 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1458,11 +1458,15 @@ out_err:
1458 return err; 1458 return err;
1459} 1459}
1460 1460
1461static const struct of_device_id schizo_match[];
1461static int __devinit schizo_probe(struct platform_device *op) 1462static int __devinit schizo_probe(struct platform_device *op)
1462{ 1463{
1463 if (!op->dev.of_match) 1464 const struct of_device_id *match;
1465
1466 match = of_match_device(schizo_match, &op->dev);
1467 if (!match)
1464 return -EINVAL; 1468 return -EINVAL;
1465 return __schizo_init(op, (unsigned long) op->dev.of_match->data); 1469 return __schizo_init(op, (unsigned long)match->data);
1466} 1470}
1467 1471
1468/* The ordering of this table is very important. Some Tomatillo 1472/* The ordering of this table is very important. Some Tomatillo
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 93d7b4465f8d..6a585d393580 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
69 return 0; 69 return 0;
70} 70}
71 71
72static struct of_device_id __initdata pmc_match[] = { 72static struct of_device_id pmc_match[] = {
73 { 73 {
74 .name = PMC_OBPNAME, 74 .name = PMC_OBPNAME,
75 }, 75 },
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index f95690c167b6..442286d83435 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
53void __cpuinit smp_store_cpu_info(int id) 53void __cpuinit smp_store_cpu_info(int id)
54{ 54{
55 int cpu_node; 55 int cpu_node;
56 int mid;
56 57
57 cpu_data(id).udelay_val = loops_per_jiffy; 58 cpu_data(id).udelay_val = loops_per_jiffy;
58 59
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
60 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 61 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
61 "clock-frequency", 0); 62 "clock-frequency", 0);
62 cpu_data(id).prom_node = cpu_node; 63 cpu_data(id).prom_node = cpu_node;
63 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 64 mid = cpu_get_hwmid(cpu_node);
64 65
65 if (cpu_data(id).mid < 0) 66 if (mid < 0) {
66 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 67 printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
68 mid = 0;
69 }
70 cpu_data(id).mid = mid;
67} 71}
68 72
69void __init smp_cpus_done(unsigned int max_cpus) 73void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 4e236391b635..96046a4024c2 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
168 return 0; 168 return 0;
169} 169}
170 170
171static struct of_device_id __initdata clock_match[] = { 171static struct of_device_id clock_match[] = {
172 { 172 {
173 .name = "eeprom", 173 .name = "eeprom",
174 }, 174 },
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 3632cb34e914..0084c3361e15 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -289,10 +289,16 @@ cc_end_cruft:
289 289
290 /* Also, handle the alignment code out of band. */ 290 /* Also, handle the alignment code out of band. */
291cc_dword_align: 291cc_dword_align:
292 cmp %g1, 6 292 cmp %g1, 16
293 bl,a ccte 293 bge 1f
294 srl %g1, 1, %o3
2952: cmp %o3, 0
296 be,a ccte
294 andcc %g1, 0xf, %o3 297 andcc %g1, 0xf, %o3
295 andcc %o0, 0x1, %g0 298 andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
299 be,a 2b
300 srl %o3, 1, %o3
3011: andcc %o0, 0x1, %g0
296 bne ccslow 302 bne ccslow
297 andcc %o0, 0x2, %g0 303 andcc %o0, 0x2, %g0
298 be 1f 304 be 1f
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 6ea77979531c..42827cafa6af 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -5,6 +5,7 @@
5 5
6#include <stdio.h> 6#include <stdio.h>
7#include <stdlib.h> 7#include <stdlib.h>
8#include <unistd.h>
8#include <errno.h> 9#include <errno.h>
9#include <signal.h> 10#include <signal.h>
10#include <string.h> 11#include <string.h>
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len)
75 host.release, host.version, host.machine); 76 host.release, host.version, host.machine);
76} 77}
77 78
79/*
80 * We cannot use glibc's abort(). It makes use of tgkill() which
81 * has no effect within UML's kernel threads.
82 * After that glibc would execute an invalid instruction to kill
83 * the calling process and UML crashes with SIGSEGV.
84 */
85static inline void __attribute__ ((noreturn)) uml_abort(void)
86{
87 sigset_t sig;
88
89 fflush(NULL);
90
91 if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
92 sigprocmask(SIG_UNBLOCK, &sig, 0);
93
94 for (;;)
95 if (kill(getpid(), SIGABRT) < 0)
96 exit(127);
97}
98
78void os_dump_core(void) 99void os_dump_core(void)
79{ 100{
80 int pid; 101 int pid;
@@ -116,5 +137,5 @@ void os_dump_core(void)
116 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) 137 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
117 os_kill_ptraced_process(pid, 0); 138 os_kill_ptraced_process(pid, 0);
118 139
119 abort(); 140 uml_abort();
120} 141}
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
index 2aa30a364bbe..d4efa7d679ff 100644
--- a/arch/unicore32/kernel/irq.c
+++ b/arch/unicore32/kernel/irq.c
@@ -23,7 +23,7 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/sysdev.h> 26#include <linux/syscore_ops.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28 28
29#include <asm/system.h> 29#include <asm/system.h>
@@ -237,7 +237,7 @@ static struct puv3_irq_state {
237 unsigned int iccr; 237 unsigned int iccr;
238} puv3_irq_state; 238} puv3_irq_state;
239 239
240static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) 240static int puv3_irq_suspend(void)
241{ 241{
242 struct puv3_irq_state *st = &puv3_irq_state; 242 struct puv3_irq_state *st = &puv3_irq_state;
243 243
@@ -265,7 +265,7 @@ static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state)
265 return 0; 265 return 0;
266} 266}
267 267
268static int puv3_irq_resume(struct sys_device *dev) 268static void puv3_irq_resume(void)
269{ 269{
270 struct puv3_irq_state *st = &puv3_irq_state; 270 struct puv3_irq_state *st = &puv3_irq_state;
271 271
@@ -278,27 +278,20 @@ static int puv3_irq_resume(struct sys_device *dev)
278 278
279 writel(st->icmr, INTC_ICMR); 279 writel(st->icmr, INTC_ICMR);
280 } 280 }
281 return 0;
282} 281}
283 282
284static struct sysdev_class puv3_irq_sysclass = { 283static struct syscore_ops puv3_irq_syscore_ops = {
285 .name = "pkunity-irq",
286 .suspend = puv3_irq_suspend, 284 .suspend = puv3_irq_suspend,
287 .resume = puv3_irq_resume, 285 .resume = puv3_irq_resume,
288}; 286};
289 287
290static struct sys_device puv3_irq_device = { 288static int __init puv3_irq_init_syscore(void)
291 .id = 0,
292 .cls = &puv3_irq_sysclass,
293};
294
295static int __init puv3_irq_init_devicefs(void)
296{ 289{
297 sysdev_class_register(&puv3_irq_sysclass); 290 register_syscore_ops(&puv3_irq_syscore_ops);
298 return sysdev_register(&puv3_irq_device); 291 return 0;
299} 292}
300 293
301device_initcall(puv3_irq_init_devicefs); 294device_initcall(puv3_irq_init_syscore);
302 295
303void __init init_IRQ(void) 296void __init init_IRQ(void)
304{ 297{
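
The unicore32 change above is the generic sysdev-to-syscore_ops recipe used throughout this series: the suspend/resume callbacks lose their struct sys_device and pm_message_t arguments, resume returns void, and a single register_syscore_ops() call replaces the sysdev class plus device registration. A minimal sketch of the same recipe for a hypothetical controller (the register accessors are assumptions):

#include <linux/syscore_ops.h>
#include <linux/init.h>

extern u32 foo_read_ctrl(void);			/* hypothetical accessors        */
extern void foo_write_ctrl(u32 val);

static u32 foo_saved_ctrl;			/* shadow of a control register  */

static int foo_suspend(void)			/* called late, with IRQs off    */
{
	foo_saved_ctrl = foo_read_ctrl();
	return 0;
}

static void foo_resume(void)			/* no device argument, no rc     */
{
	foo_write_ctrl(foo_saved_ctrl);
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int __init foo_syscore_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
device_initcall(foo_syscore_init);
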
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cc6c53a95bfd..650bb8c47eca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -71,7 +71,6 @@ config X86
71 select GENERIC_IRQ_SHOW 71 select GENERIC_IRQ_SHOW
72 select IRQ_FORCED_THREADING 72 select IRQ_FORCED_THREADING
73 select USE_GENERIC_SMP_HELPERS if SMP 73 select USE_GENERIC_SMP_HELPERS if SMP
74 select ARCH_NO_SYSDEV_OPS
75 74
76config INSTRUCTION_DECODER 75config INSTRUCTION_DECODER
77 def_bool (KPROBES || PERF_EVENTS) 76 def_bool (KPROBES || PERF_EVENTS)
@@ -690,6 +689,7 @@ config AMD_IOMMU
690 bool "AMD IOMMU support" 689 bool "AMD IOMMU support"
691 select SWIOTLB 690 select SWIOTLB
692 select PCI_MSI 691 select PCI_MSI
692 select PCI_IOV
693 depends on X86_64 && PCI && ACPI 693 depends on X86_64 && PCI && ACPI
694 ---help--- 694 ---help---
695 With this option you can enable support for AMD IOMMU hardware in 695 With this option you can enable support for AMD IOMMU hardware in
@@ -1848,7 +1848,7 @@ config APM_ALLOW_INTS
1848 1848
1849endif # APM 1849endif # APM
1850 1850
1851source "arch/x86/kernel/cpu/cpufreq/Kconfig" 1851source "drivers/cpufreq/Kconfig"
1852 1852
1853source "drivers/cpuidle/Kconfig" 1853source "drivers/cpuidle/Kconfig"
1854 1854
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index a63a68be1cce..94d420b360d1 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -15,4 +15,13 @@
15 .endm 15 .endm
16#endif 16#endif
17 17
18.macro altinstruction_entry orig alt feature orig_len alt_len
19 .align 8
20 .quad \orig
21 .quad \alt
22 .word \feature
23 .byte \orig_len
24 .byte \alt_len
25.endm
26
18#endif /* __ASSEMBLY__ */ 27#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 13009d1af99a..8cdd1e247975 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -4,7 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/stringify.h> 6#include <linux/stringify.h>
7#include <linux/jump_label.h>
8#include <asm/asm.h> 7#include <asm/asm.h>
9 8
10/* 9/*
@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
191extern void *text_poke_smp(void *addr, const void *opcode, size_t len); 190extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
192extern void text_poke_smp_batch(struct text_poke_param *params, int n); 191extern void text_poke_smp_batch(struct text_poke_param *params, int n);
193 192
194#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 193#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
195#define IDEAL_NOP_SIZE_5 5 194#define IDEAL_NOP_SIZE_5 5
196extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; 195extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
197extern void arch_init_ideal_nop5(void); 196extern void arch_init_ideal_nop5(void);
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 916bc8111a01..55d95eb789b3 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -19,13 +19,12 @@
19#ifndef _ASM_X86_AMD_IOMMU_PROTO_H 19#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
20#define _ASM_X86_AMD_IOMMU_PROTO_H 20#define _ASM_X86_AMD_IOMMU_PROTO_H
21 21
22struct amd_iommu; 22#include <asm/amd_iommu_types.h>
23 23
24extern int amd_iommu_init_dma_ops(void); 24extern int amd_iommu_init_dma_ops(void);
25extern int amd_iommu_init_passthrough(void); 25extern int amd_iommu_init_passthrough(void);
26extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
26extern irqreturn_t amd_iommu_int_handler(int irq, void *data); 27extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
27extern void amd_iommu_flush_all_domains(void);
28extern void amd_iommu_flush_all_devices(void);
29extern void amd_iommu_apply_erratum_63(u16 devid); 28extern void amd_iommu_apply_erratum_63(u16 devid);
30extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); 29extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
31extern int amd_iommu_init_devices(void); 30extern int amd_iommu_init_devices(void);
@@ -44,4 +43,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
44 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); 43 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
45} 44}
46 45
46static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
47{
48 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
49 return false;
50
51 return !!(iommu->features & f);
52}
53
47#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ 54#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index e3509fc303bf..4c9982995414 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -68,12 +68,25 @@
68#define MMIO_CONTROL_OFFSET 0x0018 68#define MMIO_CONTROL_OFFSET 0x0018
69#define MMIO_EXCL_BASE_OFFSET 0x0020 69#define MMIO_EXCL_BASE_OFFSET 0x0020
70#define MMIO_EXCL_LIMIT_OFFSET 0x0028 70#define MMIO_EXCL_LIMIT_OFFSET 0x0028
71#define MMIO_EXT_FEATURES 0x0030
71#define MMIO_CMD_HEAD_OFFSET 0x2000 72#define MMIO_CMD_HEAD_OFFSET 0x2000
72#define MMIO_CMD_TAIL_OFFSET 0x2008 73#define MMIO_CMD_TAIL_OFFSET 0x2008
73#define MMIO_EVT_HEAD_OFFSET 0x2010 74#define MMIO_EVT_HEAD_OFFSET 0x2010
74#define MMIO_EVT_TAIL_OFFSET 0x2018 75#define MMIO_EVT_TAIL_OFFSET 0x2018
75#define MMIO_STATUS_OFFSET 0x2020 76#define MMIO_STATUS_OFFSET 0x2020
76 77
78
79/* Extended Feature Bits */
80#define FEATURE_PREFETCH (1ULL<<0)
81#define FEATURE_PPR (1ULL<<1)
82#define FEATURE_X2APIC (1ULL<<2)
83#define FEATURE_NX (1ULL<<3)
84#define FEATURE_GT (1ULL<<4)
85#define FEATURE_IA (1ULL<<6)
86#define FEATURE_GA (1ULL<<7)
87#define FEATURE_HE (1ULL<<8)
88#define FEATURE_PC (1ULL<<9)
89
77/* MMIO status bits */ 90/* MMIO status bits */
78#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 91#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
79 92
@@ -113,7 +126,9 @@
113/* command specific defines */ 126/* command specific defines */
114#define CMD_COMPL_WAIT 0x01 127#define CMD_COMPL_WAIT 0x01
115#define CMD_INV_DEV_ENTRY 0x02 128#define CMD_INV_DEV_ENTRY 0x02
116#define CMD_INV_IOMMU_PAGES 0x03 129#define CMD_INV_IOMMU_PAGES 0x03
130#define CMD_INV_IOTLB_PAGES 0x04
131#define CMD_INV_ALL 0x08
117 132
118#define CMD_COMPL_WAIT_STORE_MASK 0x01 133#define CMD_COMPL_WAIT_STORE_MASK 0x01
119#define CMD_COMPL_WAIT_INT_MASK 0x02 134#define CMD_COMPL_WAIT_INT_MASK 0x02
@@ -215,6 +230,8 @@
215#define IOMMU_PTE_IR (1ULL << 61) 230#define IOMMU_PTE_IR (1ULL << 61)
216#define IOMMU_PTE_IW (1ULL << 62) 231#define IOMMU_PTE_IW (1ULL << 62)
217 232
233#define DTE_FLAG_IOTLB 0x01
234
218#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) 235#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
219#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) 236#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
220#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) 237#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -227,6 +244,7 @@
227/* IOMMU capabilities */ 244/* IOMMU capabilities */
228#define IOMMU_CAP_IOTLB 24 245#define IOMMU_CAP_IOTLB 24
229#define IOMMU_CAP_NPCACHE 26 246#define IOMMU_CAP_NPCACHE 26
247#define IOMMU_CAP_EFR 27
230 248
231#define MAX_DOMAIN_ID 65536 249#define MAX_DOMAIN_ID 65536
232 250
@@ -249,6 +267,8 @@ extern bool amd_iommu_dump;
249 267
250/* global flag if IOMMUs cache non-present entries */ 268/* global flag if IOMMUs cache non-present entries */
251extern bool amd_iommu_np_cache; 269extern bool amd_iommu_np_cache;
270/* Only true if all IOMMUs support device IOTLBs */
271extern bool amd_iommu_iotlb_sup;
252 272
253/* 273/*
254 * Make iterating over all IOMMUs easier 274 * Make iterating over all IOMMUs easier
@@ -371,6 +391,9 @@ struct amd_iommu {
371 /* flags read from acpi table */ 391 /* flags read from acpi table */
372 u8 acpi_flags; 392 u8 acpi_flags;
373 393
394 /* Extended features */
395 u64 features;
396
374 /* 397 /*
375 * Capability pointer. There could be more than one IOMMU per PCI 398 * Capability pointer. There could be more than one IOMMU per PCI
376 * device function if there are more than one AMD IOMMU capability 399 * device function if there are more than one AMD IOMMU capability
@@ -409,9 +432,6 @@ struct amd_iommu {
409 /* if one, we need to send a completion wait command */ 432 /* if one, we need to send a completion wait command */
410 bool need_sync; 433 bool need_sync;
411 434
412 /* becomes true if a command buffer reset is running */
413 bool reset_in_progress;
414
415 /* default dma_ops domain for that IOMMU */ 435 /* default dma_ops domain for that IOMMU */
416 struct dma_ops_domain *default_dom; 436 struct dma_ops_domain *default_dom;
417 437
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index d87988bacf3e..34595d5e1038 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -78,6 +78,7 @@
78#define APIC_DEST_LOGICAL 0x00800 78#define APIC_DEST_LOGICAL 0x00800
79#define APIC_DEST_PHYSICAL 0x00000 79#define APIC_DEST_PHYSICAL 0x00000
80#define APIC_DM_FIXED 0x00000 80#define APIC_DM_FIXED 0x00000
81#define APIC_DM_FIXED_MASK 0x00700
81#define APIC_DM_LOWEST 0x00100 82#define APIC_DM_LOWEST 0x00100
82#define APIC_DM_SMI 0x00200 83#define APIC_DM_SMI 0x00200
83#define APIC_DM_REMRD 0x00300 84#define APIC_DM_REMRD 0x00300
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 91f3e087cf21..7f2f7b123293 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -195,6 +195,7 @@
195 195
196/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 196/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
197#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 197#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
198#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
198 199
199#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 200#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
200 201
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index db24c2278be0..268c783ab1c0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -38,11 +38,10 @@ extern void mcount(void);
38static inline unsigned long ftrace_call_adjust(unsigned long addr) 38static inline unsigned long ftrace_call_adjust(unsigned long addr)
39{ 39{
40 /* 40 /*
41 * call mcount is "e8 <4 byte offset>" 41 * addr is the address of the mcount call instruction.
42 * The addr points to the 4 byte offset and the caller of this 42 * recordmcount does the necessary offset calculation.
43 * function wants the pointer to e8. Simply subtract one.
44 */ 43 */
45 return addr - 1; 44 return addr;
46} 45}
47 46
48#ifdef CONFIG_DYNAMIC_FTRACE 47#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 574dbc22893a..a32b18ce6ead 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -5,20 +5,25 @@
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <asm/nops.h> 7#include <asm/nops.h>
8#include <asm/asm.h>
8 9
9#define JUMP_LABEL_NOP_SIZE 5 10#define JUMP_LABEL_NOP_SIZE 5
10 11
11# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" 12#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
12 13
13# define JUMP_LABEL(key, label) \ 14static __always_inline bool arch_static_branch(struct jump_label_key *key)
14 do { \ 15{
15 asm goto("1:" \ 16 asm goto("1:"
16 JUMP_LABEL_INITIAL_NOP \ 17 JUMP_LABEL_INITIAL_NOP
17 ".pushsection __jump_table, \"aw\" \n\t"\ 18 ".pushsection __jump_table, \"aw\" \n\t"
18 _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ 19 _ASM_ALIGN "\n\t"
19 ".popsection \n\t" \ 20 _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
20 : : "i" (key) : : label); \ 21 ".popsection \n\t"
21 } while (0) 22 : : "i" (key) : : l_yes);
23 return false;
24l_yes:
25 return true;
26}
22 27
23#endif /* __KERNEL__ */ 28#endif /* __KERNEL__ */
24 29
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7db7723d1f32..d56187c6b838 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
299/* Install a pte for a particular vaddr in kernel space. */ 299/* Install a pte for a particular vaddr in kernel space. */
300void set_pte_vaddr(unsigned long vaddr, pte_t pte); 300void set_pte_vaddr(unsigned long vaddr, pte_t pte);
301 301
302extern void native_pagetable_reserve(u64 start, u64 end);
302#ifdef CONFIG_X86_32 303#ifdef CONFIG_X86_32
303extern void native_pagetable_setup_start(pgd_t *base); 304extern void native_pagetable_setup_start(pgd_t *base);
304extern void native_pagetable_setup_done(pgd_t *base); 305extern void native_pagetable_setup_done(pgd_t *base);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index db8aa19a08a2..647d8a06ce4f 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align);
88 * executable.) 88 * executable.)
89 */ 89 */
90#define RESERVE_BRK(name,sz) \ 90#define RESERVE_BRK(name,sz) \
91 static void __section(.discard.text) __used \ 91 static void __section(.discard.text) __used notrace \
92 __brk_reservation_fn_##name##__(void) { \ 92 __brk_reservation_fn_##name##__(void) { \
93 asm volatile ( \ 93 asm volatile ( \
94 ".pushsection .brk_reservation,\"aw\",@nobits;" \ 94 ".pushsection .brk_reservation,\"aw\",@nobits;" \
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index d7e89c83645d..70bbe39043a9 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -37,9 +37,6 @@ print_context_stack_bp(struct thread_info *tinfo,
37/* Generic stack tracer with callbacks */ 37/* Generic stack tracer with callbacks */
38 38
39struct stacktrace_ops { 39struct stacktrace_ops {
40 void (*warning)(void *data, char *msg);
41 /* msg must contain %s for the symbol */
42 void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
43 void (*address)(void *data, unsigned long address, int reliable); 40 void (*address)(void *data, unsigned long address, int reliable);
44 /* On negative return stop dumping */ 41 /* On negative return stop dumping */
45 int (*stack)(void *data, char *name); 42 int (*stack)(void *data, char *name);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index abd3e0ea762a..99f0ad753f32 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -42,7 +42,7 @@
42 * Returns 0 if the range is valid, nonzero otherwise. 42 * Returns 0 if the range is valid, nonzero otherwise.
43 * 43 *
44 * This is equivalent to the following test: 44 * This is equivalent to the following test:
45 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) 45 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
46 * 46 *
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */ 48 */
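
The one-character change above (>= becomes >) matters exactly at the boundary: a range that ends precisely at addr_limit.seg is legal, because its last byte is limit - 1. A small worked example of the test the comment describes, done in 64-bit arithmetic so the 33-bit sum cannot wrap (the 0xC0000000 limit is only an assumed 32-bit-style value):

#include <stdio.h>
#include <stdint.h>

/* Mirrors "(u33)addr + (u33)size > (u33)limit" from the comment above. */
static int range_bad(uint64_t addr, uint64_t size, uint64_t limit)
{
	return addr + size > limit;		/* nonzero means "reject"        */
}

int main(void)
{
	uint64_t limit = 0xC0000000ULL;		/* assumed user/kernel split     */

	printf("%d\n", range_bad(0xBFFFFFFFULL, 1, limit));  /* 0: ends at limit, OK */
	printf("%d\n", range_bad(0xBFFFFFFFULL, 2, limit));  /* 1: crosses the limit */
	printf("%d\n", range_bad(0xFFFFFFF0ULL, 32, limit)); /* 1: would wrap in u32 */
	return 0;
}
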
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 3e094af443c3..130f1eeee5fe 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -94,6 +94,8 @@
94/* after this # consecutive successes, bump up the throttle if it was lowered */ 94/* after this # consecutive successes, bump up the throttle if it was lowered */
95#define COMPLETE_THRESHOLD 5 95#define COMPLETE_THRESHOLD 5
96 96
97#define UV_LB_SUBNODEID 0x10
98
97/* 99/*
98 * number of entries in the destination side payload queue 100 * number of entries in the destination side payload queue
99 */ 101 */
@@ -124,7 +126,7 @@
124 * The distribution specification (32 bytes) is interpreted as a 256-bit 126 * The distribution specification (32 bytes) is interpreted as a 256-bit
125 * distribution vector. Adjacent bits correspond to consecutive even numbered 127 * distribution vector. Adjacent bits correspond to consecutive even numbered
126 * nodeIDs. The result of adding the index of a given bit to the 15-bit 128 * nodeIDs. The result of adding the index of a given bit to the 15-bit
127 * 'base_dest_nodeid' field of the header corresponds to the 129 * 'base_dest_nasid' field of the header corresponds to the
128 * destination nodeID associated with that specified bit. 130 * destination nodeID associated with that specified bit.
129 */ 131 */
130struct bau_target_uvhubmask { 132struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
176struct bau_msg_header { 178struct bau_msg_header {
177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 179 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
178 /* bits 5:0 */ 180 /* bits 5:0 */
179 unsigned int base_dest_nodeid:15; /* nasid of the */ 181 unsigned int base_dest_nasid:15; /* nasid of the */
180 /* bits 20:6 */ /* first bit in uvhub map */ 182 /* bits 20:6 */ /* first bit in uvhub map */
181 unsigned int command:8; /* message type */ 183 unsigned int command:8; /* message type */
182 /* bits 28:21 */ 184 /* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
378 unsigned long d_rcanceled; /* number of messages canceled by resets */ 380 unsigned long d_rcanceled; /* number of messages canceled by resets */
379}; 381};
380 382
383struct hub_and_pnode {
384 short uvhub;
385 short pnode;
386};
381/* 387/*
382 * one per-cpu; to locate the software tables 388 * one per-cpu; to locate the software tables
383 */ 389 */
@@ -399,10 +405,12 @@ struct bau_control {
399 int baudisabled; 405 int baudisabled;
400 int set_bau_off; 406 int set_bau_off;
401 short cpu; 407 short cpu;
408 short osnode;
402 short uvhub_cpu; 409 short uvhub_cpu;
403 short uvhub; 410 short uvhub;
404 short cpus_in_socket; 411 short cpus_in_socket;
405 short cpus_in_uvhub; 412 short cpus_in_uvhub;
413 short partition_base_pnode;
406 unsigned short message_number; 414 unsigned short message_number;
407 unsigned short uvhub_quiesce; 415 unsigned short uvhub_quiesce;
408 short socket_acknowledge_count[DEST_Q_SIZE]; 416 short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
422 int congested_period; 430 int congested_period;
423 cycles_t period_time; 431 cycles_t period_time;
424 long period_requests; 432 long period_requests;
433 struct hub_and_pnode *target_hub_and_pnode;
425}; 434};
426 435
427static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 436static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
428{ 437{
429 return constant_test_bit(uvhub, &dstp->bits[0]); 438 return constant_test_bit(uvhub, &dstp->bits[0]);
430} 439}
431static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) 440static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
432{ 441{
433 __set_bit(uvhub, &dstp->bits[0]); 442 __set_bit(pnode, &dstp->bits[0]);
434} 443}
435static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, 444static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
436 int nbits) 445 int nbits)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index a501741c2335..4298002d0c83 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -398,6 +398,8 @@ struct uv_blade_info {
398 unsigned short nr_online_cpus; 398 unsigned short nr_online_cpus;
399 unsigned short pnode; 399 unsigned short pnode;
400 short memory_nid; 400 short memory_nid;
401 spinlock_t nmi_lock;
402 unsigned long nmi_count;
401}; 403};
402extern struct uv_blade_info *uv_blade_info; 404extern struct uv_blade_info *uv_blade_info;
403extern short *uv_node_to_blade; 405extern short *uv_node_to_blade;
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 20cafeac7455..f5bb64a823d7 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
1099 } s; 1099 } s;
1100}; 1100};
1101 1101
1102/* ========================================================================= */
1103/* UVH_SCRATCH5 */
1104/* ========================================================================= */
1105#define UVH_SCRATCH5 0x2d0200UL
1106#define UVH_SCRATCH5_32 0x00778
1107
1108#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1109#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
1110union uvh_scratch5_u {
1111 unsigned long v;
1112 struct uvh_scratch5_s {
1113 unsigned long scratch5 : 64; /* RW, W1CS */
1114 } s;
1115};
1102 1116
1103#endif /* __ASM_UV_MMRS_X86_H__ */ 1117#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 643ebf2e2ad8..d3d859035af9 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -68,6 +68,17 @@ struct x86_init_oem {
68}; 68};
69 69
70/** 70/**
71 * struct x86_init_mapping - platform specific initial kernel pagetable setup
72 * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
73 *
74 * For more details on the purpose of this hook, look in
75 * init_memory_mapping and the commit that added it.
76 */
77struct x86_init_mapping {
78 void (*pagetable_reserve)(u64 start, u64 end);
79};
80
81/**
71 * struct x86_init_paging - platform specific paging functions 82 * struct x86_init_paging - platform specific paging functions
72 * @pagetable_setup_start: platform specific pre paging_init() call 83 * @pagetable_setup_start: platform specific pre paging_init() call
73 * @pagetable_setup_done: platform specific post paging_init() call 84 * @pagetable_setup_done: platform specific post paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
123 struct x86_init_mpparse mpparse; 134 struct x86_init_mpparse mpparse;
124 struct x86_init_irqs irqs; 135 struct x86_init_irqs irqs;
125 struct x86_init_oem oem; 136 struct x86_init_oem oem;
137 struct x86_init_mapping mapping;
126 struct x86_init_paging paging; 138 struct x86_init_paging paging;
127 struct x86_init_timers timers; 139 struct x86_init_timers timers;
128 struct x86_init_iommu iommu; 140 struct x86_init_iommu iommu;
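
struct x86_init_mapping gives platforms a hook into the pagetable-reservation step of init_memory_mapping(); the default presumably remains the native callback (native_pagetable_reserve(), declared in pgtable_types.h earlier in this series), while a paravirtualized platform can install its own. A hedged sketch of such an override (all names below are hypothetical):

#include <linux/init.h>
#include <asm/x86_init.h>

extern void myplat_reserve_range(u64 start, u64 end);	/* hypothetical helper */

static void __init myplat_pagetable_reserve(u64 start, u64 end)
{
	/*
	 * Sketch only: a platform might reserve less than [start, end)
	 * here, e.g. to hand unused pagetable pages back as usable RAM.
	 */
	myplat_reserve_range(start, end);
}

static void __init myplat_init(void)
{
	x86_init.mapping.pagetable_reserve = myplat_pagetable_reserve;
}
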
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c61934fbf22a..64a619d47d34 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
47extern unsigned long set_phys_range_identity(unsigned long pfn_s, 47extern unsigned long set_phys_range_identity(unsigned long pfn_s,
48 unsigned long pfn_e); 48 unsigned long pfn_e);
49 49
50extern int m2p_add_override(unsigned long mfn, struct page *page); 50extern int m2p_add_override(unsigned long mfn, struct page *page,
51extern int m2p_remove_override(struct page *page); 51 bool clear_pte);
52extern int m2p_remove_override(struct page *page, bool clear_pte);
52extern struct page *m2p_find_override(unsigned long mfn); 53extern struct page *m2p_find_override(unsigned long mfn);
53extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); 54extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
54 55
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index aa8620989162..4fbda9a3f339 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -15,10 +15,26 @@ static inline int pci_xen_hvm_init(void)
15#endif 15#endif
16#if defined(CONFIG_XEN_DOM0) 16#if defined(CONFIG_XEN_DOM0)
17void __init xen_setup_pirqs(void); 17void __init xen_setup_pirqs(void);
18int xen_find_device_domain_owner(struct pci_dev *dev);
19int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
20int xen_unregister_device_domain_owner(struct pci_dev *dev);
18#else 21#else
19static inline void __init xen_setup_pirqs(void) 22static inline void __init xen_setup_pirqs(void)
20{ 23{
21} 24}
25static inline int xen_find_device_domain_owner(struct pci_dev *dev)
26{
27 return -1;
28}
29static inline int xen_register_device_domain_owner(struct pci_dev *dev,
30 uint16_t domain)
31{
32 return -1;
33}
34static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
35{
36 return -1;
37}
22#endif 38#endif
23 39
24#if defined(CONFIG_PCI_MSI) 40#if defined(CONFIG_PCI_MSI)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 7338ef2218bc..97ebf82e0b7f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o
117ifeq ($(CONFIG_X86_64),y) 117ifeq ($(CONFIG_X86_64),y)
118 obj-$(CONFIG_AUDIT) += audit_64.o 118 obj-$(CONFIG_AUDIT) += audit_64.o
119 119
120 obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o 120 obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
121 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o 121 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
122 obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o 122 obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
123 123
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index ff93bc1b09c3..18a857ba7a25 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -112,11 +112,6 @@ static int __init acpi_sleep_setup(char *str)
112#ifdef CONFIG_HIBERNATION 112#ifdef CONFIG_HIBERNATION
113 if (strncmp(str, "s4_nohwsig", 10) == 0) 113 if (strncmp(str, "s4_nohwsig", 10) == 0)
114 acpi_no_s4_hw_signature(); 114 acpi_no_s4_hw_signature();
115 if (strncmp(str, "s4_nonvs", 8) == 0) {
116 pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
117 "please use acpi_sleep=nonvs instead");
118 acpi_nvs_nosave();
119 }
120#endif 115#endif
121 if (strncmp(str, "nonvs", 5) == 0) 116 if (strncmp(str, "nonvs", 5) == 0)
122 acpi_nvs_nosave(); 117 acpi_nvs_nosave();
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4a234677e213..1eeeafcb4410 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -210,6 +210,15 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
210 u8 insnbuf[MAX_PATCH_LEN]; 210 u8 insnbuf[MAX_PATCH_LEN];
211 211
212 DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); 212 DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
213 /*
214 * The scan order should be from start to end. A later scanned
215 * alternative code can overwrite a previous scanned alternative code.
216 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
217 * patch code.
218 *
219 * So be careful if you want to change the scan order to any other
220 * order.
221 */
213 for (a = start; a < end; a++) { 222 for (a = start; a < end; a++) {
214 u8 *instr = a->instr; 223 u8 *instr = a->instr;
215 BUG_ON(a->replacementlen > a->instrlen); 224 BUG_ON(a->replacementlen > a->instrlen);
@@ -679,7 +688,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
679 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 688 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
680} 689}
681 690
682#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 691#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL)
683 692
684#ifdef CONFIG_X86_64 693#ifdef CONFIG_X86_64
685unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; 694unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b117efd24f71..b117efd24f71 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 57ca77787220..873e7e1ead7b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/pci-ats.h>
21#include <linux/bitmap.h> 22#include <linux/bitmap.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/debugfs.h> 24#include <linux/debugfs.h>
@@ -25,6 +26,7 @@
25#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
26#include <linux/iommu-helper.h> 27#include <linux/iommu-helper.h>
27#include <linux/iommu.h> 28#include <linux/iommu.h>
29#include <linux/delay.h>
28#include <asm/proto.h> 30#include <asm/proto.h>
29#include <asm/iommu.h> 31#include <asm/iommu.h>
30#include <asm/gart.h> 32#include <asm/gart.h>
@@ -34,7 +36,7 @@
34 36
35#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) 37#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
36 38
37#define EXIT_LOOP_COUNT 10000000 39#define LOOP_TIMEOUT 100000
38 40
39static DEFINE_RWLOCK(amd_iommu_devtable_lock); 41static DEFINE_RWLOCK(amd_iommu_devtable_lock);
40 42
@@ -57,7 +59,6 @@ struct iommu_cmd {
57 u32 data[4]; 59 u32 data[4];
58}; 60};
59 61
60static void reset_iommu_command_buffer(struct amd_iommu *iommu);
61static void update_domain(struct protection_domain *domain); 62static void update_domain(struct protection_domain *domain);
62 63
63/**************************************************************************** 64/****************************************************************************
@@ -322,8 +323,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
322 break; 323 break;
323 case EVENT_TYPE_ILL_CMD: 324 case EVENT_TYPE_ILL_CMD:
324 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); 325 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
325 iommu->reset_in_progress = true;
326 reset_iommu_command_buffer(iommu);
327 dump_command(address); 326 dump_command(address);
328 break; 327 break;
329 case EVENT_TYPE_CMD_HARD_ERR: 328 case EVENT_TYPE_CMD_HARD_ERR:
@@ -367,7 +366,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
367 spin_unlock_irqrestore(&iommu->lock, flags); 366 spin_unlock_irqrestore(&iommu->lock, flags);
368} 367}
369 368
370irqreturn_t amd_iommu_int_handler(int irq, void *data) 369irqreturn_t amd_iommu_int_thread(int irq, void *data)
371{ 370{
372 struct amd_iommu *iommu; 371 struct amd_iommu *iommu;
373 372
@@ -377,192 +376,300 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
377 return IRQ_HANDLED; 376 return IRQ_HANDLED;
378} 377}
379 378
379irqreturn_t amd_iommu_int_handler(int irq, void *data)
380{
381 return IRQ_WAKE_THREAD;
382}
383
380/**************************************************************************** 384/****************************************************************************
381 * 385 *
382 * IOMMU command queuing functions 386 * IOMMU command queuing functions
383 * 387 *
384 ****************************************************************************/ 388 ****************************************************************************/
385 389
386/* 390static int wait_on_sem(volatile u64 *sem)
387 * Writes the command to the IOMMUs command buffer and informs the 391{
388 * hardware about the new command. Must be called with iommu->lock held. 392 int i = 0;
389 */ 393
390static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 394 while (*sem == 0 && i < LOOP_TIMEOUT) {
395 udelay(1);
396 i += 1;
397 }
398
399 if (i == LOOP_TIMEOUT) {
400 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
401 return -EIO;
402 }
403
404 return 0;
405}
406
407static void copy_cmd_to_buffer(struct amd_iommu *iommu,
408 struct iommu_cmd *cmd,
409 u32 tail)
391{ 410{
392 u32 tail, head;
393 u8 *target; 411 u8 *target;
394 412
395 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
396 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
397 target = iommu->cmd_buf + tail; 413 target = iommu->cmd_buf + tail;
398 memcpy_toio(target, cmd, sizeof(*cmd)); 414 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
399 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; 415
400 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 416 /* Copy command to buffer */
401 if (tail == head) 417 memcpy(target, cmd, sizeof(*cmd));
402 return -ENOMEM; 418
419 /* Tell the IOMMU about it */
403 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 420 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
421}
404 422
405 return 0; 423static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
424{
425 WARN_ON(address & 0x7ULL);
426
427 memset(cmd, 0, sizeof(*cmd));
428 cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
429 cmd->data[1] = upper_32_bits(__pa(address));
430 cmd->data[2] = 1;
431 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
432}
433
434static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
435{
436 memset(cmd, 0, sizeof(*cmd));
437 cmd->data[0] = devid;
438 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
439}
440
441static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
442 size_t size, u16 domid, int pde)
443{
444 u64 pages;
445 int s;
446
447 pages = iommu_num_pages(address, size, PAGE_SIZE);
448 s = 0;
449
450 if (pages > 1) {
451 /*
452 * If we have to flush more than one page, flush all
453 * TLB entries for this domain
454 */
455 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
456 s = 1;
457 }
458
459 address &= PAGE_MASK;
460
461 memset(cmd, 0, sizeof(*cmd));
462 cmd->data[1] |= domid;
463 cmd->data[2] = lower_32_bits(address);
464 cmd->data[3] = upper_32_bits(address);
465 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
466 if (s) /* size bit - we flush more than one 4kb page */
467 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 468	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
469 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
470}
471
472static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
473 u64 address, size_t size)
474{
475 u64 pages;
476 int s;
477
478 pages = iommu_num_pages(address, size, PAGE_SIZE);
479 s = 0;
480
481 if (pages > 1) {
482 /*
483 * If we have to flush more than one page, flush all
484 * TLB entries for this domain
485 */
486 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
487 s = 1;
488 }
489
490 address &= PAGE_MASK;
491
492 memset(cmd, 0, sizeof(*cmd));
493 cmd->data[0] = devid;
494 cmd->data[0] |= (qdep & 0xff) << 24;
495 cmd->data[1] = devid;
496 cmd->data[2] = lower_32_bits(address);
497 cmd->data[3] = upper_32_bits(address);
498 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
499 if (s)
500 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
501}
502
503static void build_inv_all(struct iommu_cmd *cmd)
504{
505 memset(cmd, 0, sizeof(*cmd));
506 CMD_SET_TYPE(cmd, CMD_INV_ALL);
406} 507}
407 508
408/* 509/*
409 * General queuing function for commands. Takes iommu->lock and calls 510 * Writes the command to the IOMMUs command buffer and informs the
410 * __iommu_queue_command(). 511 * hardware about the new command.
411 */ 512 */
412static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 513static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
413{ 514{
515 u32 left, tail, head, next_tail;
414 unsigned long flags; 516 unsigned long flags;
415 int ret;
416 517
518 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
519
520again:
417 spin_lock_irqsave(&iommu->lock, flags); 521 spin_lock_irqsave(&iommu->lock, flags);
418 ret = __iommu_queue_command(iommu, cmd);
419 if (!ret)
420 iommu->need_sync = true;
421 spin_unlock_irqrestore(&iommu->lock, flags);
422 522
423 return ret; 523 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
424} 524 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
525 next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
526 left = (head - next_tail) % iommu->cmd_buf_size;
425 527
426/* 528 if (left <= 2) {
427 * This function waits until an IOMMU has completed a completion 529 struct iommu_cmd sync_cmd;
428 * wait command 530 volatile u64 sem = 0;
429 */ 531 int ret;
430static void __iommu_wait_for_completion(struct amd_iommu *iommu)
431{
432 int ready = 0;
433 unsigned status = 0;
434 unsigned long i = 0;
435 532
436 INC_STATS_COUNTER(compl_wait); 533 build_completion_wait(&sync_cmd, (u64)&sem);
534 copy_cmd_to_buffer(iommu, &sync_cmd, tail);
437 535
438 while (!ready && (i < EXIT_LOOP_COUNT)) { 536 spin_unlock_irqrestore(&iommu->lock, flags);
439 ++i; 537
440 /* wait for the bit to become one */ 538 if ((ret = wait_on_sem(&sem)) != 0)
441 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 539 return ret;
442 ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; 540
541 goto again;
443 } 542 }
444 543
445 /* set bit back to zero */ 544 copy_cmd_to_buffer(iommu, cmd, tail);
446 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; 545
447 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); 546 /* We need to sync now to make sure all commands are processed */
547 iommu->need_sync = true;
548
549 spin_unlock_irqrestore(&iommu->lock, flags);
448 550
449 if (unlikely(i == EXIT_LOOP_COUNT)) 551 return 0;
450 iommu->reset_in_progress = true;
451} 552}
452 553
453/* 554/*
454 * This function queues a completion wait command into the command 555 * This function queues a completion wait command into the command
455 * buffer of an IOMMU 556 * buffer of an IOMMU
456 */ 557 */
457static int __iommu_completion_wait(struct amd_iommu *iommu) 558static int iommu_completion_wait(struct amd_iommu *iommu)
458{ 559{
459 struct iommu_cmd cmd; 560 struct iommu_cmd cmd;
561 volatile u64 sem = 0;
562 int ret;
460 563
461 memset(&cmd, 0, sizeof(cmd)); 564 if (!iommu->need_sync)
462 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; 565 return 0;
463 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
464 566
465 return __iommu_queue_command(iommu, &cmd); 567 build_completion_wait(&cmd, (u64)&sem);
568
569 ret = iommu_queue_command(iommu, &cmd);
570 if (ret)
571 return ret;
572
573 return wait_on_sem(&sem);
466} 574}
467 575
468/* 576static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
469 * This function is called whenever we need to ensure that the IOMMU has
470 * completed execution of all commands we sent. It sends a
471 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
472 * us about that by writing a value to a physical address we pass with
473 * the command.
474 */
475static int iommu_completion_wait(struct amd_iommu *iommu)
476{ 577{
477 int ret = 0; 578 struct iommu_cmd cmd;
478 unsigned long flags;
479 579
480 spin_lock_irqsave(&iommu->lock, flags); 580 build_inv_dte(&cmd, devid);
481 581
482 if (!iommu->need_sync) 582 return iommu_queue_command(iommu, &cmd);
483 goto out; 583}
484 584
485 ret = __iommu_completion_wait(iommu); 585static void iommu_flush_dte_all(struct amd_iommu *iommu)
586{
587 u32 devid;
486 588
487 iommu->need_sync = false; 589 for (devid = 0; devid <= 0xffff; ++devid)
590 iommu_flush_dte(iommu, devid);
488 591
489 if (ret) 592 iommu_completion_wait(iommu);
490 goto out; 593}
491
492 __iommu_wait_for_completion(iommu);
493 594
494out: 595/*
495 spin_unlock_irqrestore(&iommu->lock, flags); 596 * This function uses heavy locking and may disable irqs for some time. But
597 * this is no issue because it is only called during resume.
598 */
599static void iommu_flush_tlb_all(struct amd_iommu *iommu)
600{
601 u32 dom_id;
496 602
497 if (iommu->reset_in_progress) 603 for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
498 reset_iommu_command_buffer(iommu); 604 struct iommu_cmd cmd;
605 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
606 dom_id, 1);
607 iommu_queue_command(iommu, &cmd);
608 }
499 609
500 return 0; 610 iommu_completion_wait(iommu);
501} 611}
502 612
503static void iommu_flush_complete(struct protection_domain *domain) 613static void iommu_flush_all(struct amd_iommu *iommu)
504{ 614{
505 int i; 615 struct iommu_cmd cmd;
506 616
507 for (i = 0; i < amd_iommus_present; ++i) { 617 build_inv_all(&cmd);
508 if (!domain->dev_iommu[i])
509 continue;
510 618
511 /* 619 iommu_queue_command(iommu, &cmd);
512 * Devices of this domain are behind this IOMMU 620 iommu_completion_wait(iommu);
513 * We need to wait for completion of all commands. 621}
514 */ 622
515 iommu_completion_wait(amd_iommus[i]); 623void iommu_flush_all_caches(struct amd_iommu *iommu)
624{
625 if (iommu_feature(iommu, FEATURE_IA)) {
626 iommu_flush_all(iommu);
627 } else {
628 iommu_flush_dte_all(iommu);
629 iommu_flush_tlb_all(iommu);
516 } 630 }
517} 631}
518 632
519/* 633/*
520 * Command send function for invalidating a device table entry 634 * Command send function for flushing on-device TLB
521 */ 635 */
522static int iommu_flush_device(struct device *dev) 636static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
523{ 637{
638 struct pci_dev *pdev = to_pci_dev(dev);
524 struct amd_iommu *iommu; 639 struct amd_iommu *iommu;
525 struct iommu_cmd cmd; 640 struct iommu_cmd cmd;
526 u16 devid; 641 u16 devid;
642 int qdep;
527 643
644 qdep = pci_ats_queue_depth(pdev);
528 devid = get_device_id(dev); 645 devid = get_device_id(dev);
529 iommu = amd_iommu_rlookup_table[devid]; 646 iommu = amd_iommu_rlookup_table[devid];
530 647
531 /* Build command */ 648 build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
532 memset(&cmd, 0, sizeof(cmd));
533 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
534 cmd.data[0] = devid;
535 649
536 return iommu_queue_command(iommu, &cmd); 650 return iommu_queue_command(iommu, &cmd);
537} 651}
538 652
539static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
540 u16 domid, int pde, int s)
541{
542 memset(cmd, 0, sizeof(*cmd));
543 address &= PAGE_MASK;
544 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
545 cmd->data[1] |= domid;
546 cmd->data[2] = lower_32_bits(address);
547 cmd->data[3] = upper_32_bits(address);
548 if (s) /* size bit - we flush more than one 4kb page */
549 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
550 if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
551 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
552}
553
554/* 653/*
555 * Generic command send function for invalidaing TLB entries 654 * Command send function for invalidating a device table entry
556 */ 655 */
557static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, 656static int device_flush_dte(struct device *dev)
558 u64 address, u16 domid, int pde, int s)
559{ 657{
560 struct iommu_cmd cmd; 658 struct amd_iommu *iommu;
659 struct pci_dev *pdev;
660 u16 devid;
561 int ret; 661 int ret;
562 662
563 __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); 663 pdev = to_pci_dev(dev);
664 devid = get_device_id(dev);
665 iommu = amd_iommu_rlookup_table[devid];
564 666
565 ret = iommu_queue_command(iommu, &cmd); 667 ret = iommu_flush_dte(iommu, devid);
668 if (ret)
669 return ret;
670
671 if (pci_ats_enabled(pdev))
672 ret = device_flush_iotlb(dev, 0, ~0UL);
566 673
567 return ret; 674 return ret;
568} 675}
@@ -572,23 +679,14 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
572 * It invalidates a single PTE if the range to flush is within a single 679 * It invalidates a single PTE if the range to flush is within a single
573 * page. Otherwise it flushes the whole TLB of the IOMMU. 680 * page. Otherwise it flushes the whole TLB of the IOMMU.
574 */ 681 */
575static void __iommu_flush_pages(struct protection_domain *domain, 682static void __domain_flush_pages(struct protection_domain *domain,
576 u64 address, size_t size, int pde) 683 u64 address, size_t size, int pde)
577{ 684{
578 int s = 0, i; 685 struct iommu_dev_data *dev_data;
579 unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); 686 struct iommu_cmd cmd;
580 687 int ret = 0, i;
581 address &= PAGE_MASK;
582
583 if (pages > 1) {
584 /*
585 * If we have to flush more than one page, flush all
586 * TLB entries for this domain
587 */
588 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
589 s = 1;
590 }
591 688
689 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
592 690
593 for (i = 0; i < amd_iommus_present; ++i) { 691 for (i = 0; i < amd_iommus_present; ++i) {
594 if (!domain->dev_iommu[i]) 692 if (!domain->dev_iommu[i])
@@ -598,101 +696,70 @@ static void __iommu_flush_pages(struct protection_domain *domain,
598 * Devices of this domain are behind this IOMMU 696 * Devices of this domain are behind this IOMMU
599 * We need a TLB flush 697 * We need a TLB flush
600 */ 698 */
601 iommu_queue_inv_iommu_pages(amd_iommus[i], address, 699 ret |= iommu_queue_command(amd_iommus[i], &cmd);
602 domain->id, pde, s); 700 }
701
702 list_for_each_entry(dev_data, &domain->dev_list, list) {
703 struct pci_dev *pdev = to_pci_dev(dev_data->dev);
704
705 if (!pci_ats_enabled(pdev))
706 continue;
707
708 ret |= device_flush_iotlb(dev_data->dev, address, size);
603 } 709 }
604 710
605 return; 711 WARN_ON(ret);
606} 712}
607 713
608static void iommu_flush_pages(struct protection_domain *domain, 714static void domain_flush_pages(struct protection_domain *domain,
609 u64 address, size_t size) 715 u64 address, size_t size)
610{ 716{
611 __iommu_flush_pages(domain, address, size, 0); 717 __domain_flush_pages(domain, address, size, 0);
612} 718}
613 719
614/* Flush the whole IO/TLB for a given protection domain */ 720/* Flush the whole IO/TLB for a given protection domain */
615static void iommu_flush_tlb(struct protection_domain *domain) 721static void domain_flush_tlb(struct protection_domain *domain)
616{ 722{
617 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); 723 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
618} 724}
619 725
620/* Flush the whole IO/TLB for a given protection domain - including PDE */ 726/* Flush the whole IO/TLB for a given protection domain - including PDE */
621static void iommu_flush_tlb_pde(struct protection_domain *domain) 727static void domain_flush_tlb_pde(struct protection_domain *domain)
622{ 728{
623 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); 729 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
624}
625
626
627/*
628 * This function flushes the DTEs for all devices in domain
629 */
630static void iommu_flush_domain_devices(struct protection_domain *domain)
631{
632 struct iommu_dev_data *dev_data;
633 unsigned long flags;
634
635 spin_lock_irqsave(&domain->lock, flags);
636
637 list_for_each_entry(dev_data, &domain->dev_list, list)
638 iommu_flush_device(dev_data->dev);
639
640 spin_unlock_irqrestore(&domain->lock, flags);
641} 730}
642 731
643static void iommu_flush_all_domain_devices(void) 732static void domain_flush_complete(struct protection_domain *domain)
644{ 733{
645 struct protection_domain *domain; 734 int i;
646 unsigned long flags;
647 735
648 spin_lock_irqsave(&amd_iommu_pd_lock, flags); 736 for (i = 0; i < amd_iommus_present; ++i) {
737 if (!domain->dev_iommu[i])
738 continue;
649 739
650 list_for_each_entry(domain, &amd_iommu_pd_list, list) { 740 /*
651 iommu_flush_domain_devices(domain); 741 * Devices of this domain are behind this IOMMU
652 iommu_flush_complete(domain); 742 * We need to wait for completion of all commands.
743 */
744 iommu_completion_wait(amd_iommus[i]);
653 } 745 }
654
655 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
656} 746}
657 747
658void amd_iommu_flush_all_devices(void)
659{
660 iommu_flush_all_domain_devices();
661}
662 748
663/* 749/*
664 * This function uses heavy locking and may disable irqs for some time. But 750 * This function flushes the DTEs for all devices in domain
665 * this is no issue because it is only called during resume.
666 */ 751 */
667void amd_iommu_flush_all_domains(void) 752static void domain_flush_devices(struct protection_domain *domain)
668{ 753{
669 struct protection_domain *domain; 754 struct iommu_dev_data *dev_data;
670 unsigned long flags; 755 unsigned long flags;
671 756
672 spin_lock_irqsave(&amd_iommu_pd_lock, flags); 757 spin_lock_irqsave(&domain->lock, flags);
673
674 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
675 spin_lock(&domain->lock);
676 iommu_flush_tlb_pde(domain);
677 iommu_flush_complete(domain);
678 spin_unlock(&domain->lock);
679 }
680
681 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
682}
683
684static void reset_iommu_command_buffer(struct amd_iommu *iommu)
685{
686 pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
687
688 if (iommu->reset_in_progress)
689 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
690 758
691 amd_iommu_reset_cmd_buffer(iommu); 759 list_for_each_entry(dev_data, &domain->dev_list, list)
692 amd_iommu_flush_all_devices(); 760 device_flush_dte(dev_data->dev);
693 amd_iommu_flush_all_domains();
694 761
695 iommu->reset_in_progress = false; 762 spin_unlock_irqrestore(&domain->lock, flags);
696} 763}
697 764
698/**************************************************************************** 765/****************************************************************************
@@ -1410,17 +1477,22 @@ static bool dma_ops_domain(struct protection_domain *domain)
1410 return domain->flags & PD_DMA_OPS_MASK; 1477 return domain->flags & PD_DMA_OPS_MASK;
1411} 1478}
1412 1479
1413static void set_dte_entry(u16 devid, struct protection_domain *domain) 1480static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
1414{ 1481{
1415 u64 pte_root = virt_to_phys(domain->pt_root); 1482 u64 pte_root = virt_to_phys(domain->pt_root);
1483 u32 flags = 0;
1416 1484
1417 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) 1485 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1418 << DEV_ENTRY_MODE_SHIFT; 1486 << DEV_ENTRY_MODE_SHIFT;
1419 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; 1487 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1420 1488
1421 amd_iommu_dev_table[devid].data[2] = domain->id; 1489 if (ats)
1422 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); 1490 flags |= DTE_FLAG_IOTLB;
1423 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); 1491
1492 amd_iommu_dev_table[devid].data[3] |= flags;
1493 amd_iommu_dev_table[devid].data[2] = domain->id;
1494 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1495 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
1424} 1496}
1425 1497
1426static void clear_dte_entry(u16 devid) 1498static void clear_dte_entry(u16 devid)
@@ -1437,34 +1509,42 @@ static void do_attach(struct device *dev, struct protection_domain *domain)
1437{ 1509{
1438 struct iommu_dev_data *dev_data; 1510 struct iommu_dev_data *dev_data;
1439 struct amd_iommu *iommu; 1511 struct amd_iommu *iommu;
1512 struct pci_dev *pdev;
1513 bool ats = false;
1440 u16 devid; 1514 u16 devid;
1441 1515
1442 devid = get_device_id(dev); 1516 devid = get_device_id(dev);
1443 iommu = amd_iommu_rlookup_table[devid]; 1517 iommu = amd_iommu_rlookup_table[devid];
1444 dev_data = get_dev_data(dev); 1518 dev_data = get_dev_data(dev);
1519 pdev = to_pci_dev(dev);
1520
1521 if (amd_iommu_iotlb_sup)
1522 ats = pci_ats_enabled(pdev);
1445 1523
1446 /* Update data structures */ 1524 /* Update data structures */
1447 dev_data->domain = domain; 1525 dev_data->domain = domain;
1448 list_add(&dev_data->list, &domain->dev_list); 1526 list_add(&dev_data->list, &domain->dev_list);
1449 set_dte_entry(devid, domain); 1527 set_dte_entry(devid, domain, ats);
1450 1528
1451 /* Do reference counting */ 1529 /* Do reference counting */
1452 domain->dev_iommu[iommu->index] += 1; 1530 domain->dev_iommu[iommu->index] += 1;
1453 domain->dev_cnt += 1; 1531 domain->dev_cnt += 1;
1454 1532
1455 /* Flush the DTE entry */ 1533 /* Flush the DTE entry */
1456 iommu_flush_device(dev); 1534 device_flush_dte(dev);
1457} 1535}
1458 1536
1459static void do_detach(struct device *dev) 1537static void do_detach(struct device *dev)
1460{ 1538{
1461 struct iommu_dev_data *dev_data; 1539 struct iommu_dev_data *dev_data;
1462 struct amd_iommu *iommu; 1540 struct amd_iommu *iommu;
1541 struct pci_dev *pdev;
1463 u16 devid; 1542 u16 devid;
1464 1543
1465 devid = get_device_id(dev); 1544 devid = get_device_id(dev);
1466 iommu = amd_iommu_rlookup_table[devid]; 1545 iommu = amd_iommu_rlookup_table[devid];
1467 dev_data = get_dev_data(dev); 1546 dev_data = get_dev_data(dev);
1547 pdev = to_pci_dev(dev);
1468 1548
1469 /* decrease reference counters */ 1549 /* decrease reference counters */
1470 dev_data->domain->dev_iommu[iommu->index] -= 1; 1550 dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -1476,7 +1556,7 @@ static void do_detach(struct device *dev)
1476 clear_dte_entry(devid); 1556 clear_dte_entry(devid);
1477 1557
1478 /* Flush the DTE entry */ 1558 /* Flush the DTE entry */
1479 iommu_flush_device(dev); 1559 device_flush_dte(dev);
1480} 1560}
1481 1561
1482/* 1562/*
@@ -1539,9 +1619,13 @@ out_unlock:
1539static int attach_device(struct device *dev, 1619static int attach_device(struct device *dev,
1540 struct protection_domain *domain) 1620 struct protection_domain *domain)
1541{ 1621{
1622 struct pci_dev *pdev = to_pci_dev(dev);
1542 unsigned long flags; 1623 unsigned long flags;
1543 int ret; 1624 int ret;
1544 1625
1626 if (amd_iommu_iotlb_sup)
1627 pci_enable_ats(pdev, PAGE_SHIFT);
1628
1545 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1629 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1546 ret = __attach_device(dev, domain); 1630 ret = __attach_device(dev, domain);
1547 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1631 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -1551,7 +1635,7 @@ static int attach_device(struct device *dev,
1551 * left the caches in the IOMMU dirty. So we have to flush 1635 * left the caches in the IOMMU dirty. So we have to flush
1552 * here to evict all dirty stuff. 1636 * here to evict all dirty stuff.
1553 */ 1637 */
1554 iommu_flush_tlb_pde(domain); 1638 domain_flush_tlb_pde(domain);
1555 1639
1556 return ret; 1640 return ret;
1557} 1641}
@@ -1598,12 +1682,16 @@ static void __detach_device(struct device *dev)
1598 */ 1682 */
1599static void detach_device(struct device *dev) 1683static void detach_device(struct device *dev)
1600{ 1684{
1685 struct pci_dev *pdev = to_pci_dev(dev);
1601 unsigned long flags; 1686 unsigned long flags;
1602 1687
1603 /* lock device table */ 1688 /* lock device table */
1604 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1689 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1605 __detach_device(dev); 1690 __detach_device(dev);
1606 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1691 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1692
1693 if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
1694 pci_disable_ats(pdev);
1607} 1695}
1608 1696
1609/* 1697/*
@@ -1692,7 +1780,7 @@ static int device_change_notifier(struct notifier_block *nb,
1692 goto out; 1780 goto out;
1693 } 1781 }
1694 1782
1695 iommu_flush_device(dev); 1783 device_flush_dte(dev);
1696 iommu_completion_wait(iommu); 1784 iommu_completion_wait(iommu);
1697 1785
1698out: 1786out:
@@ -1753,8 +1841,9 @@ static void update_device_table(struct protection_domain *domain)
1753 struct iommu_dev_data *dev_data; 1841 struct iommu_dev_data *dev_data;
1754 1842
1755 list_for_each_entry(dev_data, &domain->dev_list, list) { 1843 list_for_each_entry(dev_data, &domain->dev_list, list) {
1844 struct pci_dev *pdev = to_pci_dev(dev_data->dev);
1756 u16 devid = get_device_id(dev_data->dev); 1845 u16 devid = get_device_id(dev_data->dev);
1757 set_dte_entry(devid, domain); 1846 set_dte_entry(devid, domain, pci_ats_enabled(pdev));
1758 } 1847 }
1759} 1848}
1760 1849
@@ -1764,8 +1853,9 @@ static void update_domain(struct protection_domain *domain)
1764 return; 1853 return;
1765 1854
1766 update_device_table(domain); 1855 update_device_table(domain);
1767 iommu_flush_domain_devices(domain); 1856
1768 iommu_flush_tlb_pde(domain); 1857 domain_flush_devices(domain);
1858 domain_flush_tlb_pde(domain);
1769 1859
1770 domain->updated = false; 1860 domain->updated = false;
1771} 1861}
@@ -1924,10 +2014,10 @@ retry:
1924 ADD_STATS_COUNTER(alloced_io_mem, size); 2014 ADD_STATS_COUNTER(alloced_io_mem, size);
1925 2015
1926 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { 2016 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
1927 iommu_flush_tlb(&dma_dom->domain); 2017 domain_flush_tlb(&dma_dom->domain);
1928 dma_dom->need_flush = false; 2018 dma_dom->need_flush = false;
1929 } else if (unlikely(amd_iommu_np_cache)) 2019 } else if (unlikely(amd_iommu_np_cache))
1930 iommu_flush_pages(&dma_dom->domain, address, size); 2020 domain_flush_pages(&dma_dom->domain, address, size);
1931 2021
1932out: 2022out:
1933 return address; 2023 return address;
@@ -1976,7 +2066,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1976 dma_ops_free_addresses(dma_dom, dma_addr, pages); 2066 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1977 2067
1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 2068 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1979 iommu_flush_pages(&dma_dom->domain, flush_addr, size); 2069 domain_flush_pages(&dma_dom->domain, flush_addr, size);
1980 dma_dom->need_flush = false; 2070 dma_dom->need_flush = false;
1981 } 2071 }
1982} 2072}
@@ -2012,7 +2102,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
2012 if (addr == DMA_ERROR_CODE) 2102 if (addr == DMA_ERROR_CODE)
2013 goto out; 2103 goto out;
2014 2104
2015 iommu_flush_complete(domain); 2105 domain_flush_complete(domain);
2016 2106
2017out: 2107out:
2018 spin_unlock_irqrestore(&domain->lock, flags); 2108 spin_unlock_irqrestore(&domain->lock, flags);
@@ -2039,7 +2129,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2039 2129
2040 __unmap_single(domain->priv, dma_addr, size, dir); 2130 __unmap_single(domain->priv, dma_addr, size, dir);
2041 2131
2042 iommu_flush_complete(domain); 2132 domain_flush_complete(domain);
2043 2133
2044 spin_unlock_irqrestore(&domain->lock, flags); 2134 spin_unlock_irqrestore(&domain->lock, flags);
2045} 2135}
@@ -2104,7 +2194,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
2104 goto unmap; 2194 goto unmap;
2105 } 2195 }
2106 2196
2107 iommu_flush_complete(domain); 2197 domain_flush_complete(domain);
2108 2198
2109out: 2199out:
2110 spin_unlock_irqrestore(&domain->lock, flags); 2200 spin_unlock_irqrestore(&domain->lock, flags);
@@ -2150,7 +2240,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2150 s->dma_address = s->dma_length = 0; 2240 s->dma_address = s->dma_length = 0;
2151 } 2241 }
2152 2242
2153 iommu_flush_complete(domain); 2243 domain_flush_complete(domain);
2154 2244
2155 spin_unlock_irqrestore(&domain->lock, flags); 2245 spin_unlock_irqrestore(&domain->lock, flags);
2156} 2246}
@@ -2200,7 +2290,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
2200 goto out_free; 2290 goto out_free;
2201 } 2291 }
2202 2292
2203 iommu_flush_complete(domain); 2293 domain_flush_complete(domain);
2204 2294
2205 spin_unlock_irqrestore(&domain->lock, flags); 2295 spin_unlock_irqrestore(&domain->lock, flags);
2206 2296
@@ -2232,7 +2322,7 @@ static void free_coherent(struct device *dev, size_t size,
2232 2322
2233 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 2323 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2234 2324
2235 iommu_flush_complete(domain); 2325 domain_flush_complete(domain);
2236 2326
2237 spin_unlock_irqrestore(&domain->lock, flags); 2327 spin_unlock_irqrestore(&domain->lock, flags);
2238 2328
@@ -2476,7 +2566,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
2476 if (!iommu) 2566 if (!iommu)
2477 return; 2567 return;
2478 2568
2479 iommu_flush_device(dev); 2569 device_flush_dte(dev);
2480 iommu_completion_wait(iommu); 2570 iommu_completion_wait(iommu);
2481} 2571}
2482 2572
@@ -2542,7 +2632,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2542 unmap_size = iommu_unmap_page(domain, iova, page_size); 2632 unmap_size = iommu_unmap_page(domain, iova, page_size);
2543 mutex_unlock(&domain->api_lock); 2633 mutex_unlock(&domain->api_lock);
2544 2634
2545 iommu_flush_tlb_pde(domain); 2635 domain_flush_tlb_pde(domain);
2546 2636
2547 return get_order(unmap_size); 2637 return get_order(unmap_size);
2548} 2638}
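
The amd_iommu.c changes above rename the per-domain flush helpers from iommu_flush_* to domain_flush_*, and __domain_flush_pages() now queues one invalidation command per IOMMU behind the domain plus an IOTLB flush for each ATS-enabled device, collecting the return codes with |= and reporting a single WARN_ON(ret) at the end. Below is a minimal userspace sketch of that accumulate-and-warn-once pattern; flush_one() and NR_UNITS are invented for the illustration and are not part of the patch.

    #include <stdio.h>

    #define NR_UNITS 4

    /* Stand-in for queueing a flush command to one IOMMU; unit 2 "fails". */
    static int flush_one(int unit)
    {
        return unit == 2 ? -1 : 0;
    }

    int main(void)
    {
        int ret = 0;
        int i;

        for (i = 0; i < NR_UNITS; ++i)
            ret |= flush_one(i);    /* keep going, but remember any failure */

        if (ret)
            fprintf(stderr, "warning: at least one flush failed\n");

        return 0;
    }
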
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 246d727b65b7..9179c21120a8 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -137,6 +137,7 @@ int amd_iommus_present;
137 137
138/* IOMMUs have a non-present cache? */ 138/* IOMMUs have a non-present cache? */
139bool amd_iommu_np_cache __read_mostly; 139bool amd_iommu_np_cache __read_mostly;
140bool amd_iommu_iotlb_sup __read_mostly = true;
140 141
141/* 142/*
142 * The ACPI table parsing functions set this variable on an error 143 * The ACPI table parsing functions set this variable on an error
@@ -180,6 +181,12 @@ static u32 dev_table_size; /* size of the device table */
180static u32 alias_table_size; /* size of the alias table */ 181static u32 alias_table_size; /* size of the alias table */
181static u32 rlookup_table_size; /* size of the rlookup table */ 182static u32 rlookup_table_size; /* size of the rlookup table */
182 183
184/*
185 * This function flushes all internal caches of
186 * the IOMMU used by this driver.
187 */
188extern void iommu_flush_all_caches(struct amd_iommu *iommu);
189
183static inline void update_last_devid(u16 devid) 190static inline void update_last_devid(u16 devid)
184{ 191{
185 if (devid > amd_iommu_last_bdf) 192 if (devid > amd_iommu_last_bdf)
@@ -293,9 +300,23 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
293/* Function to enable the hardware */ 300/* Function to enable the hardware */
294static void iommu_enable(struct amd_iommu *iommu) 301static void iommu_enable(struct amd_iommu *iommu)
295{ 302{
296 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", 303 static const char * const feat_str[] = {
304 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
305 "IA", "GA", "HE", "PC", NULL
306 };
307 int i;
308
309 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
297 dev_name(&iommu->dev->dev), iommu->cap_ptr); 310 dev_name(&iommu->dev->dev), iommu->cap_ptr);
298 311
312 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
313 printk(KERN_CONT " extended features: ");
314 for (i = 0; feat_str[i]; ++i)
315 if (iommu_feature(iommu, (1ULL << i)))
316 printk(KERN_CONT " %s", feat_str[i]);
317 }
318 printk(KERN_CONT "\n");
319
299 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 320 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
300} 321}
301 322
@@ -651,7 +672,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
651static void __init init_iommu_from_pci(struct amd_iommu *iommu) 672static void __init init_iommu_from_pci(struct amd_iommu *iommu)
652{ 673{
653 int cap_ptr = iommu->cap_ptr; 674 int cap_ptr = iommu->cap_ptr;
654 u32 range, misc; 675 u32 range, misc, low, high;
655 int i, j; 676 int i, j;
656 677
657 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 678 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
@@ -667,6 +688,15 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
667 MMIO_GET_LD(range)); 688 MMIO_GET_LD(range));
668 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 689 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
669 690
691 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
692 amd_iommu_iotlb_sup = false;
693
694 /* read extended feature bits */
695 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
696 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
697
698 iommu->features = ((u64)high << 32) | low;
699
670 if (!is_rd890_iommu(iommu->dev)) 700 if (!is_rd890_iommu(iommu->dev))
671 return; 701 return;
672 702
@@ -1004,10 +1034,11 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
1004 if (pci_enable_msi(iommu->dev)) 1034 if (pci_enable_msi(iommu->dev))
1005 return 1; 1035 return 1;
1006 1036
1007 r = request_irq(iommu->dev->irq, amd_iommu_int_handler, 1037 r = request_threaded_irq(iommu->dev->irq,
1008 IRQF_SAMPLE_RANDOM, 1038 amd_iommu_int_handler,
1009 "AMD-Vi", 1039 amd_iommu_int_thread,
1010 NULL); 1040 0, "AMD-Vi",
1041 iommu->dev);
1011 1042
1012 if (r) { 1043 if (r) {
1013 pci_disable_msi(iommu->dev); 1044 pci_disable_msi(iommu->dev);
@@ -1244,6 +1275,7 @@ static void enable_iommus(void)
1244 iommu_set_exclusion_range(iommu); 1275 iommu_set_exclusion_range(iommu);
1245 iommu_init_msi(iommu); 1276 iommu_init_msi(iommu);
1246 iommu_enable(iommu); 1277 iommu_enable(iommu);
1278 iommu_flush_all_caches(iommu);
1247 } 1279 }
1248} 1280}
1249 1281
@@ -1274,8 +1306,8 @@ static void amd_iommu_resume(void)
1274 * we have to flush after the IOMMUs are enabled because a 1306 * we have to flush after the IOMMUs are enabled because a
1275 * disabled IOMMU will never execute the commands we send 1307 * disabled IOMMU will never execute the commands we send
1276 */ 1308 */
1277 amd_iommu_flush_all_devices(); 1309 for_each_iommu(iommu)
1278 amd_iommu_flush_all_domains(); 1310 iommu_flush_all_caches(iommu);
1279} 1311}
1280 1312
1281static int amd_iommu_suspend(void) 1313static int amd_iommu_suspend(void)
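
The amd_iommu_init.c changes above read the extended feature register as two 32-bit MMIO words, combine them into iommu->features, and let iommu_enable() walk a NULL-terminated name table to print every supported feature. The standalone sketch below shows the same combine-and-test logic; the register value is made up, standing in for the two MMIO reads.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Made-up values standing in for the low/high MMIO reads. */
        uint32_t low  = 0x00000041;
        uint32_t high = 0x00000000;
        uint64_t features = ((uint64_t)high << 32) | low;

        /* Same bit-to-name table the patch prints from. */
        static const char * const feat_str[] = {
            "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
            "IA", "GA", "HE", "PC", NULL
        };
        int i;

        printf("extended features:");
        for (i = 0; feat_str[i]; ++i)
            if (features & (1ULL << i))
                printf(" %s", feat_str[i]);
        printf("\n");

        return 0;
    }
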
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 33b10a0fc095..7acd2d2ac965 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -37,6 +37,13 @@
37#include <asm/smp.h> 37#include <asm/smp.h>
38#include <asm/x86_init.h> 38#include <asm/x86_init.h>
39#include <asm/emergency-restart.h> 39#include <asm/emergency-restart.h>
40#include <asm/nmi.h>
41
42/* BMC sets a bit in this MMR before sending an NMI */
43#define UVH_NMI_MMR UVH_SCRATCH5
44#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
45#define UV_NMI_PENDING_MASK (1UL << 63)
46DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
40 47
41DEFINE_PER_CPU(int, x2apic_extra_bits); 48DEFINE_PER_CPU(int, x2apic_extra_bits);
42 49
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
642 */ 649 */
643int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) 650int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
644{ 651{
652 unsigned long real_uv_nmi;
653 int bid;
654
645 if (reason != DIE_NMIUNKNOWN) 655 if (reason != DIE_NMIUNKNOWN)
646 return NOTIFY_OK; 656 return NOTIFY_OK;
647 657
648 if (in_crash_kexec) 658 if (in_crash_kexec)
649 /* do nothing if entering the crash kernel */ 659 /* do nothing if entering the crash kernel */
650 return NOTIFY_OK; 660 return NOTIFY_OK;
661
651 /* 662 /*
652 * Use a lock so only one cpu prints at a time 663 * Each blade has an MMR that indicates when an NMI has been sent
653 * to prevent intermixed output. 664 * to cpus on the blade. If an NMI is detected, atomically
665 * clear the MMR and update a per-blade NMI count used to
666 * cause each cpu on the blade to notice a new NMI.
667 */
668 bid = uv_numa_blade_id();
669 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
670
671 if (unlikely(real_uv_nmi)) {
672 spin_lock(&uv_blade_info[bid].nmi_lock);
673 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
674 if (real_uv_nmi) {
675 uv_blade_info[bid].nmi_count++;
676 uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
677 }
678 spin_unlock(&uv_blade_info[bid].nmi_lock);
679 }
680
681 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
682 return NOTIFY_DONE;
683
684 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
685
686 /*
687 * Use a lock so only one cpu prints at a time.
688 * This prevents intermixed output.
654 */ 689 */
655 spin_lock(&uv_nmi_lock); 690 spin_lock(&uv_nmi_lock);
656 pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); 691 pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
657 dump_stack(); 692 dump_stack();
658 spin_unlock(&uv_nmi_lock); 693 spin_unlock(&uv_nmi_lock);
659 694
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
661} 696}
662 697
663static struct notifier_block uv_dump_stack_nmi_nb = { 698static struct notifier_block uv_dump_stack_nmi_nb = {
664 .notifier_call = uv_handle_nmi 699 .notifier_call = uv_handle_nmi,
700 .priority = NMI_LOCAL_LOW_PRIOR - 1,
665}; 701};
666 702
667void uv_register_nmi_notifier(void) 703void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
720 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); 756 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
721 757
722 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 758 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
723 uv_blade_info = kmalloc(bytes, GFP_KERNEL); 759 uv_blade_info = kzalloc(bytes, GFP_KERNEL);
724 BUG_ON(!uv_blade_info); 760 BUG_ON(!uv_blade_info);
761
725 for (blade = 0; blade < uv_num_possible_blades(); blade++) 762 for (blade = 0; blade < uv_num_possible_blades(); blade++)
726 uv_blade_info[blade].memory_nid = -1; 763 uv_blade_info[blade].memory_nid = -1;
727 764
@@ -747,6 +784,7 @@ void __init uv_system_init(void)
747 uv_blade_info[blade].pnode = pnode; 784 uv_blade_info[blade].pnode = pnode;
748 uv_blade_info[blade].nr_possible_cpus = 0; 785 uv_blade_info[blade].nr_possible_cpus = 0;
749 uv_blade_info[blade].nr_online_cpus = 0; 786 uv_blade_info[blade].nr_online_cpus = 0;
787 spin_lock_init(&uv_blade_info[blade].nmi_lock);
750 max_pnode = max(pnode, max_pnode); 788 max_pnode = max(pnode, max_pnode);
751 blade++; 789 blade++;
752 } 790 }
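
The UV NMI handler above keeps a per-blade nmi_count next to a per-CPU cpu_last_nmi_count, so that once the spinlock-protected MMR test-and-clear detects a new NMI, every CPU on the blade notices it exactly once. A simplified single-threaded sketch of that compare-and-catch-up step follows; the per-CPU variables, the MMR access, and the locking are left out, and the names are chosen for the example only.

    #include <stdio.h>

    /* Shared count, bumped when the MMR check detects a new NMI. */
    static unsigned long blade_nmi_count;
    /* Per-CPU in the real code; one copy is enough for this sketch. */
    static unsigned long cpu_last_nmi_count;

    static int handle_nmi(void)
    {
        if (cpu_last_nmi_count == blade_nmi_count)
            return 0;                   /* nothing new for this CPU */

        cpu_last_nmi_count = blade_nmi_count;
        printf("new NMI seen, dumping stack\n");
        return 1;
    }

    int main(void)
    {
        handle_nmi();        /* nothing pending: does no work         */
        blade_nmi_count++;   /* pretend the MMR test-and-clear fired  */
        handle_nmi();        /* reacts exactly once                   */
        handle_nmi();        /* already caught up: does nothing again */
        return 0;
    }
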
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index adee12e0da1f..3bfa02235965 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1238,7 +1238,6 @@ static int suspend(int vetoable)
1238 dpm_suspend_noirq(PMSG_SUSPEND); 1238 dpm_suspend_noirq(PMSG_SUSPEND);
1239 1239
1240 local_irq_disable(); 1240 local_irq_disable();
1241 sysdev_suspend(PMSG_SUSPEND);
1242 syscore_suspend(); 1241 syscore_suspend();
1243 1242
1244 local_irq_enable(); 1243 local_irq_enable();
@@ -1258,7 +1257,6 @@ static int suspend(int vetoable)
1258 err = (err == APM_SUCCESS) ? 0 : -EIO; 1257 err = (err == APM_SUCCESS) ? 0 : -EIO;
1259 1258
1260 syscore_resume(); 1259 syscore_resume();
1261 sysdev_resume();
1262 local_irq_enable(); 1260 local_irq_enable();
1263 1261
1264 dpm_resume_noirq(PMSG_RESUME); 1262 dpm_resume_noirq(PMSG_RESUME);
@@ -1282,7 +1280,6 @@ static void standby(void)
1282 dpm_suspend_noirq(PMSG_SUSPEND); 1280 dpm_suspend_noirq(PMSG_SUSPEND);
1283 1281
1284 local_irq_disable(); 1282 local_irq_disable();
1285 sysdev_suspend(PMSG_SUSPEND);
1286 syscore_suspend(); 1283 syscore_suspend();
1287 local_irq_enable(); 1284 local_irq_enable();
1288 1285
@@ -1292,7 +1289,6 @@ static void standby(void)
1292 1289
1293 local_irq_disable(); 1290 local_irq_disable();
1294 syscore_resume(); 1291 syscore_resume();
1295 sysdev_resume();
1296 local_irq_enable(); 1292 local_irq_enable();
1297 1293
1298 dpm_resume_noirq(PMSG_RESUME); 1294 dpm_resume_noirq(PMSG_RESUME);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 3f0ebe429a01..6042981d0309 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
30 30
31obj-$(CONFIG_X86_MCE) += mcheck/ 31obj-$(CONFIG_X86_MCE) += mcheck/
32obj-$(CONFIG_MTRR) += mtrr/ 32obj-$(CONFIG_MTRR) += mtrr/
33obj-$(CONFIG_CPU_FREQ) += cpufreq/
34 33
35obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 34obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
36 35
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb9eb29a52dd..6f9d1f6063e9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
613#endif 613#endif
614 614
615 /* As a rule processors have APIC timer running in deep C states */ 615 /* As a rule processors have APIC timer running in deep C states */
616 if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 616 if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
617 set_cpu_cap(c, X86_FEATURE_ARAT); 617 set_cpu_cap(c, X86_FEATURE_ARAT);
618 618
619 /* 619 /*
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
698 */ 698 */
699 699
700const int amd_erratum_400[] = 700const int amd_erratum_400[] =
701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), 701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
703EXPORT_SYMBOL_GPL(amd_erratum_400); 703EXPORT_SYMBOL_GPL(amd_erratum_400);
704 704
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e2ced0074a45..173f3a3fa1a6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
565 565
566 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); 566 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
567 567
568 if (eax > 0) 568 c->x86_capability[9] = ebx;
569 c->x86_capability[9] = ebx;
570 } 569 }
571 570
572 /* AMD-defined flags: level 0x80000001 */ 571 /* AMD-defined flags: level 0x80000001 */
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
deleted file mode 100644
index 870e6cc6ad28..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ /dev/null
@@ -1,266 +0,0 @@
1#
2# CPU Frequency scaling
3#
4
5menu "CPU Frequency scaling"
6
7source "drivers/cpufreq/Kconfig"
8
9if CPU_FREQ
10
11comment "CPUFreq processor drivers"
12
13config X86_PCC_CPUFREQ
14 tristate "Processor Clocking Control interface driver"
15 depends on ACPI && ACPI_PROCESSOR
16 help
17 This driver adds support for the PCC interface.
18
19 For details, take a look at:
20 <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
21
22 To compile this driver as a module, choose M here: the
23 module will be called pcc-cpufreq.
24
25 If in doubt, say N.
26
27config X86_ACPI_CPUFREQ
28 tristate "ACPI Processor P-States driver"
29 select CPU_FREQ_TABLE
30 depends on ACPI_PROCESSOR
31 help
32 This driver adds a CPUFreq driver which utilizes the ACPI
33 Processor Performance States.
34 This driver also supports Intel Enhanced Speedstep.
35
36 To compile this driver as a module, choose M here: the
37 module will be called acpi-cpufreq.
38
39 For details, take a look at <file:Documentation/cpu-freq/>.
40
41 If in doubt, say N.
42
43config ELAN_CPUFREQ
44 tristate "AMD Elan SC400 and SC410"
45 select CPU_FREQ_TABLE
46 depends on X86_ELAN
47 ---help---
48 This adds the CPUFreq driver for AMD Elan SC400 and SC410
49 processors.
50
51 You need to specify the processor maximum speed as boot
52 parameter: elanfreq=maxspeed (in kHz) or as module
53 parameter "max_freq".
54
55 For details, take a look at <file:Documentation/cpu-freq/>.
56
57 If in doubt, say N.
58
59config SC520_CPUFREQ
60 tristate "AMD Elan SC520"
61 select CPU_FREQ_TABLE
62 depends on X86_ELAN
63 ---help---
64 This adds the CPUFreq driver for AMD Elan SC520 processor.
65
66 For details, take a look at <file:Documentation/cpu-freq/>.
67
68 If in doubt, say N.
69
70
71config X86_POWERNOW_K6
72 tristate "AMD Mobile K6-2/K6-3 PowerNow!"
73 select CPU_FREQ_TABLE
74 depends on X86_32
75 help
76 This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
77 AMD K6-3+ processors.
78
79 For details, take a look at <file:Documentation/cpu-freq/>.
80
81 If in doubt, say N.
82
83config X86_POWERNOW_K7
84 tristate "AMD Mobile Athlon/Duron PowerNow!"
85 select CPU_FREQ_TABLE
86 depends on X86_32
87 help
88 This adds the CPUFreq driver for mobile AMD K7 mobile processors.
89
90 For details, take a look at <file:Documentation/cpu-freq/>.
91
92 If in doubt, say N.
93
94config X86_POWERNOW_K7_ACPI
95 bool
96 depends on X86_POWERNOW_K7 && ACPI_PROCESSOR
97 depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m)
98 depends on X86_32
99 default y
100
101config X86_POWERNOW_K8
102 tristate "AMD Opteron/Athlon64 PowerNow!"
103 select CPU_FREQ_TABLE
104 depends on ACPI && ACPI_PROCESSOR
105 help
106 This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
107
108 To compile this driver as a module, choose M here: the
109 module will be called powernow-k8.
110
111 For details, take a look at <file:Documentation/cpu-freq/>.
112
113config X86_GX_SUSPMOD
114 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
115 depends on X86_32 && PCI
116 help
117 This add the CPUFreq driver for NatSemi Geode processors which
118 support suspend modulation.
119
120 For details, take a look at <file:Documentation/cpu-freq/>.
121
122 If in doubt, say N.
123
124config X86_SPEEDSTEP_CENTRINO
125 tristate "Intel Enhanced SpeedStep (deprecated)"
126 select CPU_FREQ_TABLE
127 select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
128 depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
129 help
130 This is deprecated and this functionality is now merged into
131 acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
132 speedstep_centrino.
133 This adds the CPUFreq driver for Enhanced SpeedStep enabled
134 mobile CPUs. This means Intel Pentium M (Centrino) CPUs
135 or 64bit enabled Intel Xeons.
136
137 To compile this driver as a module, choose M here: the
138 module will be called speedstep-centrino.
139
140 For details, take a look at <file:Documentation/cpu-freq/>.
141
142 If in doubt, say N.
143
144config X86_SPEEDSTEP_CENTRINO_TABLE
145 bool "Built-in tables for Banias CPUs"
146 depends on X86_32 && X86_SPEEDSTEP_CENTRINO
147 default y
148 help
149 Use built-in tables for Banias CPUs if ACPI encoding
150 is not available.
151
152 If in doubt, say N.
153
154config X86_SPEEDSTEP_ICH
155 tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
156 select CPU_FREQ_TABLE
157 depends on X86_32
158 help
159 This adds the CPUFreq driver for certain mobile Intel Pentium III
160 (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
161 mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
162 ICH3 or ICH4 southbridge.
163
164 For details, take a look at <file:Documentation/cpu-freq/>.
165
166 If in doubt, say N.
167
168config X86_SPEEDSTEP_SMI
169 tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
170 select CPU_FREQ_TABLE
171 depends on X86_32 && EXPERIMENTAL
172 help
173 This adds the CPUFreq driver for certain mobile Intel Pentium III
174 (Coppermine), all mobile Intel Pentium III-M (Tualatin)
175 on systems which have an Intel 440BX/ZX/MX southbridge.
176
177 For details, take a look at <file:Documentation/cpu-freq/>.
178
179 If in doubt, say N.
180
181config X86_P4_CLOCKMOD
182 tristate "Intel Pentium 4 clock modulation"
183 select CPU_FREQ_TABLE
184 help
185 This adds the CPUFreq driver for Intel Pentium 4 / XEON
186 processors. When enabled it will lower CPU temperature by skipping
187 clocks.
188
189 This driver should be only used in exceptional
190 circumstances when very low power is needed because it causes severe
191 slowdowns and noticeable latencies. Normally Speedstep should be used
192 instead.
193
194 To compile this driver as a module, choose M here: the
195 module will be called p4-clockmod.
196
197 For details, take a look at <file:Documentation/cpu-freq/>.
198
199 Unless you are absolutely sure say N.
200
201config X86_CPUFREQ_NFORCE2
202 tristate "nVidia nForce2 FSB changing"
203 depends on X86_32 && EXPERIMENTAL
204 help
205 This adds the CPUFreq driver for FSB changing on nVidia nForce2
206 platforms.
207
208 For details, take a look at <file:Documentation/cpu-freq/>.
209
210 If in doubt, say N.
211
212config X86_LONGRUN
213 tristate "Transmeta LongRun"
214 depends on X86_32
215 help
216 This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors
217 which support LongRun.
218
219 For details, take a look at <file:Documentation/cpu-freq/>.
220
221 If in doubt, say N.
222
223config X86_LONGHAUL
224 tristate "VIA Cyrix III Longhaul"
225 select CPU_FREQ_TABLE
226 depends on X86_32 && ACPI_PROCESSOR
227 help
228 This adds the CPUFreq driver for VIA Samuel/CyrixIII,
229 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
230 processors.
231
232 For details, take a look at <file:Documentation/cpu-freq/>.
233
234 If in doubt, say N.
235
236config X86_E_POWERSAVER
237 tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
238 select CPU_FREQ_TABLE
239 depends on X86_32 && EXPERIMENTAL
240 help
241 This adds the CPUFreq driver for VIA C7 processors. However, this driver
242 does not have any safeguards to prevent operating the CPU out of spec
243 and is thus considered dangerous. Please use the regular ACPI cpufreq
244 driver, enabled by CONFIG_X86_ACPI_CPUFREQ.
245
246 If in doubt, say N.
247
248comment "shared options"
249
250config X86_SPEEDSTEP_LIB
251 tristate
252 default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
253
254config X86_SPEEDSTEP_RELAXED_CAP_CHECK
255 bool "Relaxed speedstep capability checks"
256 depends on X86_32 && (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
257 help
258 Don't perform all checks for a speedstep capable system which would
259 normally be done. Some ancient or strange systems, though speedstep
260 capable, don't always indicate that they are speedstep capable. This
261 option lets the probing code bypass some of those checks if the
262 parameter "relaxed_check=1" is passed to the module.
263
264endif # CPU_FREQ
265
266endmenu
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
deleted file mode 100644
index bd54bf67e6fb..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
1# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
2# K8 systems. ACPI is preferred to all other hardware-specific drivers.
3# speedstep-* is preferred over p4-clockmod.
4
5obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
6obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
7obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
8obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
9obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
10obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
11obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
12obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
13obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
14obj-$(CONFIG_X86_LONGRUN) += longrun.o
15obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
16obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
17obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
18obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
19obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
20obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
21obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
deleted file mode 100644
index a2baafb2fe6d..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ /dev/null
@@ -1,776 +0,0 @@
1/*
2 * acpi-cpufreq.c - ACPI Processor P-States Driver
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
24 *
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/smp.h>
32#include <linux/sched.h>
33#include <linux/cpufreq.h>
34#include <linux/compiler.h>
35#include <linux/dmi.h>
36#include <linux/slab.h>
37
38#include <linux/acpi.h>
39#include <linux/io.h>
40#include <linux/delay.h>
41#include <linux/uaccess.h>
42
43#include <acpi/processor.h>
44
45#include <asm/msr.h>
46#include <asm/processor.h>
47#include <asm/cpufeature.h>
48#include "mperf.h"
49
50#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
51 "acpi-cpufreq", msg)
52
53MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
54MODULE_DESCRIPTION("ACPI Processor P-States Driver");
55MODULE_LICENSE("GPL");
56
57enum {
58 UNDEFINED_CAPABLE = 0,
59 SYSTEM_INTEL_MSR_CAPABLE,
60 SYSTEM_IO_CAPABLE,
61};
62
63#define INTEL_MSR_RANGE (0xffff)
64
65struct acpi_cpufreq_data {
66 struct acpi_processor_performance *acpi_data;
67 struct cpufreq_frequency_table *freq_table;
68 unsigned int resume;
69 unsigned int cpu_feature;
70};
71
72static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
73
74/* acpi_perf_data is a pointer to percpu data. */
75static struct acpi_processor_performance __percpu *acpi_perf_data;
76
77static struct cpufreq_driver acpi_cpufreq_driver;
78
79static unsigned int acpi_pstate_strict;
80
81static int check_est_cpu(unsigned int cpuid)
82{
83 struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
84
85 return cpu_has(cpu, X86_FEATURE_EST);
86}
87
88static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
89{
90 struct acpi_processor_performance *perf;
91 int i;
92
93 perf = data->acpi_data;
94
95 for (i = 0; i < perf->state_count; i++) {
96 if (value == perf->states[i].status)
97 return data->freq_table[i].frequency;
98 }
99 return 0;
100}
101
102static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
103{
104 int i;
105 struct acpi_processor_performance *perf;
106
107 msr &= INTEL_MSR_RANGE;
108 perf = data->acpi_data;
109
110 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
111 if (msr == perf->states[data->freq_table[i].index].status)
112 return data->freq_table[i].frequency;
113 }
114 return data->freq_table[0].frequency;
115}
116
117static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
118{
119 switch (data->cpu_feature) {
120 case SYSTEM_INTEL_MSR_CAPABLE:
121 return extract_msr(val, data);
122 case SYSTEM_IO_CAPABLE:
123 return extract_io(val, data);
124 default:
125 return 0;
126 }
127}
128
129struct msr_addr {
130 u32 reg;
131};
132
133struct io_addr {
134 u16 port;
135 u8 bit_width;
136};
137
138struct drv_cmd {
139 unsigned int type;
140 const struct cpumask *mask;
141 union {
142 struct msr_addr msr;
143 struct io_addr io;
144 } addr;
145 u32 val;
146};
147
148/* Called via smp_call_function_single(), on the target CPU */
149static void do_drv_read(void *_cmd)
150{
151 struct drv_cmd *cmd = _cmd;
152 u32 h;
153
154 switch (cmd->type) {
155 case SYSTEM_INTEL_MSR_CAPABLE:
156 rdmsr(cmd->addr.msr.reg, cmd->val, h);
157 break;
158 case SYSTEM_IO_CAPABLE:
159 acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
160 &cmd->val,
161 (u32)cmd->addr.io.bit_width);
162 break;
163 default:
164 break;
165 }
166}
167
168/* Called via smp_call_function_many(), on the target CPUs */
169static void do_drv_write(void *_cmd)
170{
171 struct drv_cmd *cmd = _cmd;
172 u32 lo, hi;
173
174 switch (cmd->type) {
175 case SYSTEM_INTEL_MSR_CAPABLE:
176 rdmsr(cmd->addr.msr.reg, lo, hi);
177 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
178 wrmsr(cmd->addr.msr.reg, lo, hi);
179 break;
180 case SYSTEM_IO_CAPABLE:
181 acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
182 cmd->val,
183 (u32)cmd->addr.io.bit_width);
184 break;
185 default:
186 break;
187 }
188}
189
190static void drv_read(struct drv_cmd *cmd)
191{
192 int err;
193 cmd->val = 0;
194
195 err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
196 WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
197}
198
199static void drv_write(struct drv_cmd *cmd)
200{
201 int this_cpu;
202
203 this_cpu = get_cpu();
204 if (cpumask_test_cpu(this_cpu, cmd->mask))
205 do_drv_write(cmd);
206 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
207 put_cpu();
208}
209
210static u32 get_cur_val(const struct cpumask *mask)
211{
212 struct acpi_processor_performance *perf;
213 struct drv_cmd cmd;
214
215 if (unlikely(cpumask_empty(mask)))
216 return 0;
217
218 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
219 case SYSTEM_INTEL_MSR_CAPABLE:
220 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
221 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
222 break;
223 case SYSTEM_IO_CAPABLE:
224 cmd.type = SYSTEM_IO_CAPABLE;
225 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
226 cmd.addr.io.port = perf->control_register.address;
227 cmd.addr.io.bit_width = perf->control_register.bit_width;
228 break;
229 default:
230 return 0;
231 }
232
233 cmd.mask = mask;
234 drv_read(&cmd);
235
236 dprintk("get_cur_val = %u\n", cmd.val);
237
238 return cmd.val;
239}
240
241static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
242{
243 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
244 unsigned int freq;
245 unsigned int cached_freq;
246
247 dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
248
249 if (unlikely(data == NULL ||
250 data->acpi_data == NULL || data->freq_table == NULL)) {
251 return 0;
252 }
253
254 cached_freq = data->freq_table[data->acpi_data->state].frequency;
255 freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
256 if (freq != cached_freq) {
257 /*
258 * The dreaded BIOS frequency change behind our back.
259 * Force set the frequency on next target call.
260 */
261 data->resume = 1;
262 }
263
264 dprintk("cur freq = %u\n", freq);
265
266 return freq;
267}
268
269static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
270 struct acpi_cpufreq_data *data)
271{
272 unsigned int cur_freq;
273 unsigned int i;
274
275 for (i = 0; i < 100; i++) {
276 cur_freq = extract_freq(get_cur_val(mask), data);
277 if (cur_freq == freq)
278 return 1;
279 udelay(10);
280 }
281 return 0;
282}
283
284static int acpi_cpufreq_target(struct cpufreq_policy *policy,
285 unsigned int target_freq, unsigned int relation)
286{
287 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
288 struct acpi_processor_performance *perf;
289 struct cpufreq_freqs freqs;
290 struct drv_cmd cmd;
291 unsigned int next_state = 0; /* Index into freq_table */
292 unsigned int next_perf_state = 0; /* Index into perf table */
293 unsigned int i;
294 int result = 0;
295
296 dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
297
298 if (unlikely(data == NULL ||
299 data->acpi_data == NULL || data->freq_table == NULL)) {
300 return -ENODEV;
301 }
302
303 perf = data->acpi_data;
304 result = cpufreq_frequency_table_target(policy,
305 data->freq_table,
306 target_freq,
307 relation, &next_state);
308 if (unlikely(result)) {
309 result = -ENODEV;
310 goto out;
311 }
312
313 next_perf_state = data->freq_table[next_state].index;
314 if (perf->state == next_perf_state) {
315 if (unlikely(data->resume)) {
316 dprintk("Called after resume, resetting to P%d\n",
317 next_perf_state);
318 data->resume = 0;
319 } else {
320 dprintk("Already at target state (P%d)\n",
321 next_perf_state);
322 goto out;
323 }
324 }
325
326 switch (data->cpu_feature) {
327 case SYSTEM_INTEL_MSR_CAPABLE:
328 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
329 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
330 cmd.val = (u32) perf->states[next_perf_state].control;
331 break;
332 case SYSTEM_IO_CAPABLE:
333 cmd.type = SYSTEM_IO_CAPABLE;
334 cmd.addr.io.port = perf->control_register.address;
335 cmd.addr.io.bit_width = perf->control_register.bit_width;
336 cmd.val = (u32) perf->states[next_perf_state].control;
337 break;
338 default:
339 result = -ENODEV;
340 goto out;
341 }
342
343 /* cpufreq holds the hotplug lock, so we are safe from here on */
344 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
345 cmd.mask = policy->cpus;
346 else
347 cmd.mask = cpumask_of(policy->cpu);
348
349 freqs.old = perf->states[perf->state].core_frequency * 1000;
350 freqs.new = data->freq_table[next_state].frequency;
351 for_each_cpu(i, policy->cpus) {
352 freqs.cpu = i;
353 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
354 }
355
356 drv_write(&cmd);
357
358 if (acpi_pstate_strict) {
359 if (!check_freqs(cmd.mask, freqs.new, data)) {
360 dprintk("acpi_cpufreq_target failed (%d)\n",
361 policy->cpu);
362 result = -EAGAIN;
363 goto out;
364 }
365 }
366
367 for_each_cpu(i, policy->cpus) {
368 freqs.cpu = i;
369 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
370 }
371 perf->state = next_perf_state;
372
373out:
374 return result;
375}
376
377static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
378{
379 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
380
381 dprintk("acpi_cpufreq_verify\n");
382
383 return cpufreq_frequency_table_verify(policy, data->freq_table);
384}
385
386static unsigned long
387acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
388{
389 struct acpi_processor_performance *perf = data->acpi_data;
390
391 if (cpu_khz) {
392 /* search the closest match to cpu_khz */
393 unsigned int i;
394 unsigned long freq;
395 unsigned long freqn = perf->states[0].core_frequency * 1000;
396
397 for (i = 0; i < (perf->state_count-1); i++) {
398 freq = freqn;
399 freqn = perf->states[i+1].core_frequency * 1000;
400 if ((2 * cpu_khz) > (freqn + freq)) {
401 perf->state = i;
402 return freq;
403 }
404 }
405 perf->state = perf->state_count-1;
406 return freqn;
407 } else {
408 /* assume CPU is at P0... */
409 perf->state = 0;
410 return perf->states[0].core_frequency * 1000;
411 }
412}
413
414static void free_acpi_perf_data(void)
415{
416 unsigned int i;
417
418 /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
419 for_each_possible_cpu(i)
420 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
421 ->shared_cpu_map);
422 free_percpu(acpi_perf_data);
423}
424
425/*
426 * acpi_cpufreq_early_init - initialize ACPI P-States library
427 *
428 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
429 * in order to determine correct frequency and voltage pairings. We can
430 * do _PDC and _PSD and find out the processor dependency for the
431 * actual init that will happen later...
432 */
433static int __init acpi_cpufreq_early_init(void)
434{
435 unsigned int i;
436 dprintk("acpi_cpufreq_early_init\n");
437
438 acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
439 if (!acpi_perf_data) {
440 dprintk("Memory allocation error for acpi_perf_data.\n");
441 return -ENOMEM;
442 }
443 for_each_possible_cpu(i) {
444 if (!zalloc_cpumask_var_node(
445 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
446 GFP_KERNEL, cpu_to_node(i))) {
447
448 /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
449 free_acpi_perf_data();
450 return -ENOMEM;
451 }
452 }
453
454 /* Do initialization in ACPI core */
455 acpi_processor_preregister_performance(acpi_perf_data);
456 return 0;
457}
458
459#ifdef CONFIG_SMP
460/*
461 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
462 * or do it in BIOS firmware and won't inform about it to OS. If not
463 * detected, this has a side effect of making CPU run at a different speed
464 * than OS intended it to run at. Detect it and handle it cleanly.
465 */
466static int bios_with_sw_any_bug;
467
468static int sw_any_bug_found(const struct dmi_system_id *d)
469{
470 bios_with_sw_any_bug = 1;
471 return 0;
472}
473
474static const struct dmi_system_id sw_any_bug_dmi_table[] = {
475 {
476 .callback = sw_any_bug_found,
477 .ident = "Supermicro Server X6DLP",
478 .matches = {
479 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
480 DMI_MATCH(DMI_BIOS_VERSION, "080010"),
481 DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
482 },
483 },
484 { }
485};
486
487static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
488{
489 /* Intel Xeon Processor 7100 Series Specification Update
490 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
491 * AL30: A Machine Check Exception (MCE) Occurring during an
492 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
493 * Both Processor Cores to Lock Up. */
494 if (c->x86_vendor == X86_VENDOR_INTEL) {
495 if ((c->x86 == 15) &&
496 (c->x86_model == 6) &&
497 (c->x86_mask == 8)) {
498 printk(KERN_INFO "acpi-cpufreq: Intel(R) "
499 "Xeon(R) 7100 Errata AL30, processors may "
500 "lock up on frequency changes: disabling "
501 "acpi-cpufreq.\n");
502 return -ENODEV;
503 }
504 }
505 return 0;
506}
507#endif
508
509static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
510{
511 unsigned int i;
512 unsigned int valid_states = 0;
513 unsigned int cpu = policy->cpu;
514 struct acpi_cpufreq_data *data;
515 unsigned int result = 0;
516 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
517 struct acpi_processor_performance *perf;
518#ifdef CONFIG_SMP
519 static int blacklisted;
520#endif
521
522 dprintk("acpi_cpufreq_cpu_init\n");
523
524#ifdef CONFIG_SMP
525 if (blacklisted)
526 return blacklisted;
527 blacklisted = acpi_cpufreq_blacklist(c);
528 if (blacklisted)
529 return blacklisted;
530#endif
531
532 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
533 if (!data)
534 return -ENOMEM;
535
536 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
537 per_cpu(acfreq_data, cpu) = data;
538
539 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
540 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
541
542 result = acpi_processor_register_performance(data->acpi_data, cpu);
543 if (result)
544 goto err_free;
545
546 perf = data->acpi_data;
547 policy->shared_type = perf->shared_type;
548
549 /*
550 * Will let policy->cpus know about dependency only when software
551 * coordination is required.
552 */
553 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
554 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
555 cpumask_copy(policy->cpus, perf->shared_cpu_map);
556 }
557 cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
558
559#ifdef CONFIG_SMP
560 dmi_check_system(sw_any_bug_dmi_table);
561 if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
562 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
563 cpumask_copy(policy->cpus, cpu_core_mask(cpu));
564 }
565#endif
566
567 /* capability check */
568 if (perf->state_count <= 1) {
569 dprintk("No P-States\n");
570 result = -ENODEV;
571 goto err_unreg;
572 }
573
574 if (perf->control_register.space_id != perf->status_register.space_id) {
575 result = -ENODEV;
576 goto err_unreg;
577 }
578
579 switch (perf->control_register.space_id) {
580 case ACPI_ADR_SPACE_SYSTEM_IO:
581 dprintk("SYSTEM IO addr space\n");
582 data->cpu_feature = SYSTEM_IO_CAPABLE;
583 break;
584 case ACPI_ADR_SPACE_FIXED_HARDWARE:
585 dprintk("HARDWARE addr space\n");
586 if (!check_est_cpu(cpu)) {
587 result = -ENODEV;
588 goto err_unreg;
589 }
590 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
591 break;
592 default:
593 dprintk("Unknown addr space %d\n",
594 (u32) (perf->control_register.space_id));
595 result = -ENODEV;
596 goto err_unreg;
597 }
598
599 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
600 (perf->state_count+1), GFP_KERNEL);
601 if (!data->freq_table) {
602 result = -ENOMEM;
603 goto err_unreg;
604 }
605
606 /* detect transition latency */
607 policy->cpuinfo.transition_latency = 0;
608 for (i = 0; i < perf->state_count; i++) {
609 if ((perf->states[i].transition_latency * 1000) >
610 policy->cpuinfo.transition_latency)
611 policy->cpuinfo.transition_latency =
612 perf->states[i].transition_latency * 1000;
613 }
614
615 /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
616 if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
617 policy->cpuinfo.transition_latency > 20 * 1000) {
618 policy->cpuinfo.transition_latency = 20 * 1000;
619 printk_once(KERN_INFO
620 "P-state transition latency capped at 20 uS\n");
621 }
622
623 /* table init */
624 for (i = 0; i < perf->state_count; i++) {
625 if (i > 0 && perf->states[i].core_frequency >=
626 data->freq_table[valid_states-1].frequency / 1000)
627 continue;
628
629 data->freq_table[valid_states].index = i;
630 data->freq_table[valid_states].frequency =
631 perf->states[i].core_frequency * 1000;
632 valid_states++;
633 }
634 data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
635 perf->state = 0;
636
637 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
638 if (result)
639 goto err_freqfree;
640
641 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
642 printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
643
644 switch (perf->control_register.space_id) {
645 case ACPI_ADR_SPACE_SYSTEM_IO:
646 /* Current speed is unknown and not detectable by IO port */
647 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
648 break;
649 case ACPI_ADR_SPACE_FIXED_HARDWARE:
650 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
651 policy->cur = get_cur_freq_on_cpu(cpu);
652 break;
653 default:
654 break;
655 }
656
657 /* notify BIOS that we exist */
658 acpi_processor_notify_smm(THIS_MODULE);
659
660 /* Check for APERF/MPERF support in hardware */
661 if (cpu_has(c, X86_FEATURE_APERFMPERF))
662 acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
663
664 dprintk("CPU%u - ACPI performance management activated.\n", cpu);
665 for (i = 0; i < perf->state_count; i++)
666 dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
667 (i == perf->state ? '*' : ' '), i,
668 (u32) perf->states[i].core_frequency,
669 (u32) perf->states[i].power,
670 (u32) perf->states[i].transition_latency);
671
672 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
673
674 /*
675 * the first call to ->target() should result in us actually
676 * writing something to the appropriate registers.
677 */
678 data->resume = 1;
679
680 return result;
681
682err_freqfree:
683 kfree(data->freq_table);
684err_unreg:
685 acpi_processor_unregister_performance(perf, cpu);
686err_free:
687 kfree(data);
688 per_cpu(acfreq_data, cpu) = NULL;
689
690 return result;
691}
692
693static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
694{
695 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
696
697 dprintk("acpi_cpufreq_cpu_exit\n");
698
699 if (data) {
700 cpufreq_frequency_table_put_attr(policy->cpu);
701 per_cpu(acfreq_data, policy->cpu) = NULL;
702 acpi_processor_unregister_performance(data->acpi_data,
703 policy->cpu);
704 kfree(data->freq_table);
705 kfree(data);
706 }
707
708 return 0;
709}
710
711static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
712{
713 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
714
715 dprintk("acpi_cpufreq_resume\n");
716
717 data->resume = 1;
718
719 return 0;
720}
721
722static struct freq_attr *acpi_cpufreq_attr[] = {
723 &cpufreq_freq_attr_scaling_available_freqs,
724 NULL,
725};
726
727static struct cpufreq_driver acpi_cpufreq_driver = {
728 .verify = acpi_cpufreq_verify,
729 .target = acpi_cpufreq_target,
730 .bios_limit = acpi_processor_get_bios_limit,
731 .init = acpi_cpufreq_cpu_init,
732 .exit = acpi_cpufreq_cpu_exit,
733 .resume = acpi_cpufreq_resume,
734 .name = "acpi-cpufreq",
735 .owner = THIS_MODULE,
736 .attr = acpi_cpufreq_attr,
737};
738
739static int __init acpi_cpufreq_init(void)
740{
741 int ret;
742
743 if (acpi_disabled)
744 return 0;
745
746 dprintk("acpi_cpufreq_init\n");
747
748 ret = acpi_cpufreq_early_init();
749 if (ret)
750 return ret;
751
752 ret = cpufreq_register_driver(&acpi_cpufreq_driver);
753 if (ret)
754 free_acpi_perf_data();
755
756 return ret;
757}
758
759static void __exit acpi_cpufreq_exit(void)
760{
761 dprintk("acpi_cpufreq_exit\n");
762
763 cpufreq_unregister_driver(&acpi_cpufreq_driver);
764
765 free_percpu(acpi_perf_data);
766}
767
768module_param(acpi_pstate_strict, uint, 0644);
769MODULE_PARM_DESC(acpi_pstate_strict,
770 "value 0 or non-zero. non-zero -> strict ACPI checks are "
771 "performed during frequency changes.");
772
773late_initcall(acpi_cpufreq_init);
774module_exit(acpi_cpufreq_exit);
775
776MODULE_ALIAS("acpi");
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
deleted file mode 100644
index 141abebc4516..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ /dev/null
@@ -1,446 +0,0 @@
1/*
2 * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 * Based upon reverse engineered information
6 *
7 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17
18#define NFORCE2_XTAL 25
19#define NFORCE2_BOOTFSB 0x48
20#define NFORCE2_PLLENABLE 0xa8
21#define NFORCE2_PLLREG 0xa4
22#define NFORCE2_PLLADR 0xa0
23#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div)
24
25#define NFORCE2_MIN_FSB 50
26#define NFORCE2_SAFE_DISTANCE 50
27
28/* Delay in ms between FSB changes */
29/* #define NFORCE2_DELAY 10 */
30
31/*
32 * nforce2_chipset:
33 * FSB is changed using the chipset
34 */
35static struct pci_dev *nforce2_dev;
36
37/* fid:
38 * multiplier * 10
39 */
40static int fid;
41
42/* min_fsb, max_fsb:
43 * minimum and maximum FSB (= FSB at boot time)
44 */
45static int min_fsb;
46static int max_fsb;
47
48MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
49MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver");
50MODULE_LICENSE("GPL");
51
52module_param(fid, int, 0444);
53module_param(min_fsb, int, 0444);
54
55MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
56MODULE_PARM_DESC(min_fsb,
57 "Minimum FSB to use, if not defined: current FSB - 50");
58
59#define PFX "cpufreq-nforce2: "
60#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
61 "cpufreq-nforce2", msg)
62
63/**
64 * nforce2_calc_fsb - calculate FSB
65 * @pll: PLL value
66 *
67 * Calculates FSB from PLL value
68 */
69static int nforce2_calc_fsb(int pll)
70{
71 unsigned char mul, div;
72
73 mul = (pll >> 8) & 0xff;
74 div = pll & 0xff;
75
76 if (div > 0)
77 return NFORCE2_XTAL * mul / div;
78
79 return 0;
80}
81
82/**
83 * nforce2_calc_pll - calculate PLL value
84 * @fsb: FSB
85 *
86 * Calculate PLL value for given FSB
87 */
88static int nforce2_calc_pll(unsigned int fsb)
89{
90 unsigned char xmul, xdiv;
91 unsigned char mul = 0, div = 0;
92 int tried = 0;
93
94 /* Try to calculate multiplier and divider up to 4 times */
95 while (((mul == 0) || (div == 0)) && (tried <= 3)) {
96 for (xdiv = 2; xdiv <= 0x80; xdiv++)
97 for (xmul = 1; xmul <= 0xfe; xmul++)
98 if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) ==
99 fsb + tried) {
100 mul = xmul;
101 div = xdiv;
102 }
103 tried++;
104 }
105
106 if ((mul == 0) || (div == 0))
107 return -1;
108
109 return NFORCE2_PLL(mul, div);
110}
111
112/**
113 * nforce2_write_pll - write PLL value to chipset
114 * @pll: PLL value
115 *
116 * Writes new FSB PLL value to chipset
117 */
118static void nforce2_write_pll(int pll)
119{
120 int temp;
121
122 /* Set the pll addr. to 0x00 */
123 pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0);
124
125 /* Now write the value in all 64 registers */
126 for (temp = 0; temp <= 0x3f; temp++)
127 pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
128
129 return;
130}
131
132/**
133 * nforce2_fsb_read - Read FSB
134 *
135 * Read FSB from chipset
136 * If bootfsb != 0, return FSB at boot-time
137 */
138static unsigned int nforce2_fsb_read(int bootfsb)
139{
140 struct pci_dev *nforce2_sub5;
141 u32 fsb, temp = 0;
142
143 /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
144 nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF,
145 PCI_ANY_ID, PCI_ANY_ID, NULL);
146 if (!nforce2_sub5)
147 return 0;
148
149 pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
150 fsb /= 1000000;
151
152 /* Check if PLL register is already set */
153 pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
154
155 if (bootfsb || !temp)
156 return fsb;
157
158 /* Use PLL register FSB value */
159 pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp);
160 fsb = nforce2_calc_fsb(temp);
161
162 return fsb;
163}
164
165/**
166 * nforce2_set_fsb - set new FSB
167 * @fsb: New FSB
168 *
169 * Sets new FSB
170 */
171static int nforce2_set_fsb(unsigned int fsb)
172{
173 u32 temp = 0;
174 unsigned int tfsb;
175 int diff;
176 int pll = 0;
177
178 if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
179 printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
180 return -EINVAL;
181 }
182
183 tfsb = nforce2_fsb_read(0);
184 if (!tfsb) {
185 printk(KERN_ERR PFX "Error while reading the FSB\n");
186 return -EINVAL;
187 }
188
189 /* First write? Then set actual value */
190 pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
191 if (!temp) {
192 pll = nforce2_calc_pll(tfsb);
193
194 if (pll < 0)
195 return -EINVAL;
196
197 nforce2_write_pll(pll);
198 }
199
200 /* Enable write access */
201 temp = 0x01;
202 pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp);
203
204 diff = tfsb - fsb;
205
206 if (!diff)
207 return 0;
208
209 while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) {
210 if (diff < 0)
211 tfsb++;
212 else
213 tfsb--;
214
215 /* Calculate the PLL reg. value */
216 pll = nforce2_calc_pll(tfsb);
217 if (pll == -1)
218 return -EINVAL;
219
220 nforce2_write_pll(pll);
221#ifdef NFORCE2_DELAY
222 mdelay(NFORCE2_DELAY);
223#endif
224 }
225
226 temp = 0x40;
227 pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp);
228
229 return 0;
230}
231
232/**
233 * nforce2_get - get the CPU frequency
234 * @cpu: CPU number
235 *
236 * Returns the CPU frequency
237 */
238static unsigned int nforce2_get(unsigned int cpu)
239{
240 if (cpu)
241 return 0;
242 return nforce2_fsb_read(0) * fid * 100;
243}
244
245/**
246 * nforce2_target - set a new CPUFreq policy
247 * @policy: new policy
248 * @target_freq: the target frequency
249 * @relation: how that frequency relates to achieved frequency
250 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
251 *
252 * Sets a new CPUFreq policy.
253 */
254static int nforce2_target(struct cpufreq_policy *policy,
255 unsigned int target_freq, unsigned int relation)
256{
257/* unsigned long flags; */
258 struct cpufreq_freqs freqs;
259 unsigned int target_fsb;
260
261 if ((target_freq > policy->max) || (target_freq < policy->min))
262 return -EINVAL;
263
264 target_fsb = target_freq / (fid * 100);
265
266 freqs.old = nforce2_get(policy->cpu);
267 freqs.new = target_fsb * fid * 100;
268 freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
269
270 if (freqs.old == freqs.new)
271 return 0;
272
273 dprintk("Old CPU frequency %d kHz, new %d kHz\n",
274 freqs.old, freqs.new);
275
276 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
277
278 /* Disable IRQs */
279 /* local_irq_save(flags); */
280
281 if (nforce2_set_fsb(target_fsb) < 0)
282 printk(KERN_ERR PFX "Changing FSB to %d failed\n",
283 target_fsb);
284 else
285 dprintk("Changed FSB successfully to %d\n",
286 target_fsb);
287
288 /* Enable IRQs */
289 /* local_irq_restore(flags); */
290
291 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
292
293 return 0;
294}
295
296/**
297 * nforce2_verify - verifies a new CPUFreq policy
298 * @policy: new policy
299 */
300static int nforce2_verify(struct cpufreq_policy *policy)
301{
302 unsigned int fsb_pol_max;
303
304 fsb_pol_max = policy->max / (fid * 100);
305
306 if (policy->min < (fsb_pol_max * fid * 100))
307 policy->max = (fsb_pol_max + 1) * fid * 100;
308
309 cpufreq_verify_within_limits(policy,
310 policy->cpuinfo.min_freq,
311 policy->cpuinfo.max_freq);
312 return 0;
313}
314
315static int nforce2_cpu_init(struct cpufreq_policy *policy)
316{
317 unsigned int fsb;
318 unsigned int rfid;
319
320 /* capability check */
321 if (policy->cpu != 0)
322 return -ENODEV;
323
324 /* Get current FSB */
325 fsb = nforce2_fsb_read(0);
326
327 if (!fsb)
328 return -EIO;
329
330 /* FIX: Get FID from CPU */
331 if (!fid) {
332 if (!cpu_khz) {
333 printk(KERN_WARNING PFX
334 "cpu_khz not set, can't calculate multiplier!\n");
335 return -ENODEV;
336 }
337
338 fid = cpu_khz / (fsb * 100);
339 rfid = fid % 5;
340
341 if (rfid) {
342 if (rfid > 2)
343 fid += 5 - rfid;
344 else
345 fid -= rfid;
346 }
347 }
348
349 printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
350 fid / 10, fid % 10);
351
352 /* Set maximum FSB to FSB at boot time */
353 max_fsb = nforce2_fsb_read(1);
354
355 if (!max_fsb)
356 return -EIO;
357
358 if (!min_fsb)
359 min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE;
360
361 if (min_fsb < NFORCE2_MIN_FSB)
362 min_fsb = NFORCE2_MIN_FSB;
363
364 /* cpuinfo and default policy values */
365 policy->cpuinfo.min_freq = min_fsb * fid * 100;
366 policy->cpuinfo.max_freq = max_fsb * fid * 100;
367 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
368 policy->cur = nforce2_get(policy->cpu);
369 policy->min = policy->cpuinfo.min_freq;
370 policy->max = policy->cpuinfo.max_freq;
371
372 return 0;
373}
374
375static int nforce2_cpu_exit(struct cpufreq_policy *policy)
376{
377 return 0;
378}
379
380static struct cpufreq_driver nforce2_driver = {
381 .name = "nforce2",
382 .verify = nforce2_verify,
383 .target = nforce2_target,
384 .get = nforce2_get,
385 .init = nforce2_cpu_init,
386 .exit = nforce2_cpu_exit,
387 .owner = THIS_MODULE,
388};
389
390/**
391 * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
392 *
393 * Detects nForce2 A2 and C1 stepping
394 *
395 */
396static int nforce2_detect_chipset(void)
397{
398 nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
399 PCI_DEVICE_ID_NVIDIA_NFORCE2,
400 PCI_ANY_ID, PCI_ANY_ID, NULL);
401
402 if (nforce2_dev == NULL)
403 return -ENODEV;
404
405 printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
406 nforce2_dev->revision);
407 printk(KERN_INFO PFX
408		"FSB changing may be unstable and can lead to "
409 "crashes and data loss.\n");
410
411 return 0;
412}
413
414/**
415 * nforce2_init - initializes the nForce2 CPUFreq driver
416 *
417 * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
418 * devices, -EINVAL on problems during initialization, and zero on
419 * success.
420 */
421static int __init nforce2_init(void)
422{
423 /* TODO: do we need to detect the processor? */
424
425 /* detect chipset */
426 if (nforce2_detect_chipset()) {
427 printk(KERN_INFO PFX "No nForce2 chipset.\n");
428 return -ENODEV;
429 }
430
431 return cpufreq_register_driver(&nforce2_driver);
432}
433
434/**
435 * nforce2_exit - unregisters cpufreq module
436 *
437 * Unregisters nForce2 FSB change support.
438 */
439static void __exit nforce2_exit(void)
440{
441 cpufreq_unregister_driver(&nforce2_driver);
442}
443
444module_init(nforce2_init);
445module_exit(nforce2_exit);
446
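For reference, the FSB arithmetic used by nforce2_calc_fsb()/nforce2_calc_pll() above is simple enough to check in isolation; a minimal user-space sketch of the same mul/div relation (the helper and sample values here are illustrative, not part of the driver):

#include <stdio.h>

#define NFORCE2_XTAL 25	/* 25 MHz reference clock, as in the driver */

/* FSB in MHz for a given multiplier/divider pair (cf. nforce2_calc_fsb) */
static int fsb_from_pll(unsigned int mul, unsigned int div)
{
	return div ? NFORCE2_XTAL * mul / div : 0;
}

int main(void)
{
	/* e.g. mul = 16, div = 2 gives 25 * 16 / 2 = 200 MHz */
	printf("FSB = %d MHz\n", fsb_from_pll(16, 2));
	return 0;
}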
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
deleted file mode 100644
index 35a257dd4bb7..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ /dev/null
@@ -1,367 +0,0 @@
1/*
2 * Based on documentation provided by Dave Jones. Thanks!
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/cpufreq.h>
13#include <linux/ioport.h>
14#include <linux/slab.h>
15#include <linux/timex.h>
16#include <linux/io.h>
17#include <linux/delay.h>
18
19#include <asm/msr.h>
20#include <asm/tsc.h>
21
22#define EPS_BRAND_C7M 0
23#define EPS_BRAND_C7 1
24#define EPS_BRAND_EDEN 2
25#define EPS_BRAND_C3 3
26#define EPS_BRAND_C7D 4
27
28struct eps_cpu_data {
29 u32 fsb;
30 struct cpufreq_frequency_table freq_table[];
31};
32
33static struct eps_cpu_data *eps_cpu[NR_CPUS];
34
35
36static unsigned int eps_get(unsigned int cpu)
37{
38 struct eps_cpu_data *centaur;
39 u32 lo, hi;
40
41 if (cpu)
42 return 0;
43 centaur = eps_cpu[cpu];
44 if (centaur == NULL)
45 return 0;
46
47 /* Return current frequency */
48 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
49 return centaur->fsb * ((lo >> 8) & 0xff);
50}
51
52static int eps_set_state(struct eps_cpu_data *centaur,
53 unsigned int cpu,
54 u32 dest_state)
55{
56 struct cpufreq_freqs freqs;
57 u32 lo, hi;
58 int err = 0;
59 int i;
60
61 freqs.old = eps_get(cpu);
62 freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
63 freqs.cpu = cpu;
64 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
65
66 /* Wait while CPU is busy */
67 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
68 i = 0;
69 while (lo & ((1 << 16) | (1 << 17))) {
70 udelay(16);
71 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
72 i++;
73 if (unlikely(i > 64)) {
74 err = -ENODEV;
75 goto postchange;
76 }
77 }
78 /* Set new multiplier and voltage */
79 wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
80 /* Wait until transition end */
81 i = 0;
82 do {
83 udelay(16);
84 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
85 i++;
86 if (unlikely(i > 64)) {
87 err = -ENODEV;
88 goto postchange;
89 }
90 } while (lo & ((1 << 16) | (1 << 17)));
91
92 /* Return current frequency */
93postchange:
94 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
95 freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
96
97#ifdef DEBUG
98 {
99 u8 current_multiplier, current_voltage;
100
101 /* Print voltage and multiplier */
102 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
103 current_voltage = lo & 0xff;
104 printk(KERN_INFO "eps: Current voltage = %dmV\n",
105 current_voltage * 16 + 700);
106 current_multiplier = (lo >> 8) & 0xff;
107 printk(KERN_INFO "eps: Current multiplier = %d\n",
108 current_multiplier);
109 }
110#endif
111 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
112 return err;
113}
114
115static int eps_target(struct cpufreq_policy *policy,
116 unsigned int target_freq,
117 unsigned int relation)
118{
119 struct eps_cpu_data *centaur;
120 unsigned int newstate = 0;
121 unsigned int cpu = policy->cpu;
122 unsigned int dest_state;
123 int ret;
124
125 if (unlikely(eps_cpu[cpu] == NULL))
126 return -ENODEV;
127 centaur = eps_cpu[cpu];
128
129 if (unlikely(cpufreq_frequency_table_target(policy,
130 &eps_cpu[cpu]->freq_table[0],
131 target_freq,
132 relation,
133 &newstate))) {
134 return -EINVAL;
135 }
136
137 /* Make frequency transition */
138 dest_state = centaur->freq_table[newstate].index & 0xffff;
139 ret = eps_set_state(centaur, cpu, dest_state);
140 if (ret)
141 printk(KERN_ERR "eps: Timeout!\n");
142 return ret;
143}
144
145static int eps_verify(struct cpufreq_policy *policy)
146{
147 return cpufreq_frequency_table_verify(policy,
148 &eps_cpu[policy->cpu]->freq_table[0]);
149}
150
151static int eps_cpu_init(struct cpufreq_policy *policy)
152{
153 unsigned int i;
154 u32 lo, hi;
155 u64 val;
156 u8 current_multiplier, current_voltage;
157 u8 max_multiplier, max_voltage;
158 u8 min_multiplier, min_voltage;
159 u8 brand = 0;
160 u32 fsb;
161 struct eps_cpu_data *centaur;
162 struct cpuinfo_x86 *c = &cpu_data(0);
163 struct cpufreq_frequency_table *f_table;
164 int k, step, voltage;
165 int ret;
166 int states;
167
168 if (policy->cpu != 0)
169 return -ENODEV;
170
171 /* Check brand */
172 printk(KERN_INFO "eps: Detected VIA ");
173
174 switch (c->x86_model) {
175 case 10:
176 rdmsr(0x1153, lo, hi);
177 brand = (((lo >> 2) ^ lo) >> 18) & 3;
178 printk(KERN_CONT "Model A ");
179 break;
180 case 13:
181 rdmsr(0x1154, lo, hi);
182 brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
183 printk(KERN_CONT "Model D ");
184 break;
185 }
186
187 switch (brand) {
188 case EPS_BRAND_C7M:
189 printk(KERN_CONT "C7-M\n");
190 break;
191 case EPS_BRAND_C7:
192 printk(KERN_CONT "C7\n");
193 break;
194 case EPS_BRAND_EDEN:
195 printk(KERN_CONT "Eden\n");
196 break;
197 case EPS_BRAND_C7D:
198 printk(KERN_CONT "C7-D\n");
199 break;
200 case EPS_BRAND_C3:
201 printk(KERN_CONT "C3\n");
202 return -ENODEV;
203 break;
204 }
205 /* Enable Enhanced PowerSaver */
206 rdmsrl(MSR_IA32_MISC_ENABLE, val);
207 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
208 val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
209 wrmsrl(MSR_IA32_MISC_ENABLE, val);
210 /* Can be locked at 0 */
211 rdmsrl(MSR_IA32_MISC_ENABLE, val);
212 if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
213 printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
214 return -ENODEV;
215 }
216 }
217
218 /* Print voltage and multiplier */
219 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
220 current_voltage = lo & 0xff;
221 printk(KERN_INFO "eps: Current voltage = %dmV\n",
222 current_voltage * 16 + 700);
223 current_multiplier = (lo >> 8) & 0xff;
224 printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
225
226 /* Print limits */
227 max_voltage = hi & 0xff;
228 printk(KERN_INFO "eps: Highest voltage = %dmV\n",
229 max_voltage * 16 + 700);
230 max_multiplier = (hi >> 8) & 0xff;
231 printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
232 min_voltage = (hi >> 16) & 0xff;
233 printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
234 min_voltage * 16 + 700);
235 min_multiplier = (hi >> 24) & 0xff;
236 printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
237
238 /* Sanity checks */
239 if (current_multiplier == 0 || max_multiplier == 0
240 || min_multiplier == 0)
241 return -EINVAL;
242 if (current_multiplier > max_multiplier
243 || max_multiplier <= min_multiplier)
244 return -EINVAL;
245 if (current_voltage > 0x1f || max_voltage > 0x1f)
246 return -EINVAL;
247 if (max_voltage < min_voltage)
248 return -EINVAL;
249
250 /* Calc FSB speed */
251 fsb = cpu_khz / current_multiplier;
252 /* Calc number of p-states supported */
253 if (brand == EPS_BRAND_C7M)
254 states = max_multiplier - min_multiplier + 1;
255 else
256 states = 2;
257
258 /* Allocate private data and frequency table for current cpu */
259 centaur = kzalloc(sizeof(struct eps_cpu_data)
260 + (states + 1) * sizeof(struct cpufreq_frequency_table),
261 GFP_KERNEL);
262 if (!centaur)
263 return -ENOMEM;
264 eps_cpu[0] = centaur;
265
266 /* Copy basic values */
267 centaur->fsb = fsb;
268
269 /* Fill frequency and MSR value table */
270 f_table = &centaur->freq_table[0];
271 if (brand != EPS_BRAND_C7M) {
272 f_table[0].frequency = fsb * min_multiplier;
273 f_table[0].index = (min_multiplier << 8) | min_voltage;
274 f_table[1].frequency = fsb * max_multiplier;
275 f_table[1].index = (max_multiplier << 8) | max_voltage;
276 f_table[2].frequency = CPUFREQ_TABLE_END;
277 } else {
278 k = 0;
279 step = ((max_voltage - min_voltage) * 256)
280 / (max_multiplier - min_multiplier);
281 for (i = min_multiplier; i <= max_multiplier; i++) {
282 voltage = (k * step) / 256 + min_voltage;
283 f_table[k].frequency = fsb * i;
284 f_table[k].index = (i << 8) | voltage;
285 k++;
286 }
287 f_table[k].frequency = CPUFREQ_TABLE_END;
288 }
289
290 policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
291 policy->cur = fsb * current_multiplier;
292
293 ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
294 if (ret) {
295 kfree(centaur);
296 return ret;
297 }
298
299 cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
300 return 0;
301}
302
303static int eps_cpu_exit(struct cpufreq_policy *policy)
304{
305 unsigned int cpu = policy->cpu;
306 struct eps_cpu_data *centaur;
307 u32 lo, hi;
308
309 if (eps_cpu[cpu] == NULL)
310 return -ENODEV;
311 centaur = eps_cpu[cpu];
312
313 /* Get max frequency */
314 rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
315 /* Set max frequency */
316 eps_set_state(centaur, cpu, hi & 0xffff);
317 /* Bye */
318 cpufreq_frequency_table_put_attr(policy->cpu);
319 kfree(eps_cpu[cpu]);
320 eps_cpu[cpu] = NULL;
321 return 0;
322}
323
324static struct freq_attr *eps_attr[] = {
325 &cpufreq_freq_attr_scaling_available_freqs,
326 NULL,
327};
328
329static struct cpufreq_driver eps_driver = {
330 .verify = eps_verify,
331 .target = eps_target,
332 .init = eps_cpu_init,
333 .exit = eps_cpu_exit,
334 .get = eps_get,
335 .name = "e_powersaver",
336 .owner = THIS_MODULE,
337 .attr = eps_attr,
338};
339
340static int __init eps_init(void)
341{
342 struct cpuinfo_x86 *c = &cpu_data(0);
343
344 /* This driver will work only on Centaur C7 processors with
345 * Enhanced SpeedStep/PowerSaver registers */
346 if (c->x86_vendor != X86_VENDOR_CENTAUR
347 || c->x86 != 6 || c->x86_model < 10)
348 return -ENODEV;
349 if (!cpu_has(c, X86_FEATURE_EST))
350 return -ENODEV;
351
352 if (cpufreq_register_driver(&eps_driver))
353 return -EINVAL;
354 return 0;
355}
356
357static void __exit eps_exit(void)
358{
359 cpufreq_unregister_driver(&eps_driver);
360}
361
362MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
363MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs.");
364MODULE_LICENSE("GPL");
365
366module_init(eps_init);
367module_exit(eps_exit);
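The MSR_IA32_PERF_STATUS layout read by eps_cpu_init() above is easy to decode by hand: in the low word, bits 0-7 carry the voltage ID (mV = VID * 16 + 700) and bits 8-15 the multiplier, while the high word carries the highest and lowest limits in the same packed format. A small user-space sketch of that decoding (the register values in main() are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Decode the PERF_STATUS fields the way eps_cpu_init() does (sketch only). */
static void eps_decode(uint32_t lo, uint32_t hi)
{
	printf("current: %u mV, x%u\n", (lo & 0xff) * 16 + 700, (lo >> 8) & 0xff);
	printf("highest: %u mV, x%u\n", (hi & 0xff) * 16 + 700, (hi >> 8) & 0xff);
	printf("lowest : %u mV, x%u\n",
	       ((hi >> 16) & 0xff) * 16 + 700, (hi >> 24) & 0xff);
}

int main(void)
{
	eps_decode(0x0a0c, 0x06040c10);	/* made-up register contents */
	return 0;
}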
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
deleted file mode 100644
index c587db472a75..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * elanfreq: cpufreq driver for the AMD ELAN family
3 *
4 * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
5 *
6 * Parts of this code are (c) Sven Geggus <sven@geggus.net>
7 *
8 * All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22
23#include <linux/delay.h>
24#include <linux/cpufreq.h>
25
26#include <asm/msr.h>
27#include <linux/timex.h>
28#include <linux/io.h>
29
30#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
31#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
32
33/* Module parameter */
34static int max_freq;
35
36struct s_elan_multiplier {
37 int clock; /* frequency in kHz */
38 int val40h; /* PMU Force Mode register */
39 int val80h; /* CPU Clock Speed Register */
40};
41
42/*
43 * It is important that the frequencies
44 * are listed in ascending order here!
45 */
46static struct s_elan_multiplier elan_multiplier[] = {
47 {1000, 0x02, 0x18},
48 {2000, 0x02, 0x10},
49 {4000, 0x02, 0x08},
50 {8000, 0x00, 0x00},
51 {16000, 0x00, 0x02},
52 {33000, 0x00, 0x04},
53 {66000, 0x01, 0x04},
54 {99000, 0x01, 0x05}
55};
56
57static struct cpufreq_frequency_table elanfreq_table[] = {
58 {0, 1000},
59 {1, 2000},
60 {2, 4000},
61 {3, 8000},
62 {4, 16000},
63 {5, 33000},
64 {6, 66000},
65 {7, 99000},
66 {0, CPUFREQ_TABLE_END},
67};
68
69
70/**
71 * elanfreq_get_cpu_frequency: determine current cpu speed
72 *
73 * Finds out at which frequency the CPU of the Elan SOC runs
74 * at the moment. Frequencies from 1 to 33 MHz are generated
75 * the normal way; 66 and 99 MHz are called "Hyperspeed Mode"
76 * and have the rest of the chip running at 33 MHz.
77 */
78
79static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
80{
81 u8 clockspeed_reg; /* Clock Speed Register */
82
83 local_irq_disable();
84 outb_p(0x80, REG_CSCIR);
85 clockspeed_reg = inb_p(REG_CSCDR);
86 local_irq_enable();
87
88 if ((clockspeed_reg & 0xE0) == 0xE0)
89 return 0;
90
91 /* Are we in CPU clock multiplied mode (66/99 MHz)? */
92 if ((clockspeed_reg & 0xE0) == 0xC0) {
93 if ((clockspeed_reg & 0x01) == 0)
94 return 66000;
95 else
96 return 99000;
97 }
98
99 /* 33 MHz is not 32 MHz... */
100 if ((clockspeed_reg & 0xE0) == 0xA0)
101 return 33000;
102
103 return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
104}
105
106
107/**
108 * elanfreq_set_cpu_state: Change the CPU core frequency
109 * @state: index into the elan_multiplier[] table
110 *
111 * This function takes a state index and changes the CPU frequency
112 * to the corresponding entry. Note that the state is expected to
113 * come from a validated frequency-table lookup, as done in
114 * elanfreq_target().
115 *
116 * There is no return value.
117 */
118
119static void elanfreq_set_cpu_state(unsigned int state)
120{
121 struct cpufreq_freqs freqs;
122
123 freqs.old = elanfreq_get_cpu_frequency(0);
124 freqs.new = elan_multiplier[state].clock;
125 freqs.cpu = 0; /* elanfreq.c is UP only driver */
126
127 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
128
129 printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
130 elan_multiplier[state].clock);
131
132
133 /*
134 * Access to the Elan's internal registers is indexed via
135 * 0x22: Chip Setup & Control Register Index Register (CSCI)
136 * 0x23: Chip Setup & Control Register Data Register (CSCD)
137 *
138 */
139
140 /*
141 * 0x40 is the Power Management Unit's Force Mode Register.
142 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
143 */
144
145 local_irq_disable();
146 outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */
147 outb_p(0x00, REG_CSCDR);
148 local_irq_enable(); /* wait till internal pipelines and */
149 udelay(1000); /* buffers have cleaned up */
150
151 local_irq_disable();
152
153 /* now, set the CPU clock speed register (0x80) */
154 outb_p(0x80, REG_CSCIR);
155 outb_p(elan_multiplier[state].val80h, REG_CSCDR);
156
157 /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
158 outb_p(0x40, REG_CSCIR);
159 outb_p(elan_multiplier[state].val40h, REG_CSCDR);
160 udelay(10000);
161 local_irq_enable();
162
163 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
164};
165
166
167/**
168 * elanfreq_verify: test if frequency range is valid
169 * @policy: the policy to validate
170 *
171 * This function checks if a given frequency range in kHz is valid
172 * for the hardware supported by the driver.
173 */
174
175static int elanfreq_verify(struct cpufreq_policy *policy)
176{
177 return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
178}
179
180static int elanfreq_target(struct cpufreq_policy *policy,
181 unsigned int target_freq,
182 unsigned int relation)
183{
184 unsigned int newstate = 0;
185
186 if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
187 target_freq, relation, &newstate))
188 return -EINVAL;
189
190 elanfreq_set_cpu_state(newstate);
191
192 return 0;
193}
194
195
196/*
197 * Module init and exit code
198 */
199
200static int elanfreq_cpu_init(struct cpufreq_policy *policy)
201{
202 struct cpuinfo_x86 *c = &cpu_data(0);
203 unsigned int i;
204 int result;
205
206 /* capability check */
207 if ((c->x86_vendor != X86_VENDOR_AMD) ||
208 (c->x86 != 4) || (c->x86_model != 10))
209 return -ENODEV;
210
211 /* max freq */
212 if (!max_freq)
213 max_freq = elanfreq_get_cpu_frequency(0);
214
215 /* table init */
216 for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
217 if (elanfreq_table[i].frequency > max_freq)
218 elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
219 }
220
221 /* cpuinfo and default policy values */
222 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
223 policy->cur = elanfreq_get_cpu_frequency(0);
224
225 result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
226 if (result)
227 return result;
228
229 cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
230 return 0;
231}
232
233
234static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
235{
236 cpufreq_frequency_table_put_attr(policy->cpu);
237 return 0;
238}
239
240
241#ifndef MODULE
242/**
243 * elanfreq_setup - elanfreq command line parameter parsing
244 *
245 * elanfreq command line parameter. Use:
246 * elanfreq=66000
247 * to set the maximum CPU frequency to 66 MHz. Note that in
248 * case you do not give this boot parameter, the maximum
249 * frequency will fall back to the _current_ CPU frequency, which
250 * might be lower. If you build this as a module, use the
251 * max_freq module parameter instead.
252 */
253static int __init elanfreq_setup(char *str)
254{
255 max_freq = simple_strtoul(str, &str, 0);
256 printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
257 return 1;
258}
259__setup("elanfreq=", elanfreq_setup);
260#endif
261
262
263static struct freq_attr *elanfreq_attr[] = {
264 &cpufreq_freq_attr_scaling_available_freqs,
265 NULL,
266};
267
268
269static struct cpufreq_driver elanfreq_driver = {
270 .get = elanfreq_get_cpu_frequency,
271 .verify = elanfreq_verify,
272 .target = elanfreq_target,
273 .init = elanfreq_cpu_init,
274 .exit = elanfreq_cpu_exit,
275 .name = "elanfreq",
276 .owner = THIS_MODULE,
277 .attr = elanfreq_attr,
278};
279
280
281static int __init elanfreq_init(void)
282{
283 struct cpuinfo_x86 *c = &cpu_data(0);
284
285 /* Test if we have the right hardware */
286 if ((c->x86_vendor != X86_VENDOR_AMD) ||
287 (c->x86 != 4) || (c->x86_model != 10)) {
288 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
289 return -ENODEV;
290 }
291 return cpufreq_register_driver(&elanfreq_driver);
292}
293
294
295static void __exit elanfreq_exit(void)
296{
297 cpufreq_unregister_driver(&elanfreq_driver);
298}
299
300
301module_param(max_freq, int, 0444);
302
303MODULE_LICENSE("GPL");
304MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, "
305 "Sven Geggus <sven@geggus.net>");
306MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
307
308module_init(elanfreq_init);
309module_exit(elanfreq_exit);
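The clock-speed register decoding done in elanfreq_get_cpu_frequency() above can be exercised on its own; a minimal user-space sketch of the same bit tests (the helper name and the sample value are chosen here for illustration):

#include <stdio.h>

/* Mirror of the decode in elanfreq_get_cpu_frequency() (sketch only). */
static unsigned int elan_decode_khz(unsigned char reg)
{
	if ((reg & 0xE0) == 0xE0)
		return 0;				/* invalid */
	if ((reg & 0xE0) == 0xC0)			/* hyperspeed mode */
		return (reg & 0x01) ? 99000 : 66000;
	if ((reg & 0xE0) == 0xA0)			/* 33 MHz special case */
		return 33000;
	return (1 << ((reg & 0xE0) >> 5)) * 1000;	/* 1, 2, 4, 8 or 16 MHz */
}

int main(void)
{
	printf("%u kHz\n", elan_decode_khz(0x60));	/* 0x60 -> 8000 kHz */
	return 0;
}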
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
deleted file mode 100644
index 32974cf84232..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
+++ /dev/null
@@ -1,517 +0,0 @@
1/*
2 * Cyrix MediaGX and NatSemi Geode Suspend Modulation
3 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
4 * (C) 2002 Hiroshi Miura <miura@da-cha.org>
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation
10 *
11 * The author(s) of this software shall not be held liable for damages
12 * of any nature resulting due to the use of this software. This
13 * software is provided AS-IS with no warranties.
14 *
15 * Theoretical note:
16 *
17 * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
18 *
19 * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
20 * are based on Suspend Modulation.
21 *
22 * Suspend Modulation works by asserting and de-asserting the SUSP# pin
23 * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
24 * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
25 * asserted then power consumption is reduced.
26 *
27 * Suspend Modulation's OFF/ON duration are configurable
28 * with 'Suspend Modulation OFF Count Register'
29 * and 'Suspend Modulation ON Count Register'.
30 * These registers are 8bit counters that represent the number of
31 * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
32 * to the processor.
33 *
34 * These counters define a ratio which is the effective frequency
35 * of operation of the system.
36 *
37 * OFF Count
38 * F_eff = Fgx * ----------------------
39 * OFF Count + ON Count
40 *
41 * 0 <= On Count, Off Count <= 255
42 *
43 * From these limits, we can get register values
44 *
45 * off_duration + on_duration <= MAX_DURATION
46 * on_duration = off_duration * (stock_freq - freq) / freq
47 *
48 * off_duration = (freq * DURATION) / stock_freq
49 * on_duration = DURATION - off_duration
50 *
51 *
52 *---------------------------------------------------------------------------
53 *
54 * ChangeLog:
55 * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
56 * - fix on/off register mistake
57 * - fix cpu_khz calc when it stops cpu modulation.
58 *
59 * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
60 * - rewrite for Cyrix MediaGX Cx5510/5520 and
61 * NatSemi Geode Cs5530(A).
62 *
63 * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
64 * - cs5530_mod patch for 2.4.19-rc1.
65 *
66 *---------------------------------------------------------------------------
67 *
68 * Todo
69 * Test on machines with 5510, 5530, 5530A
70 */
71
72/************************************************************************
73 * Suspend Modulation - Definitions *
74 ************************************************************************/
75
76#include <linux/kernel.h>
77#include <linux/module.h>
78#include <linux/init.h>
79#include <linux/smp.h>
80#include <linux/cpufreq.h>
81#include <linux/pci.h>
82#include <linux/errno.h>
83#include <linux/slab.h>
84
85#include <asm/processor-cyrix.h>
86
87/* PCI config registers, all at F0 */
88#define PCI_PMER1 0x80 /* power management enable register 1 */
89#define PCI_PMER2 0x81 /* power management enable register 2 */
90#define PCI_PMER3 0x82 /* power management enable register 3 */
91#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
92#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
93#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
94#define PCI_MODON 0x95 /* suspend modulation ON counter register */
95#define PCI_SUSCFG 0x96 /* suspend configuration register */
96
97/* PMER1 bits */
98#define GPM (1<<0) /* global power management */
99#define GIT (1<<1) /* globally enable PM device idle timers */
100#define GTR (1<<2) /* globally enable IO traps */
101#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
102#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
103
104/* SUSCFG bits */
105#define SUSMOD (1<<0) /* enable/disable suspend modulation */
106/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
107#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
108 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
109#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
110/* the below is supported only with cs5530A */
111#define PWRSVE_ISA (1<<3) /* stop ISA clock */
112#define PWRSVE (1<<4) /* active idle */
113
114struct gxfreq_params {
115 u8 on_duration;
116 u8 off_duration;
117 u8 pci_suscfg;
118 u8 pci_pmer1;
119 u8 pci_pmer2;
120 struct pci_dev *cs55x0;
121};
122
123static struct gxfreq_params *gx_params;
124static int stock_freq;
125
126/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */
127static int pci_busclk;
128module_param(pci_busclk, int, 0444);
129
130/* maximum duration for which the cpu may be suspended
131 * (32us * MAX_DURATION). If no parameter is given, this defaults
132 * to 255.
133 * Note that this leads to a maximum of 8 ms(!) where the CPU clock
134 * is suspended -- processing power is just 0.39% of what it used to be,
135 * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
136static int max_duration = 255;
137module_param(max_duration, int, 0444);
138
139/* For the default policy, we want at least some processing power
140 * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
141 */
142#define POLICY_MIN_DIV 20
143
144
145#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
146 "gx-suspmod", msg)
147
148/**
149 * We can detect the core multiplier from dir0_lsb;
150 * per the GX1 datasheet p.56,
151 * MULT[3:0]:
152 * 0000 = SYSCLK multiplied by 4 (test only)
153 * 0001 = SYSCLK multiplied by 10
154 * 0010 = SYSCLK multiplied by 4
155 * 0011 = SYSCLK multiplied by 6
156 * 0100 = SYSCLK multiplied by 9
157 * 0101 = SYSCLK multiplied by 5
158 * 0110 = SYSCLK multiplied by 7
159 * 0111 = SYSCLK multiplied by 8
160 * of 33.3MHz
161 **/
162static int gx_freq_mult[16] = {
163 4, 10, 4, 6, 9, 5, 7, 8,
164 0, 0, 0, 0, 0, 0, 0, 0
165};
166
167
168/****************************************************************
169 * Low Level chipset interface *
170 ****************************************************************/
171static struct pci_device_id gx_chipset_tbl[] __initdata = {
172 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
173 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
174 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
175 { 0, },
176};
177
178static void gx_write_byte(int reg, int value)
179{
180 pci_write_config_byte(gx_params->cs55x0, reg, value);
181}
182
183/**
184 * gx_detect_chipset:
185 *
186 **/
187static __init struct pci_dev *gx_detect_chipset(void)
188{
189 struct pci_dev *gx_pci = NULL;
190
191 /* check if CPU is a MediaGX or a Geode. */
192 if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
193 (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
194 dprintk("error: no MediaGX/Geode processor found!\n");
195 return NULL;
196 }
197
198 /* detect which companion chip is used */
199 for_each_pci_dev(gx_pci) {
200 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
201 return gx_pci;
202 }
203
204 dprintk("error: no supported chipset found!\n");
205 return NULL;
206}
207
208/**
209 * gx_get_cpuspeed:
210 *
211 * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi
212 * Geode CPU runs.
213 */
214static unsigned int gx_get_cpuspeed(unsigned int cpu)
215{
216 if ((gx_params->pci_suscfg & SUSMOD) == 0)
217 return stock_freq;
218
219 return (stock_freq * gx_params->off_duration)
220 / (gx_params->on_duration + gx_params->off_duration);
221}
222
223/**
224 * gx_validate_speed:
225 * determine the closest supported speed (in kHz) and its on/off durations
226 *
227 **/
228
229static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
230 u8 *off_duration)
231{
232 unsigned int i;
233 u8 tmp_on, tmp_off;
234 int old_tmp_freq = stock_freq;
235 int tmp_freq;
236
237 *off_duration = 1;
238 *on_duration = 0;
239
240 for (i = max_duration; i > 0; i--) {
241 tmp_off = ((khz * i) / stock_freq) & 0xff;
242 tmp_on = i - tmp_off;
243 tmp_freq = (stock_freq * tmp_off) / i;
244 /* if this relation is closer to khz, use this. If it's equal,
245 * prefer it, too - lower latency */
246 if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
247 *on_duration = tmp_on;
248 *off_duration = tmp_off;
249 old_tmp_freq = tmp_freq;
250 }
251 }
252
253 return old_tmp_freq;
254}
255
256
257/**
258 * gx_set_cpuspeed:
259 * set cpu speed in khz.
260 **/
261
262static void gx_set_cpuspeed(unsigned int khz)
263{
264 u8 suscfg, pmer1;
265 unsigned int new_khz;
266 unsigned long flags;
267 struct cpufreq_freqs freqs;
268
269 freqs.cpu = 0;
270 freqs.old = gx_get_cpuspeed(0);
271
272 new_khz = gx_validate_speed(khz, &gx_params->on_duration,
273 &gx_params->off_duration);
274
275 freqs.new = new_khz;
276
277 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
278 local_irq_save(flags);
279
280
281
282 if (new_khz != stock_freq) {
283 /* if new khz == 100% of CPU speed, it is special case */
284 switch (gx_params->cs55x0->device) {
285 case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
286 pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
287 /* FIXME: need to test other values -- Zwane,Miura */
288 /* typical 2 to 4ms */
289 gx_write_byte(PCI_IRQTC, 4);
290 /* typical 50 to 100ms */
291 gx_write_byte(PCI_VIDTC, 100);
292 gx_write_byte(PCI_PMER1, pmer1);
293
294 if (gx_params->cs55x0->revision < 0x10) {
295 /* CS5530(rev 1.2, 1.3) */
296 suscfg = gx_params->pci_suscfg|SUSMOD;
297 } else {
298 /* CS5530A,B.. */
299 suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
300 }
301 break;
302 case PCI_DEVICE_ID_CYRIX_5520:
303 case PCI_DEVICE_ID_CYRIX_5510:
304 suscfg = gx_params->pci_suscfg | SUSMOD;
305 break;
306 default:
307 local_irq_restore(flags);
308			dprintk("fatal: tried to set an unknown chipset.\n");
309 return;
310 }
311 } else {
312 suscfg = gx_params->pci_suscfg & ~(SUSMOD);
313 gx_params->off_duration = 0;
314 gx_params->on_duration = 0;
315 dprintk("suspend modulation disabled: cpu runs 100%% speed.\n");
316 }
317
318 gx_write_byte(PCI_MODOFF, gx_params->off_duration);
319 gx_write_byte(PCI_MODON, gx_params->on_duration);
320
321 gx_write_byte(PCI_SUSCFG, suscfg);
322 pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
323
324 local_irq_restore(flags);
325
326 gx_params->pci_suscfg = suscfg;
327
328 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
329
330 dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
331 gx_params->on_duration * 32, gx_params->off_duration * 32);
332 dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
333}
334
335/****************************************************************
336 * High level functions *
337 ****************************************************************/
338
339/*
340 * cpufreq_gx_verify: test if frequency range is valid
341 *
342 * This function checks if a given frequency range in kHz is valid
343 * for the hardware supported by the driver.
344 */
345
346static int cpufreq_gx_verify(struct cpufreq_policy *policy)
347{
348 unsigned int tmp_freq = 0;
349 u8 tmp1, tmp2;
350
351 if (!stock_freq || !policy)
352 return -EINVAL;
353
354 policy->cpu = 0;
355 cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
356 stock_freq);
357
358 /* it needs to be assured that at least one supported frequency is
359 * within policy->min and policy->max. If it is not, policy->max
360	 * needs to be increased until one frequency is supported.
361 * policy->min may not be decreased, though. This way we guarantee a
362 * specific processing capacity.
363 */
364 tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
365 if (tmp_freq < policy->min)
366 tmp_freq += stock_freq / max_duration;
367 policy->min = tmp_freq;
368 if (policy->min > policy->max)
369 policy->max = tmp_freq;
370 tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
371 if (tmp_freq > policy->max)
372 tmp_freq -= stock_freq / max_duration;
373 policy->max = tmp_freq;
374 if (policy->max < policy->min)
375 policy->max = policy->min;
376 cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
377 stock_freq);
378
379 return 0;
380}
381
382/*
383 * cpufreq_gx_target:
384 *
385 */
386static int cpufreq_gx_target(struct cpufreq_policy *policy,
387 unsigned int target_freq,
388 unsigned int relation)
389{
390 u8 tmp1, tmp2;
391 unsigned int tmp_freq;
392
393 if (!stock_freq || !policy)
394 return -EINVAL;
395
396 policy->cpu = 0;
397
398 tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
399 while (tmp_freq < policy->min) {
400 tmp_freq += stock_freq / max_duration;
401 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
402 }
403 while (tmp_freq > policy->max) {
404 tmp_freq -= stock_freq / max_duration;
405 tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
406 }
407
408 gx_set_cpuspeed(tmp_freq);
409
410 return 0;
411}
412
413static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
414{
415 unsigned int maxfreq, curfreq;
416
417 if (!policy || policy->cpu != 0)
418 return -ENODEV;
419
420 /* determine maximum frequency */
421 if (pci_busclk)
422 maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
423 else if (cpu_khz)
424 maxfreq = cpu_khz;
425 else
426 maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
427
428 stock_freq = maxfreq;
429 curfreq = gx_get_cpuspeed(0);
430
431 dprintk("cpu max frequency is %d.\n", maxfreq);
432 dprintk("cpu current frequency is %dkHz.\n", curfreq);
433
434 /* setup basic struct for cpufreq API */
435 policy->cpu = 0;
436
437 if (max_duration < POLICY_MIN_DIV)
438 policy->min = maxfreq / max_duration;
439 else
440 policy->min = maxfreq / POLICY_MIN_DIV;
441 policy->max = maxfreq;
442 policy->cur = curfreq;
443 policy->cpuinfo.min_freq = maxfreq / max_duration;
444 policy->cpuinfo.max_freq = maxfreq;
445 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
446
447 return 0;
448}
449
450/*
451 * cpufreq_gx_init:
452 * MediaGX/Geode GX initialize cpufreq driver
453 */
454static struct cpufreq_driver gx_suspmod_driver = {
455 .get = gx_get_cpuspeed,
456 .verify = cpufreq_gx_verify,
457 .target = cpufreq_gx_target,
458 .init = cpufreq_gx_cpu_init,
459 .name = "gx-suspmod",
460 .owner = THIS_MODULE,
461};
462
463static int __init cpufreq_gx_init(void)
464{
465 int ret;
466 struct gxfreq_params *params;
467 struct pci_dev *gx_pci;
468
469 /* Test if we have the right hardware */
470 gx_pci = gx_detect_chipset();
471 if (gx_pci == NULL)
472 return -ENODEV;
473
474 /* check whether module parameters are sane */
475 if (max_duration > 0xff)
476 max_duration = 0xff;
477
478 dprintk("geode suspend modulation available.\n");
479
480 params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
481 if (params == NULL)
482 return -ENOMEM;
483
484 params->cs55x0 = gx_pci;
485 gx_params = params;
486
487 /* keep cs55x0 configurations */
488 pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
489 pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
490 pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
491 pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
492 pci_read_config_byte(params->cs55x0, PCI_MODOFF,
493 &(params->off_duration));
494
495 ret = cpufreq_register_driver(&gx_suspmod_driver);
496 if (ret) {
497 kfree(params);
498 return ret; /* register error! */
499 }
500
501 return 0;
502}
503
504static void __exit cpufreq_gx_exit(void)
505{
506 cpufreq_unregister_driver(&gx_suspmod_driver);
507 pci_dev_put(gx_params->cs55x0);
508 kfree(gx_params);
509}
510
511MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
512MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
513MODULE_LICENSE("GPL");
514
515module_init(cpufreq_gx_init);
516module_exit(cpufreq_gx_exit);
517
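The suspend-modulation arithmetic spelled out in the header comment of gx-suspmod.c reduces to off_duration = freq * DURATION / stock_freq, on_duration = DURATION - off_duration, and F_eff = stock_freq * off / (on + off). A small user-space sketch of that relationship (function and variable names here are illustrative only):

#include <stdio.h>

/* Split a duty-cycle duration into on/off counts for a target frequency. */
static void gx_duty_cycle(unsigned int khz, unsigned int stock,
			  unsigned int duration,
			  unsigned int *on, unsigned int *off)
{
	*off = khz * duration / stock;	/* off_duration = freq * DURATION / stock_freq */
	*on = duration - *off;		/* on_duration  = DURATION - off_duration */
}

int main(void)
{
	unsigned int on, off, stock = 200000, duration = 255;

	gx_duty_cycle(100000, stock, duration, &on, &off);
	printf("on=%u off=%u -> F_eff=%u kHz\n",
	       on, off, stock * off / (on + off));
	return 0;
}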
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
deleted file mode 100644
index cf48cdd6907d..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ /dev/null
@@ -1,1029 +0,0 @@
1/*
2 * (C) 2001-2004 Dave Jones. <davej@redhat.com>
3 * (C) 2002 Padraig Brady. <padraig@antefacto.com>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon datasheets & sample CPUs kindly provided by VIA.
7 *
8 * VIA have currently 3 different versions of Longhaul.
9 * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
10 * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
11 * Version 2 of longhaul is backward compatible with v1, but adds
12 * LONGHAUL MSR for purpose of both frequency and voltage scaling.
13 * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
14 * Version 3 of longhaul got renamed to Powersaver and redesigned
15 * to use only the POWERSAVER MSR at 0x110a.
16 * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
17 * It is pretty much the same, feature-wise, as longhaul v2, though
18 * there is provision for scaling FSB too, but this doesn't work
19 * too well in practice so we don't even try to use this.
20 *
21 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/cpufreq.h>
29#include <linux/pci.h>
30#include <linux/slab.h>
31#include <linux/string.h>
32#include <linux/delay.h>
33#include <linux/timex.h>
34#include <linux/io.h>
35#include <linux/acpi.h>
36
37#include <asm/msr.h>
38#include <acpi/processor.h>
39
40#include "longhaul.h"
41
42#define PFX "longhaul: "
43
44#define TYPE_LONGHAUL_V1 1
45#define TYPE_LONGHAUL_V2 2
46#define TYPE_POWERSAVER 3
47
48#define CPU_SAMUEL 1
49#define CPU_SAMUEL2 2
50#define CPU_EZRA 3
51#define CPU_EZRA_T 4
52#define CPU_NEHEMIAH 5
53#define CPU_NEHEMIAH_C 6
54
55/* Flags */
56#define USE_ACPI_C3 (1 << 1)
57#define USE_NORTHBRIDGE (1 << 2)
58
59static int cpu_model;
60static unsigned int numscales = 16;
61static unsigned int fsb;
62
63static const struct mV_pos *vrm_mV_table;
64static const unsigned char *mV_vrm_table;
65
66static unsigned int highest_speed, lowest_speed; /* kHz */
67static unsigned int minmult, maxmult;
68static int can_scale_voltage;
69static struct acpi_processor *pr;
70static struct acpi_processor_cx *cx;
71static u32 acpi_regs_addr;
72static u8 longhaul_flags;
73static unsigned int longhaul_index;
74
75/* Module parameters */
76static int scale_voltage;
77static int disable_acpi_c3;
78static int revid_errata;
79
80#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
81 "longhaul", msg)
82
83
84/* Clock ratios multiplied by 10 */
85static int mults[32];
86static int eblcr[32];
87static int longhaul_version;
88static struct cpufreq_frequency_table *longhaul_table;
89
90#ifdef CONFIG_CPU_FREQ_DEBUG
91static char speedbuffer[8];
92
93static char *print_speed(int speed)
94{
95 if (speed < 1000) {
96 snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
97 return speedbuffer;
98 }
99
100 if (speed%1000 == 0)
101 snprintf(speedbuffer, sizeof(speedbuffer),
102 "%dGHz", speed/1000);
103 else
104 snprintf(speedbuffer, sizeof(speedbuffer),
105 "%d.%dGHz", speed/1000, (speed%1000)/100);
106
107 return speedbuffer;
108}
109#endif
110
111
112static unsigned int calc_speed(int mult)
113{
114 int khz;
115 khz = (mult/10)*fsb;
116 if (mult%10)
117 khz += fsb/2;
118 khz *= 1000;
119 return khz;
120}
121
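/*
 * Worked example (hypothetical values): with fsb = 133 and mult = 45
 * (ratios are stored multiplied by 10, so 45 means 4.5x), calc_speed()
 * returns ((45/10) * 133 + 133/2) * 1000 = (532 + 66) * 1000 = 598000 kHz,
 * i.e. roughly a 600 MHz clock.
 */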
122
123static int longhaul_get_cpu_mult(void)
124{
125 unsigned long invalue = 0, lo, hi;
126
127 rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
128 invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
129 if (longhaul_version == TYPE_LONGHAUL_V2 ||
130 longhaul_version == TYPE_POWERSAVER) {
131 if (lo & (1<<27))
132 invalue += 16;
133 }
134 return eblcr[invalue];
135}
136
137/* For processor with BCR2 MSR */
138
139static void do_longhaul1(unsigned int mults_index)
140{
141 union msr_bcr2 bcr2;
142
143 rdmsrl(MSR_VIA_BCR2, bcr2.val);
144 /* Enable software clock multiplier */
145 bcr2.bits.ESOFTBF = 1;
146 bcr2.bits.CLOCKMUL = mults_index & 0xff;
147
148 /* Sync to timer tick */
149 safe_halt();
150 /* Change frequency on next halt or sleep */
151 wrmsrl(MSR_VIA_BCR2, bcr2.val);
152 /* Invoke transition */
153 ACPI_FLUSH_CPU_CACHE();
154 halt();
155
156 /* Disable software clock multiplier */
157 local_irq_disable();
158 rdmsrl(MSR_VIA_BCR2, bcr2.val);
159 bcr2.bits.ESOFTBF = 0;
160 wrmsrl(MSR_VIA_BCR2, bcr2.val);
161}
162
163/* For processor with Longhaul MSR */
164
165static void do_powersaver(int cx_address, unsigned int mults_index,
166 unsigned int dir)
167{
168 union msr_longhaul longhaul;
169 u32 t;
170
171 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
172 /* Setup new frequency */
173 if (!revid_errata)
174 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
175 else
176 longhaul.bits.RevisionKey = 0;
177 longhaul.bits.SoftBusRatio = mults_index & 0xf;
178 longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
179 /* Setup new voltage */
180 if (can_scale_voltage)
181 longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
182 /* Sync to timer tick */
183 safe_halt();
184 /* Raise voltage if necessary */
185 if (can_scale_voltage && dir) {
186 longhaul.bits.EnableSoftVID = 1;
187 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
188 /* Change voltage */
189 if (!cx_address) {
190 ACPI_FLUSH_CPU_CACHE();
191 halt();
192 } else {
193 ACPI_FLUSH_CPU_CACHE();
194 /* Invoke C3 */
195 inb(cx_address);
196 /* Dummy op - must do something useless after P_LVL3
197 * read */
198 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
199 }
200 longhaul.bits.EnableSoftVID = 0;
201 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
202 }
203
204 /* Change frequency on next halt or sleep */
205 longhaul.bits.EnableSoftBusRatio = 1;
206 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
207 if (!cx_address) {
208 ACPI_FLUSH_CPU_CACHE();
209 halt();
210 } else {
211 ACPI_FLUSH_CPU_CACHE();
212 /* Invoke C3 */
213 inb(cx_address);
214 /* Dummy op - must do something useless after P_LVL3 read */
215 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
216 }
217 /* Disable bus ratio bit */
218 longhaul.bits.EnableSoftBusRatio = 0;
219 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
220
221 /* Reduce voltage if necessary */
222 if (can_scale_voltage && !dir) {
223 longhaul.bits.EnableSoftVID = 1;
224 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
225 /* Change voltage */
226 if (!cx_address) {
227 ACPI_FLUSH_CPU_CACHE();
228 halt();
229 } else {
230 ACPI_FLUSH_CPU_CACHE();
231 /* Invoke C3 */
232 inb(cx_address);
233 /* Dummy op - must do something useless after P_LVL3
234 * read */
235 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
236 }
237 longhaul.bits.EnableSoftVID = 0;
238 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
239 }
240}
241
242/**
243 * longhaul_setstate()
244 * @table_index : index into longhaul_table for the new clock ratio.
245 *
246 * Sets a new clock ratio.
247 */
248
249static void longhaul_setstate(unsigned int table_index)
250{
251 unsigned int mults_index;
252 int speed, mult;
253 struct cpufreq_freqs freqs;
254 unsigned long flags;
255 unsigned int pic1_mask, pic2_mask;
256 u16 bm_status = 0;
257 u32 bm_timeout = 1000;
258 unsigned int dir = 0;
259
260 mults_index = longhaul_table[table_index].index;
261 /* Safety precautions */
262 mult = mults[mults_index & 0x1f];
263 if (mult == -1)
264 return;
265 speed = calc_speed(mult);
266 if ((speed > highest_speed) || (speed < lowest_speed))
267 return;
268 /* Voltage transition before frequency transition? */
269 if (can_scale_voltage && longhaul_index < table_index)
270 dir = 1;
271
272 freqs.old = calc_speed(longhaul_get_cpu_mult());
273 freqs.new = speed;
274 freqs.cpu = 0; /* longhaul.c is UP only driver */
275
276 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
277
278 dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
279 fsb, mult/10, mult%10, print_speed(speed/1000));
280retry_loop:
281 preempt_disable();
282 local_irq_save(flags);
283
284 pic2_mask = inb(0xA1);
285 pic1_mask = inb(0x21); /* works on C3. save mask. */
286 outb(0xFF, 0xA1); /* Overkill */
287 outb(0xFE, 0x21); /* TMR0 only */
288
289 /* Wait while PCI bus is busy. */
290 if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
291 || ((pr != NULL) && pr->flags.bm_control))) {
292 bm_status = inw(acpi_regs_addr);
293 bm_status &= 1 << 4;
294 while (bm_status && bm_timeout) {
295 outw(1 << 4, acpi_regs_addr);
296 bm_timeout--;
297 bm_status = inw(acpi_regs_addr);
298 bm_status &= 1 << 4;
299 }
300 }
301
302 if (longhaul_flags & USE_NORTHBRIDGE) {
303 /* Disable AGP and PCI arbiters */
304 outb(3, 0x22);
305 } else if ((pr != NULL) && pr->flags.bm_control) {
306 /* Disable bus master arbitration */
307 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
308 }
309 switch (longhaul_version) {
310
311 /*
312 * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
313 * Software controlled multipliers only.
314 */
315 case TYPE_LONGHAUL_V1:
316 do_longhaul1(mults_index);
317 break;
318
319 /*
320 * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
321 *
322 * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
323 * Nehemiah can do FSB scaling too, but this has never been proven
324 * to work in practice.
325 */
326 case TYPE_LONGHAUL_V2:
327 case TYPE_POWERSAVER:
328 if (longhaul_flags & USE_ACPI_C3) {
329 /* Don't allow wakeup */
330 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
331 do_powersaver(cx->address, mults_index, dir);
332 } else {
333 do_powersaver(0, mults_index, dir);
334 }
335 break;
336 }
337
338 if (longhaul_flags & USE_NORTHBRIDGE) {
339 /* Enable arbiters */
340 outb(0, 0x22);
341 } else if ((pr != NULL) && pr->flags.bm_control) {
342 /* Enable bus master arbitration */
343 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
344 }
345 outb(pic2_mask, 0xA1); /* restore mask */
346 outb(pic1_mask, 0x21);
347
348 local_irq_restore(flags);
349 preempt_enable();
350
351 freqs.new = calc_speed(longhaul_get_cpu_mult());
352 /* Check if requested frequency is set. */
353 if (unlikely(freqs.new != speed)) {
354 printk(KERN_INFO PFX "Failed to set requested frequency!\n");
355 /* Revision ID = 1 but processor is expecting revision key
356 * equal to 0. Jumpers at the bottom of processor will change
357 * multiplier and FSB, but will not change bits in Longhaul
358 * MSR nor enable voltage scaling. */
359 if (!revid_errata) {
360 printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
361 "option.\n");
362 revid_errata = 1;
363 msleep(200);
364 goto retry_loop;
365 }
366		 * But it does happen. Processor is entering ACPI C3 state,
367 * But it does happen. Processor is entering ACPI C3 state,
368 * but it doesn't change frequency. I tried poking various
369 * bits in northbridge registers, but without success. */
370 if (longhaul_flags & USE_ACPI_C3) {
371 printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
372 longhaul_flags &= ~USE_ACPI_C3;
373 if (revid_errata) {
374 printk(KERN_INFO PFX "Disabling \"Ignore "
375 "Revision ID\" option.\n");
376 revid_errata = 0;
377 }
378 msleep(200);
379 goto retry_loop;
380 }
381 /* This shouldn't happen. Longhaul ver. 2 was reported not
382 * working on processors without voltage scaling, but with
383 * RevID = 1. RevID errata will make things right. Just
384 * to be 100% sure. */
385 if (longhaul_version == TYPE_LONGHAUL_V2) {
386 printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
387 longhaul_version = TYPE_LONGHAUL_V1;
388 msleep(200);
389 goto retry_loop;
390 }
391 }
392 /* Report true CPU frequency */
393 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
394
395 if (!bm_timeout)
396 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
397 "idle PCI bus.\n");
398}
399
 400/*
 401 * Centaur decided to make life a little more tricky.
 402 * Only Longhaul v1 is allowed to read the EBLCR BSEL[0:1] bits.
 403 * Samuel2 and above have to try to guess what the FSB is. We do this
 404 * by assuming we booted at the maximum multiplier and interpolating
 405 * between that value multiplied by possible FSBs and cpu_mhz, which
 406 * was calculated at boot time. Really ugly, but no other way to do it.
 407 */
408
409#define ROUNDING 0xf
410
411static int guess_fsb(int mult)
412{
413 int speed = cpu_khz / 1000;
414 int i;
415 int speeds[] = { 666, 1000, 1333, 2000 };
416 int f_max, f_min;
417
418 for (i = 0; i < 4; i++) {
419 f_max = ((speeds[i] * mult) + 50) / 100;
420 f_max += (ROUNDING / 2);
421 f_min = f_max - ROUNDING;
422 if ((speed <= f_max) && (speed >= f_min))
423 return speeds[i] / 10;
424 }
425 return 0;
426}
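The rounding window above is easier to follow with concrete numbers. Below is a minimal stand-alone sketch of the same arithmetic; guess_fsb_demo, main and the sample values are hypothetical illustrations, not part of the driver. A CPU measured at roughly 800 MHz with a 6.0x multiplier (stored as 60) lands inside the 133 MHz FSB window, since speeds[] holds the candidate FSBs in 0.1 MHz units.

/* Stand-alone sketch of the guess_fsb() rounding logic (hypothetical values). */
#include <stdio.h>

#define DEMO_ROUNDING 0xf

static int guess_fsb_demo(int speed_mhz, int mult)
{
	/* candidate FSBs in 0.1 MHz units: 66.6, 100.0, 133.3, 200.0 MHz */
	int speeds[] = { 666, 1000, 1333, 2000 };
	int i;

	for (i = 0; i < 4; i++) {
		int f_max = ((speeds[i] * mult) + 50) / 100 + DEMO_ROUNDING / 2;
		int f_min = f_max - DEMO_ROUNDING;

		if (speed_mhz <= f_max && speed_mhz >= f_min)
			return speeds[i] / 10;		/* FSB in MHz */
	}
	return 0;					/* no candidate matched */
}

int main(void)
{
	/* 800 MHz measured, 6.0x multiplier: 1333 * 60 / 100 ~ 800, so FSB = 133 */
	printf("guessed FSB: %d MHz\n", guess_fsb_demo(800, 60));
	return 0;
}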
427
428
429static int __cpuinit longhaul_get_ranges(void)
430{
431 unsigned int i, j, k = 0;
432 unsigned int ratio;
433 int mult;
434
435 /* Get current frequency */
436 mult = longhaul_get_cpu_mult();
437 if (mult == -1) {
438 printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
439 return -EINVAL;
440 }
441 fsb = guess_fsb(mult);
442 if (fsb == 0) {
443 printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
444 return -EINVAL;
445 }
 446	/* Get the max multiplier the way we always did:
 447	 * the Longhaul MSR is useful only when voltage scaling is enabled,
 448	 * and the C3 boots at the maximum multiplier anyway. */
449 maxmult = mult;
450 /* Get min multiplier */
451 switch (cpu_model) {
452 case CPU_NEHEMIAH:
453 minmult = 50;
454 break;
455 case CPU_NEHEMIAH_C:
456 minmult = 40;
457 break;
458 default:
459 minmult = 30;
460 break;
461 }
462
463 dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n",
464 minmult/10, minmult%10, maxmult/10, maxmult%10);
465
466 highest_speed = calc_speed(maxmult);
467 lowest_speed = calc_speed(minmult);
468 dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
469 print_speed(lowest_speed/1000),
470 print_speed(highest_speed/1000));
471
472 if (lowest_speed == highest_speed) {
 473		printk(KERN_INFO PFX "highest speed == lowest speed, aborting.\n");
474 return -EINVAL;
475 }
476 if (lowest_speed > highest_speed) {
 477		printk(KERN_INFO PFX "nonsense! lowest speed (%d) > highest (%d), aborting!\n",
478 lowest_speed, highest_speed);
479 return -EINVAL;
480 }
481
482 longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
483 GFP_KERNEL);
484 if (!longhaul_table)
485 return -ENOMEM;
486
487 for (j = 0; j < numscales; j++) {
488 ratio = mults[j];
489 if (ratio == -1)
490 continue;
491 if (ratio > maxmult || ratio < minmult)
492 continue;
493 longhaul_table[k].frequency = calc_speed(ratio);
494 longhaul_table[k].index = j;
495 k++;
496 }
497 if (k <= 1) {
498 kfree(longhaul_table);
499 return -ENODEV;
500 }
501 /* Sort */
502 for (j = 0; j < k - 1; j++) {
503 unsigned int min_f, min_i;
504 min_f = longhaul_table[j].frequency;
505 min_i = j;
506 for (i = j + 1; i < k; i++) {
507 if (longhaul_table[i].frequency < min_f) {
508 min_f = longhaul_table[i].frequency;
509 min_i = i;
510 }
511 }
512 if (min_i != j) {
513 swap(longhaul_table[j].frequency,
514 longhaul_table[min_i].frequency);
515 swap(longhaul_table[j].index,
516 longhaul_table[min_i].index);
517 }
518 }
519
520 longhaul_table[k].frequency = CPUFREQ_TABLE_END;
521
522 /* Find index we are running on */
523 for (j = 0; j < k; j++) {
524 if (mults[longhaul_table[j].index & 0x1f] == mult) {
525 longhaul_index = j;
526 break;
527 }
528 }
529 return 0;
530}
531
532
533static void __cpuinit longhaul_setup_voltagescaling(void)
534{
535 union msr_longhaul longhaul;
536 struct mV_pos minvid, maxvid, vid;
537 unsigned int j, speed, pos, kHz_step, numvscales;
538 int min_vid_speed;
539
540 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
541 if (!(longhaul.bits.RevisionID & 1)) {
542 printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
543 return;
544 }
545
546 if (!longhaul.bits.VRMRev) {
547 printk(KERN_INFO PFX "VRM 8.5\n");
548 vrm_mV_table = &vrm85_mV[0];
549 mV_vrm_table = &mV_vrm85[0];
550 } else {
551 printk(KERN_INFO PFX "Mobile VRM\n");
552 if (cpu_model < CPU_NEHEMIAH)
553 return;
554 vrm_mV_table = &mobilevrm_mV[0];
555 mV_vrm_table = &mV_mobilevrm[0];
556 }
557
558 minvid = vrm_mV_table[longhaul.bits.MinimumVID];
559 maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
560
561 if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
562 printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
563 "Voltage scaling disabled.\n",
564 minvid.mV/1000, minvid.mV%1000,
565 maxvid.mV/1000, maxvid.mV%1000);
566 return;
567 }
568
569 if (minvid.mV == maxvid.mV) {
570 printk(KERN_INFO PFX "Claims to support voltage scaling but "
571 "min & max are both %d.%03d. "
572 "Voltage scaling disabled\n",
573 maxvid.mV/1000, maxvid.mV%1000);
574 return;
575 }
576
 577	/* How many voltage steps */
578 numvscales = maxvid.pos - minvid.pos + 1;
579 printk(KERN_INFO PFX
580 "Max VID=%d.%03d "
581 "Min VID=%d.%03d, "
582 "%d possible voltage scales\n",
583 maxvid.mV/1000, maxvid.mV%1000,
584 minvid.mV/1000, minvid.mV%1000,
585 numvscales);
586
587 /* Calculate max frequency at min voltage */
588 j = longhaul.bits.MinMHzBR;
589 if (longhaul.bits.MinMHzBR4)
590 j += 16;
591 min_vid_speed = eblcr[j];
592 if (min_vid_speed == -1)
593 return;
594 switch (longhaul.bits.MinMHzFSB) {
595 case 0:
596 min_vid_speed *= 13333;
597 break;
598 case 1:
599 min_vid_speed *= 10000;
600 break;
601 case 3:
602 min_vid_speed *= 6666;
603 break;
604 default:
605 return;
606 break;
607 }
608 if (min_vid_speed >= highest_speed)
609 return;
610 /* Calculate kHz for one voltage step */
611 kHz_step = (highest_speed - min_vid_speed) / numvscales;
612
613 j = 0;
614 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
615 speed = longhaul_table[j].frequency;
616 if (speed > min_vid_speed)
617 pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
618 else
619 pos = minvid.pos;
620 longhaul_table[j].index |= mV_vrm_table[pos] << 8;
621 vid = vrm_mV_table[mV_vrm_table[pos]];
622 printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
623 speed, j, vid.mV);
624 j++;
625 }
626
627 can_scale_voltage = 1;
628 printk(KERN_INFO PFX "Voltage scaling enabled.\n");
629}
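To see how a table entry picks up its VID, here is a minimal worked sketch of the kHz_step interpolation above. All numbers (1 GHz top speed, 600 MHz at minimum VID, 8 steps, minimum-VID position 2) are hypothetical.

/* Worked sketch of the frequency-to-VID-position mapping (hypothetical numbers). */
#include <stdio.h>

int main(void)
{
	unsigned int highest_speed = 1000000;	/* kHz, frequency at maximum VID */
	unsigned int min_vid_speed =  600000;	/* kHz, fastest speed at minimum VID */
	unsigned int numvscales    = 8;		/* usable voltage positions */
	unsigned int min_pos       = 2;		/* table position of the minimum VID */
	unsigned int speed         = 800000;	/* one frequency-table entry */
	unsigned int khz_step, pos;

	khz_step = (highest_speed - min_vid_speed) / numvscales;	/* 50000 kHz */
	if (speed > min_vid_speed)
		pos = (speed - min_vid_speed) / khz_step + min_pos;	/* 4 + 2 = 6 */
	else
		pos = min_pos;

	printf("kHz per voltage step: %u, VID position for %u kHz: %u\n",
	       khz_step, speed, pos);
	return 0;
}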
630
631
632static int longhaul_verify(struct cpufreq_policy *policy)
633{
634 return cpufreq_frequency_table_verify(policy, longhaul_table);
635}
636
637
638static int longhaul_target(struct cpufreq_policy *policy,
639 unsigned int target_freq, unsigned int relation)
640{
641 unsigned int table_index = 0;
642 unsigned int i;
643 unsigned int dir = 0;
644 u8 vid, current_vid;
645
646 if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
647 relation, &table_index))
648 return -EINVAL;
649
650 /* Don't set same frequency again */
651 if (longhaul_index == table_index)
652 return 0;
653
654 if (!can_scale_voltage)
655 longhaul_setstate(table_index);
656 else {
 657		/* On the test system, voltage transitions exceeding a single
 658		 * step up or down were turning the motherboard off. Both
 659		 * "ondemand" and "userspace" governors are therefore unsafe.
 660		 * The C7 does this stepping in hardware; the C3 is older, so
 661		 * we have to do it in software. */
662 i = longhaul_index;
663 current_vid = (longhaul_table[longhaul_index].index >> 8);
664 current_vid &= 0x1f;
665 if (table_index > longhaul_index)
666 dir = 1;
667 while (i != table_index) {
668 vid = (longhaul_table[i].index >> 8) & 0x1f;
669 if (vid != current_vid) {
670 longhaul_setstate(i);
671 current_vid = vid;
672 msleep(200);
673 }
674 if (dir)
675 i++;
676 else
677 i--;
678 }
679 longhaul_setstate(table_index);
680 }
681 longhaul_index = table_index;
682 return 0;
683}
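The comment in longhaul_target() is the key constraint: the voltage may only move by one VID step per transition. Below is a minimal sketch of that walk, with a small hypothetical (frequency, VID) table standing in for longhaul_table; the real driver programs the hardware and sleeps at each intermediate step.

/* Sketch of the one-VID-step-at-a-time walk (table contents are hypothetical). */
#include <stdio.h>

struct demo_entry { unsigned int freq_khz; unsigned char vid; };

int main(void)
{
	struct demo_entry table[] = {
		{ 400000, 4 }, { 600000, 4 }, { 800000, 6 }, { 1000000, 8 },
	};
	unsigned int cur = 0, target = 3;		/* table indices */
	unsigned char current_vid = table[cur].vid;
	int dir = (target > cur) ? 1 : -1;
	unsigned int i = cur;

	while (i != target) {
		if (table[i].vid != current_vid) {
			/* the real driver switches state here and msleep()s */
			printf("intermediate step: %u kHz, VID %u\n",
			       table[i].freq_khz, table[i].vid);
			current_vid = table[i].vid;
		}
		i += dir;
	}
	printf("final step: %u kHz, VID %u\n",
	       table[target].freq_khz, table[target].vid);
	return 0;
}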
684
685
686static unsigned int longhaul_get(unsigned int cpu)
687{
688 if (cpu)
689 return 0;
690 return calc_speed(longhaul_get_cpu_mult());
691}
692
693static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
694 u32 nesting_level,
695 void *context, void **return_value)
696{
697 struct acpi_device *d;
698
699 if (acpi_bus_get_device(obj_handle, &d))
700 return 0;
701
702 *return_value = acpi_driver_data(d);
703 return 1;
704}
705
 706/* VIA doesn't support the PM2 register, but has something similar */
707static int enable_arbiter_disable(void)
708{
709 struct pci_dev *dev;
710 int status = 1;
711 int reg;
712 u8 pci_cmd;
713
714 /* Find PLE133 host bridge */
715 reg = 0x78;
716 dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
717 NULL);
718 /* Find PM133/VT8605 host bridge */
719 if (dev == NULL)
720 dev = pci_get_device(PCI_VENDOR_ID_VIA,
721 PCI_DEVICE_ID_VIA_8605_0, NULL);
722 /* Find CLE266 host bridge */
723 if (dev == NULL) {
724 reg = 0x76;
725 dev = pci_get_device(PCI_VENDOR_ID_VIA,
726 PCI_DEVICE_ID_VIA_862X_0, NULL);
727 /* Find CN400 V-Link host bridge */
728 if (dev == NULL)
729 dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
730 }
731 if (dev != NULL) {
732 /* Enable access to port 0x22 */
733 pci_read_config_byte(dev, reg, &pci_cmd);
734 if (!(pci_cmd & 1<<7)) {
735 pci_cmd |= 1<<7;
736 pci_write_config_byte(dev, reg, pci_cmd);
737 pci_read_config_byte(dev, reg, &pci_cmd);
738 if (!(pci_cmd & 1<<7)) {
739 printk(KERN_ERR PFX
740 "Can't enable access to port 0x22.\n");
741 status = 0;
742 }
743 }
744 pci_dev_put(dev);
745 return status;
746 }
747 return 0;
748}
749
750static int longhaul_setup_southbridge(void)
751{
752 struct pci_dev *dev;
753 u8 pci_cmd;
754
755 /* Find VT8235 southbridge */
756 dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
757 if (dev == NULL)
758 /* Find VT8237 southbridge */
759 dev = pci_get_device(PCI_VENDOR_ID_VIA,
760 PCI_DEVICE_ID_VIA_8237, NULL);
761 if (dev != NULL) {
762 /* Set transition time to max */
763 pci_read_config_byte(dev, 0xec, &pci_cmd);
764 pci_cmd &= ~(1 << 2);
765 pci_write_config_byte(dev, 0xec, pci_cmd);
766 pci_read_config_byte(dev, 0xe4, &pci_cmd);
767 pci_cmd &= ~(1 << 7);
768 pci_write_config_byte(dev, 0xe4, pci_cmd);
769 pci_read_config_byte(dev, 0xe5, &pci_cmd);
770 pci_cmd |= 1 << 7;
771 pci_write_config_byte(dev, 0xe5, pci_cmd);
772 /* Get address of ACPI registers block*/
773 pci_read_config_byte(dev, 0x81, &pci_cmd);
774 if (pci_cmd & 1 << 7) {
775 pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
776 acpi_regs_addr &= 0xff00;
777 printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
778 acpi_regs_addr);
779 }
780
781 pci_dev_put(dev);
782 return 1;
783 }
784 return 0;
785}
786
787static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
788{
789 struct cpuinfo_x86 *c = &cpu_data(0);
790 char *cpuname = NULL;
791 int ret;
792 u32 lo, hi;
793
794 /* Check what we have on this motherboard */
795 switch (c->x86_model) {
796 case 6:
797 cpu_model = CPU_SAMUEL;
798 cpuname = "C3 'Samuel' [C5A]";
799 longhaul_version = TYPE_LONGHAUL_V1;
800 memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
801 memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
802 break;
803
804 case 7:
805 switch (c->x86_mask) {
806 case 0:
807 longhaul_version = TYPE_LONGHAUL_V1;
808 cpu_model = CPU_SAMUEL2;
809 cpuname = "C3 'Samuel 2' [C5B]";
 810			/* Note: this is not a typo; early Samuel2s had
 811			 * Samuel1 ratios. */
812 memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
813 memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
814 break;
815 case 1 ... 15:
816 longhaul_version = TYPE_LONGHAUL_V2;
817 if (c->x86_mask < 8) {
818 cpu_model = CPU_SAMUEL2;
819 cpuname = "C3 'Samuel 2' [C5B]";
820 } else {
821 cpu_model = CPU_EZRA;
822 cpuname = "C3 'Ezra' [C5C]";
823 }
824 memcpy(mults, ezra_mults, sizeof(ezra_mults));
825 memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
826 break;
827 }
828 break;
829
830 case 8:
831 cpu_model = CPU_EZRA_T;
832 cpuname = "C3 'Ezra-T' [C5M]";
833 longhaul_version = TYPE_POWERSAVER;
834 numscales = 32;
835 memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
836 memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
837 break;
838
839 case 9:
840 longhaul_version = TYPE_POWERSAVER;
841 numscales = 32;
842 memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
843 memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
844 switch (c->x86_mask) {
845 case 0 ... 1:
846 cpu_model = CPU_NEHEMIAH;
847 cpuname = "C3 'Nehemiah A' [C5XLOE]";
848 break;
849 case 2 ... 4:
850 cpu_model = CPU_NEHEMIAH;
851 cpuname = "C3 'Nehemiah B' [C5XLOH]";
852 break;
853 case 5 ... 15:
854 cpu_model = CPU_NEHEMIAH_C;
855 cpuname = "C3 'Nehemiah C' [C5P]";
856 break;
857 }
858 break;
859
860 default:
861 cpuname = "Unknown";
862 break;
863 }
864 /* Check Longhaul ver. 2 */
865 if (longhaul_version == TYPE_LONGHAUL_V2) {
866 rdmsr(MSR_VIA_LONGHAUL, lo, hi);
867 if (lo == 0 && hi == 0)
868 /* Looks like MSR isn't present */
869 longhaul_version = TYPE_LONGHAUL_V1;
870 }
871
872 printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
873 switch (longhaul_version) {
874 case TYPE_LONGHAUL_V1:
875 case TYPE_LONGHAUL_V2:
876 printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
877 break;
878 case TYPE_POWERSAVER:
879 printk(KERN_CONT "Powersaver supported.\n");
880 break;
 881	}
882
883 /* Doesn't hurt */
884 longhaul_setup_southbridge();
885
886 /* Find ACPI data for processor */
887 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
888 ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
889 NULL, (void *)&pr);
890
891 /* Check ACPI support for C3 state */
892 if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
893 cx = &pr->power.states[ACPI_STATE_C3];
894 if (cx->address > 0 && cx->latency <= 1000)
895 longhaul_flags |= USE_ACPI_C3;
896 }
897 /* Disable if it isn't working */
898 if (disable_acpi_c3)
899 longhaul_flags &= ~USE_ACPI_C3;
900 /* Check if northbridge is friendly */
901 if (enable_arbiter_disable())
902 longhaul_flags |= USE_NORTHBRIDGE;
903
904 /* Check ACPI support for bus master arbiter disable */
905 if (!(longhaul_flags & USE_ACPI_C3
906 || longhaul_flags & USE_NORTHBRIDGE)
907 && ((pr == NULL) || !(pr->flags.bm_control))) {
908 printk(KERN_ERR PFX
909 "No ACPI support. Unsupported northbridge.\n");
910 return -ENODEV;
911 }
912
913 if (longhaul_flags & USE_NORTHBRIDGE)
914 printk(KERN_INFO PFX "Using northbridge support.\n");
915 if (longhaul_flags & USE_ACPI_C3)
916 printk(KERN_INFO PFX "Using ACPI support.\n");
917
918 ret = longhaul_get_ranges();
919 if (ret != 0)
920 return ret;
921
922 if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
923 longhaul_setup_voltagescaling();
924
925 policy->cpuinfo.transition_latency = 200000; /* nsec */
926 policy->cur = calc_speed(longhaul_get_cpu_mult());
927
928 ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
929 if (ret)
930 return ret;
931
932 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
933
934 return 0;
935}
936
937static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
938{
939 cpufreq_frequency_table_put_attr(policy->cpu);
940 return 0;
941}
942
943static struct freq_attr *longhaul_attr[] = {
944 &cpufreq_freq_attr_scaling_available_freqs,
945 NULL,
946};
947
948static struct cpufreq_driver longhaul_driver = {
949 .verify = longhaul_verify,
950 .target = longhaul_target,
951 .get = longhaul_get,
952 .init = longhaul_cpu_init,
953 .exit = __devexit_p(longhaul_cpu_exit),
954 .name = "longhaul",
955 .owner = THIS_MODULE,
956 .attr = longhaul_attr,
957};
958
959
960static int __init longhaul_init(void)
961{
962 struct cpuinfo_x86 *c = &cpu_data(0);
963
964 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
965 return -ENODEV;
966
967#ifdef CONFIG_SMP
968 if (num_online_cpus() > 1) {
969 printk(KERN_ERR PFX "More than 1 CPU detected, "
970 "longhaul disabled.\n");
971 return -ENODEV;
972 }
973#endif
974#ifdef CONFIG_X86_IO_APIC
975 if (cpu_has_apic) {
976 printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
977 "broken in this configuration.\n");
978 return -ENODEV;
979 }
980#endif
981 switch (c->x86_model) {
982 case 6 ... 9:
983 return cpufreq_register_driver(&longhaul_driver);
984 case 10:
985 printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
986 default:
987 ;
988 }
989
990 return -ENODEV;
991}
992
993
994static void __exit longhaul_exit(void)
995{
996 int i;
997
998 for (i = 0; i < numscales; i++) {
999 if (mults[i] == maxmult) {
1000 longhaul_setstate(i);
1001 break;
1002 }
1003 }
1004
1005 cpufreq_unregister_driver(&longhaul_driver);
1006 kfree(longhaul_table);
1007}
1008
1009/* Even if the BIOS exports an ACPI C3 state, and it is used
1010 * successfully when the CPU is idle, this state doesn't
1011 * trigger a frequency transition in some cases. */
1012module_param(disable_acpi_c3, int, 0644);
1013MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
1014/* Change the CPU voltage with the frequency. Very useful for
1015 * saving power, but most VIA C3 processors don't support it. */
1016module_param(scale_voltage, int, 0644);
1017MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
1018/* Force the revision key to 0 for processors which don't
1019 * support voltage scaling but present themselves as if
1020 * they did. */
1021module_param(revid_errata, int, 0644);
1022MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
1023
1024MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1025MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
1026MODULE_LICENSE("GPL");
1027
1028late_initcall(longhaul_init);
1029module_exit(longhaul_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/arch/x86/kernel/cpu/cpufreq/longhaul.h
deleted file mode 100644
index cbf48fbca881..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.h
+++ /dev/null
@@ -1,353 +0,0 @@
1/*
2 * longhaul.h
3 * (C) 2003 Dave Jones.
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 *
7 * VIA-specific information
8 */
9
10union msr_bcr2 {
11 struct {
  12	unsigned Reserved:19,	// 18:0
13 ESOFTBF:1, // 19
14 Reserved2:3, // 22:20
15 CLOCKMUL:4, // 26:23
16 Reserved3:5; // 31:27
17 } bits;
18 unsigned long val;
19};
20
21union msr_longhaul {
22 struct {
23 unsigned RevisionID:4, // 3:0
24 RevisionKey:4, // 7:4
25 EnableSoftBusRatio:1, // 8
26 EnableSoftVID:1, // 9
27 EnableSoftBSEL:1, // 10
  28		Reserved:3,		// 13:11
29 SoftBusRatio4:1, // 14
30 VRMRev:1, // 15
31 SoftBusRatio:4, // 19:16
32 SoftVID:5, // 24:20
33 Reserved2:3, // 27:25
34 SoftBSEL:2, // 29:28
35 Reserved3:2, // 31:30
36 MaxMHzBR:4, // 35:32
37 MaximumVID:5, // 40:36
38 MaxMHzFSB:2, // 42:41
39 MaxMHzBR4:1, // 43
40 Reserved4:4, // 47:44
41 MinMHzBR:4, // 51:48
42 MinimumVID:5, // 56:52
43 MinMHzFSB:2, // 58:57
44 MinMHzBR4:1, // 59
45 Reserved5:4; // 63:60
46 } bits;
47 unsigned long long val;
48};
49
50/*
51 * Clock ratio tables. Div/Mod by 10 to get ratio.
52 * The eblcr values specify the ratio read from the CPU.
53 * The mults values specify what to write to the CPU.
54 */
55
56/*
57 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
58 */
59static const int __cpuinitdata samuel1_mults[16] = {
60 -1, /* 0000 -> RESERVED */
61 30, /* 0001 -> 3.0x */
62 40, /* 0010 -> 4.0x */
63 -1, /* 0011 -> RESERVED */
64 -1, /* 0100 -> RESERVED */
65 35, /* 0101 -> 3.5x */
66 45, /* 0110 -> 4.5x */
67 55, /* 0111 -> 5.5x */
68 60, /* 1000 -> 6.0x */
69 70, /* 1001 -> 7.0x */
70 80, /* 1010 -> 8.0x */
71 50, /* 1011 -> 5.0x */
72 65, /* 1100 -> 6.5x */
73 75, /* 1101 -> 7.5x */
74 -1, /* 1110 -> RESERVED */
75 -1, /* 1111 -> RESERVED */
76};
77
78static const int __cpuinitdata samuel1_eblcr[16] = {
79 50, /* 0000 -> RESERVED */
80 30, /* 0001 -> 3.0x */
81 40, /* 0010 -> 4.0x */
82 -1, /* 0011 -> RESERVED */
83 55, /* 0100 -> 5.5x */
84 35, /* 0101 -> 3.5x */
85 45, /* 0110 -> 4.5x */
86 -1, /* 0111 -> RESERVED */
87 -1, /* 1000 -> RESERVED */
88 70, /* 1001 -> 7.0x */
89 80, /* 1010 -> 8.0x */
90 60, /* 1011 -> 6.0x */
91 -1, /* 1100 -> RESERVED */
92 75, /* 1101 -> 7.5x */
93 -1, /* 1110 -> RESERVED */
94 65, /* 1111 -> 6.5x */
95};
96
97/*
98 * VIA C3 Samuel2 Stepping 1->15
99 */
100static const int __cpuinitdata samuel2_eblcr[16] = {
101 50, /* 0000 -> 5.0x */
102 30, /* 0001 -> 3.0x */
103 40, /* 0010 -> 4.0x */
104 100, /* 0011 -> 10.0x */
105 55, /* 0100 -> 5.5x */
106 35, /* 0101 -> 3.5x */
107 45, /* 0110 -> 4.5x */
108 110, /* 0111 -> 11.0x */
109 90, /* 1000 -> 9.0x */
110 70, /* 1001 -> 7.0x */
111 80, /* 1010 -> 8.0x */
112 60, /* 1011 -> 6.0x */
113 120, /* 1100 -> 12.0x */
114 75, /* 1101 -> 7.5x */
115 130, /* 1110 -> 13.0x */
116 65, /* 1111 -> 6.5x */
117};
118
119/*
120 * VIA C3 Ezra
121 */
122static const int __cpuinitdata ezra_mults[16] = {
123 100, /* 0000 -> 10.0x */
124 30, /* 0001 -> 3.0x */
125 40, /* 0010 -> 4.0x */
126 90, /* 0011 -> 9.0x */
127 95, /* 0100 -> 9.5x */
128 35, /* 0101 -> 3.5x */
129 45, /* 0110 -> 4.5x */
130 55, /* 0111 -> 5.5x */
131 60, /* 1000 -> 6.0x */
132 70, /* 1001 -> 7.0x */
133 80, /* 1010 -> 8.0x */
134 50, /* 1011 -> 5.0x */
135 65, /* 1100 -> 6.5x */
136 75, /* 1101 -> 7.5x */
137 85, /* 1110 -> 8.5x */
138 120, /* 1111 -> 12.0x */
139};
140
141static const int __cpuinitdata ezra_eblcr[16] = {
142 50, /* 0000 -> 5.0x */
143 30, /* 0001 -> 3.0x */
144 40, /* 0010 -> 4.0x */
145 100, /* 0011 -> 10.0x */
146 55, /* 0100 -> 5.5x */
147 35, /* 0101 -> 3.5x */
148 45, /* 0110 -> 4.5x */
149 95, /* 0111 -> 9.5x */
150 90, /* 1000 -> 9.0x */
151 70, /* 1001 -> 7.0x */
152 80, /* 1010 -> 8.0x */
153 60, /* 1011 -> 6.0x */
154 120, /* 1100 -> 12.0x */
155 75, /* 1101 -> 7.5x */
156 85, /* 1110 -> 8.5x */
157 65, /* 1111 -> 6.5x */
158};
159
160/*
161 * VIA C3 (Ezra-T) [C5M].
162 */
163static const int __cpuinitdata ezrat_mults[32] = {
164 100, /* 0000 -> 10.0x */
165 30, /* 0001 -> 3.0x */
166 40, /* 0010 -> 4.0x */
167 90, /* 0011 -> 9.0x */
168 95, /* 0100 -> 9.5x */
169 35, /* 0101 -> 3.5x */
170 45, /* 0110 -> 4.5x */
171 55, /* 0111 -> 5.5x */
172 60, /* 1000 -> 6.0x */
173 70, /* 1001 -> 7.0x */
174 80, /* 1010 -> 8.0x */
175 50, /* 1011 -> 5.0x */
176 65, /* 1100 -> 6.5x */
177 75, /* 1101 -> 7.5x */
178 85, /* 1110 -> 8.5x */
179 120, /* 1111 -> 12.0x */
180
181 -1, /* 0000 -> RESERVED (10.0x) */
182 110, /* 0001 -> 11.0x */
183 -1, /* 0010 -> 12.0x */
184 -1, /* 0011 -> RESERVED (9.0x)*/
185 105, /* 0100 -> 10.5x */
186 115, /* 0101 -> 11.5x */
187 125, /* 0110 -> 12.5x */
188 135, /* 0111 -> 13.5x */
189 140, /* 1000 -> 14.0x */
190 150, /* 1001 -> 15.0x */
191 160, /* 1010 -> 16.0x */
192 130, /* 1011 -> 13.0x */
193 145, /* 1100 -> 14.5x */
194 155, /* 1101 -> 15.5x */
195 -1, /* 1110 -> RESERVED (13.0x) */
196 -1, /* 1111 -> RESERVED (12.0x) */
197};
198
199static const int __cpuinitdata ezrat_eblcr[32] = {
200 50, /* 0000 -> 5.0x */
201 30, /* 0001 -> 3.0x */
202 40, /* 0010 -> 4.0x */
203 100, /* 0011 -> 10.0x */
204 55, /* 0100 -> 5.5x */
205 35, /* 0101 -> 3.5x */
206 45, /* 0110 -> 4.5x */
207 95, /* 0111 -> 9.5x */
208 90, /* 1000 -> 9.0x */
209 70, /* 1001 -> 7.0x */
210 80, /* 1010 -> 8.0x */
211 60, /* 1011 -> 6.0x */
212 120, /* 1100 -> 12.0x */
213 75, /* 1101 -> 7.5x */
214 85, /* 1110 -> 8.5x */
215 65, /* 1111 -> 6.5x */
216
217 -1, /* 0000 -> RESERVED (9.0x) */
218 110, /* 0001 -> 11.0x */
219 120, /* 0010 -> 12.0x */
220 -1, /* 0011 -> RESERVED (10.0x)*/
221 135, /* 0100 -> 13.5x */
222 115, /* 0101 -> 11.5x */
223 125, /* 0110 -> 12.5x */
224 105, /* 0111 -> 10.5x */
225 130, /* 1000 -> 13.0x */
226 150, /* 1001 -> 15.0x */
227 160, /* 1010 -> 16.0x */
228 140, /* 1011 -> 14.0x */
229 -1, /* 1100 -> RESERVED (12.0x) */
230 155, /* 1101 -> 15.5x */
231 -1, /* 1110 -> RESERVED (13.0x) */
232 145, /* 1111 -> 14.5x */
233};
234
 235/*
 236 * VIA C3 Nehemiah
 237 */
238static const int __cpuinitdata nehemiah_mults[32] = {
239 100, /* 0000 -> 10.0x */
240 -1, /* 0001 -> 16.0x */
241 40, /* 0010 -> 4.0x */
242 90, /* 0011 -> 9.0x */
243 95, /* 0100 -> 9.5x */
244 -1, /* 0101 -> RESERVED */
245 45, /* 0110 -> 4.5x */
246 55, /* 0111 -> 5.5x */
247 60, /* 1000 -> 6.0x */
248 70, /* 1001 -> 7.0x */
249 80, /* 1010 -> 8.0x */
250 50, /* 1011 -> 5.0x */
251 65, /* 1100 -> 6.5x */
252 75, /* 1101 -> 7.5x */
253 85, /* 1110 -> 8.5x */
254 120, /* 1111 -> 12.0x */
255 -1, /* 0000 -> 10.0x */
256 110, /* 0001 -> 11.0x */
257 -1, /* 0010 -> 12.0x */
258 -1, /* 0011 -> 9.0x */
259 105, /* 0100 -> 10.5x */
260 115, /* 0101 -> 11.5x */
261 125, /* 0110 -> 12.5x */
262 135, /* 0111 -> 13.5x */
263 140, /* 1000 -> 14.0x */
264 150, /* 1001 -> 15.0x */
265 160, /* 1010 -> 16.0x */
266 130, /* 1011 -> 13.0x */
267 145, /* 1100 -> 14.5x */
268 155, /* 1101 -> 15.5x */
269 -1, /* 1110 -> RESERVED (13.0x) */
270 -1, /* 1111 -> 12.0x */
271};
272
273static const int __cpuinitdata nehemiah_eblcr[32] = {
274 50, /* 0000 -> 5.0x */
275 160, /* 0001 -> 16.0x */
276 40, /* 0010 -> 4.0x */
277 100, /* 0011 -> 10.0x */
278 55, /* 0100 -> 5.5x */
279 -1, /* 0101 -> RESERVED */
280 45, /* 0110 -> 4.5x */
281 95, /* 0111 -> 9.5x */
282 90, /* 1000 -> 9.0x */
283 70, /* 1001 -> 7.0x */
284 80, /* 1010 -> 8.0x */
285 60, /* 1011 -> 6.0x */
286 120, /* 1100 -> 12.0x */
287 75, /* 1101 -> 7.5x */
288 85, /* 1110 -> 8.5x */
289 65, /* 1111 -> 6.5x */
290 90, /* 0000 -> 9.0x */
291 110, /* 0001 -> 11.0x */
292 120, /* 0010 -> 12.0x */
293 100, /* 0011 -> 10.0x */
294 135, /* 0100 -> 13.5x */
295 115, /* 0101 -> 11.5x */
296 125, /* 0110 -> 12.5x */
297 105, /* 0111 -> 10.5x */
298 130, /* 1000 -> 13.0x */
299 150, /* 1001 -> 15.0x */
300 160, /* 1010 -> 16.0x */
301 140, /* 1011 -> 14.0x */
302 120, /* 1100 -> 12.0x */
303 155, /* 1101 -> 15.5x */
304 -1, /* 1110 -> RESERVED (13.0x) */
305 145 /* 1111 -> 14.5x */
306};
307
308/*
309 * Voltage scales. Div/Mod by 1000 to get actual voltage.
310 * Which scale to use depends on the VRM type in use.
311 */
312
313struct mV_pos {
314 unsigned short mV;
315 unsigned short pos;
316};
317
318static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
319 {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
320 {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
321 {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
322 {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10},
323 {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3},
324 {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27},
325 {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19},
326 {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
327};
328
329static const unsigned char __cpuinitdata mV_vrm85[32] = {
330 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
331 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
332 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
333 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
334};
335
336static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
337 {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
338 {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
339 {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
340 {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16},
341 {975, 15}, {950, 14}, {925, 13}, {900, 12},
342 {875, 11}, {850, 10}, {825, 9}, {800, 8},
343 {775, 7}, {750, 6}, {725, 5}, {700, 4},
344 {675, 3}, {650, 2}, {625, 1}, {600, 0}
345};
346
347static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
348 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
349 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
350 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
351 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
352};
353
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
deleted file mode 100644
index d9f51367666b..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/cpufreq.h>
13#include <linux/timex.h>
14
15#include <asm/msr.h>
16#include <asm/processor.h>
17
18#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
19 "longrun", msg)
20
21static struct cpufreq_driver longrun_driver;
22
23/**
24 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
25 * values into per cent values. In TMTA microcode, the following is valid:
26 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
27 */
28static unsigned int longrun_low_freq, longrun_high_freq;
29
30
31/**
32 * longrun_get_policy - get the current LongRun policy
33 * @policy: struct cpufreq_policy where current policy is written into
34 *
  35 * Reads the current LongRun policy by accessing MSR_TMTA_LONGRUN_FLAGS
36 * and MSR_TMTA_LONGRUN_CTRL
37 */
38static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
39{
40 u32 msr_lo, msr_hi;
41
42 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
43 dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi);
44 if (msr_lo & 0x01)
45 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
46 else
47 policy->policy = CPUFREQ_POLICY_POWERSAVE;
48
49 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
50 dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
51 msr_lo &= 0x0000007F;
52 msr_hi &= 0x0000007F;
53
54 if (longrun_high_freq <= longrun_low_freq) {
55 /* Assume degenerate Longrun table */
56 policy->min = policy->max = longrun_high_freq;
57 } else {
58 policy->min = longrun_low_freq + msr_lo *
59 ((longrun_high_freq - longrun_low_freq) / 100);
60 policy->max = longrun_low_freq + msr_hi *
61 ((longrun_high_freq - longrun_low_freq) / 100);
62 }
63 policy->cpu = 0;
64}
65
66
67/**
68 * longrun_set_policy - sets a new CPUFreq policy
69 * @policy: new policy
70 *
71 * Sets a new CPUFreq policy on LongRun-capable processors. This function
72 * has to be called with cpufreq_driver locked.
73 */
74static int longrun_set_policy(struct cpufreq_policy *policy)
75{
76 u32 msr_lo, msr_hi;
77 u32 pctg_lo, pctg_hi;
78
79 if (!policy)
80 return -EINVAL;
81
82 if (longrun_high_freq <= longrun_low_freq) {
83 /* Assume degenerate Longrun table */
84 pctg_lo = pctg_hi = 100;
85 } else {
86 pctg_lo = (policy->min - longrun_low_freq) /
87 ((longrun_high_freq - longrun_low_freq) / 100);
88 pctg_hi = (policy->max - longrun_low_freq) /
89 ((longrun_high_freq - longrun_low_freq) / 100);
90 }
91
92 if (pctg_hi > 100)
93 pctg_hi = 100;
94 if (pctg_lo > pctg_hi)
95 pctg_lo = pctg_hi;
96
97 /* performance or economy mode */
98 rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
99 msr_lo &= 0xFFFFFFFE;
100 switch (policy->policy) {
101 case CPUFREQ_POLICY_PERFORMANCE:
102 msr_lo |= 0x00000001;
103 break;
104 case CPUFREQ_POLICY_POWERSAVE:
105 break;
106 }
107 wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
108
109 /* lower and upper boundary */
110 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
111 msr_lo &= 0xFFFFFF80;
112 msr_hi &= 0xFFFFFF80;
113 msr_lo |= pctg_lo;
114 msr_hi |= pctg_hi;
115 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
116
117 return 0;
118}
119
120
121/**
 122 * longrun_verify_policy - verifies a new CPUFreq policy
123 * @policy: the policy to verify
124 *
125 * Validates a new CPUFreq policy. This function has to be called with
126 * cpufreq_driver locked.
127 */
128static int longrun_verify_policy(struct cpufreq_policy *policy)
129{
130 if (!policy)
131 return -EINVAL;
132
133 policy->cpu = 0;
134 cpufreq_verify_within_limits(policy,
135 policy->cpuinfo.min_freq,
136 policy->cpuinfo.max_freq);
137
138 if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
139 (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
140 return -EINVAL;
141
142 return 0;
143}
144
145static unsigned int longrun_get(unsigned int cpu)
146{
147 u32 eax, ebx, ecx, edx;
148
149 if (cpu)
150 return 0;
151
152 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
153 dprintk("cpuid eax is %u\n", eax);
154
155 return eax * 1000;
156}
157
158/**
159 * longrun_determine_freqs - determines the lowest and highest possible core frequency
160 * @low_freq: an int to put the lowest frequency into
161 * @high_freq: an int to put the highest frequency into
162 *
163 * Determines the lowest and highest possible core frequencies on this CPU.
164 * This is necessary to calculate the performance percentage according to
165 * TMTA rules:
166 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
167 */
168static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
169 unsigned int *high_freq)
170{
171 u32 msr_lo, msr_hi;
172 u32 save_lo, save_hi;
173 u32 eax, ebx, ecx, edx;
174 u32 try_hi;
175 struct cpuinfo_x86 *c = &cpu_data(0);
176
177 if (!low_freq || !high_freq)
178 return -EINVAL;
179
180 if (cpu_has(c, X86_FEATURE_LRTI)) {
181 /* if the LongRun Table Interface is present, the
182 * detection is a bit easier:
183 * For minimum frequency, read out the maximum
184 * level (msr_hi), write that into "currently
185 * selected level", and read out the frequency.
186 * For maximum frequency, read out level zero.
187 */
188 /* minimum */
189 rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
190 wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
191 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
192 *low_freq = msr_lo * 1000; /* to kHz */
193
194 /* maximum */
195 wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
196 rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
197 *high_freq = msr_lo * 1000; /* to kHz */
198
199 dprintk("longrun table interface told %u - %u kHz\n",
200 *low_freq, *high_freq);
201
202 if (*low_freq > *high_freq)
203 *low_freq = *high_freq;
204 return 0;
205 }
206
207 /* set the upper border to the value determined during TSC init */
208 *high_freq = (cpu_khz / 1000);
209 *high_freq = *high_freq * 1000;
210 dprintk("high frequency is %u kHz\n", *high_freq);
211
212 /* get current borders */
213 rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
214 save_lo = msr_lo & 0x0000007F;
215 save_hi = msr_hi & 0x0000007F;
216
217 /* if current perf_pctg is larger than 90%, we need to decrease the
218 * upper limit to make the calculation more accurate.
219 */
220 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
 221	/* try decreasing in 10% steps; some processors react only
 222	 * at certain threshold values */
223 for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
224 /* set to 0 to try_hi perf_pctg */
225 msr_lo &= 0xFFFFFF80;
226 msr_hi &= 0xFFFFFF80;
227 msr_hi |= try_hi;
228 wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
229
230 /* read out current core MHz and current perf_pctg */
231 cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
232
233 /* restore values */
234 wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
235 }
236 dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax);
237
238 /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
 239	 * equals
240 * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
241 *
 242	 * high_freq * perf_pctg is stored temporarily in "ebx".
243 */
244 ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
245
246 if ((ecx > 95) || (ecx == 0) || (eax < ebx))
247 return -EIO;
248
249 edx = ((eax - ebx) * 100) / (100 - ecx);
250 *low_freq = edx * 1000; /* back to kHz */
251
252 dprintk("low frequency is %u kHz\n", *low_freq);
253
254 if (*low_freq > *high_freq)
255 *low_freq = *high_freq;
256
257 return 0;
258}
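The back-calculation at the end of longrun_determine_freqs() follows directly from the performance_pctg definition quoted in the comment. A worked sketch with hypothetical numbers (1000 MHz upper border, 700 MHz measured at a 50% performance percentage):

/* Worked sketch of the low-frequency back-calculation (hypothetical numbers). */
#include <stdio.h>

int main(void)
{
	unsigned int high_mhz = 1000;	/* cpu_khz / 1000, the upper border */
	unsigned int cur_mhz  = 700;	/* "eax": current core MHz from CPUID */
	unsigned int pctg     = 50;	/* "ecx": current performance percentage */
	unsigned int high_times_pctg, low_mhz;

	/* performance_pctg = (cur - low) / (high - low), solved for low:
	 * low = (cur - high * pctg / 100) * 100 / (100 - pctg)
	 */
	high_times_pctg = (high_mhz * pctg) / 100;		/* the "ebx" value */
	low_mhz = ((cur_mhz - high_times_pctg) * 100) / (100 - pctg);

	printf("low frequency ~ %u MHz (%u kHz)\n", low_mhz, low_mhz * 1000);
	return 0;
}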
259
260
261static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy)
262{
263 int result = 0;
264
265 /* capability check */
266 if (policy->cpu != 0)
267 return -ENODEV;
268
269 /* detect low and high frequency */
270 result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
271 if (result)
272 return result;
273
274 /* cpuinfo and default policy values */
275 policy->cpuinfo.min_freq = longrun_low_freq;
276 policy->cpuinfo.max_freq = longrun_high_freq;
277 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
278 longrun_get_policy(policy);
279
280 return 0;
281}
282
283
284static struct cpufreq_driver longrun_driver = {
285 .flags = CPUFREQ_CONST_LOOPS,
286 .verify = longrun_verify_policy,
287 .setpolicy = longrun_set_policy,
288 .get = longrun_get,
289 .init = longrun_cpu_init,
290 .name = "longrun",
291 .owner = THIS_MODULE,
292};
293
294
295/**
296 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
297 *
298 * Initializes the LongRun support.
299 */
300static int __init longrun_init(void)
301{
302 struct cpuinfo_x86 *c = &cpu_data(0);
303
304 if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
305 !cpu_has(c, X86_FEATURE_LONGRUN))
306 return -ENODEV;
307
308 return cpufreq_register_driver(&longrun_driver);
309}
310
311
312/**
313 * longrun_exit - unregisters LongRun support
314 */
315static void __exit longrun_exit(void)
316{
317 cpufreq_unregister_driver(&longrun_driver);
318}
319
320
321MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
322MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
323 "Efficeon processors.");
324MODULE_LICENSE("GPL");
325
326module_init(longrun_init);
327module_exit(longrun_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/arch/x86/kernel/cpu/cpufreq/mperf.c
deleted file mode 100644
index 911e193018ae..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/mperf.c
+++ /dev/null
@@ -1,51 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/smp.h>
3#include <linux/module.h>
4#include <linux/init.h>
5#include <linux/cpufreq.h>
6#include <linux/slab.h>
7
8#include "mperf.h"
9
10static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
11
12/* Called via smp_call_function_single(), on the target CPU */
13static void read_measured_perf_ctrs(void *_cur)
14{
15 struct aperfmperf *am = _cur;
16
17 get_aperfmperf(am);
18}
19
20/*
21 * Return the measured active (C0) frequency on this CPU since last call
22 * to this function.
23 * Input: cpu number
24 * Return: Average CPU frequency in terms of max frequency (zero on error)
25 *
26 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
27 * over a period of time, while CPU is in C0 state.
28 * IA32_MPERF counts at the rate of max advertised frequency
29 * IA32_APERF counts at the rate of actual CPU frequency
30 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
31 * no meaning should be associated with absolute values of these MSRs.
32 */
33unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
34 unsigned int cpu)
35{
36 struct aperfmperf perf;
37 unsigned long ratio;
38 unsigned int retval;
39
40 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
41 return 0;
42
43 ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
44 per_cpu(acfreq_old_perf, cpu) = perf;
45
46 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
47
48 return retval;
49}
50EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
51MODULE_LICENSE("GPL");
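For reference, the APERF/MPERF scaling boils down to "fraction of time spent at full speed, times the advertised maximum". A minimal stand-alone sketch with hypothetical counter deltas and a 10-bit fixed-point shift standing in for APERFMPERF_SHIFT:

/* Sketch of the APERF/MPERF scaling (hypothetical deltas, 10-bit fixed point). */
#include <stdio.h>

#define DEMO_SHIFT 10	/* stands in for APERFMPERF_SHIFT */

int main(void)
{
	unsigned long long aperf_delta = 3000000;	/* counts at the actual frequency */
	unsigned long long mperf_delta = 4000000;	/* counts at the maximum frequency */
	unsigned int max_freq_khz = 2000000;		/* advertised maximum, 2 GHz */
	unsigned long long ratio, measured_khz;

	ratio = (aperf_delta << DEMO_SHIFT) / mperf_delta;	/* ~0.75 in fixed point */
	measured_khz = ((unsigned long long)max_freq_khz * ratio) >> DEMO_SHIFT;

	printf("average C0 frequency ~ %llu kHz\n", measured_khz);	/* ~1500000 */
	return 0;
}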
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/arch/x86/kernel/cpu/cpufreq/mperf.h
deleted file mode 100644
index 5dbf2950dc22..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/mperf.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * (c) 2010 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 */
7
8unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
9 unsigned int cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
deleted file mode 100644
index 52c93648e492..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
5 * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
6 * (C) 2002 Tora T. Engstad
7 * All Rights Reserved
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * The author(s) of this software shall not be held liable for damages
15 * of any nature resulting due to the use of this software. This
16 * software is provided AS-IS with no warranties.
17 *
18 * Date Errata Description
19 * 20020525 N44, O17 12.5% or 25% DC causes lockup
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/smp.h>
27#include <linux/cpufreq.h>
28#include <linux/cpumask.h>
29#include <linux/timex.h>
30
31#include <asm/processor.h>
32#include <asm/msr.h>
33#include <asm/timer.h>
34
35#include "speedstep-lib.h"
36
37#define PFX "p4-clockmod: "
38#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
39 "p4-clockmod", msg)
40
41/*
  42 * Duty Cycle (3 bits); note that DC_DISABLE is not specified in
  43 * the Intel docs, it is just used here to mean "disable"
44 */
45enum {
46 DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
47 DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
48};
49
50#define DC_ENTRIES 8
51
52
53static int has_N44_O17_errata[NR_CPUS];
54static unsigned int stock_freq;
55static struct cpufreq_driver p4clockmod_driver;
56static unsigned int cpufreq_p4_get(unsigned int cpu);
57
58static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
59{
60 u32 l, h;
61
62 if (!cpu_online(cpu) ||
63 (newstate > DC_DISABLE) || (newstate == DC_RESV))
64 return -EINVAL;
65
66 rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
67
68 if (l & 0x01)
69 dprintk("CPU#%d currently thermal throttled\n", cpu);
70
71 if (has_N44_O17_errata[cpu] &&
72 (newstate == DC_25PT || newstate == DC_DFLT))
73 newstate = DC_38PT;
74
75 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
76 if (newstate == DC_DISABLE) {
77 dprintk("CPU#%d disabling modulation\n", cpu);
78 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
79 } else {
80 dprintk("CPU#%d setting duty cycle to %d%%\n",
81 cpu, ((125 * newstate) / 10));
82 /* bits 63 - 5 : reserved
83 * bit 4 : enable/disable
84 * bits 3-1 : duty cycle
85 * bit 0 : reserved
86 */
87 l = (l & ~14);
88 l = l | (1<<4) | ((newstate & 0x7)<<1);
89 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
90 }
91
92 return 0;
93}
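The bit layout documented in cpufreq_p4_setdc() can be checked with a short stand-alone sketch; the starting MSR value and the duty-cycle code below are hypothetical.

/* Sketch of the THERM_CONTROL duty-cycle encoding (hypothetical values). */
#include <stdio.h>

int main(void)
{
	unsigned int l = 0;		/* pretend low word read from the MSR */
	unsigned int newstate = 4;	/* DC_50PT: 12.5% * 4 = 50% duty cycle */

	l &= ~0xeU;				/* clear the duty-cycle field, bits 3:1 */
	l |= (1U << 4) | ((newstate & 0x7U) << 1);	/* set enable bit and new code */

	printf("duty cycle %u%%, MSR low word = 0x%x\n", (125 * newstate) / 10, l);
	return 0;
}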
94
95
96static struct cpufreq_frequency_table p4clockmod_table[] = {
97 {DC_RESV, CPUFREQ_ENTRY_INVALID},
98 {DC_DFLT, 0},
99 {DC_25PT, 0},
100 {DC_38PT, 0},
101 {DC_50PT, 0},
102 {DC_64PT, 0},
103 {DC_75PT, 0},
104 {DC_88PT, 0},
105 {DC_DISABLE, 0},
106 {DC_RESV, CPUFREQ_TABLE_END},
107};
108
109
110static int cpufreq_p4_target(struct cpufreq_policy *policy,
111 unsigned int target_freq,
112 unsigned int relation)
113{
114 unsigned int newstate = DC_RESV;
115 struct cpufreq_freqs freqs;
116 int i;
117
118 if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
119 target_freq, relation, &newstate))
120 return -EINVAL;
121
122 freqs.old = cpufreq_p4_get(policy->cpu);
123 freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;
124
125 if (freqs.new == freqs.old)
126 return 0;
127
128 /* notifiers */
129 for_each_cpu(i, policy->cpus) {
130 freqs.cpu = i;
131 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
132 }
133
134 /* run on each logical CPU,
135 * see section 13.15.3 of IA32 Intel Architecture Software
136 * Developer's Manual, Volume 3
137 */
138 for_each_cpu(i, policy->cpus)
139 cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
140
141 /* notifiers */
142 for_each_cpu(i, policy->cpus) {
143 freqs.cpu = i;
144 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
145 }
146
147 return 0;
148}
149
150
151static int cpufreq_p4_verify(struct cpufreq_policy *policy)
152{
153 return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
154}
155
156
157static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
158{
159 if (c->x86 == 0x06) {
160 if (cpu_has(c, X86_FEATURE_EST))
161 printk_once(KERN_WARNING PFX "Warning: EST-capable "
162 "CPU detected. The acpi-cpufreq module offers "
163 "voltage scaling in addition to frequency "
164 "scaling. You should use that instead of "
165 "p4-clockmod, if possible.\n");
166 switch (c->x86_model) {
167 case 0x0E: /* Core */
168 case 0x0F: /* Core Duo */
169 case 0x16: /* Celeron Core */
170 case 0x1C: /* Atom */
171 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
172 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
173 case 0x0D: /* Pentium M (Dothan) */
174 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
175 /* fall through */
176 case 0x09: /* Pentium M (Banias) */
177 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
178 }
179 }
180
181 if (c->x86 != 0xF)
182 return 0;
183
 184	/* on P-4s, the TSC runs at a constant frequency independent of
 185	 * whether throttling is active or not. */
186 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
187
188 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
189 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
190 "The speedstep-ich or acpi cpufreq modules offer "
191 "voltage scaling in addition of frequency scaling. "
192 "You should use either one instead of p4-clockmod, "
193 "if possible.\n");
194 return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
195 }
196
197 return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
198}
199
200
201
202static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
203{
204 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
205 int cpuid = 0;
206 unsigned int i;
207
208#ifdef CONFIG_SMP
209 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
210#endif
211
212 /* Errata workaround */
213 cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
214 switch (cpuid) {
215 case 0x0f07:
216 case 0x0f0a:
217 case 0x0f11:
218 case 0x0f12:
219 has_N44_O17_errata[policy->cpu] = 1;
220 dprintk("has errata -- disabling low frequencies\n");
221 }
222
223 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
224 c->x86_model < 2) {
225 /* switch to maximum frequency and measure result */
226 cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
227 recalibrate_cpu_khz();
228 }
229 /* get max frequency */
230 stock_freq = cpufreq_p4_get_frequency(c);
231 if (!stock_freq)
232 return -EINVAL;
233
234 /* table init */
235 for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
236 if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
237 p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
238 else
239 p4clockmod_table[i].frequency = (stock_freq * i)/8;
240 }
241 cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
242
243 /* cpuinfo and default policy values */
244
245 /* the transition latency is set to be 1 higher than the maximum
246 * transition latency of the ondemand governor */
247 policy->cpuinfo.transition_latency = 10000001;
248 policy->cur = stock_freq;
249
250 return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
251}
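Each valid table entry filled in above is simply an i/8 fraction of the stock frequency. A minimal sketch, assuming a hypothetical 2.4 GHz stock frequency:

/* Sketch of how the p4-clockmod frequency table is filled (hypothetical stock_freq). */
#include <stdio.h>

int main(void)
{
	unsigned int stock_freq = 2400000;	/* kHz, full-speed frequency */
	unsigned int i;

	/* entries 1..8 correspond to 12.5% .. 100% duty cycle */
	for (i = 1; i <= 8; i++)
		printf("state %u: %7u kHz (%u.%u%%)\n",
		       i, (stock_freq * i) / 8, (i * 125) / 10, (i * 125) % 10);
	return 0;
}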
252
253
254static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
255{
256 cpufreq_frequency_table_put_attr(policy->cpu);
257 return 0;
258}
259
260static unsigned int cpufreq_p4_get(unsigned int cpu)
261{
262 u32 l, h;
263
264 rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
265
266 if (l & 0x10) {
267 l = l >> 1;
268 l &= 0x7;
269 } else
270 l = DC_DISABLE;
271
272 if (l != DC_DISABLE)
273 return stock_freq * l / 8;
274
275 return stock_freq;
276}
277
278static struct freq_attr *p4clockmod_attr[] = {
279 &cpufreq_freq_attr_scaling_available_freqs,
280 NULL,
281};
282
283static struct cpufreq_driver p4clockmod_driver = {
284 .verify = cpufreq_p4_verify,
285 .target = cpufreq_p4_target,
286 .init = cpufreq_p4_cpu_init,
287 .exit = cpufreq_p4_cpu_exit,
288 .get = cpufreq_p4_get,
289 .name = "p4-clockmod",
290 .owner = THIS_MODULE,
291 .attr = p4clockmod_attr,
292};
293
294
295static int __init cpufreq_p4_init(void)
296{
297 struct cpuinfo_x86 *c = &cpu_data(0);
298 int ret;
299
300 /*
301 * THERM_CONTROL is architectural for IA32 now, so
302 * we can rely on the capability checks
303 */
304 if (c->x86_vendor != X86_VENDOR_INTEL)
305 return -ENODEV;
306
307 if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
308 !test_cpu_cap(c, X86_FEATURE_ACC))
309 return -ENODEV;
310
311 ret = cpufreq_register_driver(&p4clockmod_driver);
312 if (!ret)
313 printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
314 "Modulation available\n");
315
316 return ret;
317}
318
319
320static void __exit cpufreq_p4_exit(void)
321{
322 cpufreq_unregister_driver(&p4clockmod_driver);
323}
324
325
326MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
327MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
328MODULE_LICENSE("GPL");
329
330late_initcall(cpufreq_p4_init);
331module_exit(cpufreq_p4_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
deleted file mode 100644
index 755a31e0f5b0..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ /dev/null
@@ -1,624 +0,0 @@
1/*
2 * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
3 *
4 * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
5 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
6 * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
7 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
17 * INFRINGEMENT. See the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/smp.h>
30#include <linux/sched.h>
31#include <linux/cpufreq.h>
32#include <linux/compiler.h>
33#include <linux/slab.h>
34
35#include <linux/acpi.h>
36#include <linux/io.h>
37#include <linux/spinlock.h>
38#include <linux/uaccess.h>
39
40#include <acpi/processor.h>
41
42#define PCC_VERSION "1.00.00"
43#define POLL_LOOPS 300
44
45#define CMD_COMPLETE 0x1
46#define CMD_GET_FREQ 0x0
47#define CMD_SET_FREQ 0x1
48
49#define BUF_SZ 4
50
51#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
52 "pcc-cpufreq", msg)
53
54struct pcc_register_resource {
55 u8 descriptor;
56 u16 length;
57 u8 space_id;
58 u8 bit_width;
59 u8 bit_offset;
60 u8 access_size;
61 u64 address;
62} __attribute__ ((packed));
63
64struct pcc_memory_resource {
65 u8 descriptor;
66 u16 length;
67 u8 space_id;
68 u8 resource_usage;
69 u8 type_specific;
70 u64 granularity;
71 u64 minimum;
72 u64 maximum;
73 u64 translation_offset;
74 u64 address_length;
75} __attribute__ ((packed));
76
77static struct cpufreq_driver pcc_cpufreq_driver;
78
79struct pcc_header {
80 u32 signature;
81 u16 length;
82 u8 major;
83 u8 minor;
84 u32 features;
85 u16 command;
86 u16 status;
87 u32 latency;
88 u32 minimum_time;
89 u32 maximum_time;
90 u32 nominal;
91 u32 throttled_frequency;
92 u32 minimum_frequency;
93};
94
95static void __iomem *pcch_virt_addr;
96static struct pcc_header __iomem *pcch_hdr;
97
98static DEFINE_SPINLOCK(pcc_lock);
99
100static struct acpi_generic_address doorbell;
101
102static u64 doorbell_preserve;
103static u64 doorbell_write;
104
105static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f,
106 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
107
108struct pcc_cpu {
109 u32 input_offset;
110 u32 output_offset;
111};
112
113static struct pcc_cpu __percpu *pcc_cpu_info;
114
115static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
116{
117 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
118 policy->cpuinfo.max_freq);
119 return 0;
120}
121
122static inline void pcc_cmd(void)
123{
124 u64 doorbell_value;
125 int i;
126
127 acpi_read(&doorbell_value, &doorbell);
128 acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
129 &doorbell);
130
131 for (i = 0; i < POLL_LOOPS; i++) {
132 if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
133 break;
134 }
135}
136
137static inline void pcc_clear_mapping(void)
138{
139 if (pcch_virt_addr)
140 iounmap(pcch_virt_addr);
141 pcch_virt_addr = NULL;
142}
143
144static unsigned int pcc_get_freq(unsigned int cpu)
145{
146 struct pcc_cpu *pcc_cpu_data;
147 unsigned int curr_freq;
148 unsigned int freq_limit;
149 u16 status;
150 u32 input_buffer;
151 u32 output_buffer;
152
153 spin_lock(&pcc_lock);
154
155 dprintk("get: get_freq for CPU %d\n", cpu);
156 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
157
158 input_buffer = 0x1;
159 iowrite32(input_buffer,
160 (pcch_virt_addr + pcc_cpu_data->input_offset));
161 iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
162
163 pcc_cmd();
164
165 output_buffer =
166 ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
167
168 /* Clear the input buffer - we are done with the current command */
169 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
170
171 status = ioread16(&pcch_hdr->status);
172 if (status != CMD_COMPLETE) {
173 dprintk("get: FAILED: for CPU %d, status is %d\n",
174 cpu, status);
175 goto cmd_incomplete;
176 }
177 iowrite16(0, &pcch_hdr->status);
178 curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
179 / 100) * 1000);
180
181 dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is "
182 "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n",
183 cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
184 output_buffer, curr_freq);
185
186 freq_limit = (output_buffer >> 8) & 0xff;
187 if (freq_limit != 0xff) {
188 dprintk("get: frequency for cpu %d is being temporarily"
189 " capped at %d\n", cpu, curr_freq);
190 }
191
192 spin_unlock(&pcc_lock);
193 return curr_freq;
194
195cmd_incomplete:
196 iowrite16(0, &pcch_hdr->status);
197 spin_unlock(&pcc_lock);
198 return 0;
199}
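The frequency returned by pcc_get_freq() is a percentage of the nominal frequency taken from the low byte of the output buffer, while the next byte carries an optional firmware cap. A short decode with hypothetical register contents:

/* Sketch of the PCC output-buffer decode (hypothetical register contents). */
#include <stdio.h>

int main(void)
{
	unsigned int nominal_mhz = 2933;	/* what pcch_hdr->nominal would report */
	unsigned int output_buffer = 0xff50;	/* low byte 0x50 = 80% of nominal */
	unsigned int pct = output_buffer & 0xff;
	unsigned int freq_limit = (output_buffer >> 8) & 0xff;	/* 0xff means "no cap" */
	unsigned int curr_khz = ((nominal_mhz * pct) / 100) * 1000;

	printf("current frequency: %u kHz%s\n", curr_khz,
	       freq_limit != 0xff ? " (temporarily capped by firmware)" : "");
	return 0;
}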
200
201static int pcc_cpufreq_target(struct cpufreq_policy *policy,
202 unsigned int target_freq,
203 unsigned int relation)
204{
205 struct pcc_cpu *pcc_cpu_data;
206 struct cpufreq_freqs freqs;
207 u16 status;
208 u32 input_buffer;
209 int cpu;
210
211 spin_lock(&pcc_lock);
212 cpu = policy->cpu;
213 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
214
215 dprintk("target: CPU %d should go to target freq: %d "
216 "(virtual) input_offset is 0x%x\n",
217 cpu, target_freq,
218 (pcch_virt_addr + pcc_cpu_data->input_offset));
219
220 freqs.new = target_freq;
221 freqs.cpu = cpu;
222 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
223
224 input_buffer = 0x1 | (((target_freq * 100)
225 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
226 iowrite32(input_buffer,
227 (pcch_virt_addr + pcc_cpu_data->input_offset));
228 iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
229
230 pcc_cmd();
231
232 /* Clear the input buffer - we are done with the current command */
233 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
234
235 status = ioread16(&pcch_hdr->status);
236 if (status != CMD_COMPLETE) {
237 dprintk("target: FAILED for cpu %d, with status: 0x%x\n",
238 cpu, status);
239 goto cmd_incomplete;
240 }
241 iowrite16(0, &pcch_hdr->status);
242
243 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
244 dprintk("target: was SUCCESSFUL for cpu %d\n", cpu);
245 spin_unlock(&pcc_lock);
246
247 return 0;
248
249cmd_incomplete:
250 iowrite16(0, &pcch_hdr->status);
251 spin_unlock(&pcc_lock);
252 return -EINVAL;
253}
254
255static int pcc_get_offset(int cpu)
256{
257 acpi_status status;
258 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
259 union acpi_object *pccp, *offset;
260 struct pcc_cpu *pcc_cpu_data;
261 struct acpi_processor *pr;
262 int ret = 0;
263
264 pr = per_cpu(processors, cpu);
265 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
266
267 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
268 if (ACPI_FAILURE(status))
269 return -ENODEV;
270
271 pccp = buffer.pointer;
272 if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
273 ret = -ENODEV;
274 goto out_free;
275 };
276
277 offset = &(pccp->package.elements[0]);
278 if (!offset || offset->type != ACPI_TYPE_INTEGER) {
279 ret = -ENODEV;
280 goto out_free;
281 }
282
283 pcc_cpu_data->input_offset = offset->integer.value;
284
285 offset = &(pccp->package.elements[1]);
286 if (!offset || offset->type != ACPI_TYPE_INTEGER) {
287 ret = -ENODEV;
288 goto out_free;
289 }
290
291 pcc_cpu_data->output_offset = offset->integer.value;
292
293 memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
294 memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
295
296 dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data "
297 "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
298 cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
299out_free:
300 kfree(buffer.pointer);
301 return ret;
302}
303
304static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
305{
306 acpi_status status;
307 struct acpi_object_list input;
308 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
309 union acpi_object in_params[4];
310 union acpi_object *out_obj;
311 u32 capabilities[2];
312 u32 errors;
313 u32 supported;
314 int ret = 0;
315
316 input.count = 4;
317 input.pointer = in_params;
318 in_params[0].type = ACPI_TYPE_BUFFER;
319 in_params[0].buffer.length = 16;
320 in_params[0].buffer.pointer = OSC_UUID;
321 in_params[1].type = ACPI_TYPE_INTEGER;
322 in_params[1].integer.value = 1;
323 in_params[2].type = ACPI_TYPE_INTEGER;
324 in_params[2].integer.value = 2;
325 in_params[3].type = ACPI_TYPE_BUFFER;
326 in_params[3].buffer.length = 8;
327 in_params[3].buffer.pointer = (u8 *)&capabilities;
328
329 capabilities[0] = OSC_QUERY_ENABLE;
330 capabilities[1] = 0x1;
331
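	/* first pass: with OSC_QUERY_ENABLE set, only ask which capabilities
	 * the firmware supports */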
332 status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
333 if (ACPI_FAILURE(status))
334 return -ENODEV;
335
336 if (!output.length)
337 return -ENODEV;
338
339 out_obj = output.pointer;
340 if (out_obj->type != ACPI_TYPE_BUFFER) {
341 ret = -ENODEV;
342 goto out_free;
343 }
344
345 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
346 if (errors) {
347 ret = -ENODEV;
348 goto out_free;
349 }
350
351 supported = *((u32 *)(out_obj->buffer.pointer + 4));
352 if (!(supported & 0x1)) {
353 ret = -ENODEV;
354 goto out_free;
355 }
356
357 kfree(output.pointer);
358 capabilities[0] = 0x0;
359 capabilities[1] = 0x1;
360
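	/* second pass: query bit cleared, actually request control of the
	 * supported capability */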
361 status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
362 if (ACPI_FAILURE(status))
363 return -ENODEV;
364
365 if (!output.length)
366 return -ENODEV;
367
368 out_obj = output.pointer;
369 if (out_obj->type != ACPI_TYPE_BUFFER) {
370 ret = -ENODEV;
371 goto out_free;
372 }
373
374 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
375 if (errors) {
376 ret = -ENODEV;
377 goto out_free;
378 }
379
380 supported = *((u32 *)(out_obj->buffer.pointer + 4));
381 if (!(supported & 0x1)) {
382 ret = -ENODEV;
383 goto out_free;
384 }
385
386out_free:
387 kfree(output.pointer);
388 return ret;
389}
390
391static int __init pcc_cpufreq_probe(void)
392{
393 acpi_status status;
394 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
395 struct pcc_memory_resource *mem_resource;
396 struct pcc_register_resource *reg_resource;
397 union acpi_object *out_obj, *member;
398 acpi_handle handle, osc_handle, pcch_handle;
399 int ret = 0;
400
401 status = acpi_get_handle(NULL, "\\_SB", &handle);
402 if (ACPI_FAILURE(status))
403 return -ENODEV;
404
405 status = acpi_get_handle(handle, "PCCH", &pcch_handle);
406 if (ACPI_FAILURE(status))
407 return -ENODEV;
408
409 status = acpi_get_handle(handle, "_OSC", &osc_handle);
410 if (ACPI_SUCCESS(status)) {
411 ret = pcc_cpufreq_do_osc(&osc_handle);
412 if (ret)
413 dprintk("probe: _OSC evaluation did not succeed\n");
414 /* Firmware's use of _OSC is optional */
415 ret = 0;
416 }
417
418 status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
419 if (ACPI_FAILURE(status))
420 return -ENODEV;
421
422 out_obj = output.pointer;
423 if (out_obj->type != ACPI_TYPE_PACKAGE) {
424 ret = -ENODEV;
425 goto out_free;
426 }
427
428 member = &out_obj->package.elements[0];
429 if (member->type != ACPI_TYPE_BUFFER) {
430 ret = -ENODEV;
431 goto out_free;
432 }
433
434 mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
435
436 dprintk("probe: mem_resource descriptor: 0x%x,"
437 " length: %d, space_id: %d, resource_usage: %d,"
438 " type_specific: %d, granularity: 0x%llx,"
439 " minimum: 0x%llx, maximum: 0x%llx,"
440 " translation_offset: 0x%llx, address_length: 0x%llx\n",
441 mem_resource->descriptor, mem_resource->length,
442 mem_resource->space_id, mem_resource->resource_usage,
443 mem_resource->type_specific, mem_resource->granularity,
444 mem_resource->minimum, mem_resource->maximum,
445 mem_resource->translation_offset,
446 mem_resource->address_length);
447
448 if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
449 ret = -ENODEV;
450 goto out_free;
451 }
452
453 pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
454 mem_resource->address_length);
455 if (pcch_virt_addr == NULL) {
456 dprintk("probe: could not map shared mem region\n");
457 goto out_free;
458 }
459 pcch_hdr = pcch_virt_addr;
460
461 dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
462 dprintk("probe: PCCH header is at physical address: 0x%llx,"
463 " signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
464 " supported features: 0x%x, command field: 0x%x,"
465 " status field: 0x%x, nominal latency: %d us\n",
466 mem_resource->minimum, ioread32(&pcch_hdr->signature),
467 ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
468 ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
469 ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
470 ioread32(&pcch_hdr->latency));
471
472 dprintk("probe: min time between commands: %d us,"
473 " max time between commands: %d us,"
474 " nominal CPU frequency: %d MHz,"
475 " minimum CPU frequency: %d MHz,"
476 " minimum CPU frequency without throttling: %d MHz\n",
477 ioread32(&pcch_hdr->minimum_time),
478 ioread32(&pcch_hdr->maximum_time),
479 ioread32(&pcch_hdr->nominal),
480 ioread32(&pcch_hdr->throttled_frequency),
481 ioread32(&pcch_hdr->minimum_frequency));
482
483 member = &out_obj->package.elements[1];
484 if (member->type != ACPI_TYPE_BUFFER) {
485 ret = -ENODEV;
486 goto pcch_free;
487 }
488
489 reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
490
491 doorbell.space_id = reg_resource->space_id;
492 doorbell.bit_width = reg_resource->bit_width;
493 doorbell.bit_offset = reg_resource->bit_offset;
494 doorbell.access_width = 64;
495 doorbell.address = reg_resource->address;
496
497 dprintk("probe: doorbell: space_id is %d, bit_width is %d, "
498 "bit_offset is %d, access_width is %d, address is 0x%llx\n",
499 doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
500 doorbell.access_width, reg_resource->address);
501
502 member = &out_obj->package.elements[2];
503 if (member->type != ACPI_TYPE_INTEGER) {
504 ret = -ENODEV;
505 goto pcch_free;
506 }
507
508 doorbell_preserve = member->integer.value;
509
510 member = &out_obj->package.elements[3];
511 if (member->type != ACPI_TYPE_INTEGER) {
512 ret = -ENODEV;
513 goto pcch_free;
514 }
515
516 doorbell_write = member->integer.value;
517
518 dprintk("probe: doorbell_preserve: 0x%llx,"
519 " doorbell_write: 0x%llx\n",
520 doorbell_preserve, doorbell_write);
521
522 pcc_cpu_info = alloc_percpu(struct pcc_cpu);
523 if (!pcc_cpu_info) {
524 ret = -ENOMEM;
525 goto pcch_free;
526 }
527
528 printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
529 " limits: %d MHz, %d MHz\n", PCC_VERSION,
530 ioread32(&pcch_hdr->minimum_frequency),
531 ioread32(&pcch_hdr->nominal));
532 kfree(output.pointer);
533 return ret;
534pcch_free:
535 pcc_clear_mapping();
536out_free:
537 kfree(output.pointer);
538 return ret;
539}
540
541static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
542{
543 unsigned int cpu = policy->cpu;
544 unsigned int result = 0;
545
546 if (!pcch_virt_addr) {
547 result = -1;
548 goto out;
549 }
550
551 result = pcc_get_offset(cpu);
552 if (result) {
553 dprintk("init: PCCP evaluation failed\n");
554 goto out;
555 }
556
557 policy->max = policy->cpuinfo.max_freq =
558 ioread32(&pcch_hdr->nominal) * 1000;
559 policy->min = policy->cpuinfo.min_freq =
560 ioread32(&pcch_hdr->minimum_frequency) * 1000;
561 policy->cur = pcc_get_freq(cpu);
562
563 if (!policy->cur) {
564 dprintk("init: Unable to get current CPU frequency\n");
565 result = -EINVAL;
566 goto out;
567 }
568
569 dprintk("init: policy->max is %d, policy->min is %d\n",
570 policy->max, policy->min);
571out:
572 return result;
573}
574
575static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
576{
577 return 0;
578}
579
580static struct cpufreq_driver pcc_cpufreq_driver = {
581 .flags = CPUFREQ_CONST_LOOPS,
582 .get = pcc_get_freq,
583 .verify = pcc_cpufreq_verify,
584 .target = pcc_cpufreq_target,
585 .init = pcc_cpufreq_cpu_init,
586 .exit = pcc_cpufreq_cpu_exit,
587 .name = "pcc-cpufreq",
588 .owner = THIS_MODULE,
589};
590
591static int __init pcc_cpufreq_init(void)
592{
593 int ret;
594
595 if (acpi_disabled)
596 return 0;
597
598 ret = pcc_cpufreq_probe();
599 if (ret) {
600 dprintk("pcc_cpufreq_init: PCCH evaluation failed\n");
601 return ret;
602 }
603
604 ret = cpufreq_register_driver(&pcc_cpufreq_driver);
605
606 return ret;
607}
608
609static void __exit pcc_cpufreq_exit(void)
610{
611 cpufreq_unregister_driver(&pcc_cpufreq_driver);
612
613 pcc_clear_mapping();
614
615 free_percpu(pcc_cpu_info);
616}
617
618MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
619MODULE_VERSION(PCC_VERSION);
620MODULE_DESCRIPTION("Processor Clocking Control interface driver");
621MODULE_LICENSE("GPL");
622
623late_initcall(pcc_cpufreq_init);
624module_exit(pcc_cpufreq_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
deleted file mode 100644
index b3379d6a5c57..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ /dev/null
@@ -1,261 +0,0 @@
1/*
2 * This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
3 * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
4 * Dominik Brodowski.
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/cpufreq.h>
15#include <linux/ioport.h>
16#include <linux/timex.h>
17#include <linux/io.h>
18
19#include <asm/msr.h>
20
21#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
22 as it is unused */
23
24#define PFX "powernow-k6: "
25static unsigned int busfreq; /* FSB, in 10 kHz */
26static unsigned int max_multiplier;
27
28
29/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
30static struct cpufreq_frequency_table clock_ratio[] = {
31 {45, /* 000 -> 4.5x */ 0},
32 {50, /* 001 -> 5.0x */ 0},
33 {40, /* 010 -> 4.0x */ 0},
34 {55, /* 011 -> 5.5x */ 0},
35 {20, /* 100 -> 2.0x */ 0},
36 {30, /* 101 -> 3.0x */ 0},
37 {60, /* 110 -> 6.0x */ 0},
38 {35, /* 111 -> 3.5x */ 0},
39 {0, CPUFREQ_TABLE_END}
40};
41
42
43/**
44 * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
45 *
46 * Returns the current setting of the frequency multiplier. Core clock
 47 * speed is the Front-Side Bus frequency multiplied by this value.
48 */
49static int powernow_k6_get_cpu_multiplier(void)
50{
51 u64 invalue = 0;
52 u32 msrval;
53
54 msrval = POWERNOW_IOPORT + 0x1;
55 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
56 invalue = inl(POWERNOW_IOPORT + 0x8);
57 msrval = POWERNOW_IOPORT + 0x0;
58 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
59
60 return clock_ratio[(invalue >> 5)&7].index;
61}
62
63
64/**
65 * powernow_k6_set_state - set the PowerNow! multiplier
66 * @best_i: clock_ratio[best_i] is the target multiplier
67 *
68 * Tries to change the PowerNow! multiplier
69 */
70static void powernow_k6_set_state(unsigned int best_i)
71{
72 unsigned long outvalue = 0, invalue = 0;
73 unsigned long msrval;
74 struct cpufreq_freqs freqs;
75
76 if (clock_ratio[best_i].index > max_multiplier) {
77 printk(KERN_ERR PFX "invalid target frequency\n");
78 return;
79 }
80
81 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
82 freqs.new = busfreq * clock_ratio[best_i].index;
83 freqs.cpu = 0; /* powernow-k6.c is UP only driver */
84
85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
86
87 /* we now need to transform best_i to the BVC format, see AMD#23446 */
88
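	/* bits 5-7 carry the new multiplier index (matching the read path
	 * above); the remaining set bits are control bits described in
	 * AMD#23446 */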
89 outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
90
91 msrval = POWERNOW_IOPORT + 0x1;
92 wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
93 invalue = inl(POWERNOW_IOPORT + 0x8);
94 invalue = invalue & 0xf;
95 outvalue = outvalue | invalue;
96 outl(outvalue , (POWERNOW_IOPORT + 0x8));
97 msrval = POWERNOW_IOPORT + 0x0;
98 wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
99
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
101
102 return;
103}
104
105
106/**
107 * powernow_k6_verify - verifies a new CPUfreq policy
108 * @policy: new policy
109 *
110 * Policy must be within lowest and highest possible CPU Frequency,
111 * and at least one possible state must be within min and max.
112 */
113static int powernow_k6_verify(struct cpufreq_policy *policy)
114{
115 return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
116}
117
118
119/**
 120 * powernow_k6_target - sets a new CPUFreq policy
121 * @policy: new policy
122 * @target_freq: the target frequency
123 * @relation: how that frequency relates to achieved frequency
124 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
125 *
126 * sets a new CPUFreq policy
127 */
128static int powernow_k6_target(struct cpufreq_policy *policy,
129 unsigned int target_freq,
130 unsigned int relation)
131{
132 unsigned int newstate = 0;
133
134 if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
135 target_freq, relation, &newstate))
136 return -EINVAL;
137
138 powernow_k6_set_state(newstate);
139
140 return 0;
141}
142
143
144static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
145{
146 unsigned int i, f;
147 int result;
148
149 if (policy->cpu != 0)
150 return -ENODEV;
151
152 /* get frequencies */
153 max_multiplier = powernow_k6_get_cpu_multiplier();
154 busfreq = cpu_khz / max_multiplier;
155
156 /* table init */
157 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
158 f = clock_ratio[i].index;
159 if (f > max_multiplier)
160 clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
161 else
162 clock_ratio[i].frequency = busfreq * f;
163 }
164
165 /* cpuinfo and default policy values */
166 policy->cpuinfo.transition_latency = 200000;
167 policy->cur = busfreq * max_multiplier;
168
169 result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
170 if (result)
171 return result;
172
173 cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
174
175 return 0;
176}
177
178
179static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
180{
181 unsigned int i;
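	/* restore the maximum (boot-time) multiplier before unloading */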
182 for (i = 0; i < 8; i++) {
183 if (i == max_multiplier)
184 powernow_k6_set_state(i);
185 }
186 cpufreq_frequency_table_put_attr(policy->cpu);
187 return 0;
188}
189
190static unsigned int powernow_k6_get(unsigned int cpu)
191{
192 unsigned int ret;
193 ret = (busfreq * powernow_k6_get_cpu_multiplier());
194 return ret;
195}
196
197static struct freq_attr *powernow_k6_attr[] = {
198 &cpufreq_freq_attr_scaling_available_freqs,
199 NULL,
200};
201
202static struct cpufreq_driver powernow_k6_driver = {
203 .verify = powernow_k6_verify,
204 .target = powernow_k6_target,
205 .init = powernow_k6_cpu_init,
206 .exit = powernow_k6_cpu_exit,
207 .get = powernow_k6_get,
208 .name = "powernow-k6",
209 .owner = THIS_MODULE,
210 .attr = powernow_k6_attr,
211};
212
213
214/**
215 * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
216 *
217 * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
 218 * devices, -EIO or -EINVAL on problems during initialization, and zero
219 * on success.
220 */
221static int __init powernow_k6_init(void)
222{
223 struct cpuinfo_x86 *c = &cpu_data(0);
224
225 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
226 ((c->x86_model != 12) && (c->x86_model != 13)))
227 return -ENODEV;
228
229 if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
230 printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
231 return -EIO;
232 }
233
234 if (cpufreq_register_driver(&powernow_k6_driver)) {
235 release_region(POWERNOW_IOPORT, 16);
236 return -EINVAL;
237 }
238
239 return 0;
240}
241
242
243/**
244 * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
245 *
246 * Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
247 */
248static void __exit powernow_k6_exit(void)
249{
250 cpufreq_unregister_driver(&powernow_k6_driver);
251 release_region(POWERNOW_IOPORT, 16);
252}
253
254
255MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
256 "Dominik Brodowski <linux@brodo.de>");
257MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
258MODULE_LICENSE("GPL");
259
260module_init(powernow_k6_init);
261module_exit(powernow_k6_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
deleted file mode 100644
index 4a45fd6e41ba..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ /dev/null
@@ -1,752 +0,0 @@
1/*
2 * AMD K7 Powernow driver.
3 * (C) 2003 Dave Jones on behalf of SuSE Labs.
4 * (C) 2003-2004 Dave Jones <davej@redhat.com>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 * Based upon datasheets & sample CPUs kindly provided by AMD.
8 *
9 * Errata 5:
10 * CPU may fail to execute a FID/VID change in presence of interrupt.
11 * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
12 * Errata 15:
13 * CPU with half frequency multipliers may hang upon wakeup from disconnect.
14 * - We disable half multipliers if ACPI is used on A0 stepping CPUs.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/init.h>
21#include <linux/cpufreq.h>
22#include <linux/slab.h>
23#include <linux/string.h>
24#include <linux/dmi.h>
25#include <linux/timex.h>
26#include <linux/io.h>
27
28#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
29#include <asm/msr.h>
30#include <asm/system.h>
31
32#ifdef CONFIG_X86_POWERNOW_K7_ACPI
33#include <linux/acpi.h>
34#include <acpi/processor.h>
35#endif
36
37#include "powernow-k7.h"
38
39#define PFX "powernow: "
40
41
42struct psb_s {
43 u8 signature[10];
44 u8 tableversion;
45 u8 flags;
46 u16 settlingtime;
47 u8 reserved1;
48 u8 numpst;
49};
50
51struct pst_s {
52 u32 cpuid;
53 u8 fsbspeed;
54 u8 maxfid;
55 u8 startvid;
56 u8 numpstates;
57};
58
59#ifdef CONFIG_X86_POWERNOW_K7_ACPI
60union powernow_acpi_control_t {
61 struct {
62 unsigned long fid:5,
63 vid:5,
64 sgtc:20,
65 res1:2;
66 } bits;
67 unsigned long val;
68};
69#endif
70
71#ifdef CONFIG_CPU_FREQ_DEBUG
72/* divide by 1000 to get VCore voltage in V. */
73static const int mobile_vid_table[32] = {
74 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
75 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
76 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
77 1075, 1050, 1025, 1000, 975, 950, 925, 0,
78};
79#endif
80
81/* divide by 10 to get FID. */
82static const int fid_codes[32] = {
83 110, 115, 120, 125, 50, 55, 60, 65,
84 70, 75, 80, 85, 90, 95, 100, 105,
85 30, 190, 40, 200, 130, 135, 140, 210,
86 150, 225, 160, 165, 170, 180, -1, -1,
87};
88
 89/* This parameter forces use of ACPI instead of the legacy method for
 90 * configuration.
 91 */
92
93static int acpi_force;
94
95static struct cpufreq_frequency_table *powernow_table;
96
97static unsigned int can_scale_bus;
98static unsigned int can_scale_vid;
99static unsigned int minimum_speed = -1;
100static unsigned int maximum_speed;
101static unsigned int number_scales;
102static unsigned int fsb;
103static unsigned int latency;
104static char have_a0;
105
106#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
107 "powernow-k7", msg)
108
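/* Accept the PST-reported FSB speed (in MHz) only if it is within 5 MHz of
 * the measured bus frequency. */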
109static int check_fsb(unsigned int fsbspeed)
110{
111 int delta;
112 unsigned int f = fsb / 1000;
113
114 delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
115 return delta < 5;
116}
117
118static int check_powernow(void)
119{
120 struct cpuinfo_x86 *c = &cpu_data(0);
121 unsigned int maxei, eax, ebx, ecx, edx;
122
123 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) {
124#ifdef MODULE
125 printk(KERN_INFO PFX "This module only works with "
126 "AMD K7 CPUs\n");
127#endif
128 return 0;
129 }
130
131 /* Get maximum capabilities */
132 maxei = cpuid_eax(0x80000000);
133 if (maxei < 0x80000007) { /* Any powernow info ? */
134#ifdef MODULE
135 printk(KERN_INFO PFX "No powernow capabilities detected\n");
136#endif
137 return 0;
138 }
139
140 if ((c->x86_model == 6) && (c->x86_mask == 0)) {
141 printk(KERN_INFO PFX "K7 660[A0] core detected, "
142 "enabling errata workarounds\n");
143 have_a0 = 1;
144 }
145
146 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
147
148 /* Check we can actually do something before we say anything.*/
149 if (!(edx & (1 << 1 | 1 << 2)))
150 return 0;
151
152 printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
153
154 if (edx & 1 << 1) {
155 printk("frequency");
156 can_scale_bus = 1;
157 }
158
159 if ((edx & (1 << 1 | 1 << 2)) == 0x6)
160 printk(" and ");
161
162 if (edx & 1 << 2) {
163 printk("voltage");
164 can_scale_vid = 1;
165 }
166
167 printk(".\n");
168 return 1;
169}
170
171#ifdef CONFIG_X86_POWERNOW_K7_ACPI
172static void invalidate_entry(unsigned int entry)
173{
174 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
175}
176#endif
177
178static int get_ranges(unsigned char *pst)
179{
180 unsigned int j;
181 unsigned int speed;
182 u8 fid, vid;
183
184 powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
185 (number_scales + 1)), GFP_KERNEL);
186 if (!powernow_table)
187 return -ENOMEM;
188
189 for (j = 0 ; j < number_scales; j++) {
190 fid = *pst++;
191
192 powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
193 powernow_table[j].index = fid; /* lower 8 bits */
194
195 speed = powernow_table[j].frequency;
196
197 if ((fid_codes[fid] % 10) == 5) {
198#ifdef CONFIG_X86_POWERNOW_K7_ACPI
199 if (have_a0 == 1)
200 invalidate_entry(j);
201#endif
202 }
203
204 if (speed < minimum_speed)
205 minimum_speed = speed;
206 if (speed > maximum_speed)
207 maximum_speed = speed;
208
209 vid = *pst++;
210 powernow_table[j].index |= (vid << 8); /* upper 8 bits */
211
212 dprintk(" FID: 0x%x (%d.%dx [%dMHz]) "
213 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
214 fid_codes[fid] % 10, speed/1000, vid,
215 mobile_vid_table[vid]/1000,
216 mobile_vid_table[vid]%1000);
217 }
218 powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
219 powernow_table[number_scales].index = 0;
220
221 return 0;
222}
223
224
225static void change_FID(int fid)
226{
227 union msr_fidvidctl fidvidctl;
228
229 rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
230 if (fidvidctl.bits.FID != fid) {
231 fidvidctl.bits.SGTC = latency;
232 fidvidctl.bits.FID = fid;
233 fidvidctl.bits.VIDC = 0;
234 fidvidctl.bits.FIDC = 1;
235 wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
236 }
237}
238
239
240static void change_VID(int vid)
241{
242 union msr_fidvidctl fidvidctl;
243
244 rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
245 if (fidvidctl.bits.VID != vid) {
246 fidvidctl.bits.SGTC = latency;
247 fidvidctl.bits.VID = vid;
248 fidvidctl.bits.FIDC = 0;
249 fidvidctl.bits.VIDC = 1;
250 wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
251 }
252}
253
254
255static void change_speed(unsigned int index)
256{
257 u8 fid, vid;
258 struct cpufreq_freqs freqs;
259 union msr_fidvidstatus fidvidstatus;
260 int cfid;
261
262 /* fid are the lower 8 bits of the index we stored into
263 * the cpufreq frequency table in powernow_decode_bios,
264 * vid are the upper 8 bits.
265 */
266
267 fid = powernow_table[index].index & 0xFF;
268 vid = (powernow_table[index].index & 0xFF00) >> 8;
269
270 freqs.cpu = 0;
271
272 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
273 cfid = fidvidstatus.bits.CFID;
274 freqs.old = fsb * fid_codes[cfid] / 10;
275
276 freqs.new = powernow_table[index].frequency;
277
278 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
279
280 /* Now do the magic poking into the MSRs. */
281
282 if (have_a0 == 1) /* A0 errata 5 */
283 local_irq_disable();
284
285 if (freqs.old > freqs.new) {
286 /* Going down, so change FID first */
287 change_FID(fid);
288 change_VID(vid);
289 } else {
290 /* Going up, so change VID first */
291 change_VID(vid);
292 change_FID(fid);
293 }
294
295
296 if (have_a0 == 1)
297 local_irq_enable();
298
299 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
300}
301
302
303#ifdef CONFIG_X86_POWERNOW_K7_ACPI
304
305static struct acpi_processor_performance *acpi_processor_perf;
306
307static int powernow_acpi_init(void)
308{
309 int i;
310 int retval = 0;
311 union powernow_acpi_control_t pc;
312
313 if (acpi_processor_perf != NULL && powernow_table != NULL) {
314 retval = -EINVAL;
315 goto err0;
316 }
317
318 acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
319 GFP_KERNEL);
320 if (!acpi_processor_perf) {
321 retval = -ENOMEM;
322 goto err0;
323 }
324
325 if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
326 GFP_KERNEL)) {
327 retval = -ENOMEM;
328 goto err05;
329 }
330
331 if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
332 retval = -EIO;
333 goto err1;
334 }
335
336 if (acpi_processor_perf->control_register.space_id !=
337 ACPI_ADR_SPACE_FIXED_HARDWARE) {
338 retval = -ENODEV;
339 goto err2;
340 }
341
342 if (acpi_processor_perf->status_register.space_id !=
343 ACPI_ADR_SPACE_FIXED_HARDWARE) {
344 retval = -ENODEV;
345 goto err2;
346 }
347
348 number_scales = acpi_processor_perf->state_count;
349
350 if (number_scales < 2) {
351 retval = -ENODEV;
352 goto err2;
353 }
354
355 powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
356 (number_scales + 1)), GFP_KERNEL);
357 if (!powernow_table) {
358 retval = -ENOMEM;
359 goto err2;
360 }
361
362 pc.val = (unsigned long) acpi_processor_perf->states[0].control;
363 for (i = 0; i < number_scales; i++) {
364 u8 fid, vid;
365 struct acpi_processor_px *state =
366 &acpi_processor_perf->states[i];
367 unsigned int speed, speed_mhz;
368
369 pc.val = (unsigned long) state->control;
370 dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
371 i,
372 (u32) state->core_frequency,
373 (u32) state->power,
374 (u32) state->transition_latency,
375 (u32) state->control,
376 pc.bits.sgtc);
377
378 vid = pc.bits.vid;
379 fid = pc.bits.fid;
380
381 powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
382 powernow_table[i].index = fid; /* lower 8 bits */
383 powernow_table[i].index |= (vid << 8); /* upper 8 bits */
384
385 speed = powernow_table[i].frequency;
386 speed_mhz = speed / 1000;
387
388 /* processor_perflib will multiply the MHz value by 1000 to
389 * get a KHz value (e.g. 1266000). However, powernow-k7 works
390 * with true KHz values (e.g. 1266768). To ensure that all
391 * powernow frequencies are available, we must ensure that
392 * ACPI doesn't restrict them, so we round up the MHz value
393 * to ensure that perflib's computed KHz value is greater than
394 * or equal to powernow's KHz value.
395 */
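		/* e.g. a true speed of 1266768 kHz rounds up to 1267 MHz, so
		 * perflib's computed 1267000 kHz cannot clip it */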
396 if (speed % 1000 > 0)
397 speed_mhz++;
398
399 if ((fid_codes[fid] % 10) == 5) {
400 if (have_a0 == 1)
401 invalidate_entry(i);
402 }
403
404 dprintk(" FID: 0x%x (%d.%dx [%dMHz]) "
405 "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
406 fid_codes[fid] % 10, speed_mhz, vid,
407 mobile_vid_table[vid]/1000,
408 mobile_vid_table[vid]%1000);
409
410 if (state->core_frequency != speed_mhz) {
411 state->core_frequency = speed_mhz;
412 dprintk(" Corrected ACPI frequency to %d\n",
413 speed_mhz);
414 }
415
416 if (latency < pc.bits.sgtc)
417 latency = pc.bits.sgtc;
418
419 if (speed < minimum_speed)
420 minimum_speed = speed;
421 if (speed > maximum_speed)
422 maximum_speed = speed;
423 }
424
425 powernow_table[i].frequency = CPUFREQ_TABLE_END;
426 powernow_table[i].index = 0;
427
428 /* notify BIOS that we exist */
429 acpi_processor_notify_smm(THIS_MODULE);
430
431 return 0;
432
433err2:
434 acpi_processor_unregister_performance(acpi_processor_perf, 0);
435err1:
436 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
437err05:
438 kfree(acpi_processor_perf);
439err0:
440 printk(KERN_WARNING PFX "ACPI perflib can not be used on "
441 "this platform\n");
442 acpi_processor_perf = NULL;
443 return retval;
444}
445#else
446static int powernow_acpi_init(void)
447{
 448	printk(KERN_INFO PFX "no ACPI processor support found."
 449			" Please recompile your kernel with ACPI processor support\n");
450 return -EINVAL;
451}
452#endif
453
454static void print_pst_entry(struct pst_s *pst, unsigned int j)
455{
456 dprintk("PST:%d (@%p)\n", j, pst);
457 dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
458 pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
459}
460
461static int powernow_decode_bios(int maxfid, int startvid)
462{
463 struct psb_s *psb;
464 struct pst_s *pst;
465 unsigned int i, j;
466 unsigned char *p;
467 unsigned int etuple;
468 unsigned int ret;
469
470 etuple = cpuid_eax(0x80000001);
471
472 for (i = 0xC0000; i < 0xffff0 ; i += 16) {
473
474 p = phys_to_virt(i);
475
476 if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
477 dprintk("Found PSB header at %p\n", p);
478 psb = (struct psb_s *) p;
479 dprintk("Table version: 0x%x\n", psb->tableversion);
480 if (psb->tableversion != 0x12) {
481 printk(KERN_INFO PFX "Sorry, only v1.2 tables"
482 " supported right now\n");
483 return -ENODEV;
484 }
485
486 dprintk("Flags: 0x%x\n", psb->flags);
487 if ((psb->flags & 1) == 0)
488 dprintk("Mobile voltage regulator\n");
489 else
490 dprintk("Desktop voltage regulator\n");
491
492 latency = psb->settlingtime;
493 if (latency < 100) {
494 printk(KERN_INFO PFX "BIOS set settling time "
495 "to %d microseconds. "
496 "Should be at least 100. "
497 "Correcting.\n", latency);
498 latency = 100;
499 }
500 dprintk("Settling Time: %d microseconds.\n",
501 psb->settlingtime);
502 dprintk("Has %d PST tables. (Only dumping ones "
503 "relevant to this CPU).\n",
504 psb->numpst);
505
506 p += sizeof(struct psb_s);
507
508 pst = (struct pst_s *) p;
509
510 for (j = 0; j < psb->numpst; j++) {
511 pst = (struct pst_s *) p;
512 number_scales = pst->numpstates;
513
514 if ((etuple == pst->cpuid) &&
515 check_fsb(pst->fsbspeed) &&
516 (maxfid == pst->maxfid) &&
517 (startvid == pst->startvid)) {
518 print_pst_entry(pst, j);
519 p = (char *)pst + sizeof(struct pst_s);
520 ret = get_ranges(p);
521 return ret;
522 } else {
523 unsigned int k;
524 p = (char *)pst + sizeof(struct pst_s);
525 for (k = 0; k < number_scales; k++)
526 p += 2;
527 }
528 }
529 printk(KERN_INFO PFX "No PST tables match this cpuid "
530 "(0x%x)\n", etuple);
531 printk(KERN_INFO PFX "This is indicative of a broken "
532 "BIOS.\n");
533
534 return -EINVAL;
535 }
536 p++;
537 }
538
539 return -ENODEV;
540}
541
542
543static int powernow_target(struct cpufreq_policy *policy,
544 unsigned int target_freq,
545 unsigned int relation)
546{
547 unsigned int newstate;
548
549 if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
550 relation, &newstate))
551 return -EINVAL;
552
553 change_speed(newstate);
554
555 return 0;
556}
557
558
559static int powernow_verify(struct cpufreq_policy *policy)
560{
561 return cpufreq_frequency_table_verify(policy, powernow_table);
562}
563
564/*
 565 * We rely on the fact that the bus frequency is a multiple
 566 * of 100000/3 kHz, and we compute sgtc according to
 567 * this multiple.
 568 * That way, we more closely match how AMD intended this to work,
 569 * and we get the same kind of behaviour already tested under
 570 * the "well-known" other OS.
571 */
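/* For example, with fsb = 133333 kHz (a 133 MHz bus): m = 133333/3333 = 40,
 * which becomes 4 after the rounding and divide by 10, so
 * sgtc = 100 * 4 * latency / 3, roughly the settling time expressed in
 * bus clocks.
 */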
572static int __cpuinit fixup_sgtc(void)
573{
574 unsigned int sgtc;
575 unsigned int m;
576
577 m = fsb / 3333;
578 if ((m % 10) >= 5)
579 m += 5;
580
581 m /= 10;
582
583 sgtc = 100 * m * latency;
584 sgtc = sgtc / 3;
585 if (sgtc > 0xfffff) {
586 printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
587 sgtc = 0xfffff;
588 }
589 return sgtc;
590}
591
592static unsigned int powernow_get(unsigned int cpu)
593{
594 union msr_fidvidstatus fidvidstatus;
595 unsigned int cfid;
596
597 if (cpu)
598 return 0;
599 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
600 cfid = fidvidstatus.bits.CFID;
601
602 return fsb * fid_codes[cfid] / 10;
603}
604
605
606static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d)
607{
608 printk(KERN_WARNING PFX
609 "%s laptop with broken PST tables in BIOS detected.\n",
610 d->ident);
611 printk(KERN_WARNING PFX
612 "You need to downgrade to 3A21 (09/09/2002), or try a newer "
613 "BIOS than 3A71 (01/20/2003)\n");
614 printk(KERN_WARNING PFX
615 "cpufreq scaling has been disabled as a result of this.\n");
616 return 0;
617}
618
619/*
 620 * Some Athlon laptops ship with badly broken PST tables.
621 * A BIOS update is all that can save them.
622 * Mention this, and disable cpufreq.
623 */
624static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = {
625 {
626 .callback = acer_cpufreq_pst,
627 .ident = "Acer Aspire",
628 .matches = {
629 DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
630 DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
631 },
632 },
633 { }
634};
635
636static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy)
637{
638 union msr_fidvidstatus fidvidstatus;
639 int result;
640
641 if (policy->cpu != 0)
642 return -ENODEV;
643
644 rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
645
646 recalibrate_cpu_khz();
647
648 fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
649 if (!fsb) {
650 printk(KERN_WARNING PFX "can not determine bus frequency\n");
651 return -EINVAL;
652 }
653 dprintk("FSB: %3dMHz\n", fsb/1000);
654
655 if (dmi_check_system(powernow_dmi_table) || acpi_force) {
656 printk(KERN_INFO PFX "PSB/PST known to be broken. "
657 "Trying ACPI instead\n");
658 result = powernow_acpi_init();
659 } else {
660 result = powernow_decode_bios(fidvidstatus.bits.MFID,
661 fidvidstatus.bits.SVID);
662 if (result) {
663 printk(KERN_INFO PFX "Trying ACPI perflib\n");
664 maximum_speed = 0;
665 minimum_speed = -1;
666 latency = 0;
667 result = powernow_acpi_init();
668 if (result) {
669 printk(KERN_INFO PFX
670 "ACPI and legacy methods failed\n");
671 }
672 } else {
673 /* SGTC use the bus clock as timer */
674 latency = fixup_sgtc();
675 printk(KERN_INFO PFX "SGTC: %d\n", latency);
676 }
677 }
678
679 if (result)
680 return result;
681
682 printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
683 minimum_speed/1000, maximum_speed/1000);
684
685 policy->cpuinfo.transition_latency =
686 cpufreq_scale(2000000UL, fsb, latency);
687
688 policy->cur = powernow_get(0);
689
690 cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
691
692 return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
693}
694
695static int powernow_cpu_exit(struct cpufreq_policy *policy)
696{
697 cpufreq_frequency_table_put_attr(policy->cpu);
698
699#ifdef CONFIG_X86_POWERNOW_K7_ACPI
700 if (acpi_processor_perf) {
701 acpi_processor_unregister_performance(acpi_processor_perf, 0);
702 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
703 kfree(acpi_processor_perf);
704 }
705#endif
706
707 kfree(powernow_table);
708 return 0;
709}
710
711static struct freq_attr *powernow_table_attr[] = {
712 &cpufreq_freq_attr_scaling_available_freqs,
713 NULL,
714};
715
716static struct cpufreq_driver powernow_driver = {
717 .verify = powernow_verify,
718 .target = powernow_target,
719 .get = powernow_get,
720#ifdef CONFIG_X86_POWERNOW_K7_ACPI
721 .bios_limit = acpi_processor_get_bios_limit,
722#endif
723 .init = powernow_cpu_init,
724 .exit = powernow_cpu_exit,
725 .name = "powernow-k7",
726 .owner = THIS_MODULE,
727 .attr = powernow_table_attr,
728};
729
730static int __init powernow_init(void)
731{
732 if (check_powernow() == 0)
733 return -ENODEV;
734 return cpufreq_register_driver(&powernow_driver);
735}
736
737
738static void __exit powernow_exit(void)
739{
740 cpufreq_unregister_driver(&powernow_driver);
741}
742
743module_param(acpi_force, int, 0444);
744MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
745
746MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
747MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
748MODULE_LICENSE("GPL");
749
750late_initcall(powernow_init);
751module_exit(powernow_exit);
752
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/arch/x86/kernel/cpu/cpufreq/powernow-k7.h
deleted file mode 100644
index 35fb4eaf6e1c..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * (C) 2003 Dave Jones.
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * AMD-specific information
7 *
8 */
9
10union msr_fidvidctl {
11 struct {
12 unsigned FID:5, // 4:0
13 reserved1:3, // 7:5
14 VID:5, // 12:8
15 reserved2:3, // 15:13
16 FIDC:1, // 16
17 VIDC:1, // 17
18 reserved3:2, // 19:18
19 FIDCHGRATIO:1, // 20
 20		 reserved4:11,	// 31:21
 21		 SGTC:20,	// 51:32
22 reserved5:12; // 63:52
23 } bits;
24 unsigned long long val;
25};
26
27union msr_fidvidstatus {
28 struct {
29 unsigned CFID:5, // 4:0
30 reserved1:3, // 7:5
31 SFID:5, // 12:8
32 reserved2:3, // 15:13
33 MFID:5, // 20:16
34 reserved3:11, // 31:21
35 CVID:5, // 36:32
36 reserved4:3, // 39:37
37 SVID:5, // 44:40
38 reserved5:3, // 47:45
39 MVID:5, // 52:48
40 reserved6:11; // 63:53
41 } bits;
42 unsigned long long val;
43};
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
deleted file mode 100644
index 2368e38327b3..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ /dev/null
@@ -1,1607 +0,0 @@
1/*
2 * (c) 2003-2010 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 *
7 * Support : mark.langsdorf@amd.com
8 *
9 * Based on the powernow-k7.c module written by Dave Jones.
10 * (C) 2003 Dave Jones on behalf of SuSE Labs
11 * (C) 2004 Dominik Brodowski <linux@brodo.de>
12 * (C) 2004 Pavel Machek <pavel@ucw.cz>
13 * Licensed under the terms of the GNU GPL License version 2.
14 * Based upon datasheets & sample CPUs kindly provided by AMD.
15 *
16 * Valuable input gratefully received from Dave Jones, Pavel Machek,
17 * Dominik Brodowski, Jacob Shin, and others.
18 * Originally developed by Paul Devriendt.
19 * Processor information obtained from Chapter 9 (Power and Thermal Management)
20 * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
21 * Opteron Processors" available for download from www.amd.com
22 *
23 * Tables for specific CPUs can be inferred from
24 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
25 */
26
27#include <linux/kernel.h>
28#include <linux/smp.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/cpufreq.h>
32#include <linux/slab.h>
33#include <linux/string.h>
34#include <linux/cpumask.h>
35#include <linux/sched.h> /* for current / set_cpus_allowed() */
36#include <linux/io.h>
37#include <linux/delay.h>
38
39#include <asm/msr.h>
40
41#include <linux/acpi.h>
42#include <linux/mutex.h>
43#include <acpi/processor.h>
44
45#define PFX "powernow-k8: "
46#define VERSION "version 2.20.00"
47#include "powernow-k8.h"
48#include "mperf.h"
49
50/* serialize freq changes */
51static DEFINE_MUTEX(fidvid_mutex);
52
53static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
54
55static int cpu_family = CPU_OPTERON;
56
57/* core performance boost */
58static bool cpb_capable, cpb_enabled;
59static struct msr __percpu *msrs;
60
61static struct cpufreq_driver cpufreq_amd64_driver;
62
63#ifndef CONFIG_SMP
64static inline const struct cpumask *cpu_core_mask(int cpu)
65{
66 return cpumask_of(0);
67}
68#endif
69
70/* Return a frequency in MHz, given an input fid */
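/* (e.g. fid 0 -> 800 MHz, fid 2 -> 1000 MHz, fid 0xe -> 2200 MHz) */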
71static u32 find_freq_from_fid(u32 fid)
72{
73 return 800 + (fid * 100);
74}
75
76/* Return a frequency in KHz, given an input fid */
77static u32 find_khz_freq_from_fid(u32 fid)
78{
79 return 1000 * find_freq_from_fid(fid);
80}
81
82static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
83 u32 pstate)
84{
85 return data[pstate].frequency;
86}
87
88/* Return the vco fid for an input fid
89 *
90 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
91 * only from corresponding high fids. This returns "high" fid corresponding to
92 * "low" one.
93 */
94static u32 convert_fid_to_vco_fid(u32 fid)
95{
96 if (fid < HI_FID_TABLE_BOTTOM)
97 return 8 + (2 * fid);
98 else
99 return fid;
100}
101
102/*
103 * Return 1 if the pending bit is set. Unless we just instructed the processor
104 * to transition to a new state, seeing this bit set is really bad news.
105 */
106static int pending_bit_stuck(void)
107{
108 u32 lo, hi;
109
110 if (cpu_family == CPU_HW_PSTATE)
111 return 0;
112
113 rdmsr(MSR_FIDVID_STATUS, lo, hi);
114 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
115}
116
117/*
118 * Update the global current fid / vid values from the status msr.
119 * Returns 1 on error.
120 */
121static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
122{
123 u32 lo, hi;
124 u32 i = 0;
125
126 if (cpu_family == CPU_HW_PSTATE) {
127 rdmsr(MSR_PSTATE_STATUS, lo, hi);
128 i = lo & HW_PSTATE_MASK;
129 data->currpstate = i;
130
131 /*
132 * a workaround for family 11h erratum 311 might cause
 133		 * an "out-of-range" Pstate if the core is in Pstate-0
134 */
135 if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
136 data->currpstate = HW_PSTATE_0;
137
138 return 0;
139 }
140 do {
141 if (i++ > 10000) {
142 dprintk("detected change pending stuck\n");
143 return 1;
144 }
145 rdmsr(MSR_FIDVID_STATUS, lo, hi);
146 } while (lo & MSR_S_LO_CHANGE_PENDING);
147
148 data->currvid = hi & MSR_S_HI_CURRENT_VID;
149 data->currfid = lo & MSR_S_LO_CURRENT_FID;
150
151 return 0;
152}
153
154/* the isochronous relief time */
155static void count_off_irt(struct powernow_k8_data *data)
156{
157 udelay((1 << data->irt) * 10);
158 return;
159}
160
161/* the voltage stabilization time */
162static void count_off_vst(struct powernow_k8_data *data)
163{
164 udelay(data->vstable * VST_UNITS_20US);
165 return;
166}
167
168/* need to init the control msr to a safe value (for each cpu) */
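/* (read the current fid/vid from the status MSR and write them back through
 * the control MSR, using the "benign" stop-grant value MSR_C_HI_STP_GNT_BENIGN) */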
169static void fidvid_msr_init(void)
170{
171 u32 lo, hi;
172 u8 fid, vid;
173
174 rdmsr(MSR_FIDVID_STATUS, lo, hi);
175 vid = hi & MSR_S_HI_CURRENT_VID;
176 fid = lo & MSR_S_LO_CURRENT_FID;
177 lo = fid | (vid << MSR_C_LO_VID_SHIFT);
178 hi = MSR_C_HI_STP_GNT_BENIGN;
179 dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
180 wrmsr(MSR_FIDVID_CTL, lo, hi);
181}
182
183/* write the new fid value along with the other control fields to the msr */
184static int write_new_fid(struct powernow_k8_data *data, u32 fid)
185{
186 u32 lo;
187 u32 savevid = data->currvid;
188 u32 i = 0;
189
190 if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
191 printk(KERN_ERR PFX "internal error - overflow on fid write\n");
192 return 1;
193 }
194
195 lo = fid;
196 lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
197 lo |= MSR_C_LO_INIT_FID_VID;
198
199 dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
200 fid, lo, data->plllock * PLL_LOCK_CONVERSION);
201
202 do {
203 wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
204 if (i++ > 100) {
205 printk(KERN_ERR PFX
206 "Hardware error - pending bit very stuck - "
207 "no further pstate changes possible\n");
208 return 1;
209 }
210 } while (query_current_values_with_pending_wait(data));
211
212 count_off_irt(data);
213
214 if (savevid != data->currvid) {
215 printk(KERN_ERR PFX
216 "vid change on fid trans, old 0x%x, new 0x%x\n",
217 savevid, data->currvid);
218 return 1;
219 }
220
221 if (fid != data->currfid) {
222 printk(KERN_ERR PFX
223 "fid trans failed, fid 0x%x, curr 0x%x\n", fid,
224 data->currfid);
225 return 1;
226 }
227
228 return 0;
229}
230
231/* Write a new vid to the hardware */
232static int write_new_vid(struct powernow_k8_data *data, u32 vid)
233{
234 u32 lo;
235 u32 savefid = data->currfid;
236 int i = 0;
237
238 if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
239 printk(KERN_ERR PFX "internal error - overflow on vid write\n");
240 return 1;
241 }
242
243 lo = data->currfid;
244 lo |= (vid << MSR_C_LO_VID_SHIFT);
245 lo |= MSR_C_LO_INIT_FID_VID;
246
247 dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
248 vid, lo, STOP_GRANT_5NS);
249
250 do {
251 wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
252 if (i++ > 100) {
253 printk(KERN_ERR PFX "internal error - pending bit "
254 "very stuck - no further pstate "
255 "changes possible\n");
256 return 1;
257 }
258 } while (query_current_values_with_pending_wait(data));
259
260 if (savefid != data->currfid) {
261 printk(KERN_ERR PFX "fid changed on vid trans, old "
262 "0x%x new 0x%x\n",
263 savefid, data->currfid);
264 return 1;
265 }
266
267 if (vid != data->currvid) {
268 printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
269 "curr 0x%x\n",
270 vid, data->currvid);
271 return 1;
272 }
273
274 return 0;
275}
276
277/*
278 * Reduce the vid by the max of step or reqvid.
279 * Decreasing vid codes represent increasing voltages:
280 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
281 */
282static int decrease_vid_code_by_step(struct powernow_k8_data *data,
283 u32 reqvid, u32 step)
284{
285 if ((data->currvid - reqvid) > step)
286 reqvid = data->currvid - step;
287
288 if (write_new_vid(data, reqvid))
289 return 1;
290
291 count_off_vst(data);
292
293 return 0;
294}
295
296/* Change hardware pstate by single MSR write */
297static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
298{
299 wrmsr(MSR_PSTATE_CTRL, pstate, 0);
300 data->currpstate = pstate;
301 return 0;
302}
303
304/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
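/* (phase 1 raises the core voltage toward the target, phase 2 steps the fid
 * to the requested frequency, phase 3 settles at the final vid) */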
305static int transition_fid_vid(struct powernow_k8_data *data,
306 u32 reqfid, u32 reqvid)
307{
308 if (core_voltage_pre_transition(data, reqvid, reqfid))
309 return 1;
310
311 if (core_frequency_transition(data, reqfid))
312 return 1;
313
314 if (core_voltage_post_transition(data, reqvid))
315 return 1;
316
317 if (query_current_values_with_pending_wait(data))
318 return 1;
319
320 if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
321 printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
322 "curr 0x%x 0x%x\n",
323 smp_processor_id(),
324 reqfid, reqvid, data->currfid, data->currvid);
325 return 1;
326 }
327
328 dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
329 smp_processor_id(), data->currfid, data->currvid);
330
331 return 0;
332}
333
334/* Phase 1 - core voltage transition ... setup voltage */
335static int core_voltage_pre_transition(struct powernow_k8_data *data,
336 u32 reqvid, u32 reqfid)
337{
338 u32 rvosteps = data->rvo;
339 u32 savefid = data->currfid;
340 u32 maxvid, lo, rvomult = 1;
341
342 dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
343 "reqvid 0x%x, rvo 0x%x\n",
344 smp_processor_id(),
345 data->currfid, data->currvid, reqvid, data->rvo);
346
347 if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
348 rvomult = 2;
349 rvosteps *= rvomult;
350 rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
351 maxvid = 0x1f & (maxvid >> 16);
352 dprintk("ph1 maxvid=0x%x\n", maxvid);
353 if (reqvid < maxvid) /* lower numbers are higher voltages */
354 reqvid = maxvid;
355
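	/* step the vid code down (i.e. raise the voltage) until the
	 * pre-transition target is reached */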
356 while (data->currvid > reqvid) {
357 dprintk("ph1: curr 0x%x, req vid 0x%x\n",
358 data->currvid, reqvid);
359 if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
360 return 1;
361 }
362
363 while ((rvosteps > 0) &&
364 ((rvomult * data->rvo + data->currvid) > reqvid)) {
365 if (data->currvid == maxvid) {
366 rvosteps = 0;
367 } else {
368 dprintk("ph1: changing vid for rvo, req 0x%x\n",
369 data->currvid - 1);
370 if (decrease_vid_code_by_step(data, data->currvid-1, 1))
371 return 1;
372 rvosteps--;
373 }
374 }
375
376 if (query_current_values_with_pending_wait(data))
377 return 1;
378
379 if (savefid != data->currfid) {
380 printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",
381 data->currfid);
382 return 1;
383 }
384
385 dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n",
386 data->currfid, data->currvid);
387
388 return 0;
389}
390
391/* Phase 2 - core frequency transition */
392static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
393{
394 u32 vcoreqfid, vcocurrfid, vcofiddiff;
395 u32 fid_interval, savevid = data->currvid;
396
397 if (data->currfid == reqfid) {
398 printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
399 data->currfid);
400 return 0;
401 }
402
403 dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
404 "reqfid 0x%x\n",
405 smp_processor_id(),
406 data->currfid, data->currvid, reqfid);
407
408 vcoreqfid = convert_fid_to_vco_fid(reqfid);
409 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
410 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
411 : vcoreqfid - vcocurrfid;
412
413 if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
414 vcofiddiff = 0;
415
416 while (vcofiddiff > 2) {
417 (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
418
419 if (reqfid > data->currfid) {
420 if (data->currfid > LO_FID_TABLE_TOP) {
421 if (write_new_fid(data,
422 data->currfid + fid_interval))
423 return 1;
424 } else {
425 if (write_new_fid
426 (data,
427 2 + convert_fid_to_vco_fid(data->currfid)))
428 return 1;
429 }
430 } else {
431 if (write_new_fid(data, data->currfid - fid_interval))
432 return 1;
433 }
434
435 vcocurrfid = convert_fid_to_vco_fid(data->currfid);
436 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
437 : vcoreqfid - vcocurrfid;
438 }
439
440 if (write_new_fid(data, reqfid))
441 return 1;
442
443 if (query_current_values_with_pending_wait(data))
444 return 1;
445
446 if (data->currfid != reqfid) {
447 printk(KERN_ERR PFX
448 "ph2: mismatch, failed fid transition, "
449 "curr 0x%x, req 0x%x\n",
450 data->currfid, reqfid);
451 return 1;
452 }
453
454 if (savevid != data->currvid) {
455 printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
456 savevid, data->currvid);
457 return 1;
458 }
459
460 dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n",
461 data->currfid, data->currvid);
462
463 return 0;
464}
465
466/* Phase 3 - core voltage transition flow ... jump to the final vid. */
467static int core_voltage_post_transition(struct powernow_k8_data *data,
468 u32 reqvid)
469{
470 u32 savefid = data->currfid;
471 u32 savereqvid = reqvid;
472
473 dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
474 smp_processor_id(),
475 data->currfid, data->currvid);
476
477 if (reqvid != data->currvid) {
478 if (write_new_vid(data, reqvid))
479 return 1;
480
481 if (savefid != data->currfid) {
482 printk(KERN_ERR PFX
483 "ph3: bad fid change, save 0x%x, curr 0x%x\n",
484 savefid, data->currfid);
485 return 1;
486 }
487
488 if (data->currvid != reqvid) {
489 printk(KERN_ERR PFX
 490			       "ph3: failed vid transition, "
 491			       "req 0x%x, curr 0x%x\n",
492 reqvid, data->currvid);
493 return 1;
494 }
495 }
496
497 if (query_current_values_with_pending_wait(data))
498 return 1;
499
500 if (savereqvid != data->currvid) {
501 dprintk("ph3 failed, currvid 0x%x\n", data->currvid);
502 return 1;
503 }
504
505 if (savefid != data->currfid) {
506 dprintk("ph3 failed, currfid changed 0x%x\n",
507 data->currfid);
508 return 1;
509 }
510
511 dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n",
512 data->currfid, data->currvid);
513
514 return 0;
515}
516
517static void check_supported_cpu(void *_rc)
518{
519 u32 eax, ebx, ecx, edx;
520 int *rc = _rc;
521
522 *rc = -ENODEV;
523
524 if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
525 return;
526
527 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
528 if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
529 ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
530 return;
531
532 if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
533 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
534 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
535 printk(KERN_INFO PFX
536 "Processor cpuid %x not supported\n", eax);
537 return;
538 }
539
540 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
541 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
542 printk(KERN_INFO PFX
543 "No frequency change capabilities detected\n");
544 return;
545 }
546
547 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
548 if ((edx & P_STATE_TRANSITION_CAPABLE)
549 != P_STATE_TRANSITION_CAPABLE) {
550 printk(KERN_INFO PFX
551 "Power state transitions not supported\n");
552 return;
553 }
554 } else { /* must be a HW Pstate capable processor */
555 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
556 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
557 cpu_family = CPU_HW_PSTATE;
558 else
559 return;
560 }
561
562 *rc = 0;
563}
564
565static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
566 u8 maxvid)
567{
568 unsigned int j;
569 u8 lastfid = 0xff;
570
571 for (j = 0; j < data->numps; j++) {
572 if (pst[j].vid > LEAST_VID) {
573 printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n",
574 j, pst[j].vid);
575 return -EINVAL;
576 }
577 if (pst[j].vid < data->rvo) {
578 /* vid + rvo >= 0 */
579 printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"
580 " %d\n", j);
581 return -ENODEV;
582 }
583 if (pst[j].vid < maxvid + data->rvo) {
584 /* vid + rvo >= maxvid */
585 printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"
586 " %d\n", j);
587 return -ENODEV;
588 }
589 if (pst[j].fid > MAX_FID) {
590 printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate"
591 " %d\n", j);
592 return -ENODEV;
593 }
594 if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
595 /* Only first fid is allowed to be in "low" range */
596 printk(KERN_ERR FW_BUG PFX "two low fids - %d : "
597 "0x%x\n", j, pst[j].fid);
598 return -EINVAL;
599 }
600 if (pst[j].fid < lastfid)
601 lastfid = pst[j].fid;
602 }
603 if (lastfid & 1) {
604 printk(KERN_ERR FW_BUG PFX "lastfid invalid\n");
605 return -EINVAL;
606 }
607 if (lastfid > LO_FID_TABLE_TOP)
608 printk(KERN_INFO FW_BUG PFX
609 "first fid not from lo freq table\n");
610
611 return 0;
612}
613
614static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
615 unsigned int entry)
616{
617 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
618}
619
620static void print_basics(struct powernow_k8_data *data)
621{
622 int j;
623 for (j = 0; j < data->numps; j++) {
624 if (data->powernow_table[j].frequency !=
625 CPUFREQ_ENTRY_INVALID) {
626 if (cpu_family == CPU_HW_PSTATE) {
627 printk(KERN_INFO PFX
628 " %d : pstate %d (%d MHz)\n", j,
629 data->powernow_table[j].index,
630 data->powernow_table[j].frequency/1000);
631 } else {
632 printk(KERN_INFO PFX
633 "fid 0x%x (%d MHz), vid 0x%x\n",
634 data->powernow_table[j].index & 0xff,
635 data->powernow_table[j].frequency/1000,
636 data->powernow_table[j].index >> 8);
637 }
638 }
639 }
640 if (data->batps)
641 printk(KERN_INFO PFX "Only %d pstates on battery\n",
642 data->batps);
643}
644
645static u32 freq_from_fid_did(u32 fid, u32 did)
646{
647 u32 mhz = 0;
648
649 if (boot_cpu_data.x86 == 0x10)
650 mhz = (100 * (fid + 0x10)) >> did;
651 else if (boot_cpu_data.x86 == 0x11)
652 mhz = (100 * (fid + 8)) >> did;
653 else
654 BUG();
655
656 return mhz * 1000;
657}
658
659static int fill_powernow_table(struct powernow_k8_data *data,
660 struct pst_s *pst, u8 maxvid)
661{
662 struct cpufreq_frequency_table *powernow_table;
663 unsigned int j;
664
665 if (data->batps) {
666 /* use ACPI support to get full speed on mains power */
667 printk(KERN_WARNING PFX
668 "Only %d pstates usable (use ACPI driver for full "
 669			"range)\n", data->batps);
670 data->numps = data->batps;
671 }
672
673 for (j = 1; j < data->numps; j++) {
674 if (pst[j-1].fid >= pst[j].fid) {
675 printk(KERN_ERR PFX "PST out of sequence\n");
676 return -EINVAL;
677 }
678 }
679
680 if (data->numps < 2) {
681 printk(KERN_ERR PFX "no p states to transition\n");
682 return -ENODEV;
683 }
684
685 if (check_pst_table(data, pst, maxvid))
686 return -EINVAL;
687
688 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
689 * (data->numps + 1)), GFP_KERNEL);
690 if (!powernow_table) {
691 printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
692 return -ENOMEM;
693 }
694
695 for (j = 0; j < data->numps; j++) {
696 int freq;
697 powernow_table[j].index = pst[j].fid; /* lower 8 bits */
698 powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
699 freq = find_khz_freq_from_fid(pst[j].fid);
700 powernow_table[j].frequency = freq;
701 }
702 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
703 powernow_table[data->numps].index = 0;
704
705 if (query_current_values_with_pending_wait(data)) {
706 kfree(powernow_table);
707 return -EIO;
708 }
709
710 dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
711 data->powernow_table = powernow_table;
712 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
713 print_basics(data);
714
715 for (j = 0; j < data->numps; j++)
716 if ((pst[j].fid == data->currfid) &&
717 (pst[j].vid == data->currvid))
718 return 0;
719
720 dprintk("currfid/vid do not match PST, ignoring\n");
721 return 0;
722}
723
724/* Find and validate the PSB/PST table in BIOS. */
725static int find_psb_table(struct powernow_k8_data *data)
726{
727 struct psb_s *psb;
728 unsigned int i;
729 u32 mvs;
730 u8 maxvid;
731 u32 cpst = 0;
732 u32 thiscpuid;
733
734 for (i = 0xc0000; i < 0xffff0; i += 0x10) {
735 /* Scan BIOS looking for the signature. */
		/* It cannot be at 0xffff0 - it is too big. */
737
738 psb = phys_to_virt(i);
739 if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
740 continue;
741
742 dprintk("found PSB header at 0x%p\n", psb);
743
744 dprintk("table vers: 0x%x\n", psb->tableversion);
745 if (psb->tableversion != PSB_VERSION_1_4) {
746 printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n");
747 return -ENODEV;
748 }
749
750 dprintk("flags: 0x%x\n", psb->flags1);
751 if (psb->flags1) {
752 printk(KERN_ERR FW_BUG PFX "unknown flags\n");
753 return -ENODEV;
754 }
755
756 data->vstable = psb->vstable;
757 dprintk("voltage stabilization time: %d(*20us)\n",
758 data->vstable);
759
760 dprintk("flags2: 0x%x\n", psb->flags2);
761 data->rvo = psb->flags2 & 3;
762 data->irt = ((psb->flags2) >> 2) & 3;
763 mvs = ((psb->flags2) >> 4) & 3;
764 data->vidmvs = 1 << mvs;
765 data->batps = ((psb->flags2) >> 6) & 3;
766
767 dprintk("ramp voltage offset: %d\n", data->rvo);
768 dprintk("isochronous relief time: %d\n", data->irt);
769 dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
770
771 dprintk("numpst: 0x%x\n", psb->num_tables);
772 cpst = psb->num_tables;
773 if ((psb->cpuid == 0x00000fc0) ||
774 (psb->cpuid == 0x00000fe0)) {
775 thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
776 if ((thiscpuid == 0x00000fc0) ||
777 (thiscpuid == 0x00000fe0))
778 cpst = 1;
779 }
780 if (cpst != 1) {
781 printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");
782 return -ENODEV;
783 }
784
785 data->plllock = psb->plllocktime;
786 dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
787 dprintk("maxfid: 0x%x\n", psb->maxfid);
788 dprintk("maxvid: 0x%x\n", psb->maxvid);
789 maxvid = psb->maxvid;
790
791 data->numps = psb->numps;
792 dprintk("numpstates: 0x%x\n", data->numps);
793 return fill_powernow_table(data,
794 (struct pst_s *)(psb+1), maxvid);
795 }
796 /*
797 * If you see this message, complain to BIOS manufacturer. If
798 * he tells you "we do not support Linux" or some similar
799 * nonsense, remember that Windows 2000 uses the same legacy
800 * mechanism that the old Linux PSB driver uses. Tell them it
801 * is broken with Windows 2000.
802 *
803 * The reference to the AMD documentation is chapter 9 in the
804 * BIOS and Kernel Developer's Guide, which is available on
805 * www.amd.com
806 */
807 printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
808 printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
809 " and Cool'N'Quiet support is enabled in BIOS setup\n");
810 return -ENODEV;
811}
812
813static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
814 unsigned int index)
815{
816 u64 control;
817
818 if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
819 return;
820
821 control = data->acpi_data.states[index].control;
822 data->irt = (control >> IRT_SHIFT) & IRT_MASK;
823 data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
824 data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
825 data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
826 data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
827 data->vstable = (control >> VST_SHIFT) & VST_MASK;
828}
829
830static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
831{
832 struct cpufreq_frequency_table *powernow_table;
833 int ret_val = -ENODEV;
834 u64 control, status;
835
836 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
837 dprintk("register performance failed: bad ACPI data\n");
838 return -EIO;
839 }
840
841 /* verify the data contained in the ACPI structures */
842 if (data->acpi_data.state_count <= 1) {
843 dprintk("No ACPI P-States\n");
844 goto err_out;
845 }
846
847 control = data->acpi_data.control_register.space_id;
848 status = data->acpi_data.status_register.space_id;
849
850 if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
851 (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
852 dprintk("Invalid control/status registers (%x - %x)\n",
853 control, status);
854 goto err_out;
855 }
856
857 /* fill in data->powernow_table */
858 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
859 * (data->acpi_data.state_count + 1)), GFP_KERNEL);
860 if (!powernow_table) {
861 dprintk("powernow_table memory alloc failure\n");
862 goto err_out;
863 }
864
865 /* fill in data */
866 data->numps = data->acpi_data.state_count;
867 powernow_k8_acpi_pst_values(data, 0);
868
869 if (cpu_family == CPU_HW_PSTATE)
870 ret_val = fill_powernow_table_pstate(data, powernow_table);
871 else
872 ret_val = fill_powernow_table_fidvid(data, powernow_table);
873 if (ret_val)
874 goto err_out_mem;
875
876 powernow_table[data->acpi_data.state_count].frequency =
877 CPUFREQ_TABLE_END;
878 powernow_table[data->acpi_data.state_count].index = 0;
879 data->powernow_table = powernow_table;
880
881 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
882 print_basics(data);
883
884 /* notify BIOS that we exist */
885 acpi_processor_notify_smm(THIS_MODULE);
886
887 if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
888 printk(KERN_ERR PFX
889 "unable to alloc powernow_k8_data cpumask\n");
890 ret_val = -ENOMEM;
891 goto err_out_mem;
892 }
893
894 return 0;
895
896err_out_mem:
897 kfree(powernow_table);
898
899err_out:
900 acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
901
902 /* data->acpi_data.state_count informs us at ->exit()
903 * whether ACPI was used */
904 data->acpi_data.state_count = 0;
905
906 return ret_val;
907}
908
909static int fill_powernow_table_pstate(struct powernow_k8_data *data,
910 struct cpufreq_frequency_table *powernow_table)
911{
912 int i;
913 u32 hi = 0, lo = 0;
914 rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
915 data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
916
917 for (i = 0; i < data->acpi_data.state_count; i++) {
918 u32 index;
919
920 index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
921 if (index > data->max_hw_pstate) {
922 printk(KERN_ERR PFX "invalid pstate %d - "
923 "bad value %d.\n", i, index);
924 printk(KERN_ERR PFX "Please report to BIOS "
925 "manufacturer\n");
926 invalidate_entry(powernow_table, i);
927 continue;
928 }
929 rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
930 if (!(hi & HW_PSTATE_VALID_MASK)) {
931 dprintk("invalid pstate %d, ignoring\n", index);
932 invalidate_entry(powernow_table, i);
933 continue;
934 }
935
936 powernow_table[i].index = index;
937
938 /* Frequency may be rounded for these */
939 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
940 || boot_cpu_data.x86 == 0x11) {
941 powernow_table[i].frequency =
942 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
943 } else
944 powernow_table[i].frequency =
945 data->acpi_data.states[i].core_frequency * 1000;
946 }
947 return 0;
948}
949
950static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
951 struct cpufreq_frequency_table *powernow_table)
952{
953 int i;
954
955 for (i = 0; i < data->acpi_data.state_count; i++) {
956 u32 fid;
957 u32 vid;
958 u32 freq, index;
959 u64 status, control;
960
961 if (data->exttype) {
962 status = data->acpi_data.states[i].status;
963 fid = status & EXT_FID_MASK;
964 vid = (status >> VID_SHIFT) & EXT_VID_MASK;
965 } else {
966 control = data->acpi_data.states[i].control;
967 fid = control & FID_MASK;
968 vid = (control >> VID_SHIFT) & VID_MASK;
969 }
970
971 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
972
973 index = fid | (vid<<8);
974 powernow_table[i].index = index;
975
976 freq = find_khz_freq_from_fid(fid);
977 powernow_table[i].frequency = freq;
978
979 /* verify frequency is OK */
980 if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
981 dprintk("invalid freq %u kHz, ignoring\n", freq);
982 invalidate_entry(powernow_table, i);
983 continue;
984 }
985
986 /* verify voltage is OK -
987 * BIOSs are using "off" to indicate invalid */
988 if (vid == VID_OFF) {
989 dprintk("invalid vid %u, ignoring\n", vid);
990 invalidate_entry(powernow_table, i);
991 continue;
992 }
993
994 if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
995 printk(KERN_INFO PFX "invalid freq entries "
996 "%u kHz vs. %u kHz\n", freq,
997 (unsigned int)
998 (data->acpi_data.states[i].core_frequency
999 * 1000));
1000 invalidate_entry(powernow_table, i);
1001 continue;
1002 }
1003 }
1004 return 0;
1005}
1006
1007static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
1008{
1009 if (data->acpi_data.state_count)
1010 acpi_processor_unregister_performance(&data->acpi_data,
1011 data->cpu);
1012 free_cpumask_var(data->acpi_data.shared_cpu_map);
1013}
1014
1015static int get_transition_latency(struct powernow_k8_data *data)
1016{
1017 int max_latency = 0;
1018 int i;
1019 for (i = 0; i < data->acpi_data.state_count; i++) {
1020 int cur_latency = data->acpi_data.states[i].transition_latency
1021 + data->acpi_data.states[i].bus_master_latency;
1022 if (cur_latency > max_latency)
1023 max_latency = cur_latency;
1024 }
1025 if (max_latency == 0) {
1026 /*
1027 * Fam 11h and later may return 0 as transition latency. This
1028 * is intended and means "very fast". While cpufreq core and
1029 * governors currently can handle that gracefully, better set it
1030 * to 1 to avoid problems in the future.
1031 */
1032 if (boot_cpu_data.x86 < 0x11)
1033 printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
1034 "latency\n");
1035 max_latency = 1;
1036 }
1037 /* value in usecs, needs to be in nanoseconds */
1038 return 1000 * max_latency;
1039}
1040
1041/* Take a frequency, and issue the fid/vid transition command */
1042static int transition_frequency_fidvid(struct powernow_k8_data *data,
1043 unsigned int index)
1044{
1045 u32 fid = 0;
1046 u32 vid = 0;
1047 int res, i;
1048 struct cpufreq_freqs freqs;
1049
1050 dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
1051
1052 /* fid/vid correctness check for k8 */
1053 /* fid are the lower 8 bits of the index we stored into
1054 * the cpufreq frequency table in find_psb_table, vid
1055 * are the upper 8 bits.
1056 */
1057 fid = data->powernow_table[index].index & 0xFF;
1058 vid = (data->powernow_table[index].index & 0xFF00) >> 8;
1059
1060 dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
1061
1062 if (query_current_values_with_pending_wait(data))
1063 return 1;
1064
1065 if ((data->currvid == vid) && (data->currfid == fid)) {
1066 dprintk("target matches current values (fid 0x%x, vid 0x%x)\n",
1067 fid, vid);
1068 return 0;
1069 }
1070
1071 dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
1072 smp_processor_id(), fid, vid);
1073 freqs.old = find_khz_freq_from_fid(data->currfid);
1074 freqs.new = find_khz_freq_from_fid(fid);
1075
1076 for_each_cpu(i, data->available_cores) {
1077 freqs.cpu = i;
1078 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1079 }
1080
1081 res = transition_fid_vid(data, fid, vid);
1082 freqs.new = find_khz_freq_from_fid(data->currfid);
1083
1084 for_each_cpu(i, data->available_cores) {
1085 freqs.cpu = i;
1086 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1087 }
1088 return res;
1089}
1090
1091/* Take a frequency, and issue the hardware pstate transition command */
1092static int transition_frequency_pstate(struct powernow_k8_data *data,
1093 unsigned int index)
1094{
1095 u32 pstate = 0;
1096 int res, i;
1097 struct cpufreq_freqs freqs;
1098
1099 dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
1100
1101 /* get MSR index for hardware pstate transition */
1102 pstate = index & HW_PSTATE_MASK;
1103 if (pstate > data->max_hw_pstate)
1104 return 0;
1105 freqs.old = find_khz_freq_from_pstate(data->powernow_table,
1106 data->currpstate);
1107 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
1108
1109 for_each_cpu(i, data->available_cores) {
1110 freqs.cpu = i;
1111 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1112 }
1113
1114 res = transition_pstate(data, pstate);
1115 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
1116
1117 for_each_cpu(i, data->available_cores) {
1118 freqs.cpu = i;
1119 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1120 }
1121 return res;
1122}
1123
1124/* Driver entry point to switch to the target frequency */
1125static int powernowk8_target(struct cpufreq_policy *pol,
1126 unsigned targfreq, unsigned relation)
1127{
1128 cpumask_var_t oldmask;
1129 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1130 u32 checkfid;
1131 u32 checkvid;
1132 unsigned int newstate;
1133 int ret = -EIO;
1134
1135 if (!data)
1136 return -EINVAL;
1137
1138 checkfid = data->currfid;
1139 checkvid = data->currvid;
1140
1141 /* only run on specific CPU from here on. */
1142 /* This is poor form: use a workqueue or smp_call_function_single */
1143 if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
1144 return -ENOMEM;
1145
1146 cpumask_copy(oldmask, tsk_cpus_allowed(current));
1147 set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
1148
1149 if (smp_processor_id() != pol->cpu) {
1150 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
1151 goto err_out;
1152 }
1153
1154 if (pending_bit_stuck()) {
1155 printk(KERN_ERR PFX "failing targ, change pending bit set\n");
1156 goto err_out;
1157 }
1158
1159 dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
1160 pol->cpu, targfreq, pol->min, pol->max, relation);
1161
1162 if (query_current_values_with_pending_wait(data))
1163 goto err_out;
1164
1165 if (cpu_family != CPU_HW_PSTATE) {
1166 dprintk("targ: curr fid 0x%x, vid 0x%x\n",
1167 data->currfid, data->currvid);
1168
1169 if ((checkvid != data->currvid) ||
1170 (checkfid != data->currfid)) {
1171 printk(KERN_INFO PFX
			       "error - out of sync, fid 0x%x 0x%x, "
1173 "vid 0x%x 0x%x\n",
1174 checkfid, data->currfid,
1175 checkvid, data->currvid);
1176 }
1177 }
1178
1179 if (cpufreq_frequency_table_target(pol, data->powernow_table,
1180 targfreq, relation, &newstate))
1181 goto err_out;
1182
1183 mutex_lock(&fidvid_mutex);
1184
1185 powernow_k8_acpi_pst_values(data, newstate);
1186
1187 if (cpu_family == CPU_HW_PSTATE)
1188 ret = transition_frequency_pstate(data, newstate);
1189 else
1190 ret = transition_frequency_fidvid(data, newstate);
1191 if (ret) {
1192 printk(KERN_ERR PFX "transition frequency failed\n");
1193 ret = 1;
1194 mutex_unlock(&fidvid_mutex);
1195 goto err_out;
1196 }
1197 mutex_unlock(&fidvid_mutex);
1198
1199 if (cpu_family == CPU_HW_PSTATE)
1200 pol->cur = find_khz_freq_from_pstate(data->powernow_table,
1201 newstate);
1202 else
1203 pol->cur = find_khz_freq_from_fid(data->currfid);
1204 ret = 0;
1205
1206err_out:
1207 set_cpus_allowed_ptr(current, oldmask);
1208 free_cpumask_var(oldmask);
1209 return ret;
1210}
1211
1212/* Driver entry point to verify the policy and range of frequencies */
1213static int powernowk8_verify(struct cpufreq_policy *pol)
1214{
1215 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1216
1217 if (!data)
1218 return -EINVAL;
1219
1220 return cpufreq_frequency_table_verify(pol, data->powernow_table);
1221}
1222
1223struct init_on_cpu {
1224 struct powernow_k8_data *data;
1225 int rc;
1226};
1227
1228static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
1229{
1230 struct init_on_cpu *init_on_cpu = _init_on_cpu;
1231
1232 if (pending_bit_stuck()) {
1233 printk(KERN_ERR PFX "failing init, change pending bit set\n");
1234 init_on_cpu->rc = -ENODEV;
1235 return;
1236 }
1237
1238 if (query_current_values_with_pending_wait(init_on_cpu->data)) {
1239 init_on_cpu->rc = -ENODEV;
1240 return;
1241 }
1242
1243 if (cpu_family == CPU_OPTERON)
1244 fidvid_msr_init();
1245
1246 init_on_cpu->rc = 0;
1247}
1248
1249/* per CPU init entry point to the driver */
1250static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1251{
1252 static const char ACPI_PSS_BIOS_BUG_MSG[] =
1253 KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
1254 FW_BUG PFX "Try again with latest BIOS.\n";
1255 struct powernow_k8_data *data;
1256 struct init_on_cpu init_on_cpu;
1257 int rc;
1258 struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
1259
1260 if (!cpu_online(pol->cpu))
1261 return -ENODEV;
1262
1263 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1264 if (rc)
1265 return -ENODEV;
1266
1267 data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
1268 if (!data) {
		printk(KERN_ERR PFX "unable to alloc powernow_k8_data\n");
1270 return -ENOMEM;
1271 }
1272
1273 data->cpu = pol->cpu;
1274 data->currpstate = HW_PSTATE_INVALID;
1275
1276 if (powernow_k8_cpu_init_acpi(data)) {
1277 /*
1278 * Use the PSB BIOS structure. This is only available on
1279 * an UP version, and is deprecated by AMD.
1280 */
1281 if (num_online_cpus() != 1) {
1282 printk_once(ACPI_PSS_BIOS_BUG_MSG);
1283 goto err_out;
1284 }
1285 if (pol->cpu != 0) {
1286 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
1287 "CPU other than CPU0. Complain to your BIOS "
1288 "vendor.\n");
1289 goto err_out;
1290 }
1291 rc = find_psb_table(data);
1292 if (rc)
1293 goto err_out;
1294
1295 /* Take a crude guess here.
		 * That guess was in microseconds, so multiply by 1000 */
1297 pol->cpuinfo.transition_latency = (
1298 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
1299 ((1 << data->irt) * 30)) * 1000;
1300 } else /* ACPI _PSS objects available */
1301 pol->cpuinfo.transition_latency = get_transition_latency(data);
1302
1303 /* only run on specific CPU from here on */
1304 init_on_cpu.data = data;
1305 smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
1306 &init_on_cpu, 1);
1307 rc = init_on_cpu.rc;
1308 if (rc != 0)
1309 goto err_out_exit_acpi;
1310
1311 if (cpu_family == CPU_HW_PSTATE)
1312 cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
1313 else
1314 cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
1315 data->available_cores = pol->cpus;
1316
1317 if (cpu_family == CPU_HW_PSTATE)
1318 pol->cur = find_khz_freq_from_pstate(data->powernow_table,
1319 data->currpstate);
1320 else
1321 pol->cur = find_khz_freq_from_fid(data->currfid);
1322 dprintk("policy current frequency %d kHz\n", pol->cur);
1323
1324 /* min/max the cpu is capable of */
1325 if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
1326 printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
1327 powernow_k8_cpu_exit_acpi(data);
1328 kfree(data->powernow_table);
1329 kfree(data);
1330 return -EINVAL;
1331 }
1332
1333 /* Check for APERF/MPERF support in hardware */
1334 if (cpu_has(c, X86_FEATURE_APERFMPERF))
1335 cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
1336
1337 cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
1338
1339 if (cpu_family == CPU_HW_PSTATE)
1340 dprintk("cpu_init done, current pstate 0x%x\n",
1341 data->currpstate);
1342 else
1343 dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
1344 data->currfid, data->currvid);
1345
1346 per_cpu(powernow_data, pol->cpu) = data;
1347
1348 return 0;
1349
1350err_out_exit_acpi:
1351 powernow_k8_cpu_exit_acpi(data);
1352
1353err_out:
1354 kfree(data);
1355 return -ENODEV;
1356}
1357
1358static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
1359{
1360 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1361
1362 if (!data)
1363 return -EINVAL;
1364
1365 powernow_k8_cpu_exit_acpi(data);
1366
1367 cpufreq_frequency_table_put_attr(pol->cpu);
1368
1369 kfree(data->powernow_table);
1370 kfree(data);
1371 per_cpu(powernow_data, pol->cpu) = NULL;
1372
1373 return 0;
1374}
1375
1376static void query_values_on_cpu(void *_err)
1377{
1378 int *err = _err;
1379 struct powernow_k8_data *data = __this_cpu_read(powernow_data);
1380
1381 *err = query_current_values_with_pending_wait(data);
1382}
1383
1384static unsigned int powernowk8_get(unsigned int cpu)
1385{
1386 struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
1387 unsigned int khz = 0;
1388 int err;
1389
1390 if (!data)
1391 return 0;
1392
1393 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1394 if (err)
1395 goto out;
1396
1397 if (cpu_family == CPU_HW_PSTATE)
1398 khz = find_khz_freq_from_pstate(data->powernow_table,
1399 data->currpstate);
1400 else
1401 khz = find_khz_freq_from_fid(data->currfid);
1402
1403
1404out:
1405 return khz;
1406}
1407
1408static void _cpb_toggle_msrs(bool t)
1409{
1410 int cpu;
1411
1412 get_online_cpus();
1413
1414 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1415
1416 for_each_cpu(cpu, cpu_online_mask) {
1417 struct msr *reg = per_cpu_ptr(msrs, cpu);
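		/* bit 25 of MSR_K7_HWCR acts as the boost-disable flag:
		 * clearing it (t == true) enables Core Performance Boost,
		 * setting it disables boost (see cpb_notify() below). */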
1418 if (t)
1419 reg->l &= ~BIT(25);
1420 else
1421 reg->l |= BIT(25);
1422 }
1423 wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1424
1425 put_online_cpus();
1426}
1427
1428/*
1429 * Switch on/off core performance boosting.
1430 *
1431 * 0=disable
1432 * 1=enable.
1433 */
1434static void cpb_toggle(bool t)
1435{
1436 if (!cpb_capable)
1437 return;
1438
1439 if (t && !cpb_enabled) {
1440 cpb_enabled = true;
1441 _cpb_toggle_msrs(t);
1442 printk(KERN_INFO PFX "Core Boosting enabled.\n");
1443 } else if (!t && cpb_enabled) {
1444 cpb_enabled = false;
1445 _cpb_toggle_msrs(t);
1446 printk(KERN_INFO PFX "Core Boosting disabled.\n");
1447 }
1448}
1449
1450static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
1451 size_t count)
1452{
1453 int ret = -EINVAL;
1454 unsigned long val = 0;
1455
1456 ret = strict_strtoul(buf, 10, &val);
1457 if (!ret && (val == 0 || val == 1) && cpb_capable)
1458 cpb_toggle(val);
1459 else
1460 return -EINVAL;
1461
1462 return count;
1463}
1464
1465static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
1466{
1467 return sprintf(buf, "%u\n", cpb_enabled);
1468}
1469
1470#define define_one_rw(_name) \
1471static struct freq_attr _name = \
1472__ATTR(_name, 0644, show_##_name, store_##_name)
1473
1474define_one_rw(cpb);
1475
1476static struct freq_attr *powernow_k8_attr[] = {
1477 &cpufreq_freq_attr_scaling_available_freqs,
1478 &cpb,
1479 NULL,
1480};
1481
1482static struct cpufreq_driver cpufreq_amd64_driver = {
1483 .verify = powernowk8_verify,
1484 .target = powernowk8_target,
1485 .bios_limit = acpi_processor_get_bios_limit,
1486 .init = powernowk8_cpu_init,
1487 .exit = __devexit_p(powernowk8_cpu_exit),
1488 .get = powernowk8_get,
1489 .name = "powernow-k8",
1490 .owner = THIS_MODULE,
1491 .attr = powernow_k8_attr,
1492};
1493
1494/*
1495 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
1496 * cannot block the remaining ones from boosting. On the CPU_UP path we
1497 * simply keep the boost-disable flag in sync with the current global
1498 * state.
1499 */
1500static int cpb_notify(struct notifier_block *nb, unsigned long action,
1501 void *hcpu)
1502{
1503 unsigned cpu = (long)hcpu;
1504 u32 lo, hi;
1505
1506 switch (action) {
1507 case CPU_UP_PREPARE:
1508 case CPU_UP_PREPARE_FROZEN:
1509
1510 if (!cpb_enabled) {
1511 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
1512 lo |= BIT(25);
1513 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
1514 }
1515 break;
1516
1517 case CPU_DOWN_PREPARE:
1518 case CPU_DOWN_PREPARE_FROZEN:
1519 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
1520 lo &= ~BIT(25);
1521 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
1522 break;
1523
1524 default:
1525 break;
1526 }
1527
1528 return NOTIFY_OK;
1529}
1530
1531static struct notifier_block cpb_nb = {
1532 .notifier_call = cpb_notify,
1533};
1534
1535/* driver entry point for init */
1536static int __cpuinit powernowk8_init(void)
1537{
1538 unsigned int i, supported_cpus = 0, cpu;
1539 int rv;
1540
1541 for_each_online_cpu(i) {
1542 int rc;
1543 smp_call_function_single(i, check_supported_cpu, &rc, 1);
1544 if (rc == 0)
1545 supported_cpus++;
1546 }
1547
1548 if (supported_cpus != num_online_cpus())
1549 return -ENODEV;
1550
1551 printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
1552 num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
1553
1554 if (boot_cpu_has(X86_FEATURE_CPB)) {
1555
1556 cpb_capable = true;
1557
1558 msrs = msrs_alloc();
1559 if (!msrs) {
1560 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
1561 return -ENOMEM;
1562 }
1563
1564 register_cpu_notifier(&cpb_nb);
1565
1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1567
1568 for_each_cpu(cpu, cpu_online_mask) {
1569 struct msr *reg = per_cpu_ptr(msrs, cpu);
1570 cpb_enabled |= !(!!(reg->l & BIT(25)));
1571 }
1572
1573 printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
1574 (cpb_enabled ? "on" : "off"));
1575 }
1576
1577 rv = cpufreq_register_driver(&cpufreq_amd64_driver);
1578 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
1579 unregister_cpu_notifier(&cpb_nb);
1580 msrs_free(msrs);
1581 msrs = NULL;
1582 }
1583 return rv;
1584}
1585
1586/* driver entry point for term */
1587static void __exit powernowk8_exit(void)
1588{
1589 dprintk("exit\n");
1590
1591 if (boot_cpu_has(X86_FEATURE_CPB)) {
1592 msrs_free(msrs);
1593 msrs = NULL;
1594
1595 unregister_cpu_notifier(&cpb_nb);
1596 }
1597
1598 cpufreq_unregister_driver(&cpufreq_amd64_driver);
1599}
1600
1601MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
1602 "Mark Langsdorf <mark.langsdorf@amd.com>");
1603MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
1604MODULE_LICENSE("GPL");
1605
1606late_initcall(powernowk8_init);
1607module_exit(powernowk8_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
deleted file mode 100644
index df3529b1c02d..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ /dev/null
@@ -1,224 +0,0 @@
1/*
2 * (c) 2003-2006 Advanced Micro Devices, Inc.
3 * Your use of this code is subject to the terms and conditions of the
4 * GNU general public license version 2. See "COPYING" or
5 * http://www.gnu.org/licenses/gpl.html
6 */
7
8enum pstate {
9 HW_PSTATE_INVALID = 0xff,
10 HW_PSTATE_0 = 0,
11 HW_PSTATE_1 = 1,
12 HW_PSTATE_2 = 2,
13 HW_PSTATE_3 = 3,
14 HW_PSTATE_4 = 4,
15 HW_PSTATE_5 = 5,
16 HW_PSTATE_6 = 6,
17 HW_PSTATE_7 = 7,
18};
19
20struct powernow_k8_data {
21 unsigned int cpu;
22
23 u32 numps; /* number of p-states */
24 u32 batps; /* number of p-states supported on battery */
25 u32 max_hw_pstate; /* maximum legal hardware pstate */
26
27 /* these values are constant when the PSB is used to determine
28 * vid/fid pairings, but are modified during the ->target() call
29 * when ACPI is used */
30 u32 rvo; /* ramp voltage offset */
31 u32 irt; /* isochronous relief time */
32 u32 vidmvs; /* usable value calculated from mvs */
33 u32 vstable; /* voltage stabilization time, units 20 us */
34 u32 plllock; /* pll lock time, units 1 us */
35 u32 exttype; /* extended interface = 1 */
36
37 /* keep track of the current fid / vid or pstate */
38 u32 currvid;
39 u32 currfid;
40 enum pstate currpstate;
41
42 /* the powernow_table includes all frequency and vid/fid pairings:
43 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
44 * frequency is in kHz */
45 struct cpufreq_frequency_table *powernow_table;
46
47 /* the acpi table needs to be kept. it's only available if ACPI was
48 * used to determine valid frequency/vid/fid states */
49 struct acpi_processor_performance acpi_data;
50
51 /* we need to keep track of associated cores, but let cpufreq
52 * handle hotplug events - so just point at cpufreq pol->cpus
53 * structure */
54 struct cpumask *available_cores;
55};
56
57/* processor's cpuid instruction support */
58#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
59#define CPUID_XFAM 0x0ff00000 /* extended family */
60#define CPUID_XFAM_K8 0
61#define CPUID_XMOD 0x000f0000 /* extended model */
62#define CPUID_XMOD_REV_MASK 0x000c0000
63#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
64#define CPUID_USE_XFAM_XMOD 0x00000f00
65#define CPUID_GET_MAX_CAPABILITIES 0x80000000
66#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
67#define P_STATE_TRANSITION_CAPABLE 6
68
69/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
70/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
71/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
72/* the register number is placed in ecx, and the data is returned in edx:eax. */
73
74#define MSR_FIDVID_CTL 0xc0010041
75#define MSR_FIDVID_STATUS 0xc0010042
76
77/* Field definitions within the FID VID Low Control MSR : */
78#define MSR_C_LO_INIT_FID_VID 0x00010000
79#define MSR_C_LO_NEW_VID 0x00003f00
80#define MSR_C_LO_NEW_FID 0x0000003f
81#define MSR_C_LO_VID_SHIFT 8
82
83/* Field definitions within the FID VID High Control MSR : */
84#define MSR_C_HI_STP_GNT_TO 0x000fffff
85
86/* Field definitions within the FID VID Low Status MSR : */
87#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
88#define MSR_S_LO_MAX_RAMP_VID 0x3f000000
89#define MSR_S_LO_MAX_FID 0x003f0000
90#define MSR_S_LO_START_FID 0x00003f00
91#define MSR_S_LO_CURRENT_FID 0x0000003f
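/*
 * Usage sketch (illustrative only; it relies on the generic kernel
 * rdmsr(msr, lo, hi) helper already used elsewhere in this driver):
 *
 *	u32 lo, hi;
 *
 *	do
 *		rdmsr(MSR_FIDVID_STATUS, lo, hi);
 *	while (lo & MSR_S_LO_CHANGE_PENDING);
 *	current_fid = lo & MSR_S_LO_CURRENT_FID;
 */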
92
93/* Field definitions within the FID VID High Status MSR : */
94#define MSR_S_HI_MIN_WORKING_VID 0x3f000000
95#define MSR_S_HI_MAX_WORKING_VID 0x003f0000
96#define MSR_S_HI_START_VID 0x00003f00
97#define MSR_S_HI_CURRENT_VID 0x0000003f
98#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
99
100
101/* Hardware Pstate _PSS and MSR definitions */
102#define USE_HW_PSTATE 0x00000080
103#define HW_PSTATE_MASK 0x00000007
104#define HW_PSTATE_VALID_MASK 0x80000000
105#define HW_PSTATE_MAX_MASK 0x000000f0
106#define HW_PSTATE_MAX_SHIFT 4
107#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
108#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
109#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
110#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
111
112/* define the two driver architectures */
113#define CPU_OPTERON 0
114#define CPU_HW_PSTATE 1
115
116
117/*
118 * There are restrictions frequencies have to follow:
119 * - only 1 entry in the low fid table ( <=1.4GHz )
120 * - lowest entry in the high fid table must be >= 2 * the entry in the
121 * low fid table
 * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry
 *   in the low fid table
 * - the parts can only step at <= 200 MHz intervals; odd fid values are
 *   supported in revision G and later.
126 * - lowest frequency must be >= interprocessor hypertransport link speed
127 * (only applies to MP systems obviously)
128 */
129
130/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
131#define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */
132#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
133
134#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
135#define HI_VCOFREQ_TABLE_BOTTOM 1600
136
137#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
138
139#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
140#define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */
141
142#define MIN_FREQ 800 /* Min and max freqs, per spec */
143#define MAX_FREQ 5000
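/*
 * Taken together, these constants reflect the (even-fid) mapping of
 * roughly f(MHz) = 800 + 100 * fid: fid 0 corresponds to MIN_FREQ
 * (800 MHz) and MAX_FID (0x2a = 42) to MAX_FREQ (5000 MHz), with one
 * MIN_FREQ_RESOLUTION (200 MHz) step per two-fid increment.
 */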
144
145#define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */
146#define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */
147
148#define VID_OFF 0x3f
149
150#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
151
152#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
153
154#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
155#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
156
157/*
158 * Most values of interest are encoded in a single field of the _PSS
159 * entries: the "control" value.
160 */
161
162#define IRT_SHIFT 30
163#define RVO_SHIFT 28
164#define EXT_TYPE_SHIFT 27
165#define PLL_L_SHIFT 20
166#define MVS_SHIFT 18
167#define VST_SHIFT 11
168#define VID_SHIFT 6
169#define IRT_MASK 3
170#define RVO_MASK 3
171#define EXT_TYPE_MASK 1
172#define PLL_L_MASK 0x7f
173#define MVS_MASK 3
174#define VST_MASK 0x7f
175#define VID_MASK 0x1f
176#define FID_MASK 0x1f
177#define EXT_VID_MASK 0x3f
178#define EXT_FID_MASK 0x3f
179
180
181/*
182 * Version 1.4 of the PSB table. This table is constructed by BIOS and is
183 * to tell the OS's power management driver which VIDs and FIDs are
184 * supported by this particular processor.
185 * If the data in the PSB / PST is wrong, then this driver will program the
186 * wrong values into hardware, which is very likely to lead to a crash.
187 */
188
189#define PSB_ID_STRING "AMDK7PNOW!"
190#define PSB_ID_STRING_LEN 10
191
192#define PSB_VERSION_1_4 0x14
193
194struct psb_s {
195 u8 signature[10];
196 u8 tableversion;
197 u8 flags1;
198 u16 vstable;
199 u8 flags2;
200 u8 num_tables;
201 u32 cpuid;
202 u8 plllocktime;
203 u8 maxfid;
204 u8 maxvid;
205 u8 numps;
206};
207
208/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
209struct pst_s {
210 u8 fid;
211 u8 vid;
212};
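/*
 * Layout note: the pst_s entries sit directly after the psb_s header in
 * BIOS memory, which is why find_psb_table() hands them to
 * fill_powernow_table() as (struct pst_s *)(psb + 1), indexed
 * 0 .. psb->numps - 1.
 */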
213
214#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg)
215
216static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid);
218static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
219static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
220
221static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
222
223static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
224static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
deleted file mode 100644
index 435a996a613a..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * sc520_freq.c: cpufreq driver for the AMD Elan sc520
3 *
4 * Copyright (C) 2005 Sean Young <sean@mess.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Based on elanfreq.c
12 *
13 * 2005-03-30: - initial revision
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19
20#include <linux/delay.h>
21#include <linux/cpufreq.h>
22#include <linux/timex.h>
23#include <linux/io.h>
24
25#include <asm/msr.h>
26
27#define MMCR_BASE 0xfffef000 /* The default base address */
28#define OFFS_CPUCTL 0x2 /* CPU Control Register */
29
30static __u8 __iomem *cpuctl;
31
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
33 "sc520_freq", msg)
34#define PFX "sc520_freq: "
35
36static struct cpufreq_frequency_table sc520_freq_table[] = {
37 {0x01, 100000},
38 {0x02, 133000},
39 {0, CPUFREQ_TABLE_END},
40};
41
42static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
43{
44 u8 clockspeed_reg = *cpuctl;
45
46 switch (clockspeed_reg & 0x03) {
47 default:
48 printk(KERN_ERR PFX "error: cpuctl register has unexpected "
49 "value %02x\n", clockspeed_reg);
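		/* no break: fall through and report the 100 MHz setting */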
50 case 0x01:
51 return 100000;
52 case 0x02:
53 return 133000;
54 }
55}
56
57static void sc520_freq_set_cpu_state(unsigned int state)
58{
59
60 struct cpufreq_freqs freqs;
61 u8 clockspeed_reg;
62
63 freqs.old = sc520_freq_get_cpu_frequency(0);
64 freqs.new = sc520_freq_table[state].frequency;
65 freqs.cpu = 0; /* AMD Elan is UP */
66
67 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
68
69 dprintk("attempting to set frequency to %i kHz\n",
70 sc520_freq_table[state].frequency);
71
72 local_irq_disable();
73
74 clockspeed_reg = *cpuctl & ~0x03;
75 *cpuctl = clockspeed_reg | sc520_freq_table[state].index;
76
77 local_irq_enable();
78
79 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
81
82static int sc520_freq_verify(struct cpufreq_policy *policy)
83{
84 return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
85}
86
87static int sc520_freq_target(struct cpufreq_policy *policy,
88 unsigned int target_freq,
89 unsigned int relation)
90{
91 unsigned int newstate = 0;
92
93 if (cpufreq_frequency_table_target(policy, sc520_freq_table,
94 target_freq, relation, &newstate))
95 return -EINVAL;
96
97 sc520_freq_set_cpu_state(newstate);
98
99 return 0;
100}
101
102
103/*
104 * Module init and exit code
105 */
106
107static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
108{
109 struct cpuinfo_x86 *c = &cpu_data(0);
110 int result;
111
112 /* capability check */
113 if (c->x86_vendor != X86_VENDOR_AMD ||
114 c->x86 != 4 || c->x86_model != 9)
115 return -ENODEV;
116
117 /* cpuinfo and default policy values */
118 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
119 policy->cur = sc520_freq_get_cpu_frequency(0);
120
121 result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
122 if (result)
123 return result;
124
125 cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
126
127 return 0;
128}
129
130
131static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
132{
133 cpufreq_frequency_table_put_attr(policy->cpu);
134 return 0;
135}
136
137
138static struct freq_attr *sc520_freq_attr[] = {
139 &cpufreq_freq_attr_scaling_available_freqs,
140 NULL,
141};
142
143
144static struct cpufreq_driver sc520_freq_driver = {
145 .get = sc520_freq_get_cpu_frequency,
146 .verify = sc520_freq_verify,
147 .target = sc520_freq_target,
148 .init = sc520_freq_cpu_init,
149 .exit = sc520_freq_cpu_exit,
150 .name = "sc520_freq",
151 .owner = THIS_MODULE,
152 .attr = sc520_freq_attr,
153};
154
155
156static int __init sc520_freq_init(void)
157{
158 struct cpuinfo_x86 *c = &cpu_data(0);
159 int err;
160
161 /* Test if we have the right hardware */
162 if (c->x86_vendor != X86_VENDOR_AMD ||
163 c->x86 != 4 || c->x86_model != 9) {
164 dprintk("no Elan SC520 processor found!\n");
165 return -ENODEV;
166 }
167 cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
168 if (!cpuctl) {
169 printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
170 return -ENOMEM;
171 }
172
173 err = cpufreq_register_driver(&sc520_freq_driver);
174 if (err)
175 iounmap(cpuctl);
176
177 return err;
178}
179
180
181static void __exit sc520_freq_exit(void)
182{
183 cpufreq_unregister_driver(&sc520_freq_driver);
184 iounmap(cpuctl);
185}
186
187
188MODULE_LICENSE("GPL");
189MODULE_AUTHOR("Sean Young <sean@mess.org>");
190MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
191
192module_init(sc520_freq_init);
193module_exit(sc520_freq_exit);
194
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
deleted file mode 100644
index 9b1ff37de46a..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ /dev/null
@@ -1,636 +0,0 @@
1/*
2 * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
3 * M (part of the Centrino chipset).
4 *
5 * Since the original Pentium M, most new Intel CPUs support Enhanced
6 * SpeedStep.
7 *
8 * Despite the "SpeedStep" in the name, this is almost entirely unlike
9 * traditional SpeedStep.
10 *
11 * Modelled on speedstep.c
12 *
13 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/cpufreq.h>
20#include <linux/sched.h> /* current */
21#include <linux/delay.h>
22#include <linux/compiler.h>
23#include <linux/gfp.h>
24
25#include <asm/msr.h>
26#include <asm/processor.h>
27#include <asm/cpufeature.h>
28
29#define PFX "speedstep-centrino: "
30#define MAINTAINER "cpufreq@vger.kernel.org"
31
32#define dprintk(msg...) \
33 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
34
35#define INTEL_MSR_RANGE (0xffff)
36
37struct cpu_id
38{
39 __u8 x86; /* CPU family */
40 __u8 x86_model; /* model */
41 __u8 x86_mask; /* stepping */
42};
43
44enum {
45 CPU_BANIAS,
46 CPU_DOTHAN_A1,
47 CPU_DOTHAN_A2,
48 CPU_DOTHAN_B0,
49 CPU_MP4HT_D0,
50 CPU_MP4HT_E0,
51};
52
53static const struct cpu_id cpu_ids[] = {
54 [CPU_BANIAS] = { 6, 9, 5 },
55 [CPU_DOTHAN_A1] = { 6, 13, 1 },
56 [CPU_DOTHAN_A2] = { 6, 13, 2 },
57 [CPU_DOTHAN_B0] = { 6, 13, 6 },
58 [CPU_MP4HT_D0] = {15, 3, 4 },
59 [CPU_MP4HT_E0] = {15, 4, 1 },
60};
61#define N_IDS ARRAY_SIZE(cpu_ids)
62
63struct cpu_model
64{
65 const struct cpu_id *cpu_id;
66 const char *model_name;
67 unsigned max_freq; /* max clock in kHz */
68
69 struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
70};
71static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
72 const struct cpu_id *x);
73
74/* Operating points for current CPU */
75static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
76static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
77
78static struct cpufreq_driver centrino_driver;
79
80#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
81
82/* Computes the correct form for IA32_PERF_CTL MSR for a particular
83 frequency/voltage operating point; frequency in MHz, volts in mV.
84 This is stored as "index" in the structure. */
85#define OP(mhz, mv) \
86 { \
87 .frequency = (mhz) * 1000, \
88 .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \
89 }
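/*
 * Example: OP(600, 844) expands to { .frequency = 600000,
 * .index = ((600 / 100) << 8) | ((844 - 700) / 16) }, i.e. the
 * 600 MHz / 844 mV operating point is encoded as index 0x609.
 */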
90
91/*
92 * These voltage tables were derived from the Intel Pentium M
93 * datasheet, document 25261202.pdf, Table 5. I have verified they
94 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
95 * M.
96 */
97
98/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
99static struct cpufreq_frequency_table banias_900[] =
100{
101 OP(600, 844),
102 OP(800, 988),
103 OP(900, 1004),
104 { .frequency = CPUFREQ_TABLE_END }
105};
106
107/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
108static struct cpufreq_frequency_table banias_1000[] =
109{
110 OP(600, 844),
111 OP(800, 972),
112 OP(900, 988),
113 OP(1000, 1004),
114 { .frequency = CPUFREQ_TABLE_END }
115};
116
117/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
118static struct cpufreq_frequency_table banias_1100[] =
119{
120 OP( 600, 956),
121 OP( 800, 1020),
122 OP( 900, 1100),
123 OP(1000, 1164),
124 OP(1100, 1180),
125 { .frequency = CPUFREQ_TABLE_END }
126};
127
128
129/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
130static struct cpufreq_frequency_table banias_1200[] =
131{
132 OP( 600, 956),
133 OP( 800, 1004),
134 OP( 900, 1020),
135 OP(1000, 1100),
136 OP(1100, 1164),
137 OP(1200, 1180),
138 { .frequency = CPUFREQ_TABLE_END }
139};
140
141/* Intel Pentium M processor 1.30GHz (Banias) */
142static struct cpufreq_frequency_table banias_1300[] =
143{
144 OP( 600, 956),
145 OP( 800, 1260),
146 OP(1000, 1292),
147 OP(1200, 1356),
148 OP(1300, 1388),
149 { .frequency = CPUFREQ_TABLE_END }
150};
151
152/* Intel Pentium M processor 1.40GHz (Banias) */
153static struct cpufreq_frequency_table banias_1400[] =
154{
155 OP( 600, 956),
156 OP( 800, 1180),
157 OP(1000, 1308),
158 OP(1200, 1436),
159 OP(1400, 1484),
160 { .frequency = CPUFREQ_TABLE_END }
161};
162
163/* Intel Pentium M processor 1.50GHz (Banias) */
164static struct cpufreq_frequency_table banias_1500[] =
165{
166 OP( 600, 956),
167 OP( 800, 1116),
168 OP(1000, 1228),
169 OP(1200, 1356),
170 OP(1400, 1452),
171 OP(1500, 1484),
172 { .frequency = CPUFREQ_TABLE_END }
173};
174
175/* Intel Pentium M processor 1.60GHz (Banias) */
176static struct cpufreq_frequency_table banias_1600[] =
177{
178 OP( 600, 956),
179 OP( 800, 1036),
180 OP(1000, 1164),
181 OP(1200, 1276),
182 OP(1400, 1420),
183 OP(1600, 1484),
184 { .frequency = CPUFREQ_TABLE_END }
185};
186
187/* Intel Pentium M processor 1.70GHz (Banias) */
188static struct cpufreq_frequency_table banias_1700[] =
189{
190 OP( 600, 956),
191 OP( 800, 1004),
192 OP(1000, 1116),
193 OP(1200, 1228),
194 OP(1400, 1308),
195 OP(1700, 1484),
196 { .frequency = CPUFREQ_TABLE_END }
197};
198#undef OP
199
200#define _BANIAS(cpuid, max, name) \
201{ .cpu_id = cpuid, \
202 .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
203 .max_freq = (max)*1000, \
204 .op_points = banias_##max, \
205}
206#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
207
208/* CPU models, their operating frequency range, and freq/voltage
209 operating points */
210static struct cpu_model models[] =
211{
212 _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
213 BANIAS(1000),
214 BANIAS(1100),
215 BANIAS(1200),
216 BANIAS(1300),
217 BANIAS(1400),
218 BANIAS(1500),
219 BANIAS(1600),
220 BANIAS(1700),
221
222 /* NULL model_name is a wildcard */
223 { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
224 { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
225 { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
226 { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
227 { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
228
229 { NULL, }
230};
231#undef _BANIAS
232#undef BANIAS
233
234static int centrino_cpu_init_table(struct cpufreq_policy *policy)
235{
236 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
237 struct cpu_model *model;
238
239 for(model = models; model->cpu_id != NULL; model++)
240 if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
241 (model->model_name == NULL ||
242 strcmp(cpu->x86_model_id, model->model_name) == 0))
243 break;
244
245 if (model->cpu_id == NULL) {
246 /* No match at all */
247 dprintk("no support for CPU model \"%s\": "
248 "send /proc/cpuinfo to " MAINTAINER "\n",
249 cpu->x86_model_id);
250 return -ENOENT;
251 }
252
253 if (model->op_points == NULL) {
254 /* Matched a non-match */
255 dprintk("no table support for CPU model \"%s\"\n",
256 cpu->x86_model_id);
257 dprintk("try using the acpi-cpufreq driver\n");
258 return -ENOENT;
259 }
260
261 per_cpu(centrino_model, policy->cpu) = model;
262
263 dprintk("found \"%s\": max frequency: %dkHz\n",
264 model->model_name, model->max_freq);
265
266 return 0;
267}
268
269#else
270static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
271{
272 return -ENODEV;
273}
274#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
275
276static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
277 const struct cpu_id *x)
278{
279 if ((c->x86 == x->x86) &&
280 (c->x86_model == x->x86_model) &&
281 (c->x86_mask == x->x86_mask))
282 return 1;
283 return 0;
284}
285
286/* To be called only after centrino_model is initialized */
287static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
288{
289 int i;
290
291 /*
292 * Extract clock in kHz from PERF_CTL value
293 * for centrino, as some DSDTs are buggy.
294 * Ideally, this can be done using the acpi_data structure.
295 */
296 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
298 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
299 msr = (msr >> 8) & 0xff;
300 return msr * 100000;
301 }
302
303 if ((!per_cpu(centrino_model, cpu)) ||
304 (!per_cpu(centrino_model, cpu)->op_points))
305 return 0;
306
307 msr &= 0xffff;
308 for (i = 0;
309 per_cpu(centrino_model, cpu)->op_points[i].frequency
310 != CPUFREQ_TABLE_END;
311 i++) {
312 if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
313 return per_cpu(centrino_model, cpu)->
314 op_points[i].frequency;
315 }
316 if (failsafe)
317 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
318 else
319 return 0;
320}
321
322/* Return the current CPU frequency in kHz */
323static unsigned int get_cur_freq(unsigned int cpu)
324{
325 unsigned l, h;
326 unsigned clock_freq;
327
328 rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
329 clock_freq = extract_clock(l, cpu, 0);
330
331 if (unlikely(clock_freq == 0)) {
332 /*
333 * On some CPUs, we can see transient MSR values (which are
334 * not present in _PSS), while CPU is doing some automatic
335 * P-state transition (like TM2). Get the last freq set
336 * in PERF_CTL.
337 */
338 rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
339 clock_freq = extract_clock(l, cpu, 1);
340 }
341 return clock_freq;
342}
343
344
345static int centrino_cpu_init(struct cpufreq_policy *policy)
346{
347 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
348 unsigned freq;
349 unsigned l, h;
350 int ret;
351 int i;
352
353 /* Only Intel makes Enhanced Speedstep-capable CPUs */
354 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
355 !cpu_has(cpu, X86_FEATURE_EST))
356 return -ENODEV;
357
358 if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
359 centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
360
361 if (policy->cpu != 0)
362 return -ENODEV;
363
364 for (i = 0; i < N_IDS; i++)
365 if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
366 break;
367
368 if (i != N_IDS)
369 per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
370
371 if (!per_cpu(centrino_cpu, policy->cpu)) {
372 dprintk("found unsupported CPU with "
373 "Enhanced SpeedStep: send /proc/cpuinfo to "
374 MAINTAINER "\n");
375 return -ENODEV;
376 }
377
378 if (centrino_cpu_init_table(policy)) {
379 return -ENODEV;
380 }
381
382 /* Check to see if Enhanced SpeedStep is enabled, and try to
383 enable it if not. */
384 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
385
386 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
387 l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
388 dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
389 wrmsr(MSR_IA32_MISC_ENABLE, l, h);
390
391 /* check to see if it stuck */
392 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
393 if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
394 printk(KERN_INFO PFX
395 "couldn't enable Enhanced SpeedStep\n");
396 return -ENODEV;
397 }
398 }
399
400 freq = get_cur_freq(policy->cpu);
401 policy->cpuinfo.transition_latency = 10000;
402 /* 10uS transition latency */
403 policy->cur = freq;
404
405 dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);
406
407 ret = cpufreq_frequency_table_cpuinfo(policy,
408 per_cpu(centrino_model, policy->cpu)->op_points);
409 if (ret)
		return ret;
411
412 cpufreq_frequency_table_get_attr(
413 per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
414
415 return 0;
416}
417
418static int centrino_cpu_exit(struct cpufreq_policy *policy)
419{
420 unsigned int cpu = policy->cpu;
421
422 if (!per_cpu(centrino_model, cpu))
423 return -ENODEV;
424
425 cpufreq_frequency_table_put_attr(cpu);
426
427 per_cpu(centrino_model, cpu) = NULL;
428
429 return 0;
430}
431
432/**
433 * centrino_verify - verifies a new CPUFreq policy
434 * @policy: new policy
435 *
436 * Limit must be within this model's frequency range at least one
437 * border included.
438 */
439static int centrino_verify (struct cpufreq_policy *policy)
440{
441 return cpufreq_frequency_table_verify(policy,
442 per_cpu(centrino_model, policy->cpu)->op_points);
443}
444
445/**
446 * centrino_setpolicy - set a new CPUFreq policy
447 * @policy: new policy
448 * @target_freq: the target frequency
449 * @relation: how that frequency relates to achieved frequency
450 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
451 *
452 * Sets a new CPUFreq policy.
453 */
454static int centrino_target (struct cpufreq_policy *policy,
455 unsigned int target_freq,
456 unsigned int relation)
457{
458 unsigned int newstate = 0;
459 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
460 struct cpufreq_freqs freqs;
461 int retval = 0;
462 unsigned int j, k, first_cpu, tmp;
463 cpumask_var_t covered_cpus;
464
465 if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
466 return -ENOMEM;
467
468 if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
469 retval = -ENODEV;
470 goto out;
471 }
472
473 if (unlikely(cpufreq_frequency_table_target(policy,
474 per_cpu(centrino_model, cpu)->op_points,
475 target_freq,
476 relation,
477 &newstate))) {
478 retval = -EINVAL;
479 goto out;
480 }
481
482 first_cpu = 1;
483 for_each_cpu(j, policy->cpus) {
484 int good_cpu;
485
486 /* cpufreq holds the hotplug lock, so we are safe here */
487 if (!cpu_online(j))
488 continue;
489
490 /*
491 * Support for SMP systems.
492 * Make sure we are running on CPU that wants to change freq
493 */
494 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
495 good_cpu = cpumask_any_and(policy->cpus,
496 cpu_online_mask);
497 else
498 good_cpu = j;
499
500 if (good_cpu >= nr_cpu_ids) {
501 dprintk("couldn't limit to CPUs in this domain\n");
502 retval = -EAGAIN;
503 if (first_cpu) {
504 /* We haven't started the transition yet. */
505 goto out;
506 }
507 break;
508 }
509
510 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
511
512 if (first_cpu) {
513 rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
514 if (msr == (oldmsr & 0xffff)) {
515 dprintk("no change needed - msr was and needs "
516 "to be %x\n", oldmsr);
517 retval = 0;
518 goto out;
519 }
520
521 freqs.old = extract_clock(oldmsr, cpu, 0);
522 freqs.new = extract_clock(msr, cpu, 0);
523
524 dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
525 target_freq, freqs.old, freqs.new, msr);
526
527 for_each_cpu(k, policy->cpus) {
528 if (!cpu_online(k))
529 continue;
530 freqs.cpu = k;
531 cpufreq_notify_transition(&freqs,
532 CPUFREQ_PRECHANGE);
533 }
534
535 first_cpu = 0;
536 /* all but 16 LSB are reserved, treat them with care */
537 oldmsr &= ~0xffff;
538 msr &= 0xffff;
539 oldmsr |= msr;
540 }
541
542 wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
543 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
544 break;
545
546 cpumask_set_cpu(j, covered_cpus);
547 }
548
549 for_each_cpu(k, policy->cpus) {
550 if (!cpu_online(k))
551 continue;
552 freqs.cpu = k;
553 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
554 }
555
556 if (unlikely(retval)) {
557 /*
558 * We have failed halfway through the frequency change.
559 * We have sent callbacks to policy->cpus and
		 * MSRs have already been written on covered_cpus.
561 * Best effort undo..
562 */
563
564 for_each_cpu(j, covered_cpus)
565 wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
566
567 tmp = freqs.new;
568 freqs.new = freqs.old;
569 freqs.old = tmp;
570 for_each_cpu(j, policy->cpus) {
571 if (!cpu_online(j))
572 continue;
573 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
574 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
575 }
576 }
577 retval = 0;
578
579out:
580 free_cpumask_var(covered_cpus);
581 return retval;
582}
583
584static struct freq_attr* centrino_attr[] = {
585 &cpufreq_freq_attr_scaling_available_freqs,
586 NULL,
587};
588
589static struct cpufreq_driver centrino_driver = {
590 .name = "centrino", /* should be speedstep-centrino,
591 but there's a 16 char limit */
592 .init = centrino_cpu_init,
593 .exit = centrino_cpu_exit,
594 .verify = centrino_verify,
595 .target = centrino_target,
596 .get = get_cur_freq,
597 .attr = centrino_attr,
598 .owner = THIS_MODULE,
599};
600
601
602/**
603 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
604 *
605 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
606 * unsupported devices, -ENOENT if there's no voltage table for this
 * particular CPU model, -EINVAL on problems during initialization,
608 * and zero on success.
609 *
610 * This is quite picky. Not only does the CPU have to advertise the
611 * "est" flag in the cpuid capability flags, we look for a specific
612 * CPU model and stepping, and we need to have the exact model name in
613 * our voltage tables. That is, be paranoid about not releasing
614 * someone's valuable magic smoke.
615 */
616static int __init centrino_init(void)
617{
618 struct cpuinfo_x86 *cpu = &cpu_data(0);
619
620 if (!cpu_has(cpu, X86_FEATURE_EST))
621 return -ENODEV;
622
623 return cpufreq_register_driver(&centrino_driver);
624}
625
626static void __exit centrino_exit(void)
627{
628 cpufreq_unregister_driver(&centrino_driver);
629}
630
631MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
632MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
633MODULE_LICENSE ("GPL");
634
635late_initcall(centrino_init);
636module_exit(centrino_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
deleted file mode 100644
index 561758e95180..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ /dev/null
@@ -1,452 +0,0 @@
1/*
2 * (C) 2001 Dave Jones, Arjan van de ven.
3 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
4 *
5 * Licensed under the terms of the GNU GPL License version 2.
6 * Based upon reverse engineered information, and on Intel documentation
7 * for chipsets ICH2-M and ICH3-M.
8 *
9 * Many thanks to Ducrot Bruno for finding and fixing the last
10 * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
11 * for extensive testing.
12 *
13 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
14 */
15
16
17/*********************************************************************
18 * SPEEDSTEP - DEFINITIONS *
19 *********************************************************************/
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/cpufreq.h>
25#include <linux/pci.h>
26#include <linux/sched.h>
27
28#include "speedstep-lib.h"
29
30
31/* speedstep_chipset:
32 * It is necessary to know which chipset is used. As accesses to
33 * this device occur at various places in this module, we need a
34 * static struct pci_dev * pointing to that device.
35 */
36static struct pci_dev *speedstep_chipset_dev;
37
38
39/* speedstep_processor
40 */
41static enum speedstep_processor speedstep_processor;
42
43static u32 pmbase;
44
45/*
46 * There are only two frequency states for each processor. Values
47 * are in kHz for the time being.
48 */
49static struct cpufreq_frequency_table speedstep_freqs[] = {
50 {SPEEDSTEP_HIGH, 0},
51 {SPEEDSTEP_LOW, 0},
52 {0, CPUFREQ_TABLE_END},
53};
54
55
56#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
57 "speedstep-ich", msg)
58
59
60/**
61 * speedstep_find_register - read the PMBASE address
62 *
63 * Returns: -ENODEV if no register could be found
64 */
65static int speedstep_find_register(void)
66{
67 if (!speedstep_chipset_dev)
68 return -ENODEV;
69
70 /* get PMBASE */
71 pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
72 if (!(pmbase & 0x01)) {
73 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
74 return -ENODEV;
75 }
76
77 pmbase &= 0xFFFFFFFE;
78 if (!pmbase) {
79 printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
80 return -ENODEV;
81 }
82
83 dprintk("pmbase is 0x%x\n", pmbase);
84 return 0;
85}
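
speedstep_find_register() above treats the dword at PCI config offset 0x40 as the PM I/O base (PMBASE): bit 0 must be set for the register to be considered valid, and masking that low bit off yields the port base later used at pmbase + 0x50 and pmbase + 0x20. A standalone sketch of that decode, with a made-up register value:

#include <stdio.h>
#include <stdint.h>

/* Returns the I/O port base, or 0 if the register looks invalid. */
static uint32_t decode_pmbase(uint32_t reg)
{
	if (!(reg & 0x01))		/* low bit clear: no speedstep register */
		return 0;
	return reg & 0xFFFFFFFE;	/* strip the flag bit to get the base */
}

int main(void)
{
	uint32_t reg = 0x00001001;	/* hypothetical config-space value */

	printf("pmbase is 0x%x\n", decode_pmbase(reg));	/* -> 0x1000 */
	return 0;
}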
86
87/**
88 * speedstep_set_state - set the SpeedStep state
89 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
90 *
91 * Tries to change the SpeedStep state. Can be called from
92 * smp_call_function_single.
93 */
94static void speedstep_set_state(unsigned int state)
95{
96 u8 pm2_blk;
97 u8 value;
98 unsigned long flags;
99
100 if (state > 0x1)
101 return;
102
103 /* Disable IRQs */
104 local_irq_save(flags);
105
106 /* read state */
107 value = inb(pmbase + 0x50);
108
109 dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
110
111 /* write new state */
112 value &= 0xFE;
113 value |= state;
114
115 dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
116
117 /* Disable bus master arbitration */
118 pm2_blk = inb(pmbase + 0x20);
119 pm2_blk |= 0x01;
120 outb(pm2_blk, (pmbase + 0x20));
121
122 /* Actual transition */
123 outb(value, (pmbase + 0x50));
124
125 /* Restore bus master arbitration */
126 pm2_blk &= 0xfe;
127 outb(pm2_blk, (pmbase + 0x20));
128
129 /* check if transition was successful */
130 value = inb(pmbase + 0x50);
131
132 /* Enable IRQs */
133 local_irq_restore(flags);
134
135 dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
136
137 if (state == (value & 0x1))
138 dprintk("change to %u MHz succeeded\n",
139 speedstep_get_frequency(speedstep_processor) / 1000);
140 else
141 printk(KERN_ERR "cpufreq: change failed - I/O error\n");
142
143 return;
144}
145
146/* Wrapper for smp_call_function_single. */
147static void _speedstep_set_state(void *_state)
148{
149 speedstep_set_state(*(unsigned int *)_state);
150}
151
152/**
153 * speedstep_activate - activate SpeedStep control in the chipset
154 *
155 * Tries to activate the SpeedStep status and control registers.
156 * Returns -EINVAL on an unsupported chipset, and zero on success.
157 */
158static int speedstep_activate(void)
159{
160 u16 value = 0;
161
162 if (!speedstep_chipset_dev)
163 return -EINVAL;
164
165 pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
166 if (!(value & 0x08)) {
167 value |= 0x08;
168 dprintk("activating SpeedStep (TM) registers\n");
169 pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
170 }
171
172 return 0;
173}
174
175
176/**
177 * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
178 *
179 * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
180 * the LPC bridge / PM module which contains all power-management
181 * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
182 * chipset, or zero on failure.
183 */
184static unsigned int speedstep_detect_chipset(void)
185{
186 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
187 PCI_DEVICE_ID_INTEL_82801DB_12,
188 PCI_ANY_ID, PCI_ANY_ID,
189 NULL);
190 if (speedstep_chipset_dev)
191 return 4; /* 4-M */
192
193 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
194 PCI_DEVICE_ID_INTEL_82801CA_12,
195 PCI_ANY_ID, PCI_ANY_ID,
196 NULL);
197 if (speedstep_chipset_dev)
198 return 3; /* 3-M */
199
200
201 speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
202 PCI_DEVICE_ID_INTEL_82801BA_10,
203 PCI_ANY_ID, PCI_ANY_ID,
204 NULL);
205 if (speedstep_chipset_dev) {
206		/* speedstep.c causes lockups on Dell Inspiron 8000 and
207		 * 8100 machines, which use a pretty old revision of the 82815
208		 * host bridge. Abort on these systems.
209 */
210 static struct pci_dev *hostbridge;
211
212 hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
213 PCI_DEVICE_ID_INTEL_82815_MC,
214 PCI_ANY_ID, PCI_ANY_ID,
215 NULL);
216
217 if (!hostbridge)
218 return 2; /* 2-M */
219
220 if (hostbridge->revision < 5) {
221 dprintk("hostbridge does not support speedstep\n");
222 speedstep_chipset_dev = NULL;
223 pci_dev_put(hostbridge);
224 return 0;
225 }
226
227 pci_dev_put(hostbridge);
228 return 2; /* 2-M */
229 }
230
231 return 0;
232}
233
234static void get_freq_data(void *_speed)
235{
236 unsigned int *speed = _speed;
237
238 *speed = speedstep_get_frequency(speedstep_processor);
239}
240
241static unsigned int speedstep_get(unsigned int cpu)
242{
243 unsigned int speed;
244
245	/* You're supposed to ensure the CPU is online. */
246 if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
247 BUG();
248
249 dprintk("detected %u kHz as current frequency\n", speed);
250 return speed;
251}
252
253/**
254 * speedstep_target - set a new CPUFreq policy
255 * @policy: new policy
256 * @target_freq: the target frequency
257 * @relation: how that frequency relates to achieved frequency
258 * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
259 *
260 * Sets a new CPUFreq policy.
261 */
262static int speedstep_target(struct cpufreq_policy *policy,
263 unsigned int target_freq,
264 unsigned int relation)
265{
266 unsigned int newstate = 0, policy_cpu;
267 struct cpufreq_freqs freqs;
268 int i;
269
270 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
271 target_freq, relation, &newstate))
272 return -EINVAL;
273
274 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
275 freqs.old = speedstep_get(policy_cpu);
276 freqs.new = speedstep_freqs[newstate].frequency;
277 freqs.cpu = policy->cpu;
278
279	dprintk("transitioning from %u to %u kHz\n", freqs.old, freqs.new);
280
281 /* no transition necessary */
282 if (freqs.old == freqs.new)
283 return 0;
284
285 for_each_cpu(i, policy->cpus) {
286 freqs.cpu = i;
287 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
288 }
289
290 smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
291 true);
292
293 for_each_cpu(i, policy->cpus) {
294 freqs.cpu = i;
295 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
296 }
297
298 return 0;
299}
300
301
302/**
303 * speedstep_verify - verifies a new CPUFreq policy
304 * @policy: new policy
305 *
306 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
307 * at least one border included.
308 */
309static int speedstep_verify(struct cpufreq_policy *policy)
310{
311 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
312}
313
314struct get_freqs {
315 struct cpufreq_policy *policy;
316 int ret;
317};
318
319static void get_freqs_on_cpu(void *_get_freqs)
320{
321 struct get_freqs *get_freqs = _get_freqs;
322
323 get_freqs->ret =
324 speedstep_get_freqs(speedstep_processor,
325 &speedstep_freqs[SPEEDSTEP_LOW].frequency,
326 &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
327 &get_freqs->policy->cpuinfo.transition_latency,
328 &speedstep_set_state);
329}
330
331static int speedstep_cpu_init(struct cpufreq_policy *policy)
332{
333 int result;
334 unsigned int policy_cpu, speed;
335 struct get_freqs gf;
336
337 /* only run on CPU to be set, or on its sibling */
338#ifdef CONFIG_SMP
339 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
340#endif
341 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
342
343 /* detect low and high frequency and transition latency */
344 gf.policy = policy;
345 smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
346 if (gf.ret)
347 return gf.ret;
348
349 /* get current speed setting */
350 speed = speedstep_get(policy_cpu);
351 if (!speed)
352 return -EIO;
353
354 dprintk("currently at %s speed setting - %i MHz\n",
355 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
356 ? "low" : "high",
357 (speed / 1000));
358
359 /* cpuinfo and default policy values */
360 policy->cur = speed;
361
362 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
363 if (result)
364 return result;
365
366 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
367
368 return 0;
369}
370
371
372static int speedstep_cpu_exit(struct cpufreq_policy *policy)
373{
374 cpufreq_frequency_table_put_attr(policy->cpu);
375 return 0;
376}
377
378static struct freq_attr *speedstep_attr[] = {
379 &cpufreq_freq_attr_scaling_available_freqs,
380 NULL,
381};
382
383
384static struct cpufreq_driver speedstep_driver = {
385 .name = "speedstep-ich",
386 .verify = speedstep_verify,
387 .target = speedstep_target,
388 .init = speedstep_cpu_init,
389 .exit = speedstep_cpu_exit,
390 .get = speedstep_get,
391 .owner = THIS_MODULE,
392 .attr = speedstep_attr,
393};
394
395
396/**
397 * speedstep_init - initializes the SpeedStep CPUFreq driver
398 *
399 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
400 * devices, -EINVAL on problems during initialization, and zero on
401 * success.
402 */
403static int __init speedstep_init(void)
404{
405 /* detect processor */
406 speedstep_processor = speedstep_detect_processor();
407 if (!speedstep_processor) {
408 dprintk("Intel(R) SpeedStep(TM) capable processor "
409 "not found\n");
410 return -ENODEV;
411 }
412
413 /* detect chipset */
414 if (!speedstep_detect_chipset()) {
415 dprintk("Intel(R) SpeedStep(TM) for this chipset not "
416 "(yet) available.\n");
417 return -ENODEV;
418 }
419
420 /* activate speedstep support */
421 if (speedstep_activate()) {
422 pci_dev_put(speedstep_chipset_dev);
423 return -EINVAL;
424 }
425
426 if (speedstep_find_register())
427 return -ENODEV;
428
429 return cpufreq_register_driver(&speedstep_driver);
430}
431
432
433/**
434 * speedstep_exit - unregisters SpeedStep support
435 *
436 * Unregisters SpeedStep support.
437 */
438static void __exit speedstep_exit(void)
439{
440 pci_dev_put(speedstep_chipset_dev);
441 cpufreq_unregister_driver(&speedstep_driver);
442}
443
444
445MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
446 "Dominik Brodowski <linux@brodo.de>");
447MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
448 "with ICH-M southbridges.");
449MODULE_LICENSE("GPL");
450
451module_init(speedstep_init);
452module_exit(speedstep_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
deleted file mode 100644
index a94ec6be69fa..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ /dev/null
@@ -1,481 +0,0 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/init.h>
15#include <linux/cpufreq.h>
16
17#include <asm/msr.h>
18#include <asm/tsc.h>
19#include "speedstep-lib.h"
20
21#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
22 "speedstep-lib", msg)
23
24#define PFX "speedstep-lib: "
25
26#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
27static int relaxed_check;
28#else
29#define relaxed_check 0
30#endif
31
32/*********************************************************************
33 * GET PROCESSOR CORE SPEED IN KHZ *
34 *********************************************************************/
35
36static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
37{
38 /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
39 struct {
40 unsigned int ratio; /* Frequency Multiplier (x10) */
41 u8 bitmap; /* power on configuration bits
42 [27, 25:22] (in MSR 0x2a) */
43 } msr_decode_mult[] = {
44 { 30, 0x01 },
45 { 35, 0x05 },
46 { 40, 0x02 },
47 { 45, 0x06 },
48 { 50, 0x00 },
49 { 55, 0x04 },
50 { 60, 0x0b },
51 { 65, 0x0f },
52 { 70, 0x09 },
53 { 75, 0x0d },
54 { 80, 0x0a },
55 { 85, 0x26 },
56 { 90, 0x20 },
57 { 100, 0x2b },
58 { 0, 0xff } /* error or unknown value */
59 };
60
61 /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
62 struct {
63 unsigned int value; /* Front Side Bus speed in MHz */
64 u8 bitmap; /* power on configuration bits [18: 19]
65 (in MSR 0x2a) */
66 } msr_decode_fsb[] = {
67 { 66, 0x0 },
68 { 100, 0x2 },
69 { 133, 0x1 },
70 { 0, 0xff}
71 };
72
73 u32 msr_lo, msr_tmp;
74 int i = 0, j = 0;
75
76 /* read MSR 0x2a - we only need the low 32 bits */
77 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
78 dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
79 msr_tmp = msr_lo;
80
81 /* decode the FSB */
82 msr_tmp &= 0x00c0000;
83 msr_tmp >>= 18;
84 while (msr_tmp != msr_decode_fsb[i].bitmap) {
85 if (msr_decode_fsb[i].bitmap == 0xff)
86 return 0;
87 i++;
88 }
89
90 /* decode the multiplier */
91 if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) {
92 dprintk("workaround for early PIIIs\n");
93 msr_lo &= 0x03c00000;
94 } else
95 msr_lo &= 0x0bc00000;
96 msr_lo >>= 22;
97 while (msr_lo != msr_decode_mult[j].bitmap) {
98 if (msr_decode_mult[j].bitmap == 0xff)
99 return 0;
100 j++;
101 }
102
103 dprintk("speed is %u\n",
104 (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
105
106 return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100;
107}
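
The FSB and multiplier table walks above boil down to one multiplication: speed in kHz = (multiplier x 10) x FSB in MHz x 100. A standalone sketch using the same tables; the field values passed in (FSB bits 0x2, multiplier bits 0x0b) are chosen only for illustration and decode to 100 MHz x 6.0, i.e. 600000 kHz:

#include <stdio.h>

struct mult_entry { unsigned int ratio; unsigned char bitmap; };
struct fsb_entry  { unsigned int value; unsigned char bitmap; };

static const struct mult_entry msr_decode_mult[] = {
	{ 30, 0x01 }, { 35, 0x05 }, { 40, 0x02 }, { 45, 0x06 },
	{ 50, 0x00 }, { 55, 0x04 }, { 60, 0x0b }, { 65, 0x0f },
	{ 70, 0x09 }, { 75, 0x0d }, { 80, 0x0a }, { 85, 0x26 },
	{ 90, 0x20 }, { 100, 0x2b }, { 0, 0xff }
};

static const struct fsb_entry msr_decode_fsb[] = {
	{ 66, 0x0 }, { 100, 0x2 }, { 133, 0x1 }, { 0, 0xff }
};

/* kHz = (multiplier * 10) * (FSB in MHz) * 100 */
static unsigned int decode_khz(unsigned char fsb_bits, unsigned char mult_bits)
{
	unsigned int fsb = 0, ratio = 0;
	int i;

	for (i = 0; msr_decode_fsb[i].bitmap != 0xff; i++)
		if (msr_decode_fsb[i].bitmap == fsb_bits)
			fsb = msr_decode_fsb[i].value;
	for (i = 0; msr_decode_mult[i].bitmap != 0xff; i++)
		if (msr_decode_mult[i].bitmap == mult_bits)
			ratio = msr_decode_mult[i].ratio;

	return ratio * fsb * 100;	/* 60 * 100 * 100 = 600000 kHz */
}

int main(void)
{
	printf("%u kHz\n", decode_khz(0x2, 0x0b));
	return 0;
}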
108
109
110static unsigned int pentiumM_get_frequency(void)
111{
112 u32 msr_lo, msr_tmp;
113
114 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
115 dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
116
117 /* see table B-2 of 24547212.pdf */
118 if (msr_lo & 0x00040000) {
119 printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n",
120 msr_lo, msr_tmp);
121 return 0;
122 }
123
124 msr_tmp = (msr_lo >> 22) & 0x1f;
125 dprintk("bits 22-26 are 0x%x, speed is %u\n",
126 msr_tmp, (msr_tmp * 100 * 1000));
127
128 return msr_tmp * 100 * 1000;
129}
130
131static unsigned int pentium_core_get_frequency(void)
132{
133 u32 fsb = 0;
134 u32 msr_lo, msr_tmp;
135 int ret;
136
137 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
138 /* see table B-2 of 25366920.pdf */
139 switch (msr_lo & 0x07) {
140 case 5:
141 fsb = 100000;
142 break;
143 case 1:
144 fsb = 133333;
145 break;
146 case 3:
147 fsb = 166667;
148 break;
149 case 2:
150 fsb = 200000;
151 break;
152 case 0:
153 fsb = 266667;
154 break;
155 case 4:
156 fsb = 333333;
157 break;
158 default:
159		printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value\n");
160 }
161
162 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
163 dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n",
164 msr_lo, msr_tmp);
165
166 msr_tmp = (msr_lo >> 22) & 0x1f;
167 dprintk("bits 22-26 are 0x%x, speed is %u\n",
168 msr_tmp, (msr_tmp * fsb));
169
170 ret = (msr_tmp * fsb);
171 return ret;
172}
173
174
175static unsigned int pentium4_get_frequency(void)
176{
177 struct cpuinfo_x86 *c = &boot_cpu_data;
178 u32 msr_lo, msr_hi, mult;
179 unsigned int fsb = 0;
180 unsigned int ret;
181 u8 fsb_code;
182
183 /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency
184 * to System Bus Frequency Ratio Field in the Processor Frequency
185 * Configuration Register of the MSR. Therefore the current
186 * frequency cannot be calculated and has to be measured.
187 */
188 if (c->x86_model < 2)
189 return cpu_khz;
190
191 rdmsr(0x2c, msr_lo, msr_hi);
192
193 dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
194
195 /* decode the FSB: see IA-32 Intel (C) Architecture Software
196	 * Developer's Manual, Volume 3: System Programming Guide,
197 * revision #12 in Table B-1: MSRs in the Pentium 4 and
198 * Intel Xeon Processors, on page B-4 and B-5.
199 */
200 fsb_code = (msr_lo >> 16) & 0x7;
201 switch (fsb_code) {
202 case 0:
203 fsb = 100 * 1000;
204 break;
205 case 1:
206 fsb = 13333 * 10;
207 break;
208 case 2:
209 fsb = 200 * 1000;
210 break;
211 }
212
213 if (!fsb)
214 printk(KERN_DEBUG PFX "couldn't detect FSB speed. "
215 "Please send an e-mail to <linux@brodo.de>\n");
216
217 /* Multiplier. */
218 mult = msr_lo >> 24;
219
220 dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n",
221 fsb, mult, (fsb * mult));
222
223 ret = (fsb * mult);
224 return ret;
225}
226
227
228/* Warning: may get called from smp_call_function_single. */
229unsigned int speedstep_get_frequency(enum speedstep_processor processor)
230{
231 switch (processor) {
232 case SPEEDSTEP_CPU_PCORE:
233 return pentium_core_get_frequency();
234 case SPEEDSTEP_CPU_PM:
235 return pentiumM_get_frequency();
236 case SPEEDSTEP_CPU_P4D:
237 case SPEEDSTEP_CPU_P4M:
238 return pentium4_get_frequency();
239 case SPEEDSTEP_CPU_PIII_T:
240 case SPEEDSTEP_CPU_PIII_C:
241 case SPEEDSTEP_CPU_PIII_C_EARLY:
242 return pentium3_get_frequency(processor);
243 default:
244 return 0;
245 };
246 return 0;
247}
248EXPORT_SYMBOL_GPL(speedstep_get_frequency);
249
250
251/*********************************************************************
252 * DETECT SPEEDSTEP-CAPABLE PROCESSOR *
253 *********************************************************************/
254
255unsigned int speedstep_detect_processor(void)
256{
257 struct cpuinfo_x86 *c = &cpu_data(0);
258 u32 ebx, msr_lo, msr_hi;
259
260 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
261
262 if ((c->x86_vendor != X86_VENDOR_INTEL) ||
263 ((c->x86 != 6) && (c->x86 != 0xF)))
264 return 0;
265
266 if (c->x86 == 0xF) {
267 /* Intel Mobile Pentium 4-M
268 * or Intel Mobile Pentium 4 with 533 MHz FSB */
269 if (c->x86_model != 2)
270 return 0;
271
272 ebx = cpuid_ebx(0x00000001);
273 ebx &= 0x000000FF;
274
275 dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
276
277 switch (c->x86_mask) {
278 case 4:
279 /*
280 * B-stepping [M-P4-M]
281 * sample has ebx = 0x0f, production has 0x0e.
282 */
283 if ((ebx == 0x0e) || (ebx == 0x0f))
284 return SPEEDSTEP_CPU_P4M;
285 break;
286 case 7:
287 /*
288 * C-stepping [M-P4-M]
289 * needs to have ebx=0x0e, else it's a celeron:
290 * cf. 25130917.pdf / page 7, footnote 5 even
291 * though 25072120.pdf / page 7 doesn't say
292 * samples are only of B-stepping...
293 */
294 if (ebx == 0x0e)
295 return SPEEDSTEP_CPU_P4M;
296 break;
297 case 9:
298 /*
299 * D-stepping [M-P4-M or M-P4/533]
300 *
301 * this is totally strange: CPUID 0x0F29 is
302 * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
303 * The latter need to be sorted out as they don't
304 * support speedstep.
305 * Celerons with CPUID 0x0F29 may have either
306 * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
307 * specific.
308 * M-P4-Ms may have either ebx=0xe or 0xf [see above]
309 * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
310 * also, M-P4M HTs have ebx=0x8, too
311 * For now, they are distinguished by the model_id
312 * string
313 */
314 if ((ebx == 0x0e) ||
315 (strstr(c->x86_model_id,
316 "Mobile Intel(R) Pentium(R) 4") != NULL))
317 return SPEEDSTEP_CPU_P4M;
318 break;
319 default:
320 break;
321 }
322 return 0;
323 }
324
325 switch (c->x86_model) {
326 case 0x0B: /* Intel PIII [Tualatin] */
327 /* cpuid_ebx(1) is 0x04 for desktop PIII,
328 * 0x06 for mobile PIII-M */
329 ebx = cpuid_ebx(0x00000001);
330 dprintk("ebx is %x\n", ebx);
331
332 ebx &= 0x000000FF;
333
334 if (ebx != 0x06)
335 return 0;
336
337 /* So far all PIII-M processors support SpeedStep. See
338 * Intel's 24540640.pdf of June 2003
339 */
340 return SPEEDSTEP_CPU_PIII_T;
341
342 case 0x08: /* Intel PIII [Coppermine] */
343
344 /* all mobile PIII Coppermines have FSB 100 MHz
345 * ==> sort out a few desktop PIIIs. */
346 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
347 dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n",
348 msr_lo, msr_hi);
349 msr_lo &= 0x00c0000;
350 if (msr_lo != 0x0080000)
351 return 0;
352
353 /*
354		 * If the processor is a mobile version, the
355		 * platform ID has bit 50 set; it has
356		 * SpeedStep technology if either
357		 * bit 56 or 57 is set.
358 */
359 rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
360		dprintk("Coppermine: MSR_IA32_PLATFORM_ID is 0x%x, 0x%x\n",
361 msr_lo, msr_hi);
362 if ((msr_hi & (1<<18)) &&
363 (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
364 if (c->x86_mask == 0x01) {
365 dprintk("early PIII version\n");
366 return SPEEDSTEP_CPU_PIII_C_EARLY;
367 } else
368 return SPEEDSTEP_CPU_PIII_C;
369 }
370
371 default:
372 return 0;
373 }
374}
375EXPORT_SYMBOL_GPL(speedstep_detect_processor);
376
377
378/*********************************************************************
379 * DETECT SPEEDSTEP SPEEDS *
380 *********************************************************************/
381
382unsigned int speedstep_get_freqs(enum speedstep_processor processor,
383 unsigned int *low_speed,
384 unsigned int *high_speed,
385 unsigned int *transition_latency,
386 void (*set_state) (unsigned int state))
387{
388 unsigned int prev_speed;
389 unsigned int ret = 0;
390 unsigned long flags;
391 struct timeval tv1, tv2;
392
393 if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
394 return -EINVAL;
395
396 dprintk("trying to determine both speeds\n");
397
398 /* get current speed */
399 prev_speed = speedstep_get_frequency(processor);
400 if (!prev_speed)
401 return -EIO;
402
403 dprintk("previous speed is %u\n", prev_speed);
404
405 local_irq_save(flags);
406
407 /* switch to low state */
408 set_state(SPEEDSTEP_LOW);
409 *low_speed = speedstep_get_frequency(processor);
410 if (!*low_speed) {
411 ret = -EIO;
412 goto out;
413 }
414
415 dprintk("low speed is %u\n", *low_speed);
416
417 /* start latency measurement */
418 if (transition_latency)
419 do_gettimeofday(&tv1);
420
421 /* switch to high state */
422 set_state(SPEEDSTEP_HIGH);
423
424 /* end latency measurement */
425 if (transition_latency)
426 do_gettimeofday(&tv2);
427
428 *high_speed = speedstep_get_frequency(processor);
429 if (!*high_speed) {
430 ret = -EIO;
431 goto out;
432 }
433
434 dprintk("high speed is %u\n", *high_speed);
435
436 if (*low_speed == *high_speed) {
437 ret = -ENODEV;
438 goto out;
439 }
440
441 /* switch to previous state, if necessary */
442 if (*high_speed != prev_speed)
443 set_state(SPEEDSTEP_LOW);
444
445 if (transition_latency) {
446 *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
447 tv2.tv_usec - tv1.tv_usec;
448 dprintk("transition latency is %u uSec\n", *transition_latency);
449
450 /* convert uSec to nSec and add 20% for safety reasons */
451 *transition_latency *= 1200;
452
453 /* check if the latency measurement is too high or too low
454 * and set it to a safe value (500uSec) in that case
455 */
456 if (*transition_latency > 10000000 ||
457 *transition_latency < 50000) {
458 printk(KERN_WARNING PFX "frequency transition "
459 "measured seems out of range (%u "
460				"nSec), falling back to a safe one of "
461 "%u nSec.\n",
462 *transition_latency, 500000);
463 *transition_latency = 500000;
464 }
465 }
466
467out:
468 local_irq_restore(flags);
469 return ret;
470}
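
The latency handling at the end of speedstep_get_freqs() is unit conversion plus a safety margin: the measured microsecond delta is multiplied by 1200 (x1000 for usec to nsec, x1.2 for the 20% margin) and replaced by 500000 nsec whenever it falls outside the 50000..10000000 nsec window. A standalone sketch of that arithmetic:

#include <stdio.h>

/* usec -> nsec with a 20% margin, falling back to 500000 nsec when implausible */
static unsigned int transition_latency_ns(unsigned int measured_us)
{
	unsigned int ns = measured_us * 1200;	/* *1000 (to nsec) * 1.2 (margin) */

	if (ns > 10000000 || ns < 50000)
		ns = 500000;
	return ns;
}

int main(void)
{
	printf("%u\n", transition_latency_ns(300));	/* 360000 nsec */
	printf("%u\n", transition_latency_ns(20));	/* 24000 -> clamped to 500000 */
	return 0;
}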
471EXPORT_SYMBOL_GPL(speedstep_get_freqs);
472
473#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
474module_param(relaxed_check, int, 0444);
475MODULE_PARM_DESC(relaxed_check,
476 "Don't do all checks for speedstep capability.");
477#endif
478
479MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
480MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
481MODULE_LICENSE("GPL");
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
deleted file mode 100644
index 70d9cea1219d..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
3 *
4 * Licensed under the terms of the GNU GPL License version 2.
5 *
6 * Library for common functions for Intel SpeedStep v.1 and v.2 support
7 *
8 * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
9 */
10
11
12
13/* processors */
14enum speedstep_processor {
15 SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
16 SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
17 SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
18 SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
19/* the following processors are not speedstep-capable and are not auto-detected
20 * in speedstep_detect_processor(). However, their speed can be detected using
21 * the speedstep_get_frequency() call. */
22 SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
23 SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
24 SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
25};
26
27/* speedstep states -- only two of them */
28
29#define SPEEDSTEP_HIGH 0x00000000
30#define SPEEDSTEP_LOW 0x00000001
31
32
33/* detect a speedstep-capable processor */
34extern enum speedstep_processor speedstep_detect_processor(void);
35
36/* detect the current speed (in khz) of the processor */
37extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
38
39
40/* detect the low and high speeds of the processor. The callback
41 * set_state's first argument is either SPEEDSTEP_HIGH or
42 * SPEEDSTEP_LOW; the second argument is zero so that no
43 * cpufreq_notify_transition calls are initiated.
44 */
45extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
46 unsigned int *low_speed,
47 unsigned int *high_speed,
48 unsigned int *transition_latency,
49 void (*set_state) (unsigned int state));
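
A minimal sketch of how a driver is expected to use this header, mirroring the speedstep-ich and speedstep-smi callers above; my_set_state(), my_probe() and the three variables are hypothetical placeholders, and the snippet assumes the usual driver includes (this header plus <linux/errno.h>):

/* hypothetical caller, not part of the library */
static unsigned int low_khz, high_khz, latency_ns;

static void my_set_state(unsigned int state)	/* SPEEDSTEP_LOW or SPEEDSTEP_HIGH */
{
	/* driver-specific switch: port I/O (speedstep-ich) or SMI call (speedstep-smi) */
}

static int my_probe(void)
{
	enum speedstep_processor proc = speedstep_detect_processor();

	if (!proc)
		return -ENODEV;

	/* switches through both states to measure them, so call it once, early */
	return speedstep_get_freqs(proc, &low_khz, &high_khz,
				   &latency_ns, &my_set_state);
}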
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
deleted file mode 100644
index 91bc25b67bc1..000000000000
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ /dev/null
@@ -1,467 +0,0 @@
1/*
2 * Intel SpeedStep SMI driver.
3 *
4 * (C) 2003 Hiroshi Miura <miura@da-cha.org>
5 *
6 * Licensed under the terms of the GNU GPL License version 2.
7 *
8 */
9
10
11/*********************************************************************
12 * SPEEDSTEP - DEFINITIONS *
13 *********************************************************************/
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <asm/ist.h>
23
24#include "speedstep-lib.h"
25
26/* speedstep system management interface port/command.
27 *
28 * These parameters are obtained from the IST-SMI BIOS call.
29 * If the user supplies them, they are used instead.
30 *
31 */
32static int smi_port;
33static int smi_cmd;
34static unsigned int smi_sig;
35
36/* info about the processor */
37static enum speedstep_processor speedstep_processor;
38
39/*
40 * There are only two frequency states for each processor. Values
41 * are in kHz for the time being.
42 */
43static struct cpufreq_frequency_table speedstep_freqs[] = {
44 {SPEEDSTEP_HIGH, 0},
45 {SPEEDSTEP_LOW, 0},
46 {0, CPUFREQ_TABLE_END},
47};
48
49#define GET_SPEEDSTEP_OWNER 0
50#define GET_SPEEDSTEP_STATE 1
51#define SET_SPEEDSTEP_STATE 2
52#define GET_SPEEDSTEP_FREQS 4
53
54/* how often the SMI call should be retried if it fails, e.g. because
55 * of DMA activity going on */
56#define SMI_TRIES 5
57
58#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
59 "speedstep-smi", msg)
60
61/**
62 * speedstep_smi_ownership - obtain ownership of the SMI interface from the BIOS
63 */
64static int speedstep_smi_ownership(void)
65{
66 u32 command, result, magic, dummy;
67 u32 function = GET_SPEEDSTEP_OWNER;
68 unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
69
70 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
71 magic = virt_to_phys(magic_data);
72
73 dprintk("trying to obtain ownership with command %x at port %x\n",
74 command, smi_port);
75
76 __asm__ __volatile__(
77 "push %%ebp\n"
78 "out %%al, (%%dx)\n"
79 "pop %%ebp\n"
80 : "=D" (result),
81 "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
82 "=S" (dummy)
83 : "a" (command), "b" (function), "c" (0), "d" (smi_port),
84 "D" (0), "S" (magic)
85 : "memory"
86 );
87
88 dprintk("result is %x\n", result);
89
90 return result;
91}
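
The command word loaded into EAX above is just the SMI signature with its low byte replaced by the command byte. With the signature 0x47534943 used elsewhere in this file and Intel's default command 0x82 (see the smi_cmd parameter description at the end of the file), that gives (0x47534943 & 0xffffff00) | 0x82 = 0x47534982. A one-line standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t smi_sig = 0x47534943, smi_cmd = 0x82;
	uint32_t command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

	printf("command = 0x%x\n", command);	/* 0x47534982 */
	return 0;
}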
92
93/**
94 * speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
95 * @low: the low frequency value is placed here
96 * @high: the high frequency value is placed here
97 *
98 * Only available on later SpeedStep-enabled systems, returns false results or
99 * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
100 * shows that the latter occurs if !(ist_info.event & 0xFFFF).
101 */
102static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
103{
104 u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
105 u32 state = 0;
106 u32 function = GET_SPEEDSTEP_FREQS;
107
108 if (!(ist_info.event & 0xFFFF)) {
109 dprintk("bug #1422 -- can't read freqs from BIOS\n");
110 return -ENODEV;
111 }
112
113 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
114
115 dprintk("trying to determine frequencies with command %x at port %x\n",
116 command, smi_port);
117
118 __asm__ __volatile__(
119 "push %%ebp\n"
120 "out %%al, (%%dx)\n"
121 "pop %%ebp"
122 : "=a" (result),
123 "=b" (high_mhz),
124 "=c" (low_mhz),
125 "=d" (state), "=D" (edi), "=S" (dummy)
126 : "a" (command),
127 "b" (function),
128 "c" (state),
129 "d" (smi_port), "S" (0), "D" (0)
130 );
131
132 dprintk("result %x, low_freq %u, high_freq %u\n",
133 result, low_mhz, high_mhz);
134
135 /* abort if results are obviously incorrect... */
136 if ((high_mhz + low_mhz) < 600)
137 return -EINVAL;
138
139 *high = high_mhz * 1000;
140 *low = low_mhz * 1000;
141
142 return result;
143}
144
145/**
146 * speedstep_get_state - get the current SpeedStep state
147 * Returns the current state: SPEEDSTEP_LOW or SPEEDSTEP_HIGH.
148 *
149 */
150static int speedstep_get_state(void)
151{
152 u32 function = GET_SPEEDSTEP_STATE;
153 u32 result, state, edi, command, dummy;
154
155 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
156
157 dprintk("trying to determine current setting with command %x "
158 "at port %x\n", command, smi_port);
159
160 __asm__ __volatile__(
161 "push %%ebp\n"
162 "out %%al, (%%dx)\n"
163 "pop %%ebp\n"
164 : "=a" (result),
165 "=b" (state), "=D" (edi),
166 "=c" (dummy), "=d" (dummy), "=S" (dummy)
167 : "a" (command), "b" (function), "c" (0),
168 "d" (smi_port), "S" (0), "D" (0)
169 );
170
171 dprintk("state is %x, result is %x\n", state, result);
172
173 return state & 1;
174}
175
176
177/**
178 * speedstep_set_state - set the SpeedStep state
179 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
180 *
181 */
182static void speedstep_set_state(unsigned int state)
183{
184 unsigned int result = 0, command, new_state, dummy;
185 unsigned long flags;
186 unsigned int function = SET_SPEEDSTEP_STATE;
187 unsigned int retry = 0;
188
189 if (state > 0x1)
190 return;
191
192 /* Disable IRQs */
193 local_irq_save(flags);
194
195 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
196
197 dprintk("trying to set frequency to state %u "
198 "with command %x at port %x\n",
199 state, command, smi_port);
200
201 do {
202 if (retry) {
203 dprintk("retry %u, previous result %u, waiting...\n",
204 retry, result);
205 mdelay(retry * 50);
206 }
207 retry++;
208 __asm__ __volatile__(
209 "push %%ebp\n"
210 "out %%al, (%%dx)\n"
211 "pop %%ebp"
212 : "=b" (new_state), "=D" (result),
213 "=c" (dummy), "=a" (dummy),
214 "=d" (dummy), "=S" (dummy)
215 : "a" (command), "b" (function), "c" (state),
216 "d" (smi_port), "S" (0), "D" (0)
217 );
218 } while ((new_state != state) && (retry <= SMI_TRIES));
219
220 /* enable IRQs */
221 local_irq_restore(flags);
222
223 if (new_state == state)
224 dprintk("change to %u MHz succeeded after %u tries "
225 "with result %u\n",
226 (speedstep_freqs[new_state].frequency / 1000),
227 retry, result);
228 else
229 printk(KERN_ERR "cpufreq: change to state %u "
230 "failed with new_state %u and result %u\n",
231 state, new_state, result);
232
233 return;
234}
235
236
237/**
238 * speedstep_target - set a new CPUFreq policy
239 * @policy: new policy
240 * @target_freq: the target frequency
241 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
242 *
243 * Sets a new CPUFreq policy/freq.
244 */
245static int speedstep_target(struct cpufreq_policy *policy,
246 unsigned int target_freq, unsigned int relation)
247{
248 unsigned int newstate = 0;
249 struct cpufreq_freqs freqs;
250
251 if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
252 target_freq, relation, &newstate))
253 return -EINVAL;
254
255 freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
256 freqs.new = speedstep_freqs[newstate].frequency;
257	freqs.cpu = 0; /* this is a UP-only driver */
258
259 if (freqs.old == freqs.new)
260 return 0;
261
262 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
263 speedstep_set_state(newstate);
264 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
265
266 return 0;
267}
268
269
270/**
271 * speedstep_verify - verifies a new CPUFreq policy
272 * @policy: new policy
273 *
274 * Limit must be within speedstep_low_freq and speedstep_high_freq, with
275 * at least one border included.
276 */
277static int speedstep_verify(struct cpufreq_policy *policy)
278{
279 return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
280}
281
282
283static int speedstep_cpu_init(struct cpufreq_policy *policy)
284{
285 int result;
286 unsigned int speed, state;
287 unsigned int *low, *high;
288
289 /* capability check */
290 if (policy->cpu != 0)
291 return -ENODEV;
292
293 result = speedstep_smi_ownership();
294 if (result) {
295		dprintk("failed to acquire ownership of the SMI interface.\n");
296 return -EINVAL;
297 }
298
299 /* detect low and high frequency */
300 low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
301 high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
302
303 result = speedstep_smi_get_freqs(low, high);
304 if (result) {
305		/* fall back to speedstep_lib.c detection mechanism:
306 * try both states out */
307 dprintk("could not detect low and high frequencies "
308 "by SMI call.\n");
309 result = speedstep_get_freqs(speedstep_processor,
310 low, high,
311 NULL,
312 &speedstep_set_state);
313
314 if (result) {
315 dprintk("could not detect two different speeds"
316 " -- aborting.\n");
317 return result;
318 } else
319 dprintk("workaround worked.\n");
320 }
321
322 /* get current speed setting */
323 state = speedstep_get_state();
324 speed = speedstep_freqs[state].frequency;
325
326 dprintk("currently at %s speed setting - %i MHz\n",
327 (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
328 ? "low" : "high",
329 (speed / 1000));
330
331 /* cpuinfo and default policy values */
332 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
333 policy->cur = speed;
334
335 result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
336 if (result)
337 return result;
338
339 cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
340
341 return 0;
342}
343
344static int speedstep_cpu_exit(struct cpufreq_policy *policy)
345{
346 cpufreq_frequency_table_put_attr(policy->cpu);
347 return 0;
348}
349
350static unsigned int speedstep_get(unsigned int cpu)
351{
352 if (cpu)
353 return -ENODEV;
354 return speedstep_get_frequency(speedstep_processor);
355}
356
357
358static int speedstep_resume(struct cpufreq_policy *policy)
359{
360 int result = speedstep_smi_ownership();
361
362 if (result)
363		dprintk("failed to re-acquire ownership of the SMI interface.\n");
364
365 return result;
366}
367
368static struct freq_attr *speedstep_attr[] = {
369 &cpufreq_freq_attr_scaling_available_freqs,
370 NULL,
371};
372
373static struct cpufreq_driver speedstep_driver = {
374 .name = "speedstep-smi",
375 .verify = speedstep_verify,
376 .target = speedstep_target,
377 .init = speedstep_cpu_init,
378 .exit = speedstep_cpu_exit,
379 .get = speedstep_get,
380 .resume = speedstep_resume,
381 .owner = THIS_MODULE,
382 .attr = speedstep_attr,
383};
384
385/**
386 * speedstep_init - initializes the SpeedStep CPUFreq driver
387 *
388 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
389 * BIOS, -EINVAL on problems during initialization, and zero on
390 * success.
391 */
392static int __init speedstep_init(void)
393{
394 speedstep_processor = speedstep_detect_processor();
395
396 switch (speedstep_processor) {
397 case SPEEDSTEP_CPU_PIII_T:
398 case SPEEDSTEP_CPU_PIII_C:
399 case SPEEDSTEP_CPU_PIII_C_EARLY:
400 break;
401 default:
402 speedstep_processor = 0;
403 }
404
405 if (!speedstep_processor) {
406 dprintk("No supported Intel CPU detected.\n");
407 return -ENODEV;
408 }
409
410 dprintk("signature:0x%.8lx, command:0x%.8lx, "
411 "event:0x%.8lx, perf_level:0x%.8lx.\n",
412 ist_info.signature, ist_info.command,
413 ist_info.event, ist_info.perf_level);
414
415	/* Error out if there is no IST-SMI BIOS and no port/command was
416	 * given as a module parameter. sig = 'ISGE' aka 'Intel Speedstep Gate E' */
417 if ((ist_info.signature != 0x47534943) && (
418 (smi_port == 0) || (smi_cmd == 0)))
419 return -ENODEV;
420
421 if (smi_sig == 1)
422 smi_sig = 0x47534943;
423 else
424 smi_sig = ist_info.signature;
425
426	/* set up smi_port from the module parameter or the BIOS */
427 if ((smi_port > 0xff) || (smi_port < 0))
428 return -EINVAL;
429 else if (smi_port == 0)
430 smi_port = ist_info.command & 0xff;
431
432 if ((smi_cmd > 0xff) || (smi_cmd < 0))
433 return -EINVAL;
434 else if (smi_cmd == 0)
435 smi_cmd = (ist_info.command >> 16) & 0xff;
436
437 return cpufreq_register_driver(&speedstep_driver);
438}
439
440
441/**
442 * speedstep_exit - unregisters SpeedStep support
443 *
444 * Unregisters SpeedStep support.
445 */
446static void __exit speedstep_exit(void)
447{
448 cpufreq_unregister_driver(&speedstep_driver);
449}
450
451module_param(smi_port, int, 0444);
452module_param(smi_cmd, int, 0444);
453module_param(smi_sig, uint, 0444);
454
455MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
456 "-- Intel's default setting is 0xb2");
457MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
458 "-- Intel's default setting is 0x82");
459MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
460 "SMI interface.");
461
462MODULE_AUTHOR("Hiroshi Miura");
463MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
464MODULE_LICENSE("GPL");
465
466module_init(speedstep_init);
467module_exit(speedstep_exit);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index df86bc8c859d..fc73a34ba8c9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,10 +29,10 @@
29 29
30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
31{ 31{
32 u64 misc_enable;
33
32 /* Unmask CPUID levels if masked: */ 34 /* Unmask CPUID levels if masked: */
33 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { 35 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
34 u64 misc_enable;
35
36 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 36 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
37 37
38 if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { 38 if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
@@ -118,8 +118,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
118 * (model 2) with the same problem. 118 * (model 2) with the same problem.
119 */ 119 */
120 if (c->x86 == 15) { 120 if (c->x86 == 15) {
121 u64 misc_enable;
122
123 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 121 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
124 122
125 if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { 123 if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
@@ -130,6 +128,19 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
130 } 128 }
131 } 129 }
132#endif 130#endif
131
132 /*
133 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
134 * clear the fast string and enhanced fast string CPU capabilities.
135 */
136 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
137 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
138 if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
139 printk(KERN_INFO "Disabled fast string operations\n");
140 setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
141 setup_clear_cpu_cap(X86_FEATURE_ERMS);
142 }
143 }
133} 144}
134 145
135#ifdef CONFIG_X86_32 146#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 167f97b5596e..bb0adad35143 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -509,6 +509,7 @@ recurse:
509out_free: 509out_free:
510 if (b) { 510 if (b) {
511 kobject_put(&b->kobj); 511 kobject_put(&b->kobj);
512 list_del(&b->miscj);
512 kfree(b); 513 kfree(b);
513 } 514 }
514 return err; 515 return err;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6f8c5e9da97f..0f034460260d 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
446 */ 446 */
447 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 447 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
448 448
449 h = lvtthmr_init;
449 /* 450 /*
450 * The initial value of thermal LVT entries on all APs always reads 451 * The initial value of thermal LVT entries on all APs always reads
451 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI 452 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
452 * sequence to them and LVT registers are reset to 0s except for 453 * sequence to them and LVT registers are reset to 0s except for
453 * the mask bits which are set to 1s when APs receive INIT IPI. 454 * the mask bits which are set to 1s when APs receive INIT IPI.
454 * Always restore the value that BIOS has programmed on AP based on 455 * If BIOS takes over the thermal interrupt and sets its interrupt
455 * BSP's info we saved since BIOS is always setting the same value 456 * delivery mode to SMI (not fixed), it restores the value that the
456 * for all threads/cores 457 * BIOS has programmed on AP based on BSP's info we saved since BIOS
458 * is always setting the same value for all threads/cores.
457 */ 459 */
458 apic_write(APIC_LVTTHMR, lvtthmr_init); 460 if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
461 apic_write(APIC_LVTTHMR, lvtthmr_init);
459 462
460 h = lvtthmr_init;
461 463
462 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { 464 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
463 printk(KERN_DEBUG 465 printk(KERN_DEBUG
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e638689279d3..3a0338b4b179 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -31,6 +31,7 @@
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/compat.h> 32#include <asm/compat.h>
33#include <asm/smp.h> 33#include <asm/smp.h>
34#include <asm/alternative.h>
34 35
35#if 0 36#if 0
36#undef wrmsrl 37#undef wrmsrl
@@ -363,12 +364,18 @@ again:
363 return new_raw_count; 364 return new_raw_count;
364} 365}
365 366
366/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
367static inline int x86_pmu_addr_offset(int index) 367static inline int x86_pmu_addr_offset(int index)
368{ 368{
369 if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) 369 int offset;
370 return index << 1; 370
371 return index; 371 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
372 alternative_io(ASM_NOP2,
373 "shll $1, %%eax",
374 X86_FEATURE_PERFCTR_CORE,
375 "=a" (offset),
376 "a" (index));
377
378 return offset;
372} 379}
373 380
374static inline unsigned int x86_pmu_config_addr(int index) 381static inline unsigned int x86_pmu_config_addr(int index)
@@ -1766,17 +1773,6 @@ static struct pmu pmu = {
1766 * callchain support 1773 * callchain support
1767 */ 1774 */
1768 1775
1769static void
1770backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1771{
1772 /* Ignore warnings */
1773}
1774
1775static void backtrace_warning(void *data, char *msg)
1776{
1777 /* Ignore warnings */
1778}
1779
1780static int backtrace_stack(void *data, char *name) 1776static int backtrace_stack(void *data, char *name)
1781{ 1777{
1782 return 0; 1778 return 0;
@@ -1790,8 +1786,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
1790} 1786}
1791 1787
1792static const struct stacktrace_ops backtrace_ops = { 1788static const struct stacktrace_ops backtrace_ops = {
1793 .warning = backtrace_warning,
1794 .warning_symbol = backtrace_warning_symbol,
1795 .stack = backtrace_stack, 1789 .stack = backtrace_stack,
1796 .address = backtrace_address, 1790 .address = backtrace_address,
1797 .walk_stack = print_context_stack_bp, 1791 .walk_stack = print_context_stack_bp,
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index cf4e369cea67..fe29c1d2219e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -96,12 +96,14 @@ static __initconst const u64 amd_hw_cache_event_ids
96 */ 96 */
97static const u64 amd_perfmon_event_map[] = 97static const u64 amd_perfmon_event_map[] =
98{ 98{
99 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, 99 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
100 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 100 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
101 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, 101 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
102 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, 102 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
103 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, 103 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
104 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, 104 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
105 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
106 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
105}; 107};
106 108
107static u64 amd_pmu_event_map(int hw_event) 109static u64 amd_pmu_event_map(int hw_event)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 447a28de6f09..41178c826c48 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -36,7 +36,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
36 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 36 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
37}; 37};
38 38
39static struct event_constraint intel_core_event_constraints[] = 39static struct event_constraint intel_core_event_constraints[] __read_mostly =
40{ 40{
41 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ 41 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
42 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ 42 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] =
47 EVENT_CONSTRAINT_END 47 EVENT_CONSTRAINT_END
48}; 48};
49 49
50static struct event_constraint intel_core2_event_constraints[] = 50static struct event_constraint intel_core2_event_constraints[] __read_mostly =
51{ 51{
52 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 52 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
53 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 53 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] =
70 EVENT_CONSTRAINT_END 70 EVENT_CONSTRAINT_END
71}; 71};
72 72
73static struct event_constraint intel_nehalem_event_constraints[] = 73static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
74{ 74{
75 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 75 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
76 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 76 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] =
86 EVENT_CONSTRAINT_END 86 EVENT_CONSTRAINT_END
87}; 87};
88 88
89static struct extra_reg intel_nehalem_extra_regs[] = 89static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
90{ 90{
91 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), 91 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
92 EVENT_EXTRA_END 92 EVENT_EXTRA_END
93}; 93};
94 94
95static struct event_constraint intel_nehalem_percore_constraints[] = 95static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
96{ 96{
97 INTEL_EVENT_CONSTRAINT(0xb7, 0), 97 INTEL_EVENT_CONSTRAINT(0xb7, 0),
98 EVENT_CONSTRAINT_END 98 EVENT_CONSTRAINT_END
99}; 99};
100 100
101static struct event_constraint intel_westmere_event_constraints[] = 101static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
102{ 102{
103 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 103 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
104 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 104 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] =
110 EVENT_CONSTRAINT_END 110 EVENT_CONSTRAINT_END
111}; 111};
112 112
113static struct event_constraint intel_snb_event_constraints[] = 113static struct event_constraint intel_snb_event_constraints[] __read_mostly =
114{ 114{
115 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 115 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
116 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 116 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] =
123 EVENT_CONSTRAINT_END 123 EVENT_CONSTRAINT_END
124}; 124};
125 125
126static struct extra_reg intel_westmere_extra_regs[] = 126static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
127{ 127{
128 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), 128 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
129 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), 129 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
130 EVENT_EXTRA_END 130 EVENT_EXTRA_END
131}; 131};
132 132
133static struct event_constraint intel_westmere_percore_constraints[] = 133static struct event_constraint intel_westmere_percore_constraints[] __read_mostly =
134{ 134{
135 INTEL_EVENT_CONSTRAINT(0xb7, 0), 135 INTEL_EVENT_CONSTRAINT(0xb7, 0),
136 INTEL_EVENT_CONSTRAINT(0xbb, 0), 136 INTEL_EVENT_CONSTRAINT(0xbb, 0),
137 EVENT_CONSTRAINT_END 137 EVENT_CONSTRAINT_END
138}; 138};
139 139
140static struct event_constraint intel_gen_event_constraints[] = 140static struct event_constraint intel_gen_event_constraints[] __read_mostly =
141{ 141{
142 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 142 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
143 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 143 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -1440,6 +1440,11 @@ static __init int intel_pmu_init(void)
1440 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 1440 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1441 x86_pmu.extra_regs = intel_nehalem_extra_regs; 1441 x86_pmu.extra_regs = intel_nehalem_extra_regs;
1442 1442
1443 /* UOPS_ISSUED.STALLED_CYCLES */
1444 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1445 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1446 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1447
1443 if (ebx & 0x40) { 1448 if (ebx & 0x40) {
1444 /* 1449 /*
1445 * Erratum AAJ80 detected, we work it around by using 1450 * Erratum AAJ80 detected, we work it around by using
@@ -1480,6 +1485,12 @@ static __init int intel_pmu_init(void)
1480 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 1485 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1481 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 1486 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
1482 x86_pmu.extra_regs = intel_westmere_extra_regs; 1487 x86_pmu.extra_regs = intel_westmere_extra_regs;
1488
1489 /* UOPS_ISSUED.STALLED_CYCLES */
1490 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1491 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1492 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1493
1483 pr_cont("Westmere events, "); 1494 pr_cont("Westmere events, ");
1484 break; 1495 break;
1485 1496
@@ -1491,6 +1502,12 @@ static __init int intel_pmu_init(void)
1491 1502
1492 x86_pmu.event_constraints = intel_snb_event_constraints; 1503 x86_pmu.event_constraints = intel_snb_event_constraints;
1493 x86_pmu.pebs_constraints = intel_snb_pebs_events; 1504 x86_pmu.pebs_constraints = intel_snb_pebs_events;
1505
1506 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
1507 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1508 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
1509 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
1510
1494 pr_cont("SandyBridge events, "); 1511 pr_cont("SandyBridge events, ");
1495 break; 1512 break;
1496 1513
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e93fcd55fae1..ead584fb6a7d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = {
468 .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), 468 .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
469 .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, 469 .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
470 .escr_emask = 470 .escr_emask =
471 P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), 471 P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
472 .cntr = { {12, 13, 16}, {14, 15, 17} }, 472 .cntr = { {12, 13, 16}, {14, 15, 17} },
473 }, 473 },
474 [P4_EVENT_X87_ASSIST] = { 474 [P4_EVENT_X87_ASSIST] = {
@@ -912,8 +912,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
912 int idx, handled = 0; 912 int idx, handled = 0;
913 u64 val; 913 u64 val;
914 914
915 data.addr = 0; 915 perf_sample_data_init(&data, 0);
916 data.raw = NULL;
917 916
918 cpuc = &__get_cpu_var(cpu_hw_events); 917 cpuc = &__get_cpu_var(cpu_hw_events);
919 918
@@ -1197,7 +1196,7 @@ static __init int p4_pmu_init(void)
1197{ 1196{
1198 unsigned int low, high; 1197 unsigned int low, high;
1199 1198
1200 /* If we get stripped -- indexig fails */ 1199 /* If we get stripped -- indexing fails */
1201 BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); 1200 BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
1202 1201
1203 rdmsr(MSR_IA32_MISC_ENABLE, low, high); 1202 rdmsr(MSR_IA32_MISC_ENABLE, low, high);
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index e2a3f0606da4..f478ff6877ef 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -135,20 +135,6 @@ print_context_stack_bp(struct thread_info *tinfo,
135} 135}
136EXPORT_SYMBOL_GPL(print_context_stack_bp); 136EXPORT_SYMBOL_GPL(print_context_stack_bp);
137 137
138
139static void
140print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
141{
142 printk(data);
143 print_symbol(msg, symbol);
144 printk("\n");
145}
146
147static void print_trace_warning(void *data, char *msg)
148{
149 printk("%s%s\n", (char *)data, msg);
150}
151
152static int print_trace_stack(void *data, char *name) 138static int print_trace_stack(void *data, char *name)
153{ 139{
154 printk("%s <%s> ", (char *)data, name); 140 printk("%s <%s> ", (char *)data, name);
@@ -166,8 +152,6 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
166} 152}
167 153
168static const struct stacktrace_ops print_trace_ops = { 154static const struct stacktrace_ops print_trace_ops = {
169 .warning = print_trace_warning,
170 .warning_symbol = print_trace_warning_symbol,
171 .stack = print_trace_stack, 155 .stack = print_trace_stack,
172 .address = print_trace_address, 156 .address = print_trace_address,
173 .walk_stack = print_context_stack, 157 .walk_stack = print_context_stack,
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c969fd9d1566..f1a6244d7d93 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1183 struct pt_regs *regs) 1183 struct pt_regs *regs)
1184{ 1184{
1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1186 unsigned long flags;
1186 1187
1187 /* This is possible if op is under delayed unoptimizing */ 1188 /* This is possible if op is under delayed unoptimizing */
1188 if (kprobe_disabled(&op->kp)) 1189 if (kprobe_disabled(&op->kp))
1189 return; 1190 return;
1190 1191
1191 preempt_disable(); 1192 local_irq_save(flags);
1192 if (kprobe_running()) { 1193 if (kprobe_running()) {
1193 kprobes_inc_nmissed_count(&op->kp); 1194 kprobes_inc_nmissed_count(&op->kp);
1194 } else { 1195 } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1207 opt_pre_handler(&op->kp, regs); 1208 opt_pre_handler(&op->kp, regs);
1208 __this_cpu_write(current_kprobe, NULL); 1209 __this_cpu_write(current_kprobe, NULL);
1209 } 1210 }
1210 preempt_enable_no_resched(); 1211 local_irq_restore(flags);
1211} 1212}
1212 1213
1213static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) 1214static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index ab23f1ad4bf1..52f256f2cc81 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,7 @@
24#include <linux/bug.h> 24#include <linux/bug.h>
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/gfp.h> 26#include <linux/gfp.h>
27#include <linux/jump_label.h>
27 28
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/page.h> 30#include <asm/page.h>
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 55d745ec1181..35ccf75696eb 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -50,20 +50,14 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
50 struct iommu_table_entry *finish) 50 struct iommu_table_entry *finish)
51{ 51{
52 struct iommu_table_entry *p, *q, *x; 52 struct iommu_table_entry *p, *q, *x;
53 char sym_p[KSYM_SYMBOL_LEN];
54 char sym_q[KSYM_SYMBOL_LEN];
55 53
56 /* Simple cyclic dependency checker. */ 54 /* Simple cyclic dependency checker. */
57 for (p = start; p < finish; p++) { 55 for (p = start; p < finish; p++) {
58 q = find_dependents_of(start, finish, p); 56 q = find_dependents_of(start, finish, p);
59 x = find_dependents_of(start, finish, q); 57 x = find_dependents_of(start, finish, q);
60 if (p == x) { 58 if (p == x) {
61 sprint_symbol(sym_p, (unsigned long)p->detect); 59 printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
62 sprint_symbol(sym_q, (unsigned long)q->detect); 60 p->detect, q->detect);
63
64 printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \
65 " on %s and vice-versa. BREAKING IT.\n",
66 sym_p, sym_q);
67 /* Heavy handed way..*/ 61 /* Heavy handed way..*/
68 x->depend = 0; 62 x->depend = 0;
69 } 63 }
@@ -72,12 +66,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
72 for (p = start; p < finish; p++) { 66 for (p = start; p < finish; p++) {
73 q = find_dependents_of(p, finish, p); 67 q = find_dependents_of(p, finish, p);
74 if (q && q > p) { 68 if (q && q > p) {
75 sprint_symbol(sym_p, (unsigned long)p->detect); 69 printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
76 sprint_symbol(sym_q, (unsigned long)q->detect); 70 p->detect, q->detect);
77
78 printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\
79 "should be called before %s!\n",
80 sym_p, sym_q);
81 } 71 }
82 } 72 }
83} 73}
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 6515733a289d..55d9bc03f696 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -9,15 +9,6 @@
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10#include <asm/stacktrace.h> 10#include <asm/stacktrace.h>
11 11
12static void save_stack_warning(void *data, char *msg)
13{
14}
15
16static void
17save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
18{
19}
20
21static int save_stack_stack(void *data, char *name) 12static int save_stack_stack(void *data, char *name)
22{ 13{
23 return 0; 14 return 0;
@@ -53,16 +44,12 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
53} 44}
54 45
55static const struct stacktrace_ops save_stack_ops = { 46static const struct stacktrace_ops save_stack_ops = {
56 .warning = save_stack_warning,
57 .warning_symbol = save_stack_warning_symbol,
58 .stack = save_stack_stack, 47 .stack = save_stack_stack,
59 .address = save_stack_address, 48 .address = save_stack_address,
60 .walk_stack = print_context_stack, 49 .walk_stack = print_context_stack,
61}; 50};
62 51
63static const struct stacktrace_ops save_stack_ops_nosched = { 52static const struct stacktrace_ops save_stack_ops_nosched = {
64 .warning = save_stack_warning,
65 .warning_symbol = save_stack_warning_symbol,
66 .stack = save_stack_stack, 53 .stack = save_stack_stack,
67 .address = save_stack_address_nosched, 54 .address = save_stack_address_nosched,
68 .walk_stack = print_context_stack, 55 .walk_stack = print_context_stack,
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c11514e9128b..75ef4b18e9b7 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
61 .banner = default_banner, 61 .banner = default_banner,
62 }, 62 },
63 63
64 .mapping = {
65 .pagetable_reserve = native_pagetable_reserve,
66 },
67
64 .paging = { 68 .paging = {
65 .pagetable_setup_start = native_pagetable_setup_start, 69 .pagetable_setup_start = native_pagetable_setup_start,
66 .pagetable_setup_done = native_pagetable_setup_done, 70 .pagetable_setup_done = native_pagetable_setup_done,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cd608973ce5..395bf0114aad 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -7,7 +7,7 @@
7 * kernel and insert a module (lg.ko) which allows us to run other Linux 7 * kernel and insert a module (lg.ko) which allows us to run other Linux
8 * kernels the same way we'd run processes. We call the first kernel the Host, 8 * kernels the same way we'd run processes. We call the first kernel the Host,
9 * and the others the Guests. The program which sets up and configures Guests 9 * and the others the Guests. The program which sets up and configures Guests
10 * (such as the example in Documentation/lguest/lguest.c) is called the 10 * (such as the example in Documentation/virtual/lguest/lguest.c) is called the
11 * Launcher. 11 * Launcher.
12 * 12 *
13 * Secondly, we only run specially modified Guests, not normal kernels: setting 13 * Secondly, we only run specially modified Guests, not normal kernels: setting
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index aa4326bfb24a..f2145cfa12a6 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,6 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/dwarf2.h> 2#include <asm/dwarf2.h>
3#include <asm/alternative-asm.h>
3 4
4/* 5/*
5 * Zero a page. 6 * Zero a page.
@@ -14,6 +15,15 @@ ENTRY(clear_page_c)
14 CFI_ENDPROC 15 CFI_ENDPROC
15ENDPROC(clear_page_c) 16ENDPROC(clear_page_c)
16 17
18ENTRY(clear_page_c_e)
19 CFI_STARTPROC
20 movl $4096,%ecx
21 xorl %eax,%eax
22 rep stosb
23 ret
24 CFI_ENDPROC
25ENDPROC(clear_page_c_e)
26
17ENTRY(clear_page) 27ENTRY(clear_page)
18 CFI_STARTPROC 28 CFI_STARTPROC
19 xorl %eax,%eax 29 xorl %eax,%eax
@@ -38,21 +48,26 @@ ENTRY(clear_page)
38.Lclear_page_end: 48.Lclear_page_end:
39ENDPROC(clear_page) 49ENDPROC(clear_page)
40 50
41 /* Some CPUs run faster using the string instructions. 51 /*
42 It is also a lot simpler. Use this when possible */ 52 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
53 * It is recommended to use this when possible.
54 * If enhanced REP MOVSB/STOSB is not available, try to use fast string.
55 * Otherwise, use original function.
56 *
57 */
43 58
44#include <asm/cpufeature.h> 59#include <asm/cpufeature.h>
45 60
46 .section .altinstr_replacement,"ax" 61 .section .altinstr_replacement,"ax"
471: .byte 0xeb /* jmp <disp8> */ 621: .byte 0xeb /* jmp <disp8> */
48 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ 63 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
492: 642: .byte 0xeb /* jmp <disp8> */
65 .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */
663:
50 .previous 67 .previous
51 .section .altinstructions,"a" 68 .section .altinstructions,"a"
52 .align 8 69 altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\
53 .quad clear_page 70 .Lclear_page_end-clear_page, 2b-1b
54 .quad 1b 71 altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \
55 .word X86_FEATURE_REP_GOOD 72 .Lclear_page_end-clear_page,3b-2b
56 .byte .Lclear_page_end - clear_page
57 .byte 2b - 1b
58 .previous 73 .previous
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 99e482615195..024840266ba0 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -15,23 +15,30 @@
15#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/cpufeature.h> 17#include <asm/cpufeature.h>
18#include <asm/alternative-asm.h>
18 19
19 .macro ALTERNATIVE_JUMP feature,orig,alt 20/*
21 * By placing feature2 after feature1 in altinstructions section, we logically
22 * implement:
23 * If CPU has feature2, jmp to alt2 is used
24 * else if CPU has feature1, jmp to alt1 is used
25 * else jmp to orig is used.
26 */
27 .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
200: 280:
21 .byte 0xe9 /* 32bit jump */ 29 .byte 0xe9 /* 32bit jump */
22 .long \orig-1f /* by default jump to orig */ 30 .long \orig-1f /* by default jump to orig */
231: 311:
24 .section .altinstr_replacement,"ax" 32 .section .altinstr_replacement,"ax"
252: .byte 0xe9 /* near jump with 32bit immediate */ 332: .byte 0xe9 /* near jump with 32bit immediate */
26 .long \alt-1b /* offset */ /* or alternatively to alt */ 34 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
353: .byte 0xe9 /* near jump with 32bit immediate */
36 .long \alt2-1b /* offset */ /* or alternatively to alt2 */
27 .previous 37 .previous
38
28 .section .altinstructions,"a" 39 .section .altinstructions,"a"
29 .align 8 40 altinstruction_entry 0b,2b,\feature1,5,5
30 .quad 0b 41 altinstruction_entry 0b,3b,\feature2,5,5
31 .quad 2b
32 .word \feature /* when feature is set */
33 .byte 5
34 .byte 5
35 .previous 42 .previous
36 .endm 43 .endm
37 44
@@ -72,8 +79,10 @@ ENTRY(_copy_to_user)
72 addq %rdx,%rcx 79 addq %rdx,%rcx
73 jc bad_to_user 80 jc bad_to_user
74 cmpq TI_addr_limit(%rax),%rcx 81 cmpq TI_addr_limit(%rax),%rcx
75 jae bad_to_user 82 ja bad_to_user
76 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string 83 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
84 copy_user_generic_unrolled,copy_user_generic_string, \
85 copy_user_enhanced_fast_string
77 CFI_ENDPROC 86 CFI_ENDPROC
78ENDPROC(_copy_to_user) 87ENDPROC(_copy_to_user)
79 88
@@ -85,8 +94,10 @@ ENTRY(_copy_from_user)
85 addq %rdx,%rcx 94 addq %rdx,%rcx
86 jc bad_from_user 95 jc bad_from_user
87 cmpq TI_addr_limit(%rax),%rcx 96 cmpq TI_addr_limit(%rax),%rcx
88 jae bad_from_user 97 ja bad_from_user
89 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string 98 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
99 copy_user_generic_unrolled,copy_user_generic_string, \
100 copy_user_enhanced_fast_string
90 CFI_ENDPROC 101 CFI_ENDPROC
91ENDPROC(_copy_from_user) 102ENDPROC(_copy_from_user)
92 103
@@ -255,3 +266,37 @@ ENTRY(copy_user_generic_string)
255 .previous 266 .previous
256 CFI_ENDPROC 267 CFI_ENDPROC
257ENDPROC(copy_user_generic_string) 268ENDPROC(copy_user_generic_string)
269
270/*
271 * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
272 * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
273 *
274 * Input:
275 * rdi destination
276 * rsi source
277 * rdx count
278 *
279 * Output:
280 * eax uncopied bytes or 0 if successful.
281 */
282ENTRY(copy_user_enhanced_fast_string)
283 CFI_STARTPROC
284 andl %edx,%edx
285 jz 2f
286 movl %edx,%ecx
2871: rep
288 movsb
2892: xorl %eax,%eax
290 ret
291
292 .section .fixup,"ax"
29312: movl %ecx,%edx /* ecx is zerorest also */
294 jmp copy_user_handle_tail
295 .previous
296
297 .section __ex_table,"a"
298 .align 8
299 .quad 1b,12b
300 .previous
301 CFI_ENDPROC
302ENDPROC(copy_user_enhanced_fast_string)
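
The rewritten ALTERNATIVE_JUMP macro above emits two altinstructions records for the same jump, and because entries are applied in section order the ERMS replacement overrides the REP_GOOD one when both features are present. The resulting dispatch, modelled as a plain C sketch (the selector function is illustrative; at runtime the choice is patched in once at boot, not evaluated per call):

/* Which user-copy routine the patched jmp ends up targeting. */
enum copy_variant {
	COPY_UNROLLED,		/* copy_user_generic_unrolled: default target */
	COPY_REP_STRING,	/* copy_user_generic_string: X86_FEATURE_REP_GOOD */
	COPY_ENHANCED,		/* copy_user_enhanced_fast_string: X86_FEATURE_ERMS */
};

static enum copy_variant pick_copy_variant(int has_rep_good, int has_erms)
{
	if (has_erms)		/* feature2 entry is applied last, so it wins */
		return COPY_ENHANCED;
	if (has_rep_good)	/* feature1 entry */
		return COPY_REP_STRING;
	return COPY_UNROLLED;	/* neither feature: keep the original jmp */
}
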
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 75ef61e35e38..daab21dae2d1 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,6 +4,7 @@
4 4
5#include <asm/cpufeature.h> 5#include <asm/cpufeature.h>
6#include <asm/dwarf2.h> 6#include <asm/dwarf2.h>
7#include <asm/alternative-asm.h>
7 8
8/* 9/*
9 * memcpy - Copy a memory block. 10 * memcpy - Copy a memory block.
@@ -37,6 +38,23 @@
37.Lmemcpy_e: 38.Lmemcpy_e:
38 .previous 39 .previous
39 40
41/*
42 * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
43 * memcpy_c. Use memcpy_c_e when possible.
44 *
45 * This gets patched over the unrolled variant (below) via the
46 * alternative instructions framework:
47 */
48 .section .altinstr_replacement, "ax", @progbits
49.Lmemcpy_c_e:
50 movq %rdi, %rax
51
52 movl %edx, %ecx
53 rep movsb
54 ret
55.Lmemcpy_e_e:
56 .previous
57
40ENTRY(__memcpy) 58ENTRY(__memcpy)
41ENTRY(memcpy) 59ENTRY(memcpy)
42 CFI_STARTPROC 60 CFI_STARTPROC
@@ -171,21 +189,22 @@ ENDPROC(memcpy)
171ENDPROC(__memcpy) 189ENDPROC(__memcpy)
172 190
173 /* 191 /*
174 * Some CPUs run faster using the string copy instructions. 192 * Some CPUs are adding enhanced REP MOVSB/STOSB feature
175 * It is also a lot simpler. Use this when possible: 193 * If the feature is supported, memcpy_c_e() is the first choice.
176 */ 194 * If enhanced rep movsb copy is not available, use fast string copy
177 195 * memcpy_c() when possible. This is faster and code is simpler than
178 .section .altinstructions, "a" 196 * original memcpy().
179 .align 8 197 * Otherwise, original memcpy() is used.
180 .quad memcpy 198 * In .altinstructions section, ERMS feature is placed after REP_GOOD
181 .quad .Lmemcpy_c 199 * feature to implement the right patch order.
182 .word X86_FEATURE_REP_GOOD 200 *
183
184 /*
185 * Replace only beginning, memcpy is used to apply alternatives, 201 * Replace only beginning, memcpy is used to apply alternatives,
186 * so it is silly to overwrite itself with nops - reboot is the 202 * so it is silly to overwrite itself with nops - reboot is the
187 * only outcome... 203 * only outcome...
188 */ 204 */
189 .byte .Lmemcpy_e - .Lmemcpy_c 205 .section .altinstructions, "a"
190 .byte .Lmemcpy_e - .Lmemcpy_c 206 altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
207 .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
208 altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
209 .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
191 .previous 210 .previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0ecb8433e5a8..d0ec9c2936d7 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,6 +8,7 @@
8#define _STRING_C 8#define _STRING_C
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h> 10#include <asm/dwarf2.h>
11#include <asm/cpufeature.h>
11 12
12#undef memmove 13#undef memmove
13 14
@@ -24,6 +25,7 @@
24 */ 25 */
25ENTRY(memmove) 26ENTRY(memmove)
26 CFI_STARTPROC 27 CFI_STARTPROC
28
27 /* Handle more 32bytes in loop */ 29 /* Handle more 32bytes in loop */
28 mov %rdi, %rax 30 mov %rdi, %rax
29 cmp $0x20, %rdx 31 cmp $0x20, %rdx
@@ -31,8 +33,13 @@ ENTRY(memmove)
31 33
32 /* Decide forward/backward copy mode */ 34 /* Decide forward/backward copy mode */
33 cmp %rdi, %rsi 35 cmp %rdi, %rsi
34 jb 2f 36 jge .Lmemmove_begin_forward
37 mov %rsi, %r8
38 add %rdx, %r8
39 cmp %rdi, %r8
40 jg 2f
35 41
42.Lmemmove_begin_forward:
36 /* 43 /*
37 * movsq instruction have many startup latency 44 * movsq instruction have many startup latency
38 * so we handle small size by general register. 45 * so we handle small size by general register.
@@ -78,6 +85,8 @@ ENTRY(memmove)
78 rep movsq 85 rep movsq
79 movq %r11, (%r10) 86 movq %r11, (%r10)
80 jmp 13f 87 jmp 13f
88.Lmemmove_end_forward:
89
81 /* 90 /*
82 * Handle data backward by movsq. 91 * Handle data backward by movsq.
83 */ 92 */
@@ -194,4 +203,22 @@ ENTRY(memmove)
19413: 20313:
195 retq 204 retq
196 CFI_ENDPROC 205 CFI_ENDPROC
206
207 .section .altinstr_replacement,"ax"
208.Lmemmove_begin_forward_efs:
209 /* Forward moving data. */
210 movq %rdx, %rcx
211 rep movsb
212 retq
213.Lmemmove_end_forward_efs:
214 .previous
215
216 .section .altinstructions,"a"
217 .align 8
218 .quad .Lmemmove_begin_forward
219 .quad .Lmemmove_begin_forward_efs
220 .word X86_FEATURE_ERMS
221 .byte .Lmemmove_end_forward-.Lmemmove_begin_forward
222 .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
223 .previous
197ENDPROC(memmove) 224ENDPROC(memmove)
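
The checks added at the top of memmove above choose between the forward and backward paths for overlapping buffers: copy forward when the source starts at or above the destination, or when the source region ends at or before the destination; only when the destination lands inside the source region is the backward path taken. A self-contained C model of that test (a sketch, not the kernel routine):

#include <stddef.h>
#include <stdint.h>

/* Mirror of the overlap test: a forward copy is safe unless the
 * destination starts inside [src, src + len). */
static int use_forward_copy(const void *dst, const void *src, size_t len)
{
	uintptr_t d = (uintptr_t)dst, s = (uintptr_t)src;

	if (s >= d)		/* source at or above destination */
		return 1;
	if (s + len <= d)	/* regions do not overlap at all */
		return 1;
	return 0;		/* dst inside the source region: copy backward */
}
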
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 09d344269652..79bd454b78a3 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -2,9 +2,13 @@
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h> 4#include <asm/dwarf2.h>
5#include <asm/cpufeature.h>
6#include <asm/alternative-asm.h>
5 7
6/* 8/*
7 * ISO C memset - set a memory block to a byte value. 9 * ISO C memset - set a memory block to a byte value. This function uses fast
10 * string to get better performance than the original function. The code is
11 * simpler and shorter than the original function as well.
8 * 12 *
9 * rdi destination 13 * rdi destination
10 * rsi value (char) 14 * rsi value (char)
@@ -31,6 +35,28 @@
31.Lmemset_e: 35.Lmemset_e:
32 .previous 36 .previous
33 37
38/*
39 * ISO C memset - set a memory block to a byte value. This function uses
40 * enhanced rep stosb to override the fast string function.
41 * The code is simpler and shorter than the fast string function as well.
42 *
43 * rdi destination
44 * rsi value (char)
45 * rdx count (bytes)
46 *
47 * rax original destination
48 */
49 .section .altinstr_replacement, "ax", @progbits
50.Lmemset_c_e:
51 movq %rdi,%r9
52 movb %sil,%al
53 movl %edx,%ecx
54 rep stosb
55 movq %r9,%rax
56 ret
57.Lmemset_e_e:
58 .previous
59
34ENTRY(memset) 60ENTRY(memset)
35ENTRY(__memset) 61ENTRY(__memset)
36 CFI_STARTPROC 62 CFI_STARTPROC
@@ -112,16 +138,20 @@ ENTRY(__memset)
112ENDPROC(memset) 138ENDPROC(memset)
113ENDPROC(__memset) 139ENDPROC(__memset)
114 140
115 /* Some CPUs run faster using the string instructions. 141 /* Some CPUs support enhanced REP MOVSB/STOSB feature.
116 It is also a lot simpler. Use this when possible */ 142 * It is recommended to use this when possible.
117 143 *
118#include <asm/cpufeature.h> 144 * If enhanced REP MOVSB/STOSB feature is not available, use fast string
119 145 * instructions.
146 *
147 * Otherwise, use original memset function.
148 *
149 * In .altinstructions section, ERMS feature is placed after REP_GOOD
150 * feature to implement the right patch order.
151 */
120 .section .altinstructions,"a" 152 .section .altinstructions,"a"
121 .align 8 153 altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
122 .quad memset 154 .Lfinal-memset,.Lmemset_e-.Lmemset_c
123 .quad .Lmemset_c 155 altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
124 .word X86_FEATURE_REP_GOOD 156 .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e
125 .byte .Lfinal - memset
126 .byte .Lmemset_e - .Lmemset_c
127 .previous 157 .previous
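
Each open-coded .align/.quad/.quad/.word/.byte/.byte sequence that altinstruction_entry replaces in the clear_page, copy_user, memcpy and memset hunks above describes one alternatives record. Viewed as a C struct (field names are illustrative; the layout simply mirrors the directives being replaced):

#include <stdint.h>

struct alt_record {		/* one .altinstructions entry, .align 8 padded */
	uint64_t orig;		/* address of the code to patch        (.quad) */
	uint64_t repl;		/* address of the replacement code     (.quad) */
	uint16_t feature;	/* CPU feature bit, e.g. REP_GOOD/ERMS (.word) */
	uint8_t  orig_len;	/* bytes available at orig             (.byte) */
	uint8_t  repl_len;	/* bytes to copy from repl             (.byte) */
};

/* Records are applied in section order, so placing the ERMS entry after
 * the REP_GOOD entry leaves the enhanced rep-string replacement in place
 * when a CPU advertises both features. */
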
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 286d289b039b..37b8b0fe8320 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); 81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
82} 82}
83 83
84void __init native_pagetable_reserve(u64 start, u64 end)
85{
86 memblock_x86_reserve_range(start, end, "PGTABLE");
87}
88
84struct map_range { 89struct map_range {
85 unsigned long start; 90 unsigned long start;
86 unsigned long end; 91 unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
272 277
273 __flush_tlb_all(); 278 __flush_tlb_all();
274 279
280 /*
281 * Reserve the kernel pagetable pages we used (pgt_buf_start -
282 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
283 * so that they can be reused for other purposes.
284 *
285 * On native it just means calling memblock_x86_reserve_range, on Xen it
286 * also means marking RW the pagetable pages that we allocated before
287 * but that haven't been used.
288 *
289 * In fact on xen we mark RO the whole range pgt_buf_start -
290 * pgt_buf_top, because we have to make sure that when
291 * init_memory_mapping reaches the pagetable pages area, it maps
292 * RO all the pagetable pages, including the ones that are beyond
293 * pgt_buf_end at that time.
294 */
275 if (!after_bootmem && pgt_buf_end > pgt_buf_start) 295 if (!after_bootmem && pgt_buf_end > pgt_buf_start)
276 memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, 296 x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
277 pgt_buf_end << PAGE_SHIFT, "PGTABLE"); 297 PFN_PHYS(pgt_buf_end));
278 298
279 if (!after_bootmem) 299 if (!after_bootmem)
280 early_memtest(start, end); 300 early_memtest(start, end);
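
init_memory_mapping now funnels the pagetable reservation through the new x86_init.mapping.pagetable_reserve hook rather than calling memblock directly, which is what lets Xen add its read-write fixup for the unused tail of the buffer. A reduced C sketch of that indirection (simplified types; the real hook lives in struct x86_init_ops):

typedef unsigned long long u64;

/* Simplified model of the new mapping hook. */
struct mapping_ops {
	void (*pagetable_reserve)(u64 start, u64 end);
};

static void native_reserve(u64 start, u64 end)
{
	/* native: just reserve [start, end) as "PGTABLE" in memblock */
}

static void xen_reserve(u64 start, u64 end)
{
	native_reserve(start, end);
	/* then mark the unused tail (end .. pgt_buf_top) read-write again,
	 * since Xen made the whole candidate range read-only up front */
}

static struct mapping_ops mapping = {
	.pagetable_reserve = native_reserve,	/* default, set in x86_init.c */
};

static void xen_init_mmu_ops_sketch(void)
{
	mapping.pagetable_reserve = xen_reserve;	/* Xen override */
}
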
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 2d49d4e19a36..a5b64ab4cd6e 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -16,17 +16,6 @@
16#include <asm/stacktrace.h> 16#include <asm/stacktrace.h>
17#include <linux/compat.h> 17#include <linux/compat.h>
18 18
19static void backtrace_warning_symbol(void *data, char *msg,
20 unsigned long symbol)
21{
22 /* Ignore warnings */
23}
24
25static void backtrace_warning(void *data, char *msg)
26{
27 /* Ignore warnings */
28}
29
30static int backtrace_stack(void *data, char *name) 19static int backtrace_stack(void *data, char *name)
31{ 20{
32 /* Yes, we want all stacks */ 21 /* Yes, we want all stacks */
@@ -42,8 +31,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
42} 31}
43 32
44static struct stacktrace_ops backtrace_ops = { 33static struct stacktrace_ops backtrace_ops = {
45 .warning = backtrace_warning,
46 .warning_symbol = backtrace_warning_symbol,
47 .stack = backtrace_stack, 34 .stack = backtrace_stack,
48 .address = backtrace_address, 35 .address = backtrace_address,
49 .walk_stack = print_context_stack, 36 .walk_stack = print_context_stack,
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index e37b407a0ee8..8214724ce54d 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -108,7 +108,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
108 } 108 }
109 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, 109 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
110 (type == PCI_CAP_ID_MSIX) ? 110 (type == PCI_CAP_ID_MSIX) ?
111 "msi-x" : "msi"); 111 "msi-x" : "msi",
112 DOMID_SELF);
112 if (irq < 0) 113 if (irq < 0)
113 goto error; 114 goto error;
114 dev_dbg(&dev->dev, 115 dev_dbg(&dev->dev,
@@ -148,7 +149,8 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
148 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, 149 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
149 (type == PCI_CAP_ID_MSIX) ? 150 (type == PCI_CAP_ID_MSIX) ?
150 "pcifront-msi-x" : 151 "pcifront-msi-x" :
151 "pcifront-msi"); 152 "pcifront-msi",
153 DOMID_SELF);
152 if (irq < 0) 154 if (irq < 0)
153 goto free; 155 goto free;
154 i++; 156 i++;
@@ -190,9 +192,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
190 192
191 list_for_each_entry(msidesc, &dev->msi_list, list) { 193 list_for_each_entry(msidesc, &dev->msi_list, list) {
192 struct physdev_map_pirq map_irq; 194 struct physdev_map_pirq map_irq;
195 domid_t domid;
196
197 domid = ret = xen_find_device_domain_owner(dev);
198 /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
199 * hence check ret value for < 0. */
200 if (ret < 0)
201 domid = DOMID_SELF;
193 202
194 memset(&map_irq, 0, sizeof(map_irq)); 203 memset(&map_irq, 0, sizeof(map_irq));
195 map_irq.domid = DOMID_SELF; 204 map_irq.domid = domid;
196 map_irq.type = MAP_PIRQ_TYPE_MSI; 205 map_irq.type = MAP_PIRQ_TYPE_MSI;
197 map_irq.index = -1; 206 map_irq.index = -1;
198 map_irq.pirq = -1; 207 map_irq.pirq = -1;
@@ -215,14 +224,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
215 224
216 ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); 225 ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
217 if (ret) { 226 if (ret) {
218 dev_warn(&dev->dev, "xen map irq failed %d\n", ret); 227 dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
228 ret, domid);
219 goto out; 229 goto out;
220 } 230 }
221 231
222 ret = xen_bind_pirq_msi_to_irq(dev, msidesc, 232 ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
223 map_irq.pirq, map_irq.index, 233 map_irq.pirq, map_irq.index,
224 (type == PCI_CAP_ID_MSIX) ? 234 (type == PCI_CAP_ID_MSIX) ?
225 "msi-x" : "msi"); 235 "msi-x" : "msi",
236 domid);
226 if (ret < 0) 237 if (ret < 0)
227 goto out; 238 goto out;
228 } 239 }
@@ -461,3 +472,78 @@ void __init xen_setup_pirqs(void)
461 } 472 }
462} 473}
463#endif 474#endif
475
476#ifdef CONFIG_XEN_DOM0
477struct xen_device_domain_owner {
478 domid_t domain;
479 struct pci_dev *dev;
480 struct list_head list;
481};
482
483static DEFINE_SPINLOCK(dev_domain_list_spinlock);
484static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
485
486static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
487{
488 struct xen_device_domain_owner *owner;
489
490 list_for_each_entry(owner, &dev_domain_list, list) {
491 if (owner->dev == dev)
492 return owner;
493 }
494 return NULL;
495}
496
497int xen_find_device_domain_owner(struct pci_dev *dev)
498{
499 struct xen_device_domain_owner *owner;
500 int domain = -ENODEV;
501
502 spin_lock(&dev_domain_list_spinlock);
503 owner = find_device(dev);
504 if (owner)
505 domain = owner->domain;
506 spin_unlock(&dev_domain_list_spinlock);
507 return domain;
508}
509EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
510
511int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
512{
513 struct xen_device_domain_owner *owner;
514
515 owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
516 if (!owner)
517 return -ENODEV;
518
519 spin_lock(&dev_domain_list_spinlock);
520 if (find_device(dev)) {
521 spin_unlock(&dev_domain_list_spinlock);
522 kfree(owner);
523 return -EEXIST;
524 }
525 owner->domain = domain;
526 owner->dev = dev;
527 list_add_tail(&owner->list, &dev_domain_list);
528 spin_unlock(&dev_domain_list_spinlock);
529 return 0;
530}
531EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
532
533int xen_unregister_device_domain_owner(struct pci_dev *dev)
534{
535 struct xen_device_domain_owner *owner;
536
537 spin_lock(&dev_domain_list_spinlock);
538 owner = find_device(dev);
539 if (!owner) {
540 spin_unlock(&dev_domain_list_spinlock);
541 return -ENODEV;
542 }
543 list_del(&owner->list);
544 spin_unlock(&dev_domain_list_spinlock);
545 kfree(owner);
546 return 0;
547}
548EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
549#endif
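
The registry added above is a spinlock-protected list keyed by pci_dev, and the dom0 MSI setup path consults it so a device owned by another domain has its pirq mapped with that domid instead of DOMID_SELF. A short usage sketch (hypothetical caller, assuming the declarations from this file and the usual kernel headers):

/* Hypothetical caller wiring a passed-through device to its owner domain. */
static int sketch_assign_device(struct pci_dev *dev, uint16_t domid)
{
	int err = xen_register_device_domain_owner(dev, domid);

	if (err)	/* -EEXIST if already owned, -ENODEV if allocation failed */
		return err;

	/* From here on, xen_initdom_setup_msi_irqs() resolves this owner and
	 * maps the device's pirqs with domid rather than DOMID_SELF. */
	return 0;
}

/* On teardown: xen_unregister_device_domain_owner(dev); */
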
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 7cb6424317f6..c58e0ea39ef5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
699 struct mm_struct *mm, 699 struct mm_struct *mm,
700 unsigned long va, unsigned int cpu) 700 unsigned long va, unsigned int cpu)
701{ 701{
702 int tcpu;
703 int uvhub;
704 int locals = 0; 702 int locals = 0;
705 int remotes = 0; 703 int remotes = 0;
706 int hubs = 0; 704 int hubs = 0;
705 int tcpu;
706 int tpnode;
707 struct bau_desc *bau_desc; 707 struct bau_desc *bau_desc;
708 struct cpumask *flush_mask; 708 struct cpumask *flush_mask;
709 struct ptc_stats *stat; 709 struct ptc_stats *stat;
710 struct bau_control *bcp; 710 struct bau_control *bcp;
711 struct bau_control *tbcp; 711 struct bau_control *tbcp;
712 struct hub_and_pnode *hpp;
712 713
713 /* kernel was booted 'nobau' */ 714 /* kernel was booted 'nobau' */
714 if (nobau) 715 if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
750 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 751 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
751 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 752 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
752 753
753 /* cpu statistics */
754 for_each_cpu(tcpu, flush_mask) { 754 for_each_cpu(tcpu, flush_mask) {
755 uvhub = uv_cpu_to_blade_id(tcpu); 755 /*
756 bau_uvhub_set(uvhub, &bau_desc->distribution); 756 * The distribution vector is a bit map of pnodes, relative
757 if (uvhub == bcp->uvhub) 757 * to the partition base pnode (and the partition base nasid
758 * in the header).
759 * Translate cpu to pnode and hub using an array stored
760 * in local memory.
761 */
762 hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
763 tpnode = hpp->pnode - bcp->partition_base_pnode;
764 bau_uvhub_set(tpnode, &bau_desc->distribution);
765 if (hpp->uvhub == bcp->uvhub)
758 locals++; 766 locals++;
759 else 767 else
760 remotes++; 768 remotes++;
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
855 * an interrupt, but causes an error message to be returned to 863 * an interrupt, but causes an error message to be returned to
856 * the sender. 864 * the sender.
857 */ 865 */
858static void uv_enable_timeouts(void) 866static void __init uv_enable_timeouts(void)
859{ 867{
860 int uvhub; 868 int uvhub;
861 int nuvhubs; 869 int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
1326} 1334}
1327 1335
1328/* 1336/*
1329 * initialize the sending side's sending buffers 1337 * Initialize the sending side's sending buffers.
1330 */ 1338 */
1331static void 1339static void
1332uv_activation_descriptor_init(int node, int pnode) 1340uv_activation_descriptor_init(int node, int pnode, int base_pnode)
1333{ 1341{
1334 int i; 1342 int i;
1335 int cpu; 1343 int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
1352 n = pa >> uv_nshift; 1360 n = pa >> uv_nshift;
1353 m = pa & uv_mmask; 1361 m = pa & uv_mmask;
1354 1362
1363 /* the 14-bit pnode */
1355 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1364 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
1356 (n << UV_DESC_BASE_PNODE_SHIFT | m)); 1365 (n << UV_DESC_BASE_PNODE_SHIFT | m));
1357
1358 /* 1366 /*
1359 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1367 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
1360 * cpu even though we only use the first one; one descriptor can 1368 * cpu even though we only use the first one; one descriptor can
1361 * describe a broadcast to 256 uv hubs. 1369 * describe a broadcast to 256 uv hubs.
1362 */ 1370 */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
1365 memset(bd2, 0, sizeof(struct bau_desc)); 1373 memset(bd2, 0, sizeof(struct bau_desc));
1366 bd2->header.sw_ack_flag = 1; 1374 bd2->header.sw_ack_flag = 1;
1367 /* 1375 /*
1368 * base_dest_nodeid is the nasid of the first uvhub 1376 * The base_dest_nasid set in the message header is the nasid
1369 * in the partition. The bit map will indicate uvhub numbers, 1377 * of the first uvhub in the partition. The bit map will
1370 * which are 0-N in a partition. Pnodes are unique system-wide. 1378 * indicate destination pnode numbers relative to that base.
1379 * They may not be consecutive if nasid striding is being used.
1371 */ 1380 */
1372 bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1381 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1373 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1382 bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1374 bd2->header.command = UV_NET_ENDPOINT_INTD; 1383 bd2->header.command = UV_NET_ENDPOINT_INTD;
1375 bd2->header.int_both = 1; 1384 bd2->header.int_both = 1;
1376 /* 1385 /*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
1442/* 1451/*
1443 * Initialization of each UV hub's structures 1452 * Initialization of each UV hub's structures
1444 */ 1453 */
1445static void __init uv_init_uvhub(int uvhub, int vector) 1454static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
1446{ 1455{
1447 int node; 1456 int node;
1448 int pnode; 1457 int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
1450 1459
1451 node = uvhub_to_first_node(uvhub); 1460 node = uvhub_to_first_node(uvhub);
1452 pnode = uv_blade_to_pnode(uvhub); 1461 pnode = uv_blade_to_pnode(uvhub);
1453 uv_activation_descriptor_init(node, pnode); 1462 uv_activation_descriptor_init(node, pnode, base_pnode);
1454 uv_payload_queue_init(node, pnode); 1463 uv_payload_queue_init(node, pnode);
1455 /* 1464 /*
1456 * the below initialization can't be in firmware because the 1465 * The below initialization can't be in firmware because the
1457 * messaging IRQ will be determined by the OS 1466 * messaging IRQ will be determined by the OS.
1458 */ 1467 */
1459 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; 1468 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1460 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1469 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
1491/* 1500/*
1492 * initialize the bau_control structure for each cpu 1501 * initialize the bau_control structure for each cpu
1493 */ 1502 */
1494static int __init uv_init_per_cpu(int nuvhubs) 1503static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
1495{ 1504{
1496 int i; 1505 int i;
1497 int cpu; 1506 int cpu;
1507 int tcpu;
1498 int pnode; 1508 int pnode;
1499 int uvhub; 1509 int uvhub;
1500 int have_hmaster; 1510 int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
1528 bcp = &per_cpu(bau_control, cpu); 1538 bcp = &per_cpu(bau_control, cpu);
1529 memset(bcp, 0, sizeof(struct bau_control)); 1539 memset(bcp, 0, sizeof(struct bau_control));
1530 pnode = uv_cpu_hub_info(cpu)->pnode; 1540 pnode = uv_cpu_hub_info(cpu)->pnode;
1541 if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
1542 printk(KERN_EMERG
1543 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1544 cpu, pnode, base_part_pnode,
1545 UV_DISTRIBUTION_SIZE);
1546 return 1;
1547 }
1548 bcp->osnode = cpu_to_node(cpu);
1549 bcp->partition_base_pnode = uv_partition_base_pnode;
1531 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; 1550 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1532 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); 1551 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1533 bdp = &uvhub_descs[uvhub]; 1552 bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
1536 bdp->pnode = pnode; 1555 bdp->pnode = pnode;
1537 /* kludge: 'assuming' one node per socket, and assuming that 1556 /* kludge: 'assuming' one node per socket, and assuming that
1538 disabling a socket just leaves a gap in node numbers */ 1557 disabling a socket just leaves a gap in node numbers */
1539 socket = (cpu_to_node(cpu) & 1); 1558 socket = bcp->osnode & 1;
1540 bdp->socket_mask |= (1 << socket); 1559 bdp->socket_mask |= (1 << socket);
1541 sdp = &bdp->socket[socket]; 1560 sdp = &bdp->socket[socket];
1542 sdp->cpu_number[sdp->num_cpus] = cpu; 1561 sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
1585nextsocket: 1604nextsocket:
1586 socket++; 1605 socket++;
1587 socket_mask = (socket_mask >> 1); 1606 socket_mask = (socket_mask >> 1);
1607 /* each socket gets a local array of pnodes/hubs */
1608 bcp = smaster;
1609 bcp->target_hub_and_pnode = kmalloc_node(
1610 sizeof(struct hub_and_pnode) *
1611 num_possible_cpus(), GFP_KERNEL, bcp->osnode);
1612 memset(bcp->target_hub_and_pnode, 0,
1613 sizeof(struct hub_and_pnode) *
1614 num_possible_cpus());
1615 for_each_present_cpu(tcpu) {
1616 bcp->target_hub_and_pnode[tcpu].pnode =
1617 uv_cpu_hub_info(tcpu)->pnode;
1618 bcp->target_hub_and_pnode[tcpu].uvhub =
1619 uv_cpu_hub_info(tcpu)->numa_blade_id;
1620 }
1588 } 1621 }
1589 } 1622 }
1590 kfree(uvhub_descs); 1623 kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
1637 spin_lock_init(&disable_lock); 1670 spin_lock_init(&disable_lock);
1638 congested_cycles = microsec_2_cycles(congested_response_us); 1671 congested_cycles = microsec_2_cycles(congested_response_us);
1639 1672
1640 if (uv_init_per_cpu(nuvhubs)) {
1641 nobau = 1;
1642 return 0;
1643 }
1644
1645 uv_partition_base_pnode = 0x7fffffff; 1673 uv_partition_base_pnode = 0x7fffffff;
1646 for (uvhub = 0; uvhub < nuvhubs; uvhub++) 1674 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1647 if (uv_blade_nr_possible_cpus(uvhub) && 1675 if (uv_blade_nr_possible_cpus(uvhub) &&
1648 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) 1676 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
1649 uv_partition_base_pnode = uv_blade_to_pnode(uvhub); 1677 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
1678 }
1679
1680 if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
1681 nobau = 1;
1682 return 0;
1683 }
1650 1684
1651 vector = UV_BAU_MESSAGE; 1685 vector = UV_BAU_MESSAGE;
1652 for_each_possible_blade(uvhub) 1686 for_each_possible_blade(uvhub)
1653 if (uv_blade_nr_possible_cpus(uvhub)) 1687 if (uv_blade_nr_possible_cpus(uvhub))
1654 uv_init_uvhub(uvhub, vector); 1688 uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
1655 1689
1656 uv_enable_timeouts(); 1690 uv_enable_timeouts();
1657 alloc_intr_gate(vector, uv_bau_message_intr1); 1691 alloc_intr_gate(vector, uv_bau_message_intr1);
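
With the per-socket target_hub_and_pnode array, the flush path above no longer calls uv_cpu_to_blade_id() per target cpu; it reads the target's pnode and hub from local memory and sets the distribution bit relative to the partition base pnode. A compact C sketch of that loop (types reduced to the fields actually used here):

struct hub_and_pnode_sketch {	/* reduced view of struct hub_and_pnode */
	short uvhub;
	short pnode;
};

/* Mark each target's pnode, relative to the partition base, in the
 * distribution bitmap and keep the local/remote counts. */
static void mark_targets(unsigned long *distribution,
			 const struct hub_and_pnode_sketch *map,
			 const int *targets, int ntargets,
			 int partition_base_pnode, int my_uvhub,
			 int *locals, int *remotes)
{
	const int bits = 8 * sizeof(unsigned long);
	int i;

	for (i = 0; i < ntargets; i++) {
		const struct hub_and_pnode_sketch *hpp = &map[targets[i]];
		int tpnode = hpp->pnode - partition_base_pnode;

		distribution[tpnode / bits] |= 1UL << (tpnode % bits);

		if (hpp->uvhub == my_uvhub)
			(*locals)++;
		else
			(*remotes)++;
	}
}
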
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e3c6a06cf725..dd7b88f2ec7a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -235,7 +235,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
235 *dx &= maskedx; 235 *dx &= maskedx;
236} 236}
237 237
238static __init void xen_init_cpuid_mask(void) 238static void __init xen_init_cpuid_mask(void)
239{ 239{
240 unsigned int ax, bx, cx, dx; 240 unsigned int ax, bx, cx, dx;
241 unsigned int xsave_mask; 241 unsigned int xsave_mask;
@@ -400,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
400/* 400/*
401 * load_gdt for early boot, when the gdt is only mapped once 401 * load_gdt for early boot, when the gdt is only mapped once
402 */ 402 */
403static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) 403static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
404{ 404{
405 unsigned long va = dtr->address; 405 unsigned long va = dtr->address;
406 unsigned int size = dtr->size + 1; 406 unsigned int size = dtr->size + 1;
@@ -662,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
662 * Version of write_gdt_entry for use at early boot-time needed to 662 * Version of write_gdt_entry for use at early boot-time needed to
663 * update an entry as simply as possible. 663 * update an entry as simply as possible.
664 */ 664 */
665static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, 665static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
666 const void *desc, int type) 666 const void *desc, int type)
667{ 667{
668 switch (type) { 668 switch (type) {
@@ -933,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
933 return ret; 933 return ret;
934} 934}
935 935
936static const struct pv_info xen_info __initdata = { 936static const struct pv_info xen_info __initconst = {
937 .paravirt_enabled = 1, 937 .paravirt_enabled = 1,
938 .shared_kernel_pmd = 0, 938 .shared_kernel_pmd = 0,
939 939
940 .name = "Xen", 940 .name = "Xen",
941}; 941};
942 942
943static const struct pv_init_ops xen_init_ops __initdata = { 943static const struct pv_init_ops xen_init_ops __initconst = {
944 .patch = xen_patch, 944 .patch = xen_patch,
945}; 945};
946 946
947static const struct pv_cpu_ops xen_cpu_ops __initdata = { 947static const struct pv_cpu_ops xen_cpu_ops __initconst = {
948 .cpuid = xen_cpuid, 948 .cpuid = xen_cpuid,
949 949
950 .set_debugreg = xen_set_debugreg, 950 .set_debugreg = xen_set_debugreg,
@@ -1004,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
1004 .end_context_switch = xen_end_context_switch, 1004 .end_context_switch = xen_end_context_switch,
1005}; 1005};
1006 1006
1007static const struct pv_apic_ops xen_apic_ops __initdata = { 1007static const struct pv_apic_ops xen_apic_ops __initconst = {
1008#ifdef CONFIG_X86_LOCAL_APIC 1008#ifdef CONFIG_X86_LOCAL_APIC
1009 .startup_ipi_hook = paravirt_nop, 1009 .startup_ipi_hook = paravirt_nop,
1010#endif 1010#endif
@@ -1055,7 +1055,7 @@ int xen_panic_handler_init(void)
1055 return 0; 1055 return 0;
1056} 1056}
1057 1057
1058static const struct machine_ops __initdata xen_machine_ops = { 1058static const struct machine_ops xen_machine_ops __initconst = {
1059 .restart = xen_restart, 1059 .restart = xen_restart,
1060 .halt = xen_machine_halt, 1060 .halt = xen_machine_halt,
1061 .power_off = xen_machine_halt, 1061 .power_off = xen_machine_halt,
@@ -1332,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1332 return NOTIFY_OK; 1332 return NOTIFY_OK;
1333} 1333}
1334 1334
1335static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { 1335static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
1336 .notifier_call = xen_hvm_cpu_notify, 1336 .notifier_call = xen_hvm_cpu_notify,
1337}; 1337};
1338 1338
@@ -1381,7 +1381,7 @@ bool xen_hvm_need_lapic(void)
1381} 1381}
1382EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); 1382EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
1383 1383
1384const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = { 1384const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
1385 .name = "Xen HVM", 1385 .name = "Xen HVM",
1386 .detect = xen_hvm_platform, 1386 .detect = xen_hvm_platform,
1387 .init_platform = xen_hvm_guest_init, 1387 .init_platform = xen_hvm_guest_init,
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 6a6fe8939645..8bbb465b6f0a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -113,7 +113,7 @@ static void xen_halt(void)
113 xen_safe_halt(); 113 xen_safe_halt();
114} 114}
115 115
116static const struct pv_irq_ops xen_irq_ops __initdata = { 116static const struct pv_irq_ops xen_irq_ops __initconst = {
117 .save_fl = PV_CALLEE_SAVE(xen_save_fl), 117 .save_fl = PV_CALLEE_SAVE(xen_save_fl),
118 .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), 118 .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
119 .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), 119 .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 55c965b38c27..02d752460371 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void)
1054 * that's before we have page structures to store the bits. So do all 1054 * that's before we have page structures to store the bits. So do all
1055 * the book-keeping now. 1055 * the book-keeping now.
1056 */ 1056 */
1057static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, 1057static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
1058 enum pt_level level) 1058 enum pt_level level)
1059{ 1059{
1060 SetPagePinned(page); 1060 SetPagePinned(page);
@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info)
1187 1187
1188 active_mm = percpu_read(cpu_tlbstate.active_mm); 1188 active_mm = percpu_read(cpu_tlbstate.active_mm);
1189 1189
1190 if (active_mm == mm) 1190 if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1191 leave_mm(smp_processor_id()); 1191 leave_mm(smp_processor_id());
1192 1192
1193 /* If this cpu still has a stale cr3 reference, then make sure 1193 /* If this cpu still has a stale cr3 reference, then make sure
@@ -1271,13 +1271,27 @@ void xen_exit_mmap(struct mm_struct *mm)
1271 spin_unlock(&mm->page_table_lock); 1271 spin_unlock(&mm->page_table_lock);
1272} 1272}
1273 1273
1274static __init void xen_pagetable_setup_start(pgd_t *base) 1274static void __init xen_pagetable_setup_start(pgd_t *base)
1275{ 1275{
1276} 1276}
1277 1277
1278static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1279{
1280 /* reserve the range used */
1281 native_pagetable_reserve(start, end);
1282
1283 /* set as RW the rest */
1284 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1285 PFN_PHYS(pgt_buf_top));
1286 while (end < PFN_PHYS(pgt_buf_top)) {
1287 make_lowmem_page_readwrite(__va(end));
1288 end += PAGE_SIZE;
1289 }
1290}
1291
1278static void xen_post_allocator_init(void); 1292static void xen_post_allocator_init(void);
1279 1293
1280static __init void xen_pagetable_setup_done(pgd_t *base) 1294static void __init xen_pagetable_setup_done(pgd_t *base)
1281{ 1295{
1282 xen_setup_shared_info(); 1296 xen_setup_shared_info();
1283 xen_post_allocator_init(); 1297 xen_post_allocator_init();
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm)
1463 return ret; 1477 return ret;
1464} 1478}
1465 1479
1466#ifdef CONFIG_X86_64
1467static __initdata u64 __last_pgt_set_rw = 0;
1468static __initdata u64 __pgt_buf_start = 0;
1469static __initdata u64 __pgt_buf_end = 0;
1470static __initdata u64 __pgt_buf_top = 0;
1471/*
1472 * As a consequence of the commit:
1473 *
1474 * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
1475 * Author: Yinghai Lu <yinghai@kernel.org>
1476 * Date: Fri Dec 17 16:58:28 2010 -0800
1477 *
1478 * x86-64, mm: Put early page table high
1479 *
1480 * at some point init_memory_mapping is going to reach the pagetable pages
1481 * area and map those pages too (mapping them as normal memory that falls
1482 * in the range of addresses passed to init_memory_mapping as argument).
1483 * Some of those pages are already pagetable pages (they are in the range
1484 * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
1485 * everything is fine.
1486 * Some of these pages are not pagetable pages yet (they fall in the range
1487 * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
1488 * are going to be mapped RW. When these pages become pagetable pages and
1489 * are hooked into the pagetable, xen will find that the guest has already
1490 * a RW mapping of them somewhere and fail the operation.
1491 * The reason Xen requires pagetables to be RO is that the hypervisor needs
1492 * to verify that the pagetables are valid before using them. The validation
1493 * operations are called "pinning".
1494 *
1495 * In order to fix the issue we mark all the pages in the entire range
1496 * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
1497 * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
1498 * init_memory_mapping. Hence the kernel is going to crash as soon as one
1499 * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
1500 * ranges are RO).
1501 *
1502 * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
1503 * the init_memory_mapping has completed (in a perfect world we would
1504 * call this function from init_memory_mapping, but lets ignore that).
1505 *
1506 * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
1507 * end,top] have all changed to new values (b/c init_memory_mapping
1508 * is called and setting up another new page-table). Hence, the first time
1509 * we enter this function, we save away the pgt_buf_start value and update
1510 * the pgt_buf_[end,top].
1511 *
1512 * When we detect that the "old" pgt_buf_start through pgt_buf_end
1513 * PFNs have been reserved (so memblock_x86_reserve_range has been called),
1514 * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
1515 *
1516 * And then we update those "old" pgt_buf_[end|top] with the new ones
1517 * so that we can redo this on the next pagetable.
1518 */
1519static __init void mark_rw_past_pgt(void) {
1520
1521 if (pgt_buf_end > pgt_buf_start) {
1522 u64 addr, size;
1523
1524 /* Save it away. */
1525 if (!__pgt_buf_start) {
1526 __pgt_buf_start = pgt_buf_start;
1527 __pgt_buf_end = pgt_buf_end;
1528 __pgt_buf_top = pgt_buf_top;
1529 return;
1530 }
1531 /* If we get the range that starts at __pgt_buf_end that means
1532 * the range is reserved, and that in 'init_memory_mapping'
1533 * the 'memblock_x86_reserve_range' has been called with the
1534 * outdated __pgt_buf_start, __pgt_buf_end (the "new"
1535 * pgt_buf_[start|end|top] refer now to a new pagetable.
1536 * Note: we are called _after_ the pgt_buf_[..] have been
1537 * updated.*/
1538
1539 addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
1540 &size, PAGE_SIZE);
1541
1542 /* Still not reserved, meaning 'memblock_x86_reserve_range'
1543 * hasn't been called yet. Update the _end and _top.*/
1544 if (addr == PFN_PHYS(__pgt_buf_start)) {
1545 __pgt_buf_end = pgt_buf_end;
1546 __pgt_buf_top = pgt_buf_top;
1547 return;
1548 }
1549
1550 /* OK, the area is reserved, meaning it is time for us to
1551 * set RW for the old end->top PFNs. */
1552
1553 /* ..unless we had already done this. */
1554 if (__pgt_buf_end == __last_pgt_set_rw)
1555 return;
1556
1557 addr = PFN_PHYS(__pgt_buf_end);
1558
1559 /* set as RW the rest */
1560 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
1561 PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
1562
1563 while (addr < PFN_PHYS(__pgt_buf_top)) {
1564 make_lowmem_page_readwrite(__va(addr));
1565 addr += PAGE_SIZE;
1566 }
1567 /* And update everything so that we are ready for the next
1568 * pagetable (the one created for regions past 4GB) */
1569 __last_pgt_set_rw = __pgt_buf_end;
1570 __pgt_buf_start = pgt_buf_start;
1571 __pgt_buf_end = pgt_buf_end;
1572 __pgt_buf_top = pgt_buf_top;
1573 }
1574 return;
1575}
1576#else
1577static __init void mark_rw_past_pgt(void) { }
1578#endif
1579static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) 1480static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1580{ 1481{
1581#ifdef CONFIG_X86_64 1482#ifdef CONFIG_X86_64
@@ -1587,7 +1488,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1587} 1488}
1588 1489
1589#ifdef CONFIG_X86_32 1490#ifdef CONFIG_X86_32
1590static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 1491static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1591{ 1492{
1592 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ 1493 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1593 if (pte_val_ma(*ptep) & _PAGE_PRESENT) 1494 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
@@ -1597,19 +1498,11 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1597 return pte; 1498 return pte;
1598} 1499}
1599#else /* CONFIG_X86_64 */ 1500#else /* CONFIG_X86_64 */
1600static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 1501static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1601{ 1502{
1602 unsigned long pfn = pte_pfn(pte); 1503 unsigned long pfn = pte_pfn(pte);
1603 1504
1604 /* 1505 /*
1605 * A bit of optimization. We do not need to call the workaround
1606 * when xen_set_pte_init is called with a PTE with 0 as PFN.
1607 * That is b/c the pagetable at that point are just being populated
1608 * with empty values and we can save some cycles by not calling
1609 * the 'memblock' code.*/
1610 if (pfn)
1611 mark_rw_past_pgt();
1612 /*
1613 * If the new pfn is within the range of the newly allocated 1506 * If the new pfn is within the range of the newly allocated
1614 * kernel pagetable, and it isn't being mapped into an 1507 * kernel pagetable, and it isn't being mapped into an
1615 * early_ioremap fixmap slot as a freshly allocated page, make sure 1508 * early_ioremap fixmap slot as a freshly allocated page, make sure
@@ -1626,7 +1519,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1626 1519
1627/* Init-time set_pte while constructing initial pagetables, which 1520/* Init-time set_pte while constructing initial pagetables, which
1628 doesn't allow RO pagetable pages to be remapped RW */ 1521 doesn't allow RO pagetable pages to be remapped RW */
1629static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) 1522static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1630{ 1523{
1631 pte = mask_rw_pte(ptep, pte); 1524 pte = mask_rw_pte(ptep, pte);
1632 1525
@@ -1644,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1644 1537
1645/* Early in boot, while setting up the initial pagetable, assume 1538/* Early in boot, while setting up the initial pagetable, assume
1646 everything is pinned. */ 1539 everything is pinned. */
1647static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) 1540static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1648{ 1541{
1649#ifdef CONFIG_FLATMEM 1542#ifdef CONFIG_FLATMEM
1650 BUG_ON(mem_map); /* should only be used early */ 1543 BUG_ON(mem_map); /* should only be used early */
@@ -1654,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1654} 1547}
1655 1548
1656/* Used for pmd and pud */ 1549/* Used for pmd and pud */
1657static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) 1550static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1658{ 1551{
1659#ifdef CONFIG_FLATMEM 1552#ifdef CONFIG_FLATMEM
1660 BUG_ON(mem_map); /* should only be used early */ 1553 BUG_ON(mem_map); /* should only be used early */
@@ -1664,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1664 1557
1665/* Early release_pte assumes that all pts are pinned, since there's 1558/* Early release_pte assumes that all pts are pinned, since there's
1666 only init_mm and anything attached to that is pinned. */ 1559 only init_mm and anything attached to that is pinned. */
1667static __init void xen_release_pte_init(unsigned long pfn) 1560static void __init xen_release_pte_init(unsigned long pfn)
1668{ 1561{
1669 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); 1562 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1670 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1563 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1671} 1564}
1672 1565
1673static __init void xen_release_pmd_init(unsigned long pfn) 1566static void __init xen_release_pmd_init(unsigned long pfn)
1674{ 1567{
1675 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1568 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1676} 1569}
@@ -1796,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot)
1796 BUG(); 1689 BUG();
1797} 1690}
1798 1691
1799static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) 1692static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1800{ 1693{
1801 unsigned pmdidx, pteidx; 1694 unsigned pmdidx, pteidx;
1802 unsigned ident_pte; 1695 unsigned ident_pte;
@@ -1879,7 +1772,7 @@ static void convert_pfn_mfn(void *v)
1879 * of the physical mapping once some sort of allocator has been set 1772 * of the physical mapping once some sort of allocator has been set
1880 * up. 1773 * up.
1881 */ 1774 */
1882__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, 1775pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1883 unsigned long max_pfn) 1776 unsigned long max_pfn)
1884{ 1777{
1885 pud_t *l3; 1778 pud_t *l3;
@@ -1950,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
 
-static __init void xen_write_cr3_init(unsigned long cr3)
+static void __init xen_write_cr3_init(unsigned long cr3)
 {
 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
 
@@ -1987,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3)
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
-__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
@@ -2093,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-__init void xen_ident_map_ISA(void)
+void __init xen_ident_map_ISA(void)
 {
 	unsigned long pa;
 
@@ -2116,10 +2009,8 @@ __init void xen_ident_map_ISA(void)
 	xen_flush_tlb();
 }
 
-static __init void xen_post_allocator_init(void)
+static void __init xen_post_allocator_init(void)
 {
-	mark_rw_past_pgt();
-
 #ifdef CONFIG_XEN_DEBUG
 	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
 #endif
@@ -2155,7 +2046,7 @@ static void xen_leave_lazy_mmu(void)
 	preempt_enable();
 }
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.read_cr2 = xen_read_cr2,
 	.write_cr2 = xen_write_cr2,
 
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
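/*
 * The recurring change in the mmu.c hunks above is purely a sectioning and
 * placement cleanup: "__init" moves after the return type, and const op
 * tables switch from __initdata to __initconst so they land in .init.rodata.
 * A minimal stand-alone sketch of that convention follows; it is not part of
 * the patch, and the stub macros below merely stand in for the real
 * definitions in <linux/init.h>.
 */
#define __init      __attribute__((__section__(".init.text")))
#define __initconst __attribute__((__section__(".init.rodata")))

struct example_ops {
	int (*probe)(void);
};

/* Preferred placement: the annotation sits between the return type and the name. */
static int __init example_probe(void)
{
	return 0;
}

/* const data marked __initconst goes to .init.rodata and is discarded with init. */
static const struct example_ops example_init_ops __initconst = {
	.probe = example_probe,
};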
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 141eb0de8b06..58efeb9d5440 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
 	/* Boundary cross-over for the edges: */
 	if (idx) {
 		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		unsigned long *mid_mfn_p;
 
 		p2m_init(p2m);
 
 		p2m_top[topidx][mididx] = p2m;
 
+		/* For save/restore we need the MFN of the P2M saved */
+
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+			"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+			topidx, mididx);
+		mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
 	}
 	return idx != 0;
 }
@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
 	{
 		unsigned topidx = p2m_top_index(pfn);
-		if (p2m_top[topidx] == p2m_mid_missing) {
-			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		unsigned long *mid_mfn_p;
+		unsigned long **mid;
+
+		mid = p2m_top[topidx];
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+		if (mid == p2m_mid_missing) {
+			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
 
 			p2m_mid_init(mid);
 
 			p2m_top[topidx] = mid;
+
+			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+		}
+		/* And the save/restore P2M tables.. */
+		if (mid_mfn_p == p2m_mid_missing_mfn) {
+			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_mfn_init(mid_mfn_p);
+
+			p2m_top_mfn_p[topidx] = mid_mfn_p;
+			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+			/* Note: we don't set mid_mfn_p[mididx] here,
+			 * look in __early_alloc_p2m */
 		}
 	}
 
@@ -650,7 +676,7 @@ static unsigned long mfn_hash(unsigned long mfn)
 }
 
 /* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page)
+int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
 {
 	unsigned long flags;
 	unsigned long pfn;
@@ -662,7 +688,6 @@ int m2p_add_override(unsigned long mfn, struct page *page)
 	if (!PageHighMem(page)) {
 		address = (unsigned long)__va(pfn << PAGE_SHIFT);
 		ptep = lookup_address(address, &level);
-
 		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
 			"m2p_add_override: pfn %lx not mapped", pfn))
 			return -EINVAL;
@@ -674,18 +699,17 @@ int m2p_add_override(unsigned long mfn, struct page *page)
 	if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
 		return -ENOMEM;
 
-	if (!PageHighMem(page))
+	if (clear_pte && !PageHighMem(page))
 		/* Just zap old mapping for now */
 		pte_clear(&init_mm, address, ptep);
-
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
 
 	return 0;
 }
-
-int m2p_remove_override(struct page *page)
+EXPORT_SYMBOL_GPL(m2p_add_override);
+int m2p_remove_override(struct page *page, bool clear_pte)
 {
 	unsigned long flags;
 	unsigned long mfn;
@@ -713,7 +737,7 @@ int m2p_remove_override(struct page *page)
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
 	set_phys_to_machine(pfn, page->index);
 
-	if (!PageHighMem(page))
+	if (clear_pte && !PageHighMem(page))
 		set_pte_at(&init_mm, address, ptep,
 			pfn_pte(pfn, PAGE_KERNEL));
 	/* No tlb flush necessary because the caller already
@@ -721,6 +745,7 @@ int m2p_remove_override(struct page *page)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(m2p_remove_override);
 
 struct page *m2p_find_override(unsigned long mfn)
 {
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 90bac0aac3a5..be1a464f6d66 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -50,7 +50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
  */
 #define EXTRA_MEM_RATIO		(10)
 
-static __init void xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(unsigned long pages)
 {
 	unsigned long pfn;
 
@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
 		if (last > end)
 			continue;
 
-		if (entry->type == E820_RAM) {
+		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
 			if (start > start_pci)
 				identity += set_phys_range_identity(
 						PFN_UP(start_pci), PFN_DOWN(start));
@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void)
 
 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
+#ifdef CONFIG_X86_32
+	xen_extra_mem_start = mem_end;
+#else
 	xen_extra_mem_start = max((1ULL << 32), mem_end);
+#endif
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;
 
@@ -336,7 +340,7 @@ static void __init fiddle_vdso(void)
 #endif
 }
 
-static __cpuinit int register_callback(unsigned type, const void *func)
+static int __cpuinit register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
 		.type = type,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 762b46ab14d5..41038c01de40 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -56,7 +56,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup(void)
+static void __cpuinit cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
@@ -84,7 +84,7 @@ static __cpuinit void cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_idle();
@@ -241,7 +241,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static __cpuinit int
+static int __cpuinit
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -485,7 +485,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static const struct smp_ops xen_smp_ops __initdata = {
+static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
 	.smp_cpus_done = xen_smp_cpus_done,
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 2e2d370a47b1..bd4ffd7d9589 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -439,11 +439,11 @@ void xen_timer_resume(void)
 	}
 }
 
-static const struct pv_time_ops xen_time_ops __initdata = {
+static const struct pv_time_ops xen_time_ops __initconst = {
 	.sched_clock = xen_clocksource_read,
 };
 
-static __init void xen_time_init(void)
+static void __init xen_time_init(void)
 {
 	int cpu = smp_processor_id();
 	struct timespec tp;
@@ -468,7 +468,7 @@ static __init void xen_time_init(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
 {
 	pv_time_ops = xen_time_ops;
 
@@ -490,7 +490,7 @@ static void xen_hvm_setup_cpu_clockevents(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_hvm_init_time_ops(void)
+void __init xen_hvm_init_time_ops(void)
 {
 	/* vector callback is needed otherwise we cannot receive interrupts
 	 * on cpu > 0 and at this point we don't know how many cpus are
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3112f55638c4..97dfdc8757b3 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {}
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init xen_init_spinlocks(void);
-__cpuinit void xen_init_lock_cpu(int cpu);
+void __cpuinit xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 #else
 static inline void xen_init_spinlocks(void)