Diffstat (limited to 'arch')
 arch/arc/Kconfig                                   |    8
 arch/arc/Makefile                                  |   28
 arch/arc/configs/fpga_defconfig                    |    2
 arch/arc/configs/nsimosci_defconfig                |    2
 arch/arc/configs/tb10x_defconfig                   |    2
 arch/arc/include/asm/arcregs.h                     |  127
 arch/arc/include/asm/bug.h                         |    5
 arch/arc/include/asm/cache.h                       |   26
 arch/arc/include/asm/cacheflush.h                  |   13
 arch/arc/include/asm/defines.h                     |   56
 arch/arc/include/asm/entry.h                       |  521
 arch/arc/include/asm/irq.h                         |    2
 arch/arc/include/asm/irqflags.h                    |   20
 arch/arc/include/asm/kgdb.h                        |    4
 arch/arc/include/asm/kprobes.h                     |    6
 arch/arc/include/asm/mmu.h                         |   44
 arch/arc/include/asm/page.h                        |    7
 arch/arc/include/asm/pgtable.h                     |    6
 arch/arc/include/asm/processor.h                   |   17
 arch/arc/include/asm/ptrace.h                      |   47
 arch/arc/include/asm/syscall.h                     |    5
 arch/arc/include/asm/tlb-mmu1.h                    |    4
 arch/arc/include/asm/tlb.h                         |   26
 arch/arc/include/asm/unaligned.h                   |    4
 arch/arc/include/uapi/asm/ptrace.h                 |   15
 arch/arc/kernel/asm-offsets.c                      |    7
 arch/arc/kernel/ctx_sw.c                           |   14
 arch/arc/kernel/entry.S                            |  103
 arch/arc/kernel/head.S                             |    2
 arch/arc/kernel/irq.c                              |   16
 arch/arc/kernel/kgdb.c                             |    4
 arch/arc/kernel/kprobes.c                          |    5
 arch/arc/kernel/process.c                          |    9
 arch/arc/kernel/ptrace.c                           |   14
 arch/arc/kernel/setup.c                            |   18
 arch/arc/kernel/smp.c                              |    4
 arch/arc/kernel/stacktrace.c                       |    2
 arch/arc/kernel/time.c                             |   17
 arch/arc/kernel/traps.c                            |   52
 arch/arc/kernel/troubleshoot.c                     |   31
 arch/arc/kernel/unaligned.c                        |    2
 arch/arc/kernel/unwind.c                           |    2
 arch/arc/kernel/vmlinux.lds.S                      |   24
 arch/arc/mm/cache_arc700.c                         |  126
 arch/arc/mm/fault.c                                |   12
 arch/arc/mm/tlb.c                                  |   38
 arch/arc/mm/tlbex.S                                |   35
 arch/arc/plat-arcfpga/platform.c                   |   12
 arch/arm/boot/compressed/Makefile                  |    2
 arch/arm/include/asm/kvm_arch_timer.h              |   85
 arch/arm/include/asm/kvm_arm.h                     |    1
 arch/arm/include/asm/kvm_asm.h                     |   24
 arch/arm/include/asm/kvm_emulate.h                 |    5
 arch/arm/include/asm/kvm_host.h                    |   13
 arch/arm/include/asm/kvm_vgic.h                    |  220
 arch/arm/kvm/Kconfig                               |    8
 arch/arm/kvm/Makefile                              |    7
 arch/arm/kvm/arch_timer.c                          |  273
 arch/arm/kvm/arm.c                                 |    8
 arch/arm/kvm/coproc.c                              |    4
 arch/arm/kvm/handle_exit.c                         |    3
 arch/arm/kvm/interrupts.S                          |   16
 arch/arm/kvm/interrupts_head.S                     |   10
 arch/arm/kvm/mmio.c                                |    6
 arch/arm/kvm/mmu.c                                 |    3
 arch/arm/kvm/psci.c                                |    2
 arch/arm/kvm/reset.c                               |   12
 arch/arm/kvm/vgic.c                                | 1499
 arch/arm/mach-davinci/Kconfig                      |    1
 arch/arm/mach-davinci/da850.c                      |    8
 arch/arm/mach-omap2/omap_device.c                  |    7
 arch/arm/mach-omap2/smartreflex-class3.c           |    8
 arch/arm/mach-pxa/Kconfig                          |    3
 arch/arm/mach-s3c24xx/cpufreq-utils.c              |    2
 arch/arm/mach-s3c24xx/pll-s3c2410.c                |   54
 arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c       |   54
 arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c       |  110
 arch/arm/mach-shmobile/clock-sh7372.c              |    6
 arch/arm/mach-tegra/Kconfig                        |    3
 arch/arm/mach-tegra/common.c                       |    4
 arch/arm/mach-ux500/cpu.c                          |    6
 arch/arm/plat-samsung/include/plat/cpu-freq-core.h |    2
 arch/avr32/mach-at32ap/at32ap700x.c                |    2
 arch/c6x/include/asm/Kbuild                        |    1
 arch/cris/Kconfig                                  |   34
 arch/cris/arch-v10/kernel/kgdb.c                   |  870
 arch/cris/arch-v32/drivers/Kconfig                 |    7
 arch/cris/include/asm/Kbuild                       |    4
 arch/cris/include/asm/io.h                         |    3
 arch/cris/include/asm/linkage.h                    |    6
 arch/frv/kernel/head.S                             |    5
 arch/h8300/Kconfig                                 |  118
 arch/h8300/Kconfig.cpu                             |    4
 arch/h8300/boot/compressed/Makefile                |    2
 arch/h8300/include/asm/Kbuild                      |    2
 arch/h8300/include/asm/barrier.h                   |    2
 arch/h8300/include/asm/linkage.h                   |    6
 arch/h8300/include/asm/tlb.h                       |   15
 arch/h8300/kernel/entry.S                          |  118
 arch/h8300/kernel/syscalls.S                       |  648
 arch/h8300/lib/abs.S                               |    4
 arch/h8300/lib/memcpy.S                            |    4
 arch/h8300/lib/memset.S                            |    4
 arch/h8300/platform/h8300h/aki3068net/crt0_ram.S   |   16
 arch/h8300/platform/h8300h/generic/crt0_ram.S      |   14
 arch/h8300/platform/h8300h/generic/crt0_rom.S      |   14
 arch/h8300/platform/h8300h/h8max/crt0_ram.S        |   16
 arch/h8300/platform/h8s/edosk2674/crt0_ram.S       |   16
 arch/h8300/platform/h8s/edosk2674/crt0_rom.S       |   14
 arch/h8300/platform/h8s/generic/crt0_ram.S         |   16
 arch/h8300/platform/h8s/generic/crt0_rom.S         |   12
 arch/ia64/hp/common/sba_iommu.c                    |   24
 arch/ia64/hp/sim/simscsi.c                         |    4
 arch/ia64/include/asm/pci.h                        |   10
 arch/ia64/kernel/acpi.c                            |    4
 arch/ia64/kernel/err_inject.c                      |    8
 arch/ia64/kernel/mca.c                             |   12
 arch/ia64/kernel/numa.c                            |    4
 arch/ia64/kernel/palinfo.c                         |    4
 arch/ia64/kernel/pci-dma.c                         |    9
 arch/ia64/kernel/perfmon.c                         |   20
 arch/ia64/kernel/salinfo.c                         |    4
 arch/ia64/kernel/setup.c                           |   10
 arch/ia64/kernel/smpboot.c                         |    8
 arch/ia64/kernel/topology.c                        |   18
 arch/ia64/kernel/traps.c                           |    2
 arch/ia64/kvm/Makefile                             |    7
 arch/ia64/mm/contig.c                              |    3
 arch/ia64/mm/discontig.c                           |    2
 arch/ia64/mm/numa.c                                |    2
 arch/ia64/pci/pci.c                                |  239
 arch/ia64/sn/kernel/io_init.c                      |  122
 arch/ia64/sn/kernel/setup.c                        |    8
 arch/ia64/xen/hypervisor.c                         |    2
 arch/m68k/Kconfig.debug                            |    3
 arch/m68k/configs/multi_defconfig                  |    2
 arch/m68k/configs/q40_defconfig                    |    6
 arch/m68k/include/asm/parport.h                    |    2
 arch/m68k/include/asm/string.h                     |   32
 arch/m68k/include/asm/uaccess_mm.h                 |    8
 arch/m68k/kernel/asm-offsets.c                     |    2
 arch/m68k/kernel/ints.c                            |    2
 arch/m68k/lib/Makefile                             |    2
 arch/m68k/lib/string.c                             |   22
 arch/m68k/lib/uaccess.c                            |    6
 arch/m68k/math-emu/fp_arith.c                      |    2
 arch/m68k/platform/coldfire/pci.c                  |    1
 arch/m68k/sun3/sun3dvma.c                          |    2
 arch/mips/loongson/lemote-2f/clock.c               |    3
 arch/openrisc/include/asm/Kbuild                   |    1
 arch/powerpc/include/asm/kvm_book3s.h              |    6
 arch/powerpc/kernel/pci_of_scan.c                  |    5
 arch/powerpc/kvm/Makefile                          |   13
 arch/powerpc/kvm/book3s_64_mmu.c                   |   81
 arch/powerpc/kvm/book3s_64_mmu_host.c              |   21
 arch/powerpc/kvm/book3s_64_slb.S                   |   13
 arch/powerpc/kvm/book3s_pr.c                       |    3
 arch/powerpc/kvm/booke.c                           |    2
 arch/powerpc/kvm/emulate.c                         |    3
 arch/powerpc/platforms/Kconfig                     |   31
 arch/powerpc/platforms/Kconfig.cputype             |    1
 arch/powerpc/platforms/pasemi/Makefile             |    1
 arch/powerpc/platforms/pasemi/cpufreq.c            |  330
 arch/powerpc/platforms/powermac/Makefile           |    2
 arch/powerpc/platforms/powermac/cpufreq_32.c       |  721
 arch/powerpc/platforms/powermac/cpufreq_64.c       |  746
 arch/s390/appldata/appldata_mem.c                  |   18
 arch/s390/appldata/appldata_net_sum.c              |   18
 arch/s390/hypfs/hypfs_diag.c                       |    8
 arch/s390/include/asm/airq.h                       |   15
 arch/s390/include/asm/dma-mapping.h                |    2
 arch/s390/include/asm/facility.h                   |   17
 arch/s390/include/asm/io.h                         |   22
 arch/s390/include/asm/kvm_host.h                   |   18
 arch/s390/include/asm/pci.h                        |    2
 arch/s390/include/asm/perf_event.h                 |   10
 arch/s390/include/asm/pgalloc.h                    |    3
 arch/s390/include/asm/pgtable.h                    |   83
 arch/s390/include/asm/ptrace.h                     |    1
 arch/s390/include/uapi/asm/Kbuild                  |    1
 arch/s390/include/uapi/asm/chsc.h                  |   13
 arch/s390/include/uapi/asm/dasd.h                  |    4
 arch/s390/include/uapi/asm/sclp_ctl.h              |   24
 arch/s390/kernel/asm-offsets.c                     |    4
 arch/s390/kernel/entry.S                           |   12
 arch/s390/kernel/entry.h                           |    2
 arch/s390/kernel/entry64.S                         |   97
 arch/s390/kernel/irq.c                             |    8
 arch/s390/kernel/perf_event.c                      |   52
 arch/s390/kernel/s390_ksyms.c                      |    1
 arch/s390/kernel/smp.c                             |    5
 arch/s390/kvm/Makefile                             |    3
 arch/s390/kvm/diag.c                               |    3
 arch/s390/kvm/intercept.c                          |  124
 arch/s390/kvm/interrupt.c                          |   18
 arch/s390/kvm/kvm-s390.c                           |  105
 arch/s390/kvm/kvm-s390.h                           |   14
 arch/s390/kvm/priv.c                               |  274
 arch/s390/kvm/sigp.c                               |   19
 arch/s390/mm/pgtable.c                             |   50
 arch/s390/oprofile/hwsampler.h                     |    4
 arch/s390/pci/pci.c                                |   83
 arch/s390/pci/pci_clp.c                            |    1
 arch/s390/pci/pci_debug.c                          |   29
 arch/s390/pci/pci_dma.c                            |    6
 arch/s390/pci/pci_sysfs.c                          |   20
 arch/score/include/asm/Kbuild                      |    1
 arch/score/include/asm/dma-mapping.h               |    6
 arch/sparc/kernel/pci.c                            |    5
 arch/unicore32/boot/compressed/Makefile            |    2
 arch/unicore32/kernel/pci.c                        |    5
 arch/x86/include/asm/acpi.h                        |    2
 arch/x86/include/asm/kvm_host.h                    |   15
 arch/x86/kernel/acpi/boot.c                        |    7
 arch/x86/kernel/acpi/sleep.c                       |    4
 arch/x86/kernel/acpi/sleep.h                       |    2
 arch/x86/kernel/cpu/mcheck/therm_throt.c           |   43
 arch/x86/kvm/Makefile                              |   13
 arch/x86/kvm/emulate.c                             |  391
 arch/x86/kvm/lapic.c                               |    4
 arch/x86/kvm/mmu.c                                 |  301
 arch/x86/kvm/mmu.h                                 |   18
 arch/x86/kvm/mmutrace.h                            |   76
 arch/x86/kvm/paging_tmpl.h                         |   10
 arch/x86/kvm/svm.c                                 |   10
 arch/x86/kvm/trace.h                               |   21
 arch/x86/kvm/vmx.c                                 |   19
 arch/x86/lguest/Makefile                           |    2
 arch/x86/lguest/head_32.S (renamed from arch/x86/lguest/i386_head.S) | 0
 arch/x86/kvm/x86.c                                 |   80
 arch/x86/pci/acpi.c                                |    7
 arch/x86/xen/smp.c                                 |   91
 arch/x86/xen/spinlock.c                            |    7
 arch/x86/xen/time.c                                |   58
 234 files changed, 3461 insertions(+), 7525 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5917099470ea..4a0e54fc01b2 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -184,6 +184,7 @@ config ARC_CACHE_PAGES
 
 config ARC_CACHE_VIPT_ALIASING
 	bool "Support VIPT Aliasing D$"
+	depends on ARC_HAS_DCACHE
 	default n
 
 endif	#ARC_CACHE
@@ -361,13 +362,6 @@ config ARC_MISALIGN_ACCESS
 	  Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
 	  potential bugs in code
 
-config ARC_STACK_NONEXEC
-	bool "Make stack non-executable"
-	default n
-	help
-	  To disable the execute permissions of stack/heap of processes
-	  which are enabled by default.
-
 config HZ
 	int "Timer Frequency"
 	default 100
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 183397fd289e..8c0b1aa56f7e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -9,25 +9,27 @@
 UTS_MACHINE := arc
 
 ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := arc-elf32-
+CROSS_COMPILE := arc-linux-uclibc-
 endif
 
 KBUILD_DEFCONFIG := fpga_defconfig
 
 cflags-y	+= -mA7 -fno-common -pipe -fno-builtin -D__linux__
 
-LINUXINCLUDE	+=  -include ${src}/arch/arc/include/asm/defines.h
-
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
 # any kernel headers, and missing the r25 global register
-# Can't do unconditionally (like above) because of recursive include issues
+# Can't do unconditionally because of recursive include issues
 # due to <linux/thread_info.h>
 LINUXINCLUDE	+=  -include ${src}/arch/arc/include/asm/current.h
 endif
 
-atleast_gcc44 :=  $(call cc-ifversion, -gt, 0402, y)
+upto_gcc42    :=  $(call cc-ifversion, -le, 0402, y)
+upto_gcc44    :=  $(call cc-ifversion, -le, 0404, y)
+atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
+atleast_gcc48 :=  $(call cc-ifversion, -ge, 0408, y)
+
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
@@ -35,6 +37,11 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
 cflags-$(CONFIG_ARC_HAS_RTSC)		+= -mrtsc
 cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
 
+# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
+ifeq ($(atleast_gcc48),y)
+cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -gdwarf-2
+endif
+
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 cflags-y  += -O3
@@ -48,11 +55,10 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)	+= -EB
 
-# STAR 9000518362:
+# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
 # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux".
-# Default arc-elf32-ld is OK
-ldflags-y += -marclinux
+# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
+ldflags-$(upto_gcc44)			+= -marclinux
 
 ARC_LIBGCC		:= -mA7
 cflags-$(CONFIG_ARC_HAS_HW_MPY)	+= -multcost=16
@@ -66,8 +72,8 @@ ifndef CONFIG_ARC_HAS_HW_MPY
 # With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
 # e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
 
-	ARC_LIBGCC := -marc600
-	ifneq ($(atleast_gcc44),y)
+	ifeq ($(upto_gcc42),y)
+	ARC_LIBGCC := -marc600
 	cflags-y += -multcost=30
 	endif
 endif
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
index 95350be6ef6f..c109af320274 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/fpga_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-elf32-"
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 446c96c24eff..451af30914f6 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-elf32-"
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 4fa5cd9f2202..6be6492442d6 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-elf32-"
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="tb10x"
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 1b907c465666..355cb470c2a4 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -20,7 +20,6 @@
 #define ARC_REG_PERIBASE_BCR	0x69
 #define ARC_REG_FP_BCR		0x6B	/* Single-Precision FPU */
 #define ARC_REG_DPFP_BCR	0x6C	/* Dbl Precision FPU */
-#define ARC_REG_MMU_BCR		0x6f
 #define ARC_REG_DCCM_BCR	0x74	/* DCCM Present + SZ */
 #define ARC_REG_TIMERS_BCR	0x75
 #define ARC_REG_ICCM_BCR	0x78
@@ -34,22 +33,12 @@
 #define ARC_REG_D_UNCACH_BCR	0x6A
 
 /* status32 Bits Positions */
-#define STATUS_H_BIT		0	/* CPU Halted */
-#define STATUS_E1_BIT		1	/* Int 1 enable */
-#define STATUS_E2_BIT		2	/* Int 2 enable */
-#define STATUS_A1_BIT		3	/* Int 1 active */
-#define STATUS_A2_BIT		4	/* Int 2 active */
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
 #define STATUS_L_BIT		12	/* Loop inhibit */
 
 /* These masks correspond to the status word(STATUS_32) bits */
-#define STATUS_H_MASK		(1<<STATUS_H_BIT)
-#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
-#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
-#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
-#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
@@ -71,6 +60,7 @@
 #define ECR_V_ITLB_MISS		0x21
 #define ECR_V_DTLB_MISS		0x22
 #define ECR_V_PROTV		0x23
+#define ECR_V_TRAP		0x25
 
 /* Protection Violation Exception Cause Code Values */
 #define ECR_C_PROTV_INST_FETCH	0x00
@@ -79,94 +69,23 @@
 #define ECR_C_PROTV_XCHG	0x03
 #define ECR_C_PROTV_MISALIG_DATA	0x04
 
+#define ECR_C_BIT_PROTV_MISALIG_DATA	10
+
+/* Machine Check Cause Code Values */
+#define ECR_C_MCHK_DUP_TLB	0x01
+
 /* DTLB Miss Exception Cause Code Values */
 #define ECR_C_BIT_DTLB_LD_MISS		8
 #define ECR_C_BIT_DTLB_ST_MISS		9
 
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1		0x0031abcd
+#define event_IRQ2		0x0032abcd
 
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
-#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
-#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
-#define AUX_IRQ_LV12		0x43	/* interrupt level register */
-
-#define AUX_IENABLE		0x40c
-#define AUX_ITRIGGER		0x40d
-#define AUX_IPULSE		0x415
-
-/* Timer related Aux registers */
-#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
-#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
-#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
-#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
-#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
-#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
-
-#define TIMER_CTRL_IE		(1 << 0) /* Interupt when Count reachs limit */
-#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */
-
-/* MMU Management regs */
-#define ARC_REG_TLBPD0		0x405
-#define ARC_REG_TLBPD1		0x406
-#define ARC_REG_TLBINDEX	0x407
-#define ARC_REG_TLBCOMMAND	0x408
-#define ARC_REG_PID		0x409
-#define ARC_REG_SCRATCH_DATA0	0x418
-
-/* Bits in MMU PID register */
-#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
-
-/* Error code if probe fails */
-#define TLB_LKUP_ERR		0x80000000
-
-/* TLB Commands */
-#define TLBWrite	0x1
-#define TLBRead		0x2
-#define TLBGetIndex	0x3
-#define TLBProbe	0x4
-
-#if (CONFIG_ARC_MMU_VER >= 2)
-#define TLBWriteNI	0x5	/* write JTLB without inv uTLBs */
-#define TLBIVUTLB	0x6	/* explicitly inv uTLBs */
-#else
-#undef TLBWriteNI	/* These cmds don't exist on older MMU */
-#undef TLBIVUTLB
-#endif
 
-/* Instruction cache related Auxiliary registers */
-#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
-#define ARC_REG_IC_IVIC		0x10
-#define ARC_REG_IC_CTRL		0x11
-#define ARC_REG_IC_IVIL		0x19
-#if (CONFIG_ARC_MMU_VER > 2)
-#define ARC_REG_IC_PTAG		0x1E
-#endif
-
-/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE	0x1
-
-/* Data cache related Auxiliary registers */
-#define ARC_REG_DC_BCR		0x72
-#define ARC_REG_DC_IVDC		0x47
-#define ARC_REG_DC_CTRL		0x48
-#define ARC_REG_DC_IVDL		0x4A
-#define ARC_REG_DC_FLSH		0x4B
-#define ARC_REG_DC_FLDL		0x4C
-#if (CONFIG_ARC_MMU_VER > 2)
-#define ARC_REG_DC_PTAG		0x5C
-#endif
-
-/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH	0x40
-#define DC_CTRL_FLUSH_STATUS	0x100
-
-/* MMU Management regs */
-#define ARC_REG_PID		0x409
-#define ARC_REG_SCRATCH_DATA0	0x418
-
-/* Bits in MMU PID register */
-#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
 
 /*
  * Floating Pt Registers
@@ -293,24 +212,6 @@ struct bcr_identity {
 #endif
 };
 
-struct bcr_mmu_1_2 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
-#else
-	unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
-#endif
-};
-
-struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
-		     u_itlb:4, u_dtlb:4;
-#else
-	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
-		     ways:4, ver:8;
-#endif
-};
-
 #define EXTN_SWAP_VALID		0x1
 #define EXTN_NORM_VALID		0x2
 #define EXTN_MINMAX_VALID	0x2
@@ -343,14 +244,6 @@ struct bcr_extn_xymem {
 #endif
 };
 
-struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
-	unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
-};
-
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int start:8, pad2:8, sz:8, pad:8;
@@ -403,7 +296,7 @@ struct cpuinfo_arc_mmu {
 };
 
 struct cpuinfo_arc_cache {
-	unsigned int has_aliasing, sz, line_len, assoc, ver;
+	unsigned int sz, line_len, assoc, ver;
 };
 
 struct cpuinfo_arc_ccm {
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 2ad8f9b1c54b..5b18e94c6678 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -18,9 +18,8 @@ struct task_struct;
 void show_regs(struct pt_regs *regs);
 void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
-			    unsigned long address, unsigned long cause_reg);
-void die(const char *str, struct pt_regs *regs, unsigned long address,
-	 unsigned long cause_reg);
+			    unsigned long address);
+void die(const char *str, struct pt_regs *regs, unsigned long address);
 
 #define BUG()	do { \
 	dump_stack(); \
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index d5555fe4742a..5802849a6cae 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -18,21 +18,19 @@
 
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
-#define ARC_ICACHE_WAYS	2
-#define ARC_DCACHE_WAYS	4
-
-/* Helpers */
+/* For a rare case where customers have differently config I/D */
 #define ARC_ICACHE_LINE_LEN	L1_CACHE_BYTES
 #define ARC_DCACHE_LINE_LEN	L1_CACHE_BYTES
 
 #define ICACHE_LINE_MASK	(~(ARC_ICACHE_LINE_LEN - 1))
 #define DCACHE_LINE_MASK	(~(ARC_DCACHE_LINE_LEN - 1))
 
-#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
-#error "Need to fix some code as I/D cache lines not same"
-#else
-#define is_not_cache_aligned(p)	((unsigned long)p & (~DCACHE_LINE_MASK))
-#endif
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE	0xc0000000
 
 #ifndef __ASSEMBLY__
 
@@ -57,16 +55,10 @@
 
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
-/*
- * ARC700 doesn't cache any access in top 256M.
- * Ideal for wiring memory mapped peripherals as we don't need to do
- * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
- */
-#define ARC_UNCACHED_ADDR_SPACE	0xc0000000
-
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void __init read_decode_cache_bcr(void);
-#endif
+
+#endif	/* !__ASSEMBLY__ */
 
 #endif	/* _ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ef62682e8d95..6abc4972bc93 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -81,16 +81,19 @@ void flush_anon_page(struct vm_area_struct *vma,
 #endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
 
 /*
+ * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
+ * This works around some PIO based drivers which don't call flush_dcache_page
+ * to record that they dirtied the dcache
+ */
+#define PG_dc_clean	PG_arch_1
+
+/*
  * Simple wrapper over config option
  * Bootup code ensures that hardware matches kernel configuration
  */
 static inline int cache_is_vipt_aliasing(void)
 {
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-	return 1;
-#else
-	return 0;
-#endif
+	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 }
 
 #define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
deleted file mode 100644
index 6097bb439cc5..000000000000
--- a/arch/arc/include/asm/defines.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARC_ASM_DEFINES_H__
-#define __ARC_ASM_DEFINES_H__
-
-#if defined(CONFIG_ARC_MMU_V1)
-#define CONFIG_ARC_MMU_VER 1
-#elif defined(CONFIG_ARC_MMU_V2)
-#define CONFIG_ARC_MMU_VER 2
-#elif defined(CONFIG_ARC_MMU_V3)
-#define CONFIG_ARC_MMU_VER 3
-#endif
-
-#ifdef CONFIG_ARC_HAS_LLSC
-#define __CONFIG_ARC_HAS_LLSC_VAL 1
-#else
-#define __CONFIG_ARC_HAS_LLSC_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_SWAPE
-#define __CONFIG_ARC_HAS_SWAPE_VAL 1
-#else
-#define __CONFIG_ARC_HAS_SWAPE_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_RTSC
-#define __CONFIG_ARC_HAS_RTSC_VAL 1
-#else
-#define __CONFIG_ARC_HAS_RTSC_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_MMU_SASID
-#define __CONFIG_ARC_MMU_SASID_VAL 1
-#else
-#define __CONFIG_ARC_MMU_SASID_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_ICACHE
-#define __CONFIG_ARC_HAS_ICACHE 1
-#else
-#define __CONFIG_ARC_HAS_ICACHE 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_DCACHE
-#define __CONFIG_ARC_HAS_DCACHE 1
-#else
-#define __CONFIG_ARC_HAS_DCACHE 0
-#endif
-
-#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index eb2ae53187d9..8943c028d4bb 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -50,194 +50,177 @@
  * Eff Addr for load = [reg2]
  */
 
+.macro PUSH reg
+	st.a	\reg, [sp, -4]
+.endm
+
+.macro PUSHAX aux
+	lr	r9, [\aux]
+	PUSH	r9
+.endm
+
+.macro POP reg
+	ld.ab	\reg, [sp, 4]
+.endm
+
+.macro POPAX aux
+	POP	r9
+	sr	r9, [\aux]
+.endm
+
 /*--------------------------------------------------------------
- * Save caller saved registers (scratch registers) ( r0 - r12 )
- * Registers are pushed / popped in the order defined in struct ptregs
- * in asm/ptrace.h
+ * Helpers to save/restore Scratch Regs:
+ * used by Interrupt/Exception Prologue/Epilogue
  *-------------------------------------------------------------*/
-.macro SAVE_CALLER_SAVED
-	st.a    r0, [sp, -4]
-	st.a    r1, [sp, -4]
-	st.a    r2, [sp, -4]
-	st.a    r3, [sp, -4]
-	st.a    r4, [sp, -4]
-	st.a    r5, [sp, -4]
-	st.a    r6, [sp, -4]
-	st.a    r7, [sp, -4]
-	st.a    r8, [sp, -4]
-	st.a    r9, [sp, -4]
-	st.a    r10, [sp, -4]
-	st.a    r11, [sp, -4]
-	st.a    r12, [sp, -4]
+.macro SAVE_R0_TO_R12
+	PUSH	r0
+	PUSH	r1
+	PUSH	r2
+	PUSH	r3
+	PUSH	r4
+	PUSH	r5
+	PUSH	r6
+	PUSH	r7
+	PUSH	r8
+	PUSH	r9
+	PUSH	r10
+	PUSH	r11
+	PUSH	r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+	POP	r12
+	POP	r11
+	POP	r10
+	POP	r9
+	POP	r8
+	POP	r7
+	POP	r6
+	POP	r5
+	POP	r4
+	POP	r3
+	POP	r2
+	POP	r1
+	POP	r0
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	ld	r25, [sp, 12]
+#endif
 .endm
 
 /*--------------------------------------------------------------
- * Restore caller saved registers (scratch registers)
+ * Helpers to save/restore callee-saved regs:
+ * used by several macros below
  *-------------------------------------------------------------*/
-.macro RESTORE_CALLER_SAVED
-	ld.ab   r12, [sp, 4]
-	ld.ab   r11, [sp, 4]
-	ld.ab   r10, [sp, 4]
-	ld.ab   r9, [sp, 4]
-	ld.ab   r8, [sp, 4]
-	ld.ab   r7, [sp, 4]
-	ld.ab   r6, [sp, 4]
-	ld.ab   r5, [sp, 4]
-	ld.ab   r4, [sp, 4]
-	ld.ab   r3, [sp, 4]
-	ld.ab   r2, [sp, 4]
-	ld.ab   r1, [sp, 4]
-	ld.ab   r0, [sp, 4]
+.macro SAVE_R13_TO_R24
+	PUSH	r13
+	PUSH	r14
+	PUSH	r15
+	PUSH	r16
+	PUSH	r17
+	PUSH	r18
+	PUSH	r19
+	PUSH	r20
+	PUSH	r21
+	PUSH	r22
+	PUSH	r23
+	PUSH	r24
+.endm
+
+.macro RESTORE_R24_TO_R13
+	POP	r24
+	POP	r23
+	POP	r22
+	POP	r21
+	POP	r20
+	POP	r19
+	POP	r18
+	POP	r17
+	POP	r16
+	POP	r15
+	POP	r14
+	POP	r13
 .endm
 
+#define OFF_USER_R25_FROM_R24	(SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
 
 /*--------------------------------------------------------------
- * Save callee saved registers (non scratch registers) ( r13 - r25 )
- * on kernel stack.
- * User mode callee regs need to be saved in case of
- *    -fork and friends for replicating from parent to child
- *    -before going into do_signal( ) for ptrace/core-dump
- * Special case handling is required for r25 in case it is used by kernel
- * for caching task ptr. Low level exception/ISR save user mode r25
- * into task->thread.user_r25. So it needs to be retrieved from there and
- * saved into kernel stack with rest of callee reg-file
+ * Collect User Mode callee regs as struct callee_regs - needed by
+ * fork/do_signal/unaligned-access-emulation.
+ * (By default only scratch regs are saved on entry to kernel)
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * It would have been saved in task->thread.user_r25 already, but to keep
+ * the interface same it is copied into regular r25 placeholder in
+ * struct callee_regs.
  *-------------------------------------------------------------*/
 .macro SAVE_CALLEE_SAVED_USER
-	st.a    r13, [sp, -4]
-	st.a    r14, [sp, -4]
-	st.a    r15, [sp, -4]
-	st.a    r16, [sp, -4]
-	st.a    r17, [sp, -4]
-	st.a    r18, [sp, -4]
-	st.a    r19, [sp, -4]
-	st.a    r20, [sp, -4]
-	st.a    r21, [sp, -4]
-	st.a    r22, [sp, -4]
-	st.a    r23, [sp, -4]
-	st.a    r24, [sp, -4]
+
+	SAVE_R13_TO_R24
 
 #ifdef CONFIG_ARC_CURR_IN_REG
 	; Retrieve orig r25 and save it on stack
-	ld      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+	ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
 	st.a    r12, [sp, -4]
 #else
-	st.a    r25, [sp, -4]
+	PUSH	r25
 #endif
 
-	/* move up by 1 word to "create" callee_regs->"stack_place_holder" */
-	sub sp, sp, 4
 .endm
 
 /*--------------------------------------------------------------
- * Save callee saved registers (non scratch registers) ( r13 - r25 )
- *  kernel mode callee regs needed to be saved in case of context switch
- * If r25 is used for caching task pointer then that need not be saved
- * as it can be re-created from current task global
+ * Save kernel Mode callee regs at the time of Contect Switch.
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * Kernel simply skips saving it since it will be loaded with
+ * incoming task pointer anyways
  *-------------------------------------------------------------*/
 .macro SAVE_CALLEE_SAVED_KERNEL
-	st.a    r13, [sp, -4]
-	st.a    r14, [sp, -4]
-	st.a    r15, [sp, -4]
-	st.a    r16, [sp, -4]
-	st.a    r17, [sp, -4]
-	st.a    r18, [sp, -4]
-	st.a    r19, [sp, -4]
-	st.a    r20, [sp, -4]
-	st.a    r21, [sp, -4]
-	st.a    r22, [sp, -4]
-	st.a    r23, [sp, -4]
-	st.a    r24, [sp, -4]
+
+	SAVE_R13_TO_R24
+
 #ifdef CONFIG_ARC_CURR_IN_REG
-	sub     sp, sp, 8
-#else
-	st.a    r25, [sp, -4]
 	sub     sp, sp, 4
+#else
+	PUSH	r25
 #endif
 .endm
 
 /*--------------------------------------------------------------
- * RESTORE_CALLEE_SAVED_KERNEL:
- * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
- *  This is reverse of SAVE_CALLEE_SAVED,
- *
- * NOTE:
- * Ideally this shd only be called in switch_to for loading
- *  switched-IN task's CALLEE Reg File.
- * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
- *  which simply pops the stack w/o touching regs.
+ * Opposite of SAVE_CALLEE_SAVED_KERNEL
  *-------------------------------------------------------------*/
 .macro RESTORE_CALLEE_SAVED_KERNEL
 
-
 #ifdef CONFIG_ARC_CURR_IN_REG
-	add     sp, sp, 8  /* skip callee_reg gutter and user r25 placeholder */
+	add     sp, sp, 4   /* skip usual r25 placeholder */
 #else
-	add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
-	ld.ab   r25, [sp, 4]
+	POP	r25
 #endif
-
-	ld.ab   r24, [sp, 4]
-	ld.ab   r23, [sp, 4]
-	ld.ab   r22, [sp, 4]
-	ld.ab   r21, [sp, 4]
-	ld.ab   r20, [sp, 4]
-	ld.ab   r19, [sp, 4]
-	ld.ab   r18, [sp, 4]
-	ld.ab   r17, [sp, 4]
-	ld.ab   r16, [sp, 4]
-	ld.ab   r15, [sp, 4]
-	ld.ab   r14, [sp, 4]
-	ld.ab   r13, [sp, 4]
-
+	RESTORE_R24_TO_R13
 .endm
 
 /*--------------------------------------------------------------
- * RESTORE_CALLEE_SAVED_USER:
- * This is called after do_signal where tracer might have changed callee regs
- * thus we need to restore the reg file.
- * Special case handling is required for r25 in case it is used by kernel
- * for caching task ptr. Ptrace would have modified on-kernel-stack value of
- * r25, which needs to be shoved back into task->thread.user_r25 where from
- * Low level exception/ISR return code will retrieve to populate with rest of
- * callee reg-file.
+ * Opposite of SAVE_CALLEE_SAVED_USER
+ *
+ * ptrace tracer or unaligned-access fixup might have changed a user mode
+ * callee reg which is saved back to usual r25 storage location
  *-------------------------------------------------------------*/
 .macro RESTORE_CALLEE_SAVED_USER
 
-	add sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
-
 #ifdef CONFIG_ARC_CURR_IN_REG
 	ld.ab   r12, [sp, 4]
-	st      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+	st.as   r12, [sp, OFF_USER_R25_FROM_R24]
 #else
-	ld.ab   r25, [sp, 4]
+	POP	r25
 #endif
-
-	ld.ab   r24, [sp, 4]
-	ld.ab   r23, [sp, 4]
-	ld.ab   r22, [sp, 4]
-	ld.ab   r21, [sp, 4]
-	ld.ab   r20, [sp, 4]
-	ld.ab   r19, [sp, 4]
-	ld.ab   r18, [sp, 4]
-	ld.ab   r17, [sp, 4]
-	ld.ab   r16, [sp, 4]
-	ld.ab   r15, [sp, 4]
-	ld.ab   r14, [sp, 4]
-	ld.ab   r13, [sp, 4]
+	RESTORE_R24_TO_R13
 .endm
 
 /*--------------------------------------------------------------
  * Super FAST Restore callee saved regs by simply re-adjusting SP
  *-------------------------------------------------------------*/
 .macro DISCARD_CALLEE_SAVED_USER
-	add     sp, sp, 14 * 4
-.endm
-
-/*--------------------------------------------------------------
- * Restore User mode r25 saved in task_struct->thread.user_r25
- *-------------------------------------------------------------*/
-.macro RESTORE_USER_R25
-	ld  r25, [r25, TASK_THREAD + THREAD_USER_R25]
+	add     sp, sp, SZ_CALLEE_REGS
 .endm
 
 /*-------------------------------------------------------------
@@ -252,7 +235,7 @@
 	ld  \out, [\tsk, TASK_THREAD_INFO]
 
 	/* Go to end of page where stack begins (grows upwards) */
-	add2 \out, \out, (THREAD_SIZE - 4)/4   /* one word GUTTER */
+	add2 \out, \out, (THREAD_SIZE)/4
 
 .endm
 
@@ -305,33 +288,28 @@
 	 * safe-keeping not really needed, but it keeps the epilogue code
 	 * (SP restore) simpler/uniform.
 	 */
-	b.d	77f
-
-	st.a	sp, [sp, -12]	; Make room for orig_r0 and orig_r8
+	b.d	66f
+	mov	r9, sp
 
 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
 
 	GET_CURR_TASK_ON_CPU   r9
 
-#ifdef CONFIG_ARC_CURR_IN_REG
-
-	/* If current task pointer cached in r25, time to
-	 *  -safekeep USER r25 in task->thread_struct->user_r25
-	 *  -load r25 with current task ptr
-	 */
-	st.as	r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
-	mov	r25, r9
-#endif
-
 	/* With current tsk in r9, get it's kernel mode stack base */
 	GET_TSK_STACK_BASE  r9, r9
 
-#ifdef PT_REGS_CANARY
-	st	0xabcdabcd, [r9, 0]
+66:
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/*
+	 * Treat r25 as scratch reg, save it on stack first
+	 * Load it with current task pointer
+	 */
+	st	r25, [r9, -4]
+	GET_CURR_TASK_ON_CPU   r25
 #endif
 
 	/* Save Pre Intr/Exception User SP on kernel stack */
-	st.a    sp, [r9, -12]	; Make room for orig_r0 and orig_r8
+	st.a    sp, [r9, -16]	; Make room for orig_r0, ECR, user_r25
 
 	/* CAUTION:
 	 * SP should be set at the very end when we are done with everything
@@ -342,7 +320,7 @@
 	/* set SP to point to kernel mode stack */
 	mov	sp, r9
 
-77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
 
 .endm
 
@@ -369,7 +347,7 @@
  * @reg [OUT] &thread_info of "current"
  */
 .macro GET_CURR_THR_INFO_FROM_SP  reg
-	and \reg, sp, ~(THREAD_SIZE - 1)
+	bic \reg, sp, (THREAD_SIZE - 1)
 .endm
 
 /*
@@ -413,62 +391,25 @@
  * Note that syscalls are implemented via TRAP which is also a exception
  * from CPU's point of view
  *-------------------------------------------------------------*/
-.macro SAVE_ALL_EXCEPTION   marker
+.macro SAVE_ALL_SYS
 
-	st      \marker, [sp, 8]	/* orig_r8 */
+	lr	r9, [ecr]
+	st      r9, [sp, 8]		/* ECR */
 	st      r0, [sp, 4]	/* orig_r0, needed only for sys calls */
 
 	/* Restore r9 used to code the early prologue */
 	EXCPN_PROLOG_RESTORE_REG  r9
 
-	SAVE_CALLER_SAVED
-	st.a    r26, [sp, -4]	/* gp */
-	st.a    fp, [sp, -4]
-	st.a    blink, [sp, -4]
-	lr	r9, [eret]
-	st.a    r9, [sp, -4]
-	lr	r9, [erstatus]
-	st.a    r9, [sp, -4]
-	st.a    lp_count, [sp, -4]
-	lr	r9, [lp_end]
-	st.a    r9, [sp, -4]
-	lr	r9, [lp_start]
-	st.a    r9, [sp, -4]
-	lr	r9, [erbta]
-	st.a    r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
-	mov	r9, 0xdeadbeef
-	st	r9, [sp, -4]
-#endif
-
-	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
-	sub	sp, sp, 4
-.endm
-
-/*--------------------------------------------------------------
- * Save scratch regs for exceptions
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_SYS
-	SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
-.endm
-
-/*--------------------------------------------------------------
- * Save scratch regs for sys calls
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_TRAP
-	/*
-	 * Setup pt_regs->orig_r8.
-	 * Encode syscall number (r8) in upper short word of event type (r9)
-	 * N.B. #1: This is already endian safe (see ptrace.h)
-	 *      #2: Only r9 can be used as scratch as it is already clobbered
-	 *          and it's contents are no longer needed by the latter part
-	 *          of exception prologue
-	 */
-	lsl  r9, r8, 16
-	or   r9, r9, orig_r8_IS_SCALL
-
-	SAVE_ALL_EXCEPTION  r9
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSHAX	eret
+	PUSHAX	erstatus
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	erbta
 .endm
 
 /*--------------------------------------------------------------
@@ -483,28 +424,22 @@
  * by hardware and that is not good.
  *-------------------------------------------------------------*/
 .macro RESTORE_ALL_SYS
+	POPAX	erbta
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
 
-	add sp, sp, 4	/* hop over unused "pt_regs->stack_place_holder" */
-
-	ld.ab   r9, [sp, 4]
-	sr	r9, [erbta]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_start]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_end]
-	ld.ab   r9, [sp, 4]
-	mov	lp_count, r9
-	ld.ab   r9, [sp, 4]
-	sr	r9, [erstatus]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [eret]
-	ld.ab   blink, [sp, 4]
-	ld.ab   fp, [sp, 4]
-	ld.ab   r26, [sp, 4]	/* gp */
-	RESTORE_CALLER_SAVED
+	POPAX	erstatus
+	POPAX	eret
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
 
 	ld  sp, [sp] /* restore original sp */
-	/* orig_r0 and orig_r8 skipped automatically */
+	/* orig_r0, ECR, user_r25 skipped automatically */
 .endm
 
 
@@ -513,9 +448,7 @@
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_INT1
 
-	/* restore original r9 , saved in int1_saved_reg
-	 * It will be saved on stack in macro: SAVE_CALLER_SAVED
-	 */
+	/* restore original r9 to be saved as part of reg-file */
 #ifdef CONFIG_SMP
 	lr  r9, [ARC_REG_SCRATCH_DATA0]
 #else
@@ -523,29 +456,19 @@
 #endif
 
 	/* now we are ready to save the remaining context :) */
-	st      orig_r8_IS_IRQ1, [sp, 8]    /* Event Type */
+	st      event_IRQ1, [sp, 8]    /* Dummy ECR */
 	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
-	SAVE_CALLER_SAVED
-	st.a    r26, [sp, -4]	/* gp */
-	st.a    fp, [sp, -4]
-	st.a    blink, [sp, -4]
-	st.a    ilink1, [sp, -4]
-	lr	r9, [status32_l1]
-	st.a    r9, [sp, -4]
-	st.a    lp_count, [sp, -4]
-	lr	r9, [lp_end]
-	st.a    r9, [sp, -4]
-	lr	r9, [lp_start]
-	st.a    r9, [sp, -4]
-	lr	r9, [bta_l1]
-	st.a    r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
-	mov	r9, 0xdeadbee1
-	st	r9, [sp, -4]
-#endif
-	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
-	sub	sp, sp, 4
+
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSH	ilink1
+	PUSHAX	status32_l1
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	bta_l1
 .endm
 
 .macro SAVE_ALL_INT2
@@ -558,30 +481,19 @@
 	ld  r9, [@int2_saved_reg]
 
 	/* now we are ready to save the remaining context :) */
-	st      orig_r8_IS_IRQ2, [sp, 8]    /* Event Type */
+	st      event_IRQ2, [sp, 8]    /* Dummy ECR */
 	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
-	SAVE_CALLER_SAVED
-	st.a    r26, [sp, -4]	/* gp */
-	st.a    fp, [sp, -4]
-	st.a    blink, [sp, -4]
-	st.a    ilink2, [sp, -4]
-	lr	r9, [status32_l2]
-	st.a    r9, [sp, -4]
-	st.a    lp_count, [sp, -4]
-	lr	r9, [lp_end]
-	st.a    r9, [sp, -4]
-	lr	r9, [lp_start]
-	st.a    r9, [sp, -4]
-	lr	r9, [bta_l2]
-	st.a    r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
-	mov	r9, 0xdeadbee2
-	st	r9, [sp, -4]
-#endif
 
-	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
-	sub	sp, sp, 4
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSH	ilink2
+	PUSHAX	status32_l2
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	bta_l2
 .endm
 
 /*--------------------------------------------------------------
@@ -595,52 +507,41 @@
  *-------------------------------------------------------------*/
 
 .macro RESTORE_ALL_INT1
-	add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
-
-	ld.ab   r9, [sp, 4] /* Actual reg file */
-	sr	r9, [bta_l1]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_start]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_end]
-	ld.ab   r9, [sp, 4]
-	mov	lp_count, r9
-	ld.ab   r9, [sp, 4]
-	sr	r9, [status32_l1]
-	ld.ab   r9, [sp, 4]
-	mov	ilink1, r9
-	ld.ab   blink, [sp, 4]
-	ld.ab   fp, [sp, 4]
-	ld.ab   r26, [sp, 4]	/* gp */
-	RESTORE_CALLER_SAVED
+	POPAX	bta_l1
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
+
+	POPAX	status32_l1
+	POP	ilink1
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
 
 	ld  sp, [sp] /* restore original sp */
-	/* orig_r0 and orig_r8 skipped automatically */
+	/* orig_r0, ECR, user_r25 skipped automatically */
 .endm
 
 .macro RESTORE_ALL_INT2
-	add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
-
-	ld.ab   r9, [sp, 4]
-	sr	r9, [bta_l2]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_start]
-	ld.ab   r9, [sp, 4]
-	sr	r9, [lp_end]
-	ld.ab   r9, [sp, 4]
-	mov	lp_count, r9
-	ld.ab   r9, [sp, 4]
-	sr	r9, [status32_l2]
-	ld.ab   r9, [sp, 4]
-	mov	ilink2, r9
-	ld.ab   blink, [sp, 4]
-	ld.ab   fp, [sp, 4]
-	ld.ab   r26, [sp, 4]	/* gp */
-	RESTORE_CALLER_SAVED
+	POPAX	bta_l2
+	POPAX	lp_start
+	POPAX	lp_end
 
-	ld  sp, [sp] /* restore original sp */
-	/* orig_r0 and orig_r8 skipped automatically */
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
 
+	POPAX	status32_l2
+	POP	ilink2
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
+
+	ld	sp, [sp]	/* restore original sp */
+	/* orig_r0, ECR, user_r25 skipped automatically */
 .endm
 
 
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 57898a17eb82..c0a72105ee0b 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -21,6 +21,6 @@
 extern void __init arc_init_IRQ(void);
 extern int __init get_hw_config_num_irq(void);
 
-void __cpuinit arc_local_timer_setup(unsigned int cpu);
+void arc_local_timer_setup(unsigned int cpu);
 
 #endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index eac071668201..d99f79bcf865 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -19,6 +19,26 @@
 
 #include <asm/arcregs.h>
 
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT		1	/* Int 1 enable */
+#define STATUS_E2_BIT		2	/* Int 2 enable */
+#define STATUS_A1_BIT		3	/* Int 1 active */
+#define STATUS_A2_BIT		4	/* Int 2 active */
+
+#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+#define AUX_IRQ_LV12		0x43	/* interrupt level register */
+
+#define AUX_IENABLE		0x40c
+#define AUX_ITRIGGER		0x40d
+#define AUX_IPULSE		0x415
+
 #ifndef __ASSEMBLY__
 
 /******************************************************************
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index 4930957ca3d3..b65fca7ffeb5 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -31,7 +31,7 @@ static inline void arch_kgdb_breakpoint(void)
31 __asm__ __volatile__ ("trap_s 0x4\n"); 31 __asm__ __volatile__ ("trap_s 0x4\n");
32} 32}
33 33
34extern void kgdb_trap(struct pt_regs *regs, int param); 34extern void kgdb_trap(struct pt_regs *regs);
35 35
36enum arc700_linux_regnums { 36enum arc700_linux_regnums {
37 _R0 = 0, 37 _R0 = 0,
@@ -53,7 +53,7 @@ enum arc700_linux_regnums {
53}; 53};
54 54
55#else 55#else
56#define kgdb_trap(regs, param) 56#define kgdb_trap(regs)
57#endif 57#endif
58 58
59#endif /* __ARC_KGDB_H__ */ 59#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 4d9c211fce70..944dbedb38b5 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -50,11 +50,9 @@ struct kprobe_ctlblk {
50 50
51int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); 51int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
52void kretprobe_trampoline(void); 52void kretprobe_trampoline(void);
53void trap_is_kprobe(unsigned long cause, unsigned long address, 53void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
54 struct pt_regs *regs);
55#else 54#else
56static void trap_is_kprobe(unsigned long cause, unsigned long address, 55static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
57 struct pt_regs *regs)
58{ 56{
59} 57}
60#endif 58#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 56b02320f1a9..7c03fe61759c 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -9,6 +9,40 @@
9#ifndef _ASM_ARC_MMU_H 9#ifndef _ASM_ARC_MMU_H
10#define _ASM_ARC_MMU_H 10#define _ASM_ARC_MMU_H
11 11
12#if defined(CONFIG_ARC_MMU_V1)
13#define CONFIG_ARC_MMU_VER 1
14#elif defined(CONFIG_ARC_MMU_V2)
15#define CONFIG_ARC_MMU_VER 2
16#elif defined(CONFIG_ARC_MMU_V3)
17#define CONFIG_ARC_MMU_VER 3
18#endif
19
20/* MMU Management regs */
21#define ARC_REG_MMU_BCR 0x06f
22#define ARC_REG_TLBPD0 0x405
23#define ARC_REG_TLBPD1 0x406
24#define ARC_REG_TLBINDEX 0x407
25#define ARC_REG_TLBCOMMAND 0x408
26#define ARC_REG_PID 0x409
27#define ARC_REG_SCRATCH_DATA0 0x418
28
29/* Bits in MMU PID register */
30#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
31
32/* Error code if probe fails */
33#define TLB_LKUP_ERR 0x80000000
34
35/* TLB Commands */
36#define TLBWrite 0x1
37#define TLBRead 0x2
38#define TLBGetIndex 0x3
39#define TLBProbe 0x4
40
41#if (CONFIG_ARC_MMU_VER >= 2)
42#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
43#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
44#endif
45
12#ifndef __ASSEMBLY__ 46#ifndef __ASSEMBLY__
13 47
14typedef struct { 48typedef struct {
@@ -18,6 +52,16 @@ typedef struct {
18#endif 52#endif
19} mm_context_t; 53} mm_context_t;
20 54
55#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
56void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
57#else
58#define tlb_paranoid_check(a, b)
21#endif 59#endif
22 60
61void arc_mmu_init(void);
62extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
63void __init read_decode_mmu_bcr(void);
64
65#endif /* !__ASSEMBLY__ */
66
23#endif 67#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ab84bf131fe1..9c8aa41e45c2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -96,13 +96,8 @@ typedef unsigned long pgtable_t;
96 96
97#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 97#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
98 98
99/* Default Permissions for page, used in mmap.c */ 99/* Default Permissions for stack/heaps pages (Non Executable) */
100#ifdef CONFIG_ARC_STACK_NONEXEC
101#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) 100#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
102#else
103#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
104 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
105#endif
106 101
107#define WANT_PAGE_VIRTUAL 1 102#define WANT_PAGE_VIRTUAL 1
108 103
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index c110ac87d22b..4749a0eee1cf 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -135,6 +135,12 @@
135/* ioremap */ 135/* ioremap */
136#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) 136#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
137 137
138/* Masks for actual TLB "PD"s */
139#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
140#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
141 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
142 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
143
138/************************************************************************** 144/**************************************************************************
139 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 145 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
140 * 146 *
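
PTE_BITS_IN_PD0/PD1, relocated here from tlb.h, state which Linux PTE bits belong in each of the two hardware descriptor words. As a rough sketch (pte, vaddr and asid are illustrative placeholders, not this patch's variables):

	unsigned long pd0 = (vaddr & PAGE_MASK) | asid | (pte & PTE_BITS_IN_PD0);
	unsigned long pd1 = pte & PTE_BITS_IN_PD1;   /* pfn, perms, cacheability */

PD0 thus carries the lookup key (VPN, ASID, global/present), while PD1 carries the translation and its permissions.
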
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 5f26b2c1cba0..15334ab66b56 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -19,6 +19,7 @@
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20 20
21#include <asm/arcregs.h> /* for STATUS_E1_MASK et al. */ 21#include <asm/arcregs.h> /* for STATUS_E1_MASK et al. */
22#include <asm/ptrace.h>
22 23
23/* Arch specific stuff which needs to be saved per task. 24/* Arch specific stuff which needs to be saved per task.
24 * However these items are not so important so as to earn a place in 25 * However these items are not so important so as to earn a place in
@@ -28,10 +29,6 @@ struct thread_struct {
28 unsigned long ksp; /* kernel mode stack pointer */ 29 unsigned long ksp; /* kernel mode stack pointer */
29 unsigned long callee_reg; /* pointer to callee regs */ 30 unsigned long callee_reg; /* pointer to callee regs */
30 unsigned long fault_address; /* dbls as brkpt holder as well */ 31 unsigned long fault_address; /* dbls as brkpt holder as well */
31 unsigned long cause_code; /* Exception Cause Code (ECR) */
32#ifdef CONFIG_ARC_CURR_IN_REG
33 unsigned long user_r25;
34#endif
35#ifdef CONFIG_ARC_FPU_SAVE_RESTORE 32#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
36 struct arc_fpu fpu; 33 struct arc_fpu fpu;
37#endif 34#endif
@@ -50,7 +47,7 @@ struct task_struct;
50unsigned long thread_saved_pc(struct task_struct *t); 47unsigned long thread_saved_pc(struct task_struct *t);
51 48
52#define task_pt_regs(p) \ 49#define task_pt_regs(p) \
53 ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1) 50 ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
54 51
55/* Free all resources held by a thread. */ 52/* Free all resources held by a thread. */
56#define release_thread(thread) do { } while (0) 53#define release_thread(thread) do { } while (0)
@@ -75,11 +72,15 @@ unsigned long thread_saved_pc(struct task_struct *t);
75 72
76/* 73/*
77 * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode. 74 * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
78 * These can't be derived from pt_regs as that would give the corresponding user-mode val 75 * Look in process.c for details of kernel stack layout
79 */ 76 */
80#define KSTK_ESP(tsk) (tsk->thread.ksp) 77#define KSTK_ESP(tsk) (tsk->thread.ksp)
81#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4))) 78
82#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4))) 79#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
80 sizeof(struct callee_regs) + off)))
81
82#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
83#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
83 84
84/* 85/*
85 * Do necessary setup to start up a newly executed thread. 86 * Do necessary setup to start up a newly executed thread.
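
With the gutter word gone, thread.ksp points directly at the saved r25 slot, and the fp/blink words pushed by gcc's prologue sit just above the 13-word callee block; that is all KSTK_REG's offsets 0 and 4 encode. A standalone toy model of the layout (host-side C, assuming 32-bit registers as on ARC):

	#include <stdio.h>

	struct callee_regs_toy { unsigned int r[13]; };	/* r25 (lowest addr) .. r13 */

	#define KSTK_REG_TOY(ksp, off) \
		(*(unsigned int *)((char *)(ksp) + sizeof(struct callee_regs_toy) + (off)))

	int main(void)
	{
		unsigned int stack[15] = { 0 };

		stack[13] = 0x1000;	/* fp slot, right above the callee block */
		stack[14] = 0x2000;	/* blink slot, above fp */

		printf("fp=0x%x blink=0x%x\n",
		       KSTK_REG_TOY(stack, 0), KSTK_REG_TOY(stack, 4));
		return 0;
	}
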
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 6179de7e07c2..c9938e7a7dbd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -17,12 +17,6 @@
17/* THE pt_regs: Defines how regs are saved during entry into kernel */ 17/* THE pt_regs: Defines how regs are saved during entry into kernel */
18 18
19struct pt_regs { 19struct pt_regs {
20 /*
21 * 1 word gutter after reg-file has been saved
22 * Technically not needed, Since SP always points to a "full" location
23 * (vs. "empty"). But pt_regs is shared with tools....
24 */
25 long res;
26 20
27 /* Real registers */ 21 /* Real registers */
28 long bta; /* bta_l1, bta_l2, erbta */ 22 long bta; /* bta_l1, bta_l2, erbta */
@@ -50,22 +44,32 @@ struct pt_regs {
50 long sp; /* user/kernel sp depending on where we came from */ 44 long sp; /* user/kernel sp depending on where we came from */
51 long orig_r0; 45 long orig_r0;
52 46
53 /* to distinguish between excp, syscall, irq */ 47 /*
48 * To distinguish between excp, syscall, irq
49 * For traps and exceptions, Exception Cause Register.
50 * ECR: <00> <VV> <CC> <PP>
51 * Last word used by Linux for extra state mgmt (syscall-restart)
52 * For interrupts, use artificial ECR values to note current prio-level
53 */
54 union { 54 union {
55 struct {
55#ifdef CONFIG_CPU_BIG_ENDIAN 56#ifdef CONFIG_CPU_BIG_ENDIAN
56 /* so that assembly code is same for LE/BE */ 57 unsigned long state:8, ecr_vec:8,
57 unsigned long orig_r8:16, event:16; 58 ecr_cause:8, ecr_param:8;
58#else 59#else
59 unsigned long event:16, orig_r8:16; 60 unsigned long ecr_param:8, ecr_cause:8,
61 ecr_vec:8, state:8;
60#endif 62#endif
61 long orig_r8_word; 63 };
64 unsigned long event;
62 }; 65 };
66
67 long user_r25;
63}; 68};
64 69
65/* Callee saved registers - need to be saved only when you are scheduled out */ 70/* Callee saved registers - need to be saved only when you are scheduled out */
66 71
67struct callee_regs { 72struct callee_regs {
68 long res; /* Again this is not needed */
69 long r25; 73 long r25;
70 long r24; 74 long r24;
71 long r23; 75 long r23;
@@ -99,18 +103,20 @@ struct callee_regs {
99/* return 1 if PC in delay slot */ 103/* return 1 if PC in delay slot */
100#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) 104#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
101 105
102#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL) 106#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
103#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT) 107#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
108
109#define STATE_SCALL_RESTARTED 0x01
104 110
105#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED) 111#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
106#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED) 112#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
107 113
108#define current_pt_regs() \ 114#define current_pt_regs() \
109({ \ 115({ \
110 /* open-coded current_thread_info() */ \ 116 /* open-coded current_thread_info() */ \
111 register unsigned long sp asm ("sp"); \ 117 register unsigned long sp asm ("sp"); \
112 unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ 118 unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
113 (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \ 119 (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
114}) 120})
115 121
116static inline long regs_return_value(struct pt_regs *regs) 122static inline long regs_return_value(struct pt_regs *regs)
@@ -120,11 +126,4 @@ static inline long regs_return_value(struct pt_regs *regs)
120 126
121#endif /* !__ASSEMBLY__ */ 127#endif /* !__ASSEMBLY__ */
122 128
123#define orig_r8_IS_SCALL 0x0001
124#define orig_r8_IS_SCALL_RESTARTED 0x0002
125#define orig_r8_IS_BRKPT 0x0004
126#define orig_r8_IS_EXCPN 0x0008
127#define orig_r8_IS_IRQ1 0x0010
128#define orig_r8_IS_IRQ2 0x0020
129
130#endif /* __ASM_PTRACE_H */ 129#endif /* __ASM_PTRACE_H */
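
The reworked tail of pt_regs overlays one word (event) with the four ECR bytes, so handlers can test ecr_vec / ecr_cause / ecr_param directly instead of masking the old orig_r8 flag word. A standalone little-endian sketch of the same overlay (the CONFIG_CPU_BIG_ENDIAN variant simply reverses the field order):

	#include <stdio.h>

	union ecr_toy {
		struct {
			unsigned int ecr_param:8, ecr_cause:8, ecr_vec:8, state:8;
		};
		unsigned int event;
	};

	int main(void)
	{
		union ecr_toy e = { .event = 0x00230400 };	/* ProtV, misaligned data */

		printf("vec=0x%02x cause=0x%02x param=0x%02x\n",
		       e.ecr_vec, e.ecr_cause, e.ecr_param);	/* 0x23 0x04 0x00 */
		return 0;
	}

The new in_syscall()/in_brkpt_trap() tests fall out of this: both are trap vectors, distinguished only by whether ecr_param is zero.
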
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 33ab3048e9b2..29de09804306 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -18,7 +18,7 @@ static inline long
18syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 18syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
19{ 19{
20 if (user_mode(regs) && in_syscall(regs)) 20 if (user_mode(regs) && in_syscall(regs))
21 return regs->orig_r8; 21 return regs->r8;
22 else 22 else
23 return -1; 23 return -1;
24} 24}
@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
26static inline void 26static inline void
27syscall_rollback(struct task_struct *task, struct pt_regs *regs) 27syscall_rollback(struct task_struct *task, struct pt_regs *regs)
28{ 28{
29 /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */ 29 regs->r0 = regs->orig_r0;
30 regs->r8 = regs->orig_r8;
31} 30}
32 31
33static inline long 32static inline long
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
index a5ff961b1efc..8a1ec96012ae 100644
--- a/arch/arc/include/asm/tlb-mmu1.h
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -9,9 +9,9 @@
9#ifndef __ASM_TLB_MMU_V1_H__ 9#ifndef __ASM_TLB_MMU_V1_H__
10#define __ASM_TLB_MMU_V1_H__ 10#define __ASM_TLB_MMU_V1_H__
11 11
12#if defined(__ASSEMBLY__) && defined(CONFIG_ARC_MMU_VER == 1) 12#include <asm/mmu.h>
13 13
14#include <asm/tlb.h> 14#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
15 15
16.macro TLB_WRITE_HEURISTICS 16.macro TLB_WRITE_HEURISTICS
17 17
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index cb0c708ca665..a9db5f62aaf3 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,18 +9,6 @@
9#ifndef _ASM_ARC_TLB_H 9#ifndef _ASM_ARC_TLB_H
10#define _ASM_ARC_TLB_H 10#define _ASM_ARC_TLB_H
11 11
12#ifdef __KERNEL__
13
14#include <asm/pgtable.h>
15
16/* Masks for actual TLB "PD"s */
17#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
18#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
19 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
20 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
21
22#ifndef __ASSEMBLY__
23
24#define tlb_flush(tlb) \ 12#define tlb_flush(tlb) \
25do { \ 13do { \
26 if (tlb->fullmm) \ 14 if (tlb->fullmm) \
@@ -56,18 +44,4 @@ do { \
56#include <linux/pagemap.h> 44#include <linux/pagemap.h>
57#include <asm-generic/tlb.h> 45#include <asm-generic/tlb.h>
58 46
59#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
60void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
61#else
62#define tlb_paranoid_check(a, b)
63#endif
64
65void arc_mmu_init(void);
66extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
67void __init read_decode_mmu_bcr(void);
68
69#endif /* __ASSEMBLY__ */
70
71#endif /* __KERNEL__ */
72
73#endif /* _ASM_ARC_TLB_H */ 47#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 5dbe63f17b66..60702f3751d2 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -16,11 +16,11 @@
16 16
17#ifdef CONFIG_ARC_MISALIGN_ACCESS 17#ifdef CONFIG_ARC_MISALIGN_ACCESS
18int misaligned_fixup(unsigned long address, struct pt_regs *regs, 18int misaligned_fixup(unsigned long address, struct pt_regs *regs,
19 unsigned long cause, struct callee_regs *cregs); 19 struct callee_regs *cregs);
20#else 20#else
21static inline int 21static inline int
22misaligned_fixup(unsigned long address, struct pt_regs *regs, 22misaligned_fixup(unsigned long address, struct pt_regs *regs,
23 unsigned long cause, struct callee_regs *cregs) 23 struct callee_regs *cregs)
24{ 24{
25 return 0; 25 return 0;
26} 26}
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 30333cec0fef..2618cc13ba75 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -20,28 +20,31 @@
20 * 20 *
21 * This is to decouple pt_regs from user-space ABI, to be able to change it 21 * This is to decouple pt_regs from user-space ABI, to be able to change it
22 * w/o affecting the ABI. 22 * w/o affecting the ABI.
23 * Although the layout (initial padding) is similar to pt_regs to have some 23 *
24 * optimizations when copying pt_regs to/from user_regs_struct. 24 * The intermediate pad, pad2 are relics of the initial layout based on pt_regs
25 * for optimizations when copying pt_regs to/from user_regs_struct.
26 * We no longer need them, but they can't be changed now as they are part of the ABI.
25 * 27 *
26 * Also, sigcontext only cares about the scratch regs as that is what we really 28 * Also, sigcontext only cares about the scratch regs as that is what we really
27 * save/restore for signal handling. 29 * save/restore for signal handling. However, gdb also uses the same struct,
30 * hence callee regs need to be in there too.
28*/ 31*/
29struct user_regs_struct { 32struct user_regs_struct {
30 33
34 long pad;
31 struct { 35 struct {
32 long pad;
33 long bta, lp_start, lp_end, lp_count; 36 long bta, lp_start, lp_end, lp_count;
34 long status32, ret, blink, fp, gp; 37 long status32, ret, blink, fp, gp;
35 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 38 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
36 long sp; 39 long sp;
37 } scratch; 40 } scratch;
41 long pad2;
38 struct { 42 struct {
39 long pad;
40 long r25, r24, r23, r22, r21, r20; 43 long r25, r24, r23, r22, r21, r20;
41 long r19, r18, r17, r16, r15, r14, r13; 44 long r19, r18, r17, r16, r15, r14, r13;
42 } callee; 45 } callee;
43 long efa; /* break pt addr, for break points in delay slots */ 46 long efa; /* break pt addr, for break points in delay slots */
44 long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */ 47 long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
45}; 48};
46#endif /* !__ASSEMBLY__ */ 49#endif /* !__ASSEMBLY__ */
47 50
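
Because user_regs_struct is now decoupled from pt_regs, a debugger always goes through the generic regset interface rather than relying on kernel layout. A hedged userspace fragment (pid is assumed to be a stopped tracee; these are stock ptrace(2) calls, not part of this patch, and struct user_regs_struct comes from the uapi header above):

	#include <elf.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	static long fetch_gp_regs(pid_t pid, struct user_regs_struct *uregs)
	{
		struct iovec iov = { .iov_base = uregs, .iov_len = sizeof(*uregs) };

		/* NT_PRSTATUS selects the general-purpose regset */
		return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
	}

On read-back, uregs->stop_pc is the PC gdb should report, and the pad/pad2 words come back zeroed, matching the REG_O_ZERO() handling added in ptrace.c below.
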
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index 7dcda7025241..6c3aa0edb9b5 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -24,9 +24,6 @@ int main(void)
24 24
25 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); 25 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
26 DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg)); 26 DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
27#ifdef CONFIG_ARC_CURR_IN_REG
28 DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
29#endif
30 DEFINE(THREAD_FAULT_ADDR, 27 DEFINE(THREAD_FAULT_ADDR,
31 offsetof(struct thread_struct, fault_address)); 28 offsetof(struct thread_struct, fault_address));
32 29
@@ -49,7 +46,7 @@ int main(void)
49 BLANK(); 46 BLANK();
50 47
51 DEFINE(PT_status32, offsetof(struct pt_regs, status32)); 48 DEFINE(PT_status32, offsetof(struct pt_regs, status32));
52 DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word)); 49 DEFINE(PT_event, offsetof(struct pt_regs, event));
53 DEFINE(PT_sp, offsetof(struct pt_regs, sp)); 50 DEFINE(PT_sp, offsetof(struct pt_regs, sp));
54 DEFINE(PT_r0, offsetof(struct pt_regs, r0)); 51 DEFINE(PT_r0, offsetof(struct pt_regs, r0));
55 DEFINE(PT_r1, offsetof(struct pt_regs, r1)); 52 DEFINE(PT_r1, offsetof(struct pt_regs, r1));
@@ -60,5 +57,7 @@ int main(void)
60 DEFINE(PT_r6, offsetof(struct pt_regs, r6)); 57 DEFINE(PT_r6, offsetof(struct pt_regs, r6));
61 DEFINE(PT_r7, offsetof(struct pt_regs, r7)); 58 DEFINE(PT_r7, offsetof(struct pt_regs, r7));
62 59
60 DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
61 DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
63 return 0; 62 return 0;
64} 63}
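
asm-offsets.c is never linked into the kernel: Kbuild compiles it to assembly and scrapes the DEFINE() markers into generated/asm-offsets.h, which is what lets entry.S write "ld r8, [sp, PT_status32]" without hardcoding structure offsets. The mechanism, roughly (per include/linux/kbuild.h of this era):

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)) emits a marker line
	 * "->SZ_PT_REGS <N> sizeof(struct pt_regs)" into the generated .s
	 * file; a sed rule rewrites it to "#define SZ_PT_REGS <N>". */

The two new SZ_* constants let the assembly carve pt_regs/callee_regs frames without repeating the gutter arithmetic this series removes.
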
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index 60844dac6132..34410eb1a308 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -23,10 +23,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
23 unsigned int tmp; 23 unsigned int tmp;
24 unsigned int prev = (unsigned int)prev_task; 24 unsigned int prev = (unsigned int)prev_task;
25 unsigned int next = (unsigned int)next_task; 25 unsigned int next = (unsigned int)next_task;
26 int num_words_to_skip = 1;
27#ifdef CONFIG_ARC_CURR_IN_REG
28 num_words_to_skip++;
29#endif
30 26
31 __asm__ __volatile__( 27 __asm__ __volatile__(
32 /* FP/BLINK save generated by gcc (standard function prologue) */ 28 /* FP/BLINK save generated by gcc (standard function prologue) */
@@ -44,8 +40,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
44 "st.a r24, [sp, -4] \n\t" 40 "st.a r24, [sp, -4] \n\t"
45#ifndef CONFIG_ARC_CURR_IN_REG 41#ifndef CONFIG_ARC_CURR_IN_REG
46 "st.a r25, [sp, -4] \n\t" 42 "st.a r25, [sp, -4] \n\t"
43#else
44 "sub sp, sp, 4 \n\t" /* usual r25 placeholder */
47#endif 45#endif
48 "sub sp, sp, %4 \n\t" /* create gutter at top */
49 46
50 /* set ksp of outgoing task in tsk->thread.ksp */ 47 /* set ksp of outgoing task in tsk->thread.ksp */
51 "st.as sp, [%3, %1] \n\t" 48 "st.as sp, [%3, %1] \n\t"
@@ -76,10 +73,10 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
76 73
77 /* start loading it's CALLEE reg file */ 74 /* start loading it's CALLEE reg file */
78 75
79 "add sp, sp, %4 \n\t" /* skip gutter at top */
80
81#ifndef CONFIG_ARC_CURR_IN_REG 76#ifndef CONFIG_ARC_CURR_IN_REG
82 "ld.ab r25, [sp, 4] \n\t" 77 "ld.ab r25, [sp, 4] \n\t"
78#else
79 "add sp, sp, 4 \n\t"
83#endif 80#endif
84 "ld.ab r24, [sp, 4] \n\t" 81 "ld.ab r24, [sp, 4] \n\t"
85 "ld.ab r23, [sp, 4] \n\t" 82 "ld.ab r23, [sp, 4] \n\t"
@@ -100,8 +97,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
100 /* FP/BLINK restore generated by gcc (standard func epilogue) */ 97 /* FP/BLINK restore generated by gcc (standard func epilogue) */
101 98
102 : "=r"(tmp) 99 : "=r"(tmp)
103 : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev), 100 : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev)
104 "n"(num_words_to_skip * 4)
105 : "blink" 101 : "blink"
106 ); 102 );
107 103
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0c6d664d4a83..1d7165156e17 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -142,7 +142,7 @@ VECTOR reserved ; Reserved Exceptions
142.endr 142.endr
143 143
144#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */ 144#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
145#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ 145#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */
146#include <asm/errno.h> 146#include <asm/errno.h>
147#include <asm/arcregs.h> 147#include <asm/arcregs.h>
148#include <asm/irqflags.h> 148#include <asm/irqflags.h>
@@ -274,10 +274,8 @@ ARC_ENTRY instr_service
274 SWITCH_TO_KERNEL_STK 274 SWITCH_TO_KERNEL_STK
275 SAVE_ALL_SYS 275 SAVE_ALL_SYS
276 276
277 lr r0, [ecr] 277 lr r0, [efa]
278 lr r1, [efa] 278 mov r1, sp
279
280 mov r2, sp
281 279
282 FAKE_RET_FROM_EXCPN r9 280 FAKE_RET_FROM_EXCPN r9
283 281
@@ -298,9 +296,8 @@ ARC_ENTRY mem_service
298 SWITCH_TO_KERNEL_STK 296 SWITCH_TO_KERNEL_STK
299 SAVE_ALL_SYS 297 SAVE_ALL_SYS
300 298
301 lr r0, [ecr] 299 lr r0, [efa]
302 lr r1, [efa] 300 mov r1, sp
303 mov r2, sp
304 bl do_memory_error 301 bl do_memory_error
305 b ret_from_exception 302 b ret_from_exception
306ARC_EXIT mem_service 303ARC_EXIT mem_service
@@ -317,11 +314,14 @@ ARC_ENTRY EV_MachineCheck
317 SWITCH_TO_KERNEL_STK 314 SWITCH_TO_KERNEL_STK
318 SAVE_ALL_SYS 315 SAVE_ALL_SYS
319 316
320 lr r0, [ecr] 317 lr r2, [ecr]
321 lr r1, [efa] 318 lr r0, [efa]
322 mov r2, sp 319 mov r1, sp
320
321 lsr r3, r2, 8
322 bmsk r3, r3, 7
323 brne r3, ECR_C_MCHK_DUP_TLB, 1f
323 324
324 brne r0, 0x200100, 1f
325 bl do_tlb_overlap_fault 325 bl do_tlb_overlap_fault
326 b ret_from_exception 326 b ret_from_exception
327 327
@@ -355,8 +355,8 @@ ARC_ENTRY EV_TLBProtV
355 ; ecr and efa were not saved in case an Intr sneaks in 355 ; ecr and efa were not saved in case an Intr sneaks in
356 ; after fake rtie 356 ; after fake rtie
357 ; 357 ;
358 lr r3, [ecr] 358 lr r2, [ecr]
359 lr r4, [efa] 359 lr r1, [efa] ; Faulting Data address
360 360
361 ; --------(4) Return from CPU Exception Mode --------- 361 ; --------(4) Return from CPU Exception Mode ---------
362 ; Fake a rtie, but rtie to next label 362 ; Fake a rtie, but rtie to next label
@@ -368,31 +368,25 @@ ARC_ENTRY EV_TLBProtV
368 ;------ (5) Type of Protection Violation? ---------- 368 ;------ (5) Type of Protection Violation? ----------
369 ; 369 ;
370 ; ProtV Hardware Exception is triggered for Access Faults of 2 types 370 ; ProtV Hardware Exception is triggered for Access Faults of 2 types
371 ; -Access Violation (WRITE to READ ONLY Page) - for linux COW 371 ; -Access Violation : 00_23_(00|01|02|03)_00
372 ; -Unaligned Access (READ/WRITE on odd boundary) 372 ; x r w r+w
373 ; -Unaligned Access : 00_23_04_00
373 ; 374 ;
374 cmp r3, 0x230400 ; Misaligned data access ? 375 bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
375 beq 4f
376 376
377 ;========= (6a) Access Violation Processing ======== 377 ;========= (6a) Access Violation Processing ========
378 cmp r3, 0x230100
379 mov r1, 0x0 ; if LD exception ? write = 0
380 mov.ne r1, 0x1 ; else write = 1
381
382 mov r2, r4 ; faulting address
383 mov r0, sp ; pt_regs 378 mov r0, sp ; pt_regs
384 bl do_page_fault 379 bl do_page_fault
385 b ret_from_exception 380 b ret_from_exception
386 381
387 ;========== (6b) Non aligned access ============ 382 ;========== (6b) Non aligned access ============
3884: 3834:
389 mov r0, r3 ; cause code 384 mov r0, r1
390 mov r1, r4 ; faulting address 385 mov r1, sp ; pt_regs
391 mov r2, sp ; pt_regs
392 386
393#ifdef CONFIG_ARC_MISALIGN_ACCESS 387#ifdef CONFIG_ARC_MISALIGN_ACCESS
394 SAVE_CALLEE_SAVED_USER 388 SAVE_CALLEE_SAVED_USER
395 mov r3, sp ; callee_regs 389 mov r2, sp ; callee_regs
396 390
397 bl do_misaligned_access 391 bl do_misaligned_access
398 392
@@ -419,9 +413,8 @@ ARC_ENTRY EV_PrivilegeV
419 SWITCH_TO_KERNEL_STK 413 SWITCH_TO_KERNEL_STK
420 SAVE_ALL_SYS 414 SAVE_ALL_SYS
421 415
422 lr r0, [ecr] 416 lr r0, [efa]
423 lr r1, [efa] 417 mov r1, sp
424 mov r2, sp
425 418
426 FAKE_RET_FROM_EXCPN r9 419 FAKE_RET_FROM_EXCPN r9
427 420
@@ -440,9 +433,8 @@ ARC_ENTRY EV_Extension
440 SWITCH_TO_KERNEL_STK 433 SWITCH_TO_KERNEL_STK
441 SAVE_ALL_SYS 434 SAVE_ALL_SYS
442 435
443 lr r0, [ecr] 436 lr r0, [efa]
444 lr r1, [efa] 437 mov r1, sp
445 mov r2, sp
446 bl do_extension_fault 438 bl do_extension_fault
447 b ret_from_exception 439 b ret_from_exception
448ARC_EXIT EV_Extension 440ARC_EXIT EV_Extension
@@ -498,11 +490,8 @@ tracesys_exit:
498trap_with_param: 490trap_with_param:
499 491
500 ; the stop_pc info reported to gdb needs this 492 ; the stop_pc info reported to gdb needs this
501 stw orig_r8_IS_BRKPT, [sp, PT_orig_r8] 493 lr r0, [efa]
502 494 mov r1, sp
503 mov r0, r12
504 lr r1, [efa]
505 mov r2, sp
506 495
507 ; Now that we have read EFA, its safe to do "fake" rtie 496 ; Now that we have read EFA, its safe to do "fake" rtie
508 ; and get out of CPU exception mode 497 ; and get out of CPU exception mode
@@ -544,11 +533,11 @@ ARC_ENTRY EV_Trap
544 lr r9, [erstatus] 533 lr r9, [erstatus]
545 534
546 SWITCH_TO_KERNEL_STK 535 SWITCH_TO_KERNEL_STK
547 SAVE_ALL_TRAP 536 SAVE_ALL_SYS
548 537
549 ;------- (4) What caused the Trap -------------- 538 ;------- (4) What caused the Trap --------------
550 lr r12, [ecr] 539 lr r12, [ecr]
551 and.f 0, r12, ECR_PARAM_MASK 540 bmsk.f 0, r12, 7
552 bnz trap_with_param 541 bnz trap_with_param
553 542
554 ; ======= (5a) Trap is due to System Call ======== 543 ; ======= (5a) Trap is due to System Call ========
@@ -589,11 +578,7 @@ ARC_ENTRY ret_from_exception
589 ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 578 ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
590 ld r8, [sp, PT_status32] ; returning to User/Kernel Mode 579 ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
591 580
592#ifdef CONFIG_PREEMPT
593 bbit0 r8, STATUS_U_BIT, resume_kernel_mode 581 bbit0 r8, STATUS_U_BIT, resume_kernel_mode
594#else
595 bbit0 r8, STATUS_U_BIT, restore_regs
596#endif
597 582
598 ; Before returning to User mode check-for-and-complete any pending work 583 ; Before returning to User mode check-for-and-complete any pending work
599 ; such as rescheduling/signal-delivery etc. 584 ; such as rescheduling/signal-delivery etc.
@@ -653,10 +638,10 @@ resume_user_mode_begin:
653 b resume_user_mode_begin ; unconditionally back to U mode ret chks 638 b resume_user_mode_begin ; unconditionally back to U mode ret chks
654 ; for single exit point from this block 639 ; for single exit point from this block
655 640
656#ifdef CONFIG_PREEMPT
657
658resume_kernel_mode: 641resume_kernel_mode:
659 642
643#ifdef CONFIG_PREEMPT
644
660 ; Can't preempt if preemption disabled 645 ; Can't preempt if preemption disabled
661 GET_CURR_THR_INFO_FROM_SP r10 646 GET_CURR_THR_INFO_FROM_SP r10
662 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] 647 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -687,17 +672,6 @@ restore_regs :
687 ; XXX can this be optimised out 672 ; XXX can this be optimised out
688 IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy 673 IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
689 674
690#ifdef CONFIG_ARC_CURR_IN_REG
691 ; Restore User R25
692 ; Earlier this used to be only for returning to user mode
693 ; However with 2 levels of IRQ this can also happen even if
694 ; in kernel mode
695 ld r9, [sp, PT_sp]
696 brhs r9, VMALLOC_START, 8f
697 RESTORE_USER_R25
6988:
699#endif
700
701 ; Restore REG File. In case multiple Events outstanding, 675 ; Restore REG File. In case multiple Events outstanding,
702 ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None 676 ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
703 ; Note that we use realtime STATUS32 (not pt_regs->status32) to 677 ; Note that we use realtime STATUS32 (not pt_regs->status32) to
@@ -714,28 +688,33 @@ not_exception:
714 688
715#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS 689#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
716 690
691 ; Level 2 interrupt return Path - from hardware standpoint
717 bbit0 r10, STATUS_A2_BIT, not_level2_interrupt 692 bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
718 693
719 ;------------------------------------------------------------------ 694 ;------------------------------------------------------------------
695 ; However the context returning might not have taken L2 intr itself
696 ; e.g. Task 'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
697 ; Special considerations needed for the context which took L2 intr
698
699 ld r9, [sp, PT_event] ; Ensure this is L2 intr context
700 brne r9, event_IRQ2, 149f
701
702 ;------------------------------------------------------------------
720 ; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier 703 ; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier
721 ; so that sched doesn't move to new task, causing L1 to be delayed 704 ; so that sched doesn't move to new task, causing L1 to be delayed
722 ; non-deterministically. Now that we've achieved that, let's reset 705 ; non-deterministically. Now that we've achieved that, let's reset
723 ; things to what they were, before returning from L2 context 706 ; things to what they were, before returning from L2 context
724 ;---------------------------------------------------------------- 707 ;----------------------------------------------------------------
725 708
726 ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
727 brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
728
729 ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) 709 ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
730 bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal 710 bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
731 711
732 ; A1 is set in status32_l2
733 ; decrement thread_info->preempt_count (re-enable preemption) 712 ; decrement thread_info->preempt_count (re-enable preemption)
734 GET_CURR_THR_INFO_FROM_SP r10 713 GET_CURR_THR_INFO_FROM_SP r10
735 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] 714 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
736 715
737 ; paranoid check, given A1 was active when A2 happened, preempt count 716 ; paranoid check, given A1 was active when A2 happened, preempt count
738 ; must not be 0 beccause we would have incremented it. 717 ; must not be 0 because we would have incremented it.
739 ; If this does happen we simply HALT as it means a BUG !!! 718 ; If this does happen we simply HALT as it means a BUG !!!
740 cmp r9, 0 719 cmp r9, 0
741 bnz 2f 720 bnz 2f
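
Two of the hunks above replace full-ECR compares with field tests. In EV_MachineCheck, "lsr r3, r2, 8" followed by "bmsk r3, r3, 7" keeps bits 15:8 of the ECR, i.e. the cause byte; the old code compared the whole register against 0x200100 (vec 0x20, cause 0x01). The C equivalent of the new test, as a sketch (ecr standing in for the value read in r2):

	unsigned int cause = (ecr >> 8) & 0xff;	/* lsr 8, then mask bits 0..7 */

	if (cause == ECR_C_MCHK_DUP_TLB)	/* duplicate-TLB machine check */
		/* branch to do_tlb_overlap_fault */;

Likewise the EV_Trap hunk's "bmsk.f 0, r12, 7" tests just the low param byte, replacing the old and.f against ECR_PARAM_MASK.
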
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 006dec3fc353..2a913f85a747 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -27,6 +27,8 @@ stext:
27 ; Don't clobber r0-r4 yet. They might hold bootloader-provided info 27 ; Don't clobber r0-r4 yet. They might hold bootloader-provided info
28 ;------------------------------------------------------------------- 28 ;-------------------------------------------------------------------
29 29
30 sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
31
30#ifdef CONFIG_SMP 32#ifdef CONFIG_SMP
31 ; Only Boot (Master) proceeds. Others wait in platform dependent way 33 ; Only Boot (Master) proceeds. Others wait in platform dependent way
32 ; IDENTITY Reg [ 3 2 1 0 ] 34 ; IDENTITY Reg [ 3 2 1 0 ]
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 8115fa531575..305b3f866aa7 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -28,25 +28,17 @@
28 * -Disable all IRQs (on CPU side) 28 * -Disable all IRQs (on CPU side)
29 * -Optionally, setup the High priority Interrupts as Level 2 IRQs 29 * -Optionally, setup the High priority Interrupts as Level 2 IRQs
30 */ 30 */
31void __cpuinit arc_init_IRQ(void) 31void arc_init_IRQ(void)
32{ 32{
33 int level_mask = 0; 33 int level_mask = 0;
34 34
35 write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
36
37 /* Disable all IRQs: enable them as devices request */ 35 /* Disable all IRQs: enable them as devices request */
38 write_aux_reg(AUX_IENABLE, 0); 36 write_aux_reg(AUX_IENABLE, 0);
39 37
40 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ 38 /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
41#ifdef CONFIG_ARC_IRQ3_LV2 39 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
42 level_mask |= (1 << 3); 40 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
43#endif 41 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
44#ifdef CONFIG_ARC_IRQ5_LV2
45 level_mask |= (1 << 5);
46#endif
47#ifdef CONFIG_ARC_IRQ6_LV2
48 level_mask |= (1 << 6);
49#endif
50 42
51 if (level_mask) { 43 if (level_mask) {
52 pr_info("Level-2 interrupts bitset %x\n", level_mask); 44 pr_info("Level-2 interrupts bitset %x\n", level_mask);
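
IS_ENABLED(CONFIG_x) evaluates to 1 when the option is set and 0 otherwise, so the three shifted terms compose the Level-2 mask with no #ifdef blocks. For example, with only CONFIG_ARC_IRQ5_LV2=y the expression reduces to:

	level_mask = (0 << 3) | (1 << 5) | (0 << 6);	/* == 0x20: IRQ5 is L2 */
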
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 52bdc83c1495..a7698fb14818 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -169,7 +169,7 @@ int kgdb_arch_init(void)
169 return 0; 169 return 0;
170} 170}
171 171
172void kgdb_trap(struct pt_regs *regs, int param) 172void kgdb_trap(struct pt_regs *regs)
173{ 173{
174 /* trap_s 3 is used for breakpoints that overwrite existing 174 /* trap_s 3 is used for breakpoints that overwrite existing
175 * instructions, while trap_s 4 is used for compiled breakpoints. 175 * instructions, while trap_s 4 is used for compiled breakpoints.
@@ -181,7 +181,7 @@ void kgdb_trap(struct pt_regs *regs, int param)
181 * with trap_s 4 (compiled) breakpoints, continuation needs to 181 * with trap_s 4 (compiled) breakpoints, continuation needs to
182 * start after the breakpoint. 182 * start after the breakpoint.
183 */ 183 */
184 if (param == 3) 184 if (regs->ecr_param == 3)
185 instruction_pointer(regs) -= BREAK_INSTR_SIZE; 185 instruction_pointer(regs) -= BREAK_INSTR_SIZE;
186 186
187 kgdb_handle_exception(1, SIGTRAP, 0, regs); 187 kgdb_handle_exception(1, SIGTRAP, 0, regs);
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 5a7b80e2d883..72f97822784a 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -517,8 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
517 return 0; 517 return 0;
518} 518}
519 519
520void trap_is_kprobe(unsigned long cause, unsigned long address, 520void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
521 struct pt_regs *regs)
522{ 521{
523 notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP); 522 notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
524} 523}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index cad66851e0c4..07a3a968fe49 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -55,10 +55,8 @@ asmlinkage void ret_from_fork(void);
55 * | ... | 55 * | ... |
56 * | unused | 56 * | unused |
57 * | | 57 * | |
58 * ------------------ <==== top of Stack (thread.ksp)
59 * | UNUSED 1 word|
60 * ------------------ 58 * ------------------
61 * | r25 | 59 * | r25 | <==== top of Stack (thread.ksp)
62 * ~ ~ 60 * ~ ~
63 * | --to-- | (CALLEE Regs of user mode) 61 * | --to-- | (CALLEE Regs of user mode)
64 * | r13 | 62 * | r13 |
@@ -76,7 +74,10 @@ asmlinkage void ret_from_fork(void);
76 * | --to-- | (scratch Regs of user mode) 74 * | --to-- | (scratch Regs of user mode)
77 * | r0 | 75 * | r0 |
78 * ------------------ 76 * ------------------
79 * | UNUSED 1 word| 77 * | SP |
78 * | orig_r0 |
79 * | event/ECR |
80 * | user_r25 |
80 * ------------------ <===== END of PAGE 81 * ------------------ <===== END of PAGE
81 */ 82 */
82int copy_thread(unsigned long clone_flags, 83int copy_thread(unsigned long clone_flags,
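
With both UNUSED words gone, pt_regs sits flush against the end of the stack page and the callee save area lands immediately below it at switch time. The revised task_pt_regs()/current_pt_regs() earlier in this series are exactly this pointer math; as a sketch (stack_page being the base of the task's stack page):

	struct pt_regs *regs =
		(struct pt_regs *)(stack_page + THREAD_SIZE) - 1;  /* ends at page end */
	struct callee_regs *cregs =
		(struct callee_regs *)regs - 1;      /* pushed right below pt_regs */
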
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index c6a81c58d0f3..333238564b67 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -40,7 +40,15 @@ static int genregs_get(struct task_struct *target,
40 offsetof(struct user_regs_struct, LOC), \ 40 offsetof(struct user_regs_struct, LOC), \
41 offsetof(struct user_regs_struct, LOC) + 4); 41 offsetof(struct user_regs_struct, LOC) + 4);
42 42
43#define REG_O_ZERO(LOC) \
44 if (!ret) \
45 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \
46 offsetof(struct user_regs_struct, LOC), \
47 offsetof(struct user_regs_struct, LOC) + 4);
48
49 REG_O_ZERO(pad);
43 REG_O_CHUNK(scratch, callee, ptregs); 50 REG_O_CHUNK(scratch, callee, ptregs);
51 REG_O_ZERO(pad2);
44 REG_O_CHUNK(callee, efa, cregs); 52 REG_O_CHUNK(callee, efa, cregs);
45 REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address); 53 REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
46 54
@@ -88,8 +96,10 @@ static int genregs_set(struct task_struct *target,
88 offsetof(struct user_regs_struct, LOC), \ 96 offsetof(struct user_regs_struct, LOC), \
89 offsetof(struct user_regs_struct, LOC) + 4); 97 offsetof(struct user_regs_struct, LOC) + 4);
90 98
91 /* TBD: disallow updates to STATUS32, orig_r8 etc*/ 99 REG_IGNORE_ONE(pad);
92 REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */ 100 /* TBD: disallow updates to STATUS32 etc*/
101 REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */
102 REG_IGNORE_ONE(pad2);
93 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ 103 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
94 REG_IGNORE_ONE(efa); /* efa update invalid */ 104 REG_IGNORE_ONE(efa); /* efa update invalid */
95 REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */ 105 REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2b3731dd1e9..6b083454d039 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -31,14 +31,14 @@
31int running_on_hw = 1; /* vs. on ISS */ 31int running_on_hw = 1; /* vs. on ISS */
32 32
33char __initdata command_line[COMMAND_LINE_SIZE]; 33char __initdata command_line[COMMAND_LINE_SIZE];
34struct machine_desc *machine_desc __cpuinitdata; 34struct machine_desc *machine_desc;
35 35
36struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ 36struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
37 37
38struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 38struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
39 39
40 40
41void __cpuinit read_arc_build_cfg_regs(void) 41void read_arc_build_cfg_regs(void)
42{ 42{
43 struct bcr_perip uncached_space; 43 struct bcr_perip uncached_space;
44 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 44 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -182,7 +182,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
182 FIX_PTR(cpu); 182 FIX_PTR(cpu);
183#define IS_AVAIL1(var, str) ((var) ? str : "") 183#define IS_AVAIL1(var, str) ((var) ? str : "")
184#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "") 184#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
185#define IS_USED(var) ((var) ? "(in-use)" : "(not used)") 185#define IS_USED(cfg) (IS_ENABLED(cfg) ? "(in-use)" : "(not used)")
186 186
187 n += scnprintf(buf + n, len - n, 187 n += scnprintf(buf + n, len - n,
188 "Extn [700-Base]\t: %s %s %s %s %s %s\n", 188 "Extn [700-Base]\t: %s %s %s %s %s %s\n",
@@ -202,9 +202,9 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
202 if (cpu->core.family == 0x34) { 202 if (cpu->core.family == 0x34) {
203 n += scnprintf(buf + n, len - n, 203 n += scnprintf(buf + n, len - n,
204 "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", 204 "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
205 IS_USED(__CONFIG_ARC_HAS_LLSC_VAL), 205 IS_USED(CONFIG_ARC_HAS_LLSC),
206 IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL), 206 IS_USED(CONFIG_ARC_HAS_SWAPE),
207 IS_USED(__CONFIG_ARC_HAS_RTSC_VAL)); 207 IS_USED(CONFIG_ARC_HAS_RTSC));
208 } 208 }
209 209
210 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s", 210 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
@@ -237,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
237 return buf; 237 return buf;
238} 238}
239 239
240void __cpuinit arc_chk_ccms(void) 240void arc_chk_ccms(void)
241{ 241{
242#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) 242#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
243 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 243 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -272,7 +272,7 @@ void __cpuinit arc_chk_ccms(void)
272 * hardware has dedicated regs which need to be saved/restored on ctx-sw 272 * hardware has dedicated regs which need to be saved/restored on ctx-sw
273 * (Single Precision uses core regs), thus kernel is kind of oblivious to it 273 * (Single Precision uses core regs), thus kernel is kind of oblivious to it
274 */ 274 */
275void __cpuinit arc_chk_fpu(void) 275void arc_chk_fpu(void)
276{ 276{
277 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 277 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
278 278
@@ -293,7 +293,7 @@ void __cpuinit arc_chk_fpu(void)
293 * such as only for boot CPU etc 293 * such as only for boot CPU etc
294 */ 294 */
295 295
296void __cpuinit setup_processor(void) 296void setup_processor(void)
297{ 297{
298 char str[512]; 298 char str[512];
299 int cpu_id = smp_processor_id(); 299 int cpu_id = smp_processor_id();
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 5c7fd603d216..bca3052c956d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -117,7 +117,7 @@ const char *arc_platform_smp_cpuinfo(void)
117 * Called from asm stub in head.S 117 * Called from asm stub in head.S
118 * "current"/R25 already setup by low level boot code 118 * "current"/R25 already setup by low level boot code
119 */ 119 */
120void __cpuinit start_kernel_secondary(void) 120void start_kernel_secondary(void)
121{ 121{
122 struct mm_struct *mm = &init_mm; 122 struct mm_struct *mm = &init_mm;
123 unsigned int cpu = smp_processor_id(); 123 unsigned int cpu = smp_processor_id();
@@ -154,7 +154,7 @@ void __cpuinit start_kernel_secondary(void)
154 * 154 *
155 * Essential requirements being where to run from (PC) and stack (SP) 155 * Essential requirements being where to run from (PC) and stack (SP)
156*/ 156*/
157int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 157int __cpu_up(unsigned int cpu, struct task_struct *idle)
158{ 158{
159 unsigned long wait_till; 159 unsigned long wait_till;
160 160
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index ca0207b9d5b6..f8b7d880304d 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -79,7 +79,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
79 * assembly code 79 * assembly code
80 */ 80 */
81 frame_info->regs.r27 = 0; 81 frame_info->regs.r27 = 0;
82 frame_info->regs.r28 += 64; 82 frame_info->regs.r28 += 60;
83 frame_info->call_frame = 0; 83 frame_info->call_frame = 0;
84 84
85 } else { 85 } else {
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 09f4309aa2c0..0e51e69cf30d 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -44,13 +44,24 @@
44#include <asm/clk.h> 44#include <asm/clk.h>
45#include <asm/mach_desc.h> 45#include <asm/mach_desc.h>
46 46
47/* Timer related Aux registers */
48#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
49#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
50#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
51#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
52#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
53#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
54
55#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
56#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
57
47#define ARC_TIMER_MAX 0xFFFFFFFF 58#define ARC_TIMER_MAX 0xFFFFFFFF
48 59
49/********** Clock Source Device *********/ 60/********** Clock Source Device *********/
50 61
51#ifdef CONFIG_ARC_HAS_RTSC 62#ifdef CONFIG_ARC_HAS_RTSC
52 63
53int __cpuinit arc_counter_setup(void) 64int arc_counter_setup(void)
54{ 65{
55 /* RTSC insn taps into cpu clk, needs no setup */ 66 /* RTSC insn taps into cpu clk, needs no setup */
56 67
@@ -105,7 +116,7 @@ static bool is_usable_as_clocksource(void)
105/* 116/*
106 * set 32bit TIMER1 to keep counting monotonically and wraparound 117 * set 32bit TIMER1 to keep counting monotonically and wraparound
107 */ 118 */
108int __cpuinit arc_counter_setup(void) 119int arc_counter_setup(void)
109{ 120{
110 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); 121 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
111 write_aux_reg(ARC_REG_TIMER1_CNT, 0); 122 write_aux_reg(ARC_REG_TIMER1_CNT, 0);
@@ -212,7 +223,7 @@ static struct irqaction arc_timer_irq = {
212 * Setup the local event timer for @cpu 223 * Setup the local event timer for @cpu
213 * N.B. weak so that some exotic ARC SoCs can completely override it 224 * N.B. weak so that some exotic ARC SoCs can completely override it
214 */ 225 */
215void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu) 226void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
216{ 227{
217 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); 228 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
218 229
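
For the clocksource, TIMER1 is set to free-run from 0 up to ARC_TIMER_MAX with its interrupt left off (TIMER_CTRL_IE clear), so a read is just the count aux register. A sketch of the read side, assuming kernel context and the read_aux_reg() accessor from arcregs.h:

	static unsigned int timer1_count(void)
	{
		/* monotonic, wraps at ARC_TIMER_MAX; the clocksource core handles wrap */
		return read_aux_reg(ARC_REG_TIMER1_CNT);
	}
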
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 0471d9c9dd54..e21692d2fdab 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -28,10 +28,9 @@ void __init trap_init(void)
28 return; 28 return;
29} 29}
30 30
31void die(const char *str, struct pt_regs *regs, unsigned long address, 31void die(const char *str, struct pt_regs *regs, unsigned long address)
32 unsigned long cause_reg)
33{ 32{
34 show_kernel_fault_diag(str, regs, address, cause_reg); 33 show_kernel_fault_diag(str, regs, address);
35 34
36 /* DEAD END */ 35 /* DEAD END */
37 __asm__("flag 1"); 36 __asm__("flag 1");
@@ -42,14 +41,13 @@ void die(const char *str, struct pt_regs *regs, unsigned long address,
42 * -for user faults enqueues requested signal 41 * -for user faults enqueues requested signal
43 * -for kernel, chk if due to copy_(to|from)_user, otherwise die() 42 * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
44 */ 43 */
45static noinline int handle_exception(unsigned long cause, char *str, 44static noinline int
46 struct pt_regs *regs, siginfo_t *info) 45handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
47{ 46{
48 if (user_mode(regs)) { 47 if (user_mode(regs)) {
49 struct task_struct *tsk = current; 48 struct task_struct *tsk = current;
50 49
51 tsk->thread.fault_address = (__force unsigned int)info->si_addr; 50 tsk->thread.fault_address = (__force unsigned int)info->si_addr;
52 tsk->thread.cause_code = cause;
53 51
54 force_sig_info(info->si_signo, info, tsk); 52 force_sig_info(info->si_signo, info, tsk);
55 53
@@ -58,14 +56,14 @@ static noinline int handle_exception(unsigned long cause, char *str,
58 if (fixup_exception(regs)) 56 if (fixup_exception(regs))
59 return 0; 57 return 0;
60 58
61 die(str, regs, (unsigned long)info->si_addr, cause); 59 die(str, regs, (unsigned long)info->si_addr);
62 } 60 }
63 61
64 return 1; 62 return 1;
65} 63}
66 64
67#define DO_ERROR_INFO(signr, str, name, sicode) \ 65#define DO_ERROR_INFO(signr, str, name, sicode) \
68int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ 66int name(unsigned long address, struct pt_regs *regs) \
69{ \ 67{ \
70 siginfo_t info = { \ 68 siginfo_t info = { \
71 .si_signo = signr, \ 69 .si_signo = signr, \
@@ -73,7 +71,7 @@ int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
73 .si_code = sicode, \ 71 .si_code = sicode, \
74 .si_addr = (void __user *)address, \ 72 .si_addr = (void __user *)address, \
75 }; \ 73 }; \
76 return handle_exception(cause, str, regs, &info);\ 74 return handle_exception(str, regs, &info);\
77} 75}
78 76
79/* 77/*
@@ -90,11 +88,11 @@ DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
90/* 88/*
91 * Entry Point for Misaligned Data access Exception, for emulating in software 89 * Entry Point for Misaligned Data access Exception, for emulating in software
92 */ 90 */
93int do_misaligned_access(unsigned long cause, unsigned long address, 91int do_misaligned_access(unsigned long address, struct pt_regs *regs,
94 struct pt_regs *regs, struct callee_regs *cregs) 92 struct callee_regs *cregs)
95{ 93{
96 if (misaligned_fixup(address, regs, cause, cregs) != 0) 94 if (misaligned_fixup(address, regs, cregs) != 0)
97 return do_misaligned_error(cause, address, regs); 95 return do_misaligned_error(address, regs);
98 96
99 return 0; 97 return 0;
100} 98}
@@ -104,10 +102,9 @@ int do_misaligned_access(unsigned long cause, unsigned long address,
104 * Entry point for misc errors such as Nested Exceptions 102 * Entry point for misc errors such as Nested Exceptions
105 * -Duplicate TLB entry is handled separately though 103 * -Duplicate TLB entry is handled separately though
106 */ 104 */
107void do_machine_check_fault(unsigned long cause, unsigned long address, 105void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
108 struct pt_regs *regs)
109{ 106{
110 die("Machine Check Exception", regs, address, cause); 107 die("Machine Check Exception", regs, address);
111} 108}
112 109
113 110
@@ -120,23 +117,22 @@ void do_machine_check_fault(unsigned long cause, unsigned long address,
120 * -1 used for software breakpointing (gdb) 117 * -1 used for software breakpointing (gdb)
121 * -2 used by kprobes 118 * -2 used by kprobes
122 */ 119 */
123void do_non_swi_trap(unsigned long cause, unsigned long address, 120void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
124 struct pt_regs *regs)
125{ 121{
126 unsigned int param = cause & 0xff; 122 unsigned int param = regs->ecr_param;
127 123
128 switch (param) { 124 switch (param) {
129 case 1: 125 case 1:
130 trap_is_brkpt(cause, address, regs); 126 trap_is_brkpt(address, regs);
131 break; 127 break;
132 128
133 case 2: 129 case 2:
134 trap_is_kprobe(param, address, regs); 130 trap_is_kprobe(address, regs);
135 break; 131 break;
136 132
137 case 3: 133 case 3:
138 case 4: 134 case 4:
139 kgdb_trap(regs, param); 135 kgdb_trap(regs);
140 break; 136 break;
141 137
142 default: 138 default:
@@ -149,14 +145,14 @@ void do_non_swi_trap(unsigned long cause, unsigned long address,
149 * -For a corner case, ARC kprobes implementation resorts to using 145 * -For a corner case, ARC kprobes implementation resorts to using
150 * this exception, hence the check 146 * this exception, hence the check
151 */ 147 */
152void do_insterror_or_kprobe(unsigned long cause, 148void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
153 unsigned long address,
154 struct pt_regs *regs)
155{ 149{
150 int rc;
151
156 /* Check if this exception is caused by kprobes */ 152 /* Check if this exception is caused by kprobes */
157 if (notify_die(DIE_IERR, "kprobe_ierr", regs, address, 153 rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL);
158 cause, SIGILL) == NOTIFY_STOP) 154 if (rc == NOTIFY_STOP)
159 return; 155 return;
160 156
161 insterror_is_error(cause, address, regs); 157 insterror_is_error(address, regs);
162} 158}
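
With the cause register no longer threaded through, DO_ERROR_INFO() now generates two-argument handlers. Expanded by hand for the instance visible above (a reconstruction from the macro body, for illustration; the real preprocessor output may differ in detail):

	/* DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
	 * expands to roughly: */
	int do_misaligned_error(unsigned long address, struct pt_regs *regs)
	{
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_errno = 0,
			.si_code  = BUS_ADRALN,
			.si_addr  = (void __user *)address,
		};
		return handle_exception("Misaligned Access", regs, &info);
	}
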
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a03528ecd276..73a7450ee622 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -117,23 +117,22 @@ static void show_faulting_vma(unsigned long address, char *buf)
117 117
118static void show_ecr_verbose(struct pt_regs *regs) 118static void show_ecr_verbose(struct pt_regs *regs)
119{ 119{
120 unsigned int vec, cause_code, cause_reg; 120 unsigned int vec, cause_code;
121 unsigned long address; 121 unsigned long address;
122 122
123 cause_reg = current->thread.cause_code; 123 pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
124 pr_info("\n[ECR ]: 0x%08x => ", cause_reg);
125 124
126 /* For Data fault, this is data address not instruction addr */ 125 /* For Data fault, this is data address not instruction addr */
127 address = current->thread.fault_address; 126 address = current->thread.fault_address;
128 127
129 vec = cause_reg >> 16; 128 vec = regs->ecr_vec;
130 cause_code = (cause_reg >> 8) & 0xFF; 129 cause_code = regs->ecr_cause;
131 130
132 /* For DTLB Miss or ProtV, display the memory involved too */ 131 /* For DTLB Miss or ProtV, display the memory involved too */
133 if (vec == ECR_V_DTLB_MISS) { 132 if (vec == ECR_V_DTLB_MISS) {
134 pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", 133 pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
135 (cause_code == 0x01) ? "Read From" : 134 (cause_code == 0x01) ? "Read" :
136 ((cause_code == 0x02) ? "Write to" : "EX"), 135 ((cause_code == 0x02) ? "Write" : "EX"),
137 address, regs->ret); 136 address, regs->ret);
138 } else if (vec == ECR_V_ITLB_MISS) { 137 } else if (vec == ECR_V_ITLB_MISS) {
139 pr_cont("Insn could not be fetched\n"); 138 pr_cont("Insn could not be fetched\n");
@@ -144,14 +143,12 @@ static void show_ecr_verbose(struct pt_regs *regs)
144 } else if (vec == ECR_V_PROTV) { 143 } else if (vec == ECR_V_PROTV) {
145 if (cause_code == ECR_C_PROTV_INST_FETCH) 144 if (cause_code == ECR_C_PROTV_INST_FETCH)
146 pr_cont("Execute from Non-exec Page\n"); 145 pr_cont("Execute from Non-exec Page\n");
147 else if (cause_code == ECR_C_PROTV_LOAD)
148 pr_cont("Read from Non-readable Page\n");
149 else if (cause_code == ECR_C_PROTV_STORE)
150 pr_cont("Write to Non-writable Page\n");
151 else if (cause_code == ECR_C_PROTV_XCHG)
152 pr_cont("Data exchange protection violation\n");
153 else if (cause_code == ECR_C_PROTV_MISALIG_DATA) 146 else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
154 pr_cont("Misaligned r/w from 0x%08lx\n", address); 147 pr_cont("Misaligned r/w from 0x%08lx\n", address);
148 else
149 pr_cont("%s access not allowed on page\n",
150 (cause_code == 0x01) ? "Read" :
151 ((cause_code == 0x02) ? "Write" : "EX"));
155 } else if (vec == ECR_V_INSN_ERR) { 152 } else if (vec == ECR_V_INSN_ERR) {
156 pr_cont("Illegal Insn\n"); 153 pr_cont("Illegal Insn\n");
157 } else { 154 } else {
@@ -176,8 +173,7 @@ void show_regs(struct pt_regs *regs)
176 print_task_path_n_nm(tsk, buf); 173 print_task_path_n_nm(tsk, buf);
177 show_regs_print_info(KERN_INFO); 174 show_regs_print_info(KERN_INFO);
178 175
179 if (current->thread.cause_code) 176 show_ecr_verbose(regs);
180 show_ecr_verbose(regs);
181 177
182 pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", 178 pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
183 current->thread.fault_address, 179 current->thread.fault_address,
@@ -213,10 +209,9 @@ void show_regs(struct pt_regs *regs)
213} 209}
214 210
215void show_kernel_fault_diag(const char *str, struct pt_regs *regs, 211void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
216 unsigned long address, unsigned long cause_reg) 212 unsigned long address)
217{ 213{
218 current->thread.fault_address = address; 214 current->thread.fault_address = address;
219 current->thread.cause_code = cause_reg;
220 215
221 /* Caller and Callee regs */ 216 /* Caller and Callee regs */
222 show_regs(regs); 217 show_regs(regs);
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 4cd81633febd..c0f832f595d3 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -187,7 +187,7 @@ fault: state->fault = 1;
187 * Returns 0 if successfully handled, 1 if some error happened 187 * Returns 0 if successfully handled, 1 if some error happened
188 */ 188 */
189int misaligned_fixup(unsigned long address, struct pt_regs *regs, 189int misaligned_fixup(unsigned long address, struct pt_regs *regs,
190 unsigned long cause, struct callee_regs *cregs) 190 struct callee_regs *cregs)
191{ 191{
192 struct disasm_state state; 192 struct disasm_state state;
193 char buf[TASK_COMM_LEN]; 193 char buf[TASK_COMM_LEN];
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index a8d02223da44..e550b117ec4f 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -289,6 +289,8 @@ static void __init setup_unwind_table(struct unwind_table *table,
289 * instead of the initial loc addr 289 * instead of the initial loc addr
290 * return; 290 * return;
291 */ 291 */
292 WARN(1, "unwinder: FDE->initial_location NULL %p\n",
293 (const u8 *)(fde + 1) + *fde);
292 } 294 }
293 ++n; 295 ++n;
294 } 296 }
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index d3c92f52d444..2555f5886af6 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -125,6 +125,11 @@ SECTIONS
125 *(.debug_frame) 125 *(.debug_frame)
126 __end_unwind = .; 126 __end_unwind = .;
127 } 127 }
128 /*
129 * gcc 4.8 generates this for -fasynchronous-unwind-tables,
130 * while we still use the .debug_frame based unwinder
131 */
132 /DISCARD/ : { *(.eh_frame) }
128#else 133#else
129 /DISCARD/ : { *(.debug_frame) } 134 /DISCARD/ : { *(.debug_frame) }
130#endif 135#endif
@@ -142,15 +147,18 @@ SECTIONS
142 *(.arcextmap.*) 147 *(.arcextmap.*)
143 } 148 }
144 149
150#ifndef CONFIG_DEBUG_INFO
145 /* open-coded because we need .debug_frame separately for unwinding */ 151 /* open-coded because we need .debug_frame separately for unwinding */
146 .debug_aranges 0 : { *(.debug_aranges) } 152 /DISCARD/ : { *(.debug_aranges) }
147 .debug_pubnames 0 : { *(.debug_pubnames) } 153 /DISCARD/ : { *(.debug_pubnames) }
148 .debug_info 0 : { *(.debug_info) } 154 /DISCARD/ : { *(.debug_info) }
149 .debug_abbrev 0 : { *(.debug_abbrev) } 155 /DISCARD/ : { *(.debug_abbrev) }
150 .debug_line 0 : { *(.debug_line) } 156 /DISCARD/ : { *(.debug_line) }
151 .debug_str 0 : { *(.debug_str) } 157 /DISCARD/ : { *(.debug_str) }
152 .debug_loc 0 : { *(.debug_loc) } 158 /DISCARD/ : { *(.debug_loc) }
153 .debug_macinfo 0 : { *(.debug_macinfo) } 159 /DISCARD/ : { *(.debug_macinfo) }
160 /DISCARD/ : { *(.debug_ranges) }
161#endif
154 162
155#ifdef CONFIG_ARC_HAS_DCCM 163#ifdef CONFIG_ARC_HAS_DCCM
156 . = CONFIG_ARC_DCCM_BASE; 164 . = CONFIG_ARC_DCCM_BASE;
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index aedce1905441..f415d851b765 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -73,6 +73,33 @@
73#include <asm/cachectl.h> 73#include <asm/cachectl.h>
74#include <asm/setup.h> 74#include <asm/setup.h>
75 75
76/* Instruction cache related Auxiliary registers */
77#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
78#define ARC_REG_IC_IVIC 0x10
79#define ARC_REG_IC_CTRL 0x11
80#define ARC_REG_IC_IVIL 0x19
81#if (CONFIG_ARC_MMU_VER > 2)
82#define ARC_REG_IC_PTAG 0x1E
83#endif
84
85/* Bit val in IC_CTRL */
86#define IC_CTRL_CACHE_DISABLE 0x1
87
88/* Data cache related Auxiliary registers */
89#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
90#define ARC_REG_DC_IVDC 0x47
91#define ARC_REG_DC_CTRL 0x48
92#define ARC_REG_DC_IVDL 0x4A
93#define ARC_REG_DC_FLSH 0x4B
94#define ARC_REG_DC_FLDL 0x4C
95#if (CONFIG_ARC_MMU_VER > 2)
96#define ARC_REG_DC_PTAG 0x5C
97#endif
98
99/* Bit val in DC_CTRL */
100#define DC_CTRL_INV_MODE_FLUSH 0x40
101#define DC_CTRL_FLUSH_STATUS 0x100
102
76char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) 103char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
77{ 104{
78 int n = 0; 105 int n = 0;
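
The block above moves the I/D-cache auxiliary register numbers out of a shared header and into the only file that uses them. A minimal sketch of how a line-granular loop might drive these registers, assuming the read_aux_reg()/write_aux_reg() accessors and the ARC_DCACHE_LINE_LEN constant used elsewhere in this file (the helper name is hypothetical, not part of the patch):

    /*
     * Hedged sketch: invalidate sz bytes starting at paddr,
     * one cache line per write to ARC_REG_DC_IVDL.
     */
    static void dc_inv_lines_sketch(unsigned long paddr, unsigned long sz)
    {
            int num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

            while (num_lines-- > 0) {
                    write_aux_reg(ARC_REG_DC_IVDL, paddr);
                    paddr += ARC_DCACHE_LINE_LEN;
            }
    }
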
@@ -89,8 +116,10 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
89 enb ? "" : "DISABLED (kernel-build)"); \ 116 enb ? "" : "DISABLED (kernel-build)"); \
90} 117}
91 118
92 PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache"); 119 PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
93 PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache"); 120 "I-Cache");
121 PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
122 "D-Cache");
94 123
95 return buf; 124 return buf;
96} 125}
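
The hunk above replaces the hand-rolled __CONFIG_ARC_HAS_*CACHE integer macros with the generic IS_ENABLED(), which folds to a compile-time 1 when a Kconfig symbol is =y or =m and to 0 otherwise, so it can sit inside an ordinary C expression. Illustrative use, not taken from the patch:

    /* prints only in builds with CONFIG_ARC_HAS_ICACHE set */
    if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
            pr_info("I-Cache enabled in this kernel build\n");

The same macro appears in the plat-arcfpga hunks below, where its =m coverage matters because the ARC UART driver can be built modular.
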
@@ -100,17 +129,23 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
100 * the cpuinfo structure for later use. 129 * the cpuinfo structure for later use.
101 * No Validation done here, simply read/convert the BCRs 130 * No Validation done here, simply read/convert the BCRs
102 */ 131 */
103void __cpuinit read_decode_cache_bcr(void) 132void read_decode_cache_bcr(void)
104{ 133{
105 struct bcr_cache ibcr, dbcr;
106 struct cpuinfo_arc_cache *p_ic, *p_dc; 134 struct cpuinfo_arc_cache *p_ic, *p_dc;
107 unsigned int cpu = smp_processor_id(); 135 unsigned int cpu = smp_processor_id();
136 struct bcr_cache {
137#ifdef CONFIG_CPU_BIG_ENDIAN
138 unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
139#else
140 unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
141#endif
142 } ibcr, dbcr;
108 143
109 p_ic = &cpuinfo_arc700[cpu].icache; 144 p_ic = &cpuinfo_arc700[cpu].icache;
110 READ_BCR(ARC_REG_IC_BCR, ibcr); 145 READ_BCR(ARC_REG_IC_BCR, ibcr);
111 146
112 if (ibcr.config == 0x3) 147 BUG_ON(ibcr.config != 3);
113 p_ic->assoc = 2; 148 p_ic->assoc = 2; /* Fixed to 2w set assoc */
114 p_ic->line_len = 8 << ibcr.line_len; 149 p_ic->line_len = 8 << ibcr.line_len;
115 p_ic->sz = 0x200 << ibcr.sz; 150 p_ic->sz = 0x200 << ibcr.sz;
116 p_ic->ver = ibcr.ver; 151 p_ic->ver = ibcr.ver;
@@ -118,8 +153,8 @@ void __cpuinit read_decode_cache_bcr(void)
118 p_dc = &cpuinfo_arc700[cpu].dcache; 153 p_dc = &cpuinfo_arc700[cpu].dcache;
119 READ_BCR(ARC_REG_DC_BCR, dbcr); 154 READ_BCR(ARC_REG_DC_BCR, dbcr);
120 155
121 if (dbcr.config == 0x2) 156 BUG_ON(dbcr.config != 2);
122 p_dc->assoc = 4; 157 p_dc->assoc = 4; /* Fixed to 4w set assoc */
123 p_dc->line_len = 16 << dbcr.line_len; 158 p_dc->line_len = 16 << dbcr.line_len;
124 p_dc->sz = 0x200 << dbcr.sz; 159 p_dc->sz = 0x200 << dbcr.sz;
125 p_dc->ver = dbcr.ver; 160 p_dc->ver = dbcr.ver;
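
The BCR size fields are log2-encoded, which the shifts above decode. A worked example with assumed field values, not read from any particular part:

    /* dbcr.line_len == 1  ->  16 << 1    ==  32-byte lines   */
    /* dbcr.sz       == 5  ->  0x200 << 5 ==  16 KB total size */
    p_dc->line_len = 16 << dbcr.line_len;
    p_dc->sz       = 0x200 << dbcr.sz;
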
@@ -132,14 +167,12 @@ void __cpuinit read_decode_cache_bcr(void)
132 * 3. Enable the Caches, setup default flush mode for D-Cache 167 * 3. Enable the Caches, setup default flush mode for D-Cache
 133 * 4. Calculate the SHMLBA used by user space 168 * 4. Calculate the SHMLBA used by user space
134 */ 169 */
135void __cpuinit arc_cache_init(void) 170void arc_cache_init(void)
136{ 171{
137 unsigned int temp;
138 unsigned int cpu = smp_processor_id(); 172 unsigned int cpu = smp_processor_id();
139 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 173 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
140 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; 174 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
141 int way_pg_ratio = way_pg_ratio; 175 unsigned int dcache_does_alias, temp;
142 int dcache_does_alias;
143 char str[256]; 176 char str[256];
144 177
145 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); 178 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -149,20 +182,11 @@ void __cpuinit arc_cache_init(void)
149 182
150#ifdef CONFIG_ARC_HAS_ICACHE 183#ifdef CONFIG_ARC_HAS_ICACHE
151 /* 1. Confirm some of I-cache params which Linux assumes */ 184 /* 1. Confirm some of I-cache params which Linux assumes */
152 if ((ic->assoc != ARC_ICACHE_WAYS) || 185 if (ic->line_len != ARC_ICACHE_LINE_LEN)
153 (ic->line_len != ARC_ICACHE_LINE_LEN)) {
154 panic("Cache H/W doesn't match kernel Config"); 186 panic("Cache H/W doesn't match kernel Config");
155 }
156#if (CONFIG_ARC_MMU_VER > 2)
157 if (ic->ver != 3) {
158 if (running_on_hw)
159 panic("Cache ver doesn't match MMU ver\n");
160
161 /* For ISS - suggest the toggles to use */
162 pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
163 187
164 } 188 if (ic->ver != CONFIG_ARC_MMU_VER)
165#endif 189 panic("Cache ver doesn't match MMU ver\n");
166#endif 190#endif
167 191
168 /* Enable/disable I-Cache */ 192 /* Enable/disable I-Cache */
@@ -181,14 +205,12 @@ chk_dc:
181 return; 205 return;
182 206
183#ifdef CONFIG_ARC_HAS_DCACHE 207#ifdef CONFIG_ARC_HAS_DCACHE
184 if ((dc->assoc != ARC_DCACHE_WAYS) || 208 if (dc->line_len != ARC_DCACHE_LINE_LEN)
185 (dc->line_len != ARC_DCACHE_LINE_LEN)) {
186 panic("Cache H/W doesn't match kernel Config"); 209 panic("Cache H/W doesn't match kernel Config");
187 }
188
189 dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
190 210
191 /* check for D-Cache aliasing */ 211 /* check for D-Cache aliasing */
212 dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
213
192 if (dcache_does_alias && !cache_is_vipt_aliasing()) 214 if (dcache_does_alias && !cache_is_vipt_aliasing())
193 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); 215 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
194 else if (!dcache_does_alias && cache_is_vipt_aliasing()) 216 else if (!dcache_does_alias && cache_is_vipt_aliasing())
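
The reworked aliasing test reads the associativity probed from the BCR instead of the build-time ARC_DCACHE_WAYS constant. Why the comparison detects VIPT aliasing, with assumed numbers:

    /*
     * e.g. a 32 KB, 4-way D-cache has a 32K/4 = 8 KB way ("color") size;
     * with 4 KB pages the index then uses a virtual-address bit above
     * PAGE_SHIFT, so one physical page can land in two colors. That is
     * exactly the case (dc->sz / dc->assoc) > PAGE_SIZE.
     */
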
@@ -239,11 +261,9 @@ static inline void wait_for_flush(void)
239 */ 261 */
240static inline void __dc_entire_op(const int cacheop) 262static inline void __dc_entire_op(const int cacheop)
241{ 263{
242 unsigned long flags, tmp = tmp; 264 unsigned int tmp = tmp;
243 int aux; 265 int aux;
244 266
245 local_irq_save(flags);
246
247 if (cacheop == OP_FLUSH_N_INV) { 267 if (cacheop == OP_FLUSH_N_INV) {
248 /* Dcache provides 2 cmd: FLUSH or INV 268 /* Dcache provides 2 cmd: FLUSH or INV
 249 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE 269 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
@@ -267,8 +287,6 @@ static inline void __dc_entire_op(const int cacheop)
267 /* Switch back the DISCARD ONLY Invalidate mode */ 287 /* Switch back the DISCARD ONLY Invalidate mode */
268 if (cacheop == OP_FLUSH_N_INV) 288 if (cacheop == OP_FLUSH_N_INV)
269 write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); 289 write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
270
271 local_irq_restore(flags);
272} 290}
273 291
274/* 292/*
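
With the local_irq_save()/restore() pair dropped, the entire-cache op reduces to toggling the invalidate mode around the all-invalidate command. A sketch of the resulting OP_FLUSH_N_INV sequence, using only registers defined earlier in this file:

    unsigned int ctrl = read_aux_reg(ARC_REG_DC_CTRL);

    /* make "invalidate" mean "flush, then invalidate" */
    write_aux_reg(ARC_REG_DC_CTRL, ctrl | DC_CTRL_INV_MODE_FLUSH);
    write_aux_reg(ARC_REG_DC_IVDC, 0x1);            /* invalidate all */
    while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
            ;                                       /* wait_for_flush() */
    /* restore plain discard-on-invalidate */
    write_aux_reg(ARC_REG_DC_CTRL, ctrl & ~DC_CTRL_INV_MODE_FLUSH);
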
@@ -459,8 +477,15 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
459 local_irq_restore(flags); 477 local_irq_restore(flags);
460} 478}
461 479
480static inline void __ic_entire_inv(void)
481{
482 write_aux_reg(ARC_REG_IC_IVIC, 1);
483 read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
484}
485
462#else 486#else
463 487
488#define __ic_entire_inv()
464#define __ic_line_inv_vaddr(pstart, vstart, sz) 489#define __ic_line_inv_vaddr(pstart, vstart, sz)
465 490
466#endif /* CONFIG_ARC_HAS_ICACHE */ 491#endif /* CONFIG_ARC_HAS_ICACHE */
@@ -487,7 +512,7 @@ void flush_dcache_page(struct page *page)
487 struct address_space *mapping; 512 struct address_space *mapping;
488 513
489 if (!cache_is_vipt_aliasing()) { 514 if (!cache_is_vipt_aliasing()) {
490 set_bit(PG_arch_1, &page->flags); 515 clear_bit(PG_dc_clean, &page->flags);
491 return; 516 return;
492 } 517 }
493 518
@@ -501,7 +526,7 @@ void flush_dcache_page(struct page *page)
501 * Make a note that K-mapping is dirty 526 * Make a note that K-mapping is dirty
502 */ 527 */
503 if (!mapping_mapped(mapping)) { 528 if (!mapping_mapped(mapping)) {
504 set_bit(PG_arch_1, &page->flags); 529 clear_bit(PG_dc_clean, &page->flags);
505 } else if (page_mapped(page)) { 530 } else if (page_mapped(page)) {
506 531
507 /* kernel reading from page with U-mapping */ 532 /* kernel reading from page with U-mapping */
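
Note the flag's sense is inverted along with the rename: PG_arch_1 was set to mean "kernel mapping dirty, flush before use", while PG_dc_clean is set to mean "dcache is clean". Freshly allocated pages start with the bit clear, so they now default to the safe "needs flushing" state. The consumer-side idiom (see the tlb.c hunk further down):

    /* returns true exactly once per dirtying, then marks the page clean */
    int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);

    if (dirty)
            __flush_dcache_page(paddr, paddr);      /* wback + inv */
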
@@ -629,26 +654,13 @@ void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
629 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); 654 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
630} 655}
631 656
632void flush_icache_all(void)
633{
634 unsigned long flags;
635
636 local_irq_save(flags);
637
638 write_aux_reg(ARC_REG_IC_IVIC, 1);
639
 640 /* lr will not complete till the icache inv operation is over */
641 read_aux_reg(ARC_REG_IC_CTRL);
642 local_irq_restore(flags);
643}
644
645noinline void flush_cache_all(void) 657noinline void flush_cache_all(void)
646{ 658{
647 unsigned long flags; 659 unsigned long flags;
648 660
649 local_irq_save(flags); 661 local_irq_save(flags);
650 662
651 flush_icache_all(); 663 __ic_entire_inv();
652 __dc_entire_op(OP_FLUSH_N_INV); 664 __dc_entire_op(OP_FLUSH_N_INV);
653 665
654 local_irq_restore(flags); 666 local_irq_restore(flags);
@@ -667,7 +679,12 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
667{ 679{
668 unsigned int paddr = pfn << PAGE_SHIFT; 680 unsigned int paddr = pfn << PAGE_SHIFT;
669 681
670 __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); 682 u_vaddr &= PAGE_MASK;
683
684 ___flush_dcache_page(paddr, u_vaddr);
685
686 if (vma->vm_flags & VM_EXEC)
687 __inv_icache_page(paddr, u_vaddr);
671} 688}
672 689
673void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 690void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -717,7 +734,7 @@ void copy_user_highpage(struct page *to, struct page *from,
717 * non copied user pages (e.g. read faults which wire in pagecache page 734 * non copied user pages (e.g. read faults which wire in pagecache page
718 * directly). 735 * directly).
719 */ 736 */
720 set_bit(PG_arch_1, &to->flags); 737 clear_bit(PG_dc_clean, &to->flags);
721 738
722 /* 739 /*
723 * if SRC was already usermapped and non-congruent to kernel mapping 740 * if SRC was already usermapped and non-congruent to kernel mapping
@@ -725,15 +742,16 @@ void copy_user_highpage(struct page *to, struct page *from,
725 */ 742 */
726 if (clean_src_k_mappings) { 743 if (clean_src_k_mappings) {
727 __flush_dcache_page(kfrom, kfrom); 744 __flush_dcache_page(kfrom, kfrom);
745 set_bit(PG_dc_clean, &from->flags);
728 } else { 746 } else {
729 set_bit(PG_arch_1, &from->flags); 747 clear_bit(PG_dc_clean, &from->flags);
730 } 748 }
731} 749}
732 750
733void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) 751void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
734{ 752{
735 clear_page(to); 753 clear_page(to);
736 set_bit(PG_arch_1, &page->flags); 754 clear_bit(PG_dc_clean, &page->flags);
737} 755}
738 756
739 757
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 689ffd86d5e9..318164cabdfc 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -15,6 +15,7 @@
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/kdebug.h> 16#include <linux/kdebug.h>
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/mmu.h>
18 19
19static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) 20static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
20{ 21{
@@ -51,14 +52,14 @@ bad_area:
51 return 1; 52 return 1;
52} 53}
53 54
54void do_page_fault(struct pt_regs *regs, int write, unsigned long address, 55void do_page_fault(struct pt_regs *regs, unsigned long address)
55 unsigned long cause_code)
56{ 56{
57 struct vm_area_struct *vma = NULL; 57 struct vm_area_struct *vma = NULL;
58 struct task_struct *tsk = current; 58 struct task_struct *tsk = current;
59 struct mm_struct *mm = tsk->mm; 59 struct mm_struct *mm = tsk->mm;
60 siginfo_t info; 60 siginfo_t info;
61 int fault, ret; 61 int fault, ret;
62 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
62 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 63 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
63 (write ? FAULT_FLAG_WRITE : 0); 64 (write ? FAULT_FLAG_WRITE : 0);
64 65
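
The write flag formerly computed in the TLB-miss assembly (see the tlbex.S hunk below) is now derived from the Exception Cause Register image saved in pt_regs. Per the comment removed from tlbex.S, the ST (0x02) and EX (0x03) cause codes share a bit that LD (0x01) lacks, so a single mask test classifies the access:

    int write = regs->ecr_cause & ECR_C_PROTV_STORE;        /* ST or EX */
    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                         (write ? FAULT_FLAG_WRITE : 0);
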
@@ -109,7 +110,8 @@ good_area:
109 110
110 /* Handle protection violation, execute on heap or stack */ 111 /* Handle protection violation, execute on heap or stack */
111 112
112 if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH)) 113 if ((regs->ecr_vec == ECR_V_PROTV) &&
114 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
113 goto bad_area; 115 goto bad_area;
114 116
115 if (write) { 117 if (write) {
@@ -176,7 +178,6 @@ bad_area_nosemaphore:
176 /* User mode accesses just cause a SIGSEGV */ 178 /* User mode accesses just cause a SIGSEGV */
177 if (user_mode(regs)) { 179 if (user_mode(regs)) {
178 tsk->thread.fault_address = address; 180 tsk->thread.fault_address = address;
179 tsk->thread.cause_code = cause_code;
180 info.si_signo = SIGSEGV; 181 info.si_signo = SIGSEGV;
181 info.si_errno = 0; 182 info.si_errno = 0;
182 /* info.si_code has been set above */ 183 /* info.si_code has been set above */
@@ -197,7 +198,7 @@ no_context:
197 if (fixup_exception(regs)) 198 if (fixup_exception(regs))
198 return; 199 return;
199 200
200 die("Oops", regs, address, cause_code); 201 die("Oops", regs, address);
201 202
202out_of_memory: 203out_of_memory:
203 if (is_global_init(tsk)) { 204 if (is_global_init(tsk)) {
@@ -218,7 +219,6 @@ do_sigbus:
218 goto no_context; 219 goto no_context;
219 220
220 tsk->thread.fault_address = address; 221 tsk->thread.fault_address = address;
221 tsk->thread.cause_code = cause_code;
222 info.si_signo = SIGBUS; 222 info.si_signo = SIGBUS;
223 info.si_errno = 0; 223 info.si_errno = 0;
224 info.si_code = BUS_ADRERR; 224 info.si_code = BUS_ADRERR;
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index fe1c5a073afe..7957dc4e4d4a 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -55,7 +55,7 @@
55#include <asm/arcregs.h> 55#include <asm/arcregs.h>
56#include <asm/setup.h> 56#include <asm/setup.h>
57#include <asm/mmu_context.h> 57#include <asm/mmu_context.h>
58#include <asm/tlb.h> 58#include <asm/mmu.h>
59 59
60/* Need for ARC MMU v2 60/* Need for ARC MMU v2
61 * 61 *
@@ -97,6 +97,7 @@
97 * J-TLB entry got evicted/replaced. 97 * J-TLB entry got evicted/replaced.
98 */ 98 */
99 99
100
100/* A copy of the ASID from the PID reg is kept in asid_cache */ 101/* A copy of the ASID from the PID reg is kept in asid_cache */
101int asid_cache = FIRST_ASID; 102int asid_cache = FIRST_ASID;
102 103
@@ -432,9 +433,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
432{ 433{
433 unsigned long vaddr = vaddr_unaligned & PAGE_MASK; 434 unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
434 unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 435 unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
436 struct page *page = pfn_to_page(pte_pfn(*ptep));
435 437
436 create_tlb(vma, vaddr, ptep); 438 create_tlb(vma, vaddr, ptep);
437 439
440 if (page == ZERO_PAGE(0)) {
441 return;
442 }
443
438 /* 444 /*
439 * Exec page : Independent of aliasing/page-color considerations, 445 * Exec page : Independent of aliasing/page-color considerations,
440 * since icache doesn't snoop dcache on ARC, any dirty 446 * since icache doesn't snoop dcache on ARC, any dirty
@@ -446,9 +452,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
446 */ 452 */
447 if ((vma->vm_flags & VM_EXEC) || 453 if ((vma->vm_flags & VM_EXEC) ||
448 addr_not_cache_congruent(paddr, vaddr)) { 454 addr_not_cache_congruent(paddr, vaddr)) {
449 struct page *page = pfn_to_page(pte_pfn(*ptep));
450 455
451 int dirty = test_and_clear_bit(PG_arch_1, &page->flags); 456 int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
452 if (dirty) { 457 if (dirty) {
453 /* wback + inv dcache lines */ 458 /* wback + inv dcache lines */
454 __flush_dcache_page(paddr, paddr); 459 __flush_dcache_page(paddr, paddr);
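
Two things change above: the page lookup is hoisted so the empty zero page can be skipped, and the dirty test becomes the inverted PG_dc_clean idiom. The zero page is mapped read-only into every process and is never written through a user mapping, so cache maintenance on it is wasted work; the TLB entry itself is still created before bailing out:

    struct page *page = pfn_to_page(pte_pfn(*ptep));

    create_tlb(vma, vaddr, ptep);   /* the mapping itself is still wanted */

    if (page == ZERO_PAGE(0))       /* shared zero page: nothing to flush */
            return;
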
@@ -464,12 +469,27 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
464 * the cpuinfo structure for later use. 469 * the cpuinfo structure for later use.
465 * No Validation is done here, simply read/convert the BCRs 470 * No Validation is done here, simply read/convert the BCRs
466 */ 471 */
467void __cpuinit read_decode_mmu_bcr(void) 472void read_decode_mmu_bcr(void)
468{ 473{
469 unsigned int tmp;
470 struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
471 struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
472 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 474 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
475 unsigned int tmp;
476 struct bcr_mmu_1_2 {
477#ifdef CONFIG_CPU_BIG_ENDIAN
478 unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
479#else
480 unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
481#endif
482 } *mmu2;
483
484 struct bcr_mmu_3 {
485#ifdef CONFIG_CPU_BIG_ENDIAN
486 unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
487 u_itlb:4, u_dtlb:4;
488#else
489 unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
490 ways:4, ver:8;
491#endif
492 } *mmu3;
473 493
474 tmp = read_aux_reg(ARC_REG_MMU_BCR); 494 tmp = read_aux_reg(ARC_REG_MMU_BCR);
475 mmu->ver = (tmp >> 24); 495 mmu->ver = (tmp >> 24);
@@ -505,12 +525,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
505 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", 525 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
506 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, 526 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
507 p_mmu->u_dtlb, p_mmu->u_itlb, 527 p_mmu->u_dtlb, p_mmu->u_itlb,
508 __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : ""); 528 IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");
509 529
510 return buf; 530 return buf;
511} 531}
512 532
513void __cpuinit arc_mmu_init(void) 533void arc_mmu_init(void)
514{ 534{
515 char str[256]; 535 char str[256];
516 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; 536 struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 3357d26ffe54..5c5bb23001b0 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -39,7 +39,7 @@
39 39
40#include <linux/linkage.h> 40#include <linux/linkage.h>
41#include <asm/entry.h> 41#include <asm/entry.h>
42#include <asm/tlb.h> 42#include <asm/mmu.h>
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/arcregs.h> 44#include <asm/arcregs.h>
45#include <asm/cache.h> 45#include <asm/cache.h>
@@ -147,9 +147,9 @@ ex_saved_reg1:
147#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT 147#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
148 and.f 0, r0, _PAGE_PRESENT 148 and.f 0, r0, _PAGE_PRESENT
149 bz 1f 149 bz 1f
150 ld r2, [num_pte_not_present] 150 ld r3, [num_pte_not_present]
151 add r2, r2, 1 151 add r3, r3, 1
152 st r2, [num_pte_not_present] 152 st r3, [num_pte_not_present]
1531: 1531:
154#endif 154#endif
155 155
@@ -271,22 +271,22 @@ ARC_ENTRY EV_TLBMissI
271#endif 271#endif
272 272
273 ;---------------------------------------------------------------- 273 ;----------------------------------------------------------------
 274 ; Get the PTE corresponding to V-addr accessed 274 ; Get the PTE corresponding to V-addr accessed, r2 is set up with EFA
275 LOAD_FAULT_PTE 275 LOAD_FAULT_PTE
276 276
277 ;---------------------------------------------------------------- 277 ;----------------------------------------------------------------
278 ; VERIFY_PTE: Check if PTE permissions approp for executing code 278 ; VERIFY_PTE: Check if PTE permissions approp for executing code
279 cmp_s r2, VMALLOC_START 279 cmp_s r2, VMALLOC_START
280 mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) 280 mov.lo r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
281 mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) 281 mov.hs r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
282 282
283 and r3, r0, r2 ; Mask out NON Flag bits from PTE 283 and r3, r0, r2 ; Mask out NON Flag bits from PTE
284 xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) 284 xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
285 bnz do_slow_path_pf 285 bnz do_slow_path_pf
286 286
287 ; Let Linux VM know that the page was accessed 287 ; Let Linux VM know that the page was accessed
288 or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit 288 or r0, r0, _PAGE_ACCESSED ; set Accessed Bit
289 st_s r0, [r1] ; Write back PTE 289 st_s r0, [r1] ; Write back PTE
290 290
291 CONV_PTE_TO_TLB 291 CONV_PTE_TO_TLB
292 COMMIT_ENTRY_TO_MMU 292 COMMIT_ENTRY_TO_MMU
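
In C terms the VERIFY_PTE step requires every needed permission bit at once, and the patch trims the mask to what an I-TLB miss actually has to prove: presence and execute permission. Read permission is irrelevant for an instruction fetch. Equivalent check for the user-address case:

    unsigned int need = _PAGE_PRESENT | _PAGE_U_EXECUTE;

    if ((pte & need) != need)
            /* punt to the C fault handler (do_slow_path_pf) */
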
@@ -311,7 +311,7 @@ ARC_ENTRY EV_TLBMissD
311 311
312 ;---------------------------------------------------------------- 312 ;----------------------------------------------------------------
313 ; Get the PTE corresponding to V-addr accessed 313 ; Get the PTE corresponding to V-addr accessed
 314 ; If PTE exists, it will set up r0 = PTE, r1 = Ptr to PTE 314 ; If PTE exists, it will set up r0 = PTE, r1 = Ptr to PTE, r2 = EFA
315 LOAD_FAULT_PTE 315 LOAD_FAULT_PTE
316 316
317 ;---------------------------------------------------------------- 317 ;----------------------------------------------------------------
@@ -345,7 +345,7 @@ ARC_ENTRY EV_TLBMissD
345 ;---------------------------------------------------------------- 345 ;----------------------------------------------------------------
346 ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty 346 ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
347 lr r3, [ecr] 347 lr r3, [ecr]
348 or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always 348 or r0, r0, _PAGE_ACCESSED ; Accessed bit always
349 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? 349 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
350 or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well 350 or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
351 st_s r0, [r1] ; Write back PTE 351 st_s r0, [r1] ; Write back PTE
@@ -381,18 +381,7 @@ do_slow_path_pf:
381 381
 382 ; ------- setup args for Linux Page fault Handler --------- 382 ; ------- setup args for Linux Page fault Handler ---------
383 mov_s r0, sp 383 mov_s r0, sp
384 lr r2, [efa] 384 lr r1, [efa]
385 lr r3, [ecr]
386
387 ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
388 ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
389 ; DTLB-ld Miss
390 ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
391 ; Following code uses that fact that st/ex have one bit in common
392
393 btst_s r3, ECR_C_BIT_DTLB_ST_MISS
394 mov.z r1, 0
395 mov.nz r1, 1
396 385
397 ; We don't want exceptions to be disabled while the fault is handled. 386 ; We don't want exceptions to be disabled while the fault is handled.
398 ; Now that we have saved the context we return from exception hence 387 ; Now that we have saved the context we return from exception hence
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
index b3700c064c06..d71f3c3bcf24 100644
--- a/arch/arc/plat-arcfpga/platform.c
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -77,6 +77,7 @@ static void __init setup_bvci_lat_unit(void)
77 77
78/*----------------------- Platform Devices -----------------------------*/ 78/*----------------------- Platform Devices -----------------------------*/
79 79
80#if IS_ENABLED(CONFIG_SERIAL_ARC)
80static unsigned long arc_uart_info[] = { 81static unsigned long arc_uart_info[] = {
81 0, /* uart->is_emulated (runtime @running_on_hw) */ 82 0, /* uart->is_emulated (runtime @running_on_hw) */
82 0, /* uart->port.uartclk */ 83 0, /* uart->port.uartclk */
@@ -115,7 +116,7 @@ static struct platform_device arc_uart0_dev = {
115static struct platform_device *fpga_early_devs[] __initdata = { 116static struct platform_device *fpga_early_devs[] __initdata = {
116 &arc_uart0_dev, 117 &arc_uart0_dev,
117}; 118};
118#endif 119#endif /* CONFIG_SERIAL_ARC_CONSOLE */
119 120
120static void arc_fpga_serial_init(void) 121static void arc_fpga_serial_init(void)
121{ 122{
@@ -152,8 +153,13 @@ static void arc_fpga_serial_init(void)
152 * otherwise the early console never gets a chance to run. 153 * otherwise the early console never gets a chance to run.
153 */ 154 */
154 add_preferred_console("ttyARC", 0, "115200"); 155 add_preferred_console("ttyARC", 0, "115200");
155#endif 156#endif /* CONFIG_SERIAL_ARC_CONSOLE */
157}
158#else /* !IS_ENABLED(CONFIG_SERIAL_ARC) */
159static void arc_fpga_serial_init(void)
160{
156} 161}
162#endif
157 163
158static void __init plat_fpga_early_init(void) 164static void __init plat_fpga_early_init(void)
159{ 165{
@@ -169,7 +175,7 @@ static void __init plat_fpga_early_init(void)
169} 175}
170 176
171static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = { 177static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
172#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE) 178#if IS_ENABLED(CONFIG_SERIAL_ARC)
173 OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info), 179 OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
174#endif 180#endif
175 {} 181 {}
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 120b83bfde20..48d0a44270bd 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -27,7 +27,7 @@ OBJS += misc.o decompress.o
27ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) 27ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
28OBJS += debug.o 28OBJS += debug.o
29endif 29endif
30FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c 30FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c
31 31
32# string library code (-Os is enforced to keep it much smaller) 32# string library code (-Os is enforced to keep it much smaller)
33OBJS += string.o 33OBJS += string.o
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h
deleted file mode 100644
index 68cb9e1dfb81..000000000000
--- a/arch/arm/include/asm/kvm_arch_timer.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
20#define __ASM_ARM_KVM_ARCH_TIMER_H
21
22#include <linux/clocksource.h>
23#include <linux/hrtimer.h>
24#include <linux/workqueue.h>
25
26struct arch_timer_kvm {
27#ifdef CONFIG_KVM_ARM_TIMER
28 /* Is the timer enabled */
29 bool enabled;
30
31 /* Virtual offset */
32 cycle_t cntvoff;
33#endif
34};
35
36struct arch_timer_cpu {
37#ifdef CONFIG_KVM_ARM_TIMER
38 /* Registers: control register, timer value */
39 u32 cntv_ctl; /* Saved/restored */
40 cycle_t cntv_cval; /* Saved/restored */
41
42 /*
43 * Anything that is not used directly from assembly code goes
44 * here.
45 */
46
47 /* Background timer used when the guest is not running */
48 struct hrtimer timer;
49
 51 /* Work queued when the above timer expires */
51 struct work_struct expired;
52
53 /* Background timer active */
54 bool armed;
55
56 /* Timer IRQ */
57 const struct kvm_irq_level *irq;
58#endif
59};
60
61#ifdef CONFIG_KVM_ARM_TIMER
62int kvm_timer_hyp_init(void);
63int kvm_timer_init(struct kvm *kvm);
64void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
65void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
66void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
67void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
68#else
69static inline int kvm_timer_hyp_init(void)
70{
71 return 0;
72};
73
74static inline int kvm_timer_init(struct kvm *kvm)
75{
76 return 0;
77}
78
79static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
80static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
81static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
82static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
83#endif
84
85#endif
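
The header is relocated rather than removed: the kvm_host.h hunk below now pulls it from the architecture-neutral include tree shared with arm64:

    #include <kvm/arm_arch_timer.h>    /* new home of these declarations */
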
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 124623e5ef14..64e96960de29 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -135,7 +135,6 @@
135#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) 135#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
136#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) 136#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
137#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) 137#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
138#define S2_PGD_SIZE (1 << S2_PGD_ORDER)
139 138
140/* Virtualization Translation Control Register (VTCR) bits */ 139/* Virtualization Translation Control Register (VTCR) bits */
141#define VTCR_SH0 (3 << 12) 140#define VTCR_SH0 (3 << 12)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 18d50322a9e2..a2f43ddcc300 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -37,16 +37,18 @@
 37#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */ 37#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
38#define c6_DFAR 16 /* Data Fault Address Register */ 38#define c6_DFAR 16 /* Data Fault Address Register */
39#define c6_IFAR 17 /* Instruction Fault Address Register */ 39#define c6_IFAR 17 /* Instruction Fault Address Register */
40#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ 40#define c7_PAR 18 /* Physical Address Register */
41#define c10_PRRR 19 /* Primary Region Remap Register */ 41#define c7_PAR_high 19 /* PAR top 32 bits */
42#define c10_NMRR 20 /* Normal Memory Remap Register */ 42#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */
43#define c12_VBAR 21 /* Vector Base Address Register */ 43#define c10_PRRR 21 /* Primary Region Remap Register */
44#define c13_CID 22 /* Context ID Register */ 44#define c10_NMRR 22 /* Normal Memory Remap Register */
45#define c13_TID_URW 23 /* Thread ID, User R/W */ 45#define c12_VBAR 23 /* Vector Base Address Register */
46#define c13_TID_URO 24 /* Thread ID, User R/O */ 46#define c13_CID 24 /* Context ID Register */
47#define c13_TID_PRIV 25 /* Thread ID, Privileged */ 47#define c13_TID_URW 25 /* Thread ID, User R/W */
48#define c14_CNTKCTL 26 /* Timer Control Register (PL1) */ 48#define c13_TID_URO 26 /* Thread ID, User R/O */
49#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */ 49#define c13_TID_PRIV 27 /* Thread ID, Privileged */
50#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
51#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
50 52
51#define ARM_EXCEPTION_RESET 0 53#define ARM_EXCEPTION_RESET 0
52#define ARM_EXCEPTION_UNDEFINED 1 54#define ARM_EXCEPTION_UNDEFINED 1
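
PAR is a 64-bit register, so it occupies two consecutive u32 slots of the vcpu's cp15 shadow array, which is why c9_L2CTLR and every later index move up by two. A hedged helper showing the assumed layout (the function is illustrative, not part of the patch):

    /* sketch only: reassemble the 64-bit PAR from its two u32 slots */
    static inline u64 vcpu_par(const struct kvm_vcpu *vcpu)
    {
            return ((u64)vcpu->arch.cp15[c7_PAR_high] << 32) |
                   vcpu->arch.cp15[c7_PAR];
    }
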
@@ -72,8 +74,6 @@ extern char __kvm_hyp_vector[];
72extern char __kvm_hyp_code_start[]; 74extern char __kvm_hyp_code_start[];
73extern char __kvm_hyp_code_end[]; 75extern char __kvm_hyp_code_end[];
74 76
75extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
76
77extern void __kvm_flush_vm_context(void); 77extern void __kvm_flush_vm_context(void);
78extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 78extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
79 79
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 82b4babead2c..a464e8d7b6c5 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -65,11 +65,6 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 65 return cpsr_mode > USR_MODE; 65 return cpsr_mode > USR_MODE;
66} 66}
67 67
68static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
69{
70 return reg == 15;
71}
72
73static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) 68static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
74{ 69{
75 return vcpu->arch.fault.hsr; 70 return vcpu->arch.fault.hsr;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 57cb786a6203..7d22517d8071 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -23,9 +23,14 @@
23#include <asm/kvm_asm.h> 23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h> 24#include <asm/kvm_mmio.h>
25#include <asm/fpstate.h> 25#include <asm/fpstate.h>
26#include <asm/kvm_arch_timer.h> 26#include <kvm/arm_arch_timer.h>
27 27
28#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
28#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS 29#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
30#else
31#define KVM_MAX_VCPUS 0
32#endif
33
29#define KVM_USER_MEM_SLOTS 32 34#define KVM_USER_MEM_SLOTS 32
30#define KVM_PRIVATE_MEM_SLOTS 4 35#define KVM_PRIVATE_MEM_SLOTS 4
31#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 36#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
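
The fallback is needed because the Kconfig hunk below makes KVM_ARM_MAX_VCPUS depend on KVM_ARM_HOST, so in non-host configs the symbol is entirely undefined rather than 0. A zero fallback keeps dependent code compiling; for instance, arrays sized by it simply become empty:

    /* zero-sized when built without KVM_ARM_HOST (illustrative) */
    u8 irq_sgi_sources[KVM_MAX_VCPUS][VGIC_NR_SGIS];
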
@@ -38,7 +43,7 @@
38#define KVM_NR_PAGE_SIZES 1 43#define KVM_NR_PAGE_SIZES 1
39#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) 44#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
40 45
41#include <asm/kvm_vgic.h> 46#include <kvm/arm_vgic.h>
42 47
43struct kvm_vcpu; 48struct kvm_vcpu;
44u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); 49u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
@@ -190,8 +195,8 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
190int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 195int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
191 int exception_index); 196 int exception_index);
192 197
193static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr, 198static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
194 unsigned long long pgd_ptr, 199 phys_addr_t pgd_ptr,
195 unsigned long hyp_stack_ptr, 200 unsigned long hyp_stack_ptr,
196 unsigned long vector_ptr) 201 unsigned long vector_ptr)
197{ 202{
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
deleted file mode 100644
index 343744e4809c..000000000000
--- a/arch/arm/include/asm/kvm_vgic.h
+++ /dev/null
@@ -1,220 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __ASM_ARM_KVM_VGIC_H
20#define __ASM_ARM_KVM_VGIC_H
21
22#include <linux/kernel.h>
23#include <linux/kvm.h>
24#include <linux/irqreturn.h>
25#include <linux/spinlock.h>
26#include <linux/types.h>
27#include <linux/irqchip/arm-gic.h>
28
29#define VGIC_NR_IRQS 128
30#define VGIC_NR_SGIS 16
31#define VGIC_NR_PPIS 16
32#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
33#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
34#define VGIC_MAX_CPUS KVM_MAX_VCPUS
35#define VGIC_MAX_LRS (1 << 6)
36
37/* Sanity checks... */
38#if (VGIC_MAX_CPUS > 8)
39#error Invalid number of CPU interfaces
40#endif
41
42#if (VGIC_NR_IRQS & 31)
43#error "VGIC_NR_IRQS must be a multiple of 32"
44#endif
45
46#if (VGIC_NR_IRQS > 1024)
47#error "VGIC_NR_IRQS must be <= 1024"
48#endif
49
50/*
51 * The GIC distributor registers describing interrupts have two parts:
52 * - 32 per-CPU interrupts (SGI + PPI)
53 * - a bunch of shared interrupts (SPI)
54 */
55struct vgic_bitmap {
56 union {
57 u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
58 DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
59 } percpu[VGIC_MAX_CPUS];
60 union {
61 u32 reg[VGIC_NR_SHARED_IRQS / 32];
62 DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
63 } shared;
64};
65
66struct vgic_bytemap {
67 u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
68 u32 shared[VGIC_NR_SHARED_IRQS / 4];
69};
70
71struct vgic_dist {
72#ifdef CONFIG_KVM_ARM_VGIC
73 spinlock_t lock;
74 bool ready;
75
76 /* Virtual control interface mapping */
77 void __iomem *vctrl_base;
78
79 /* Distributor and vcpu interface mapping in the guest */
80 phys_addr_t vgic_dist_base;
81 phys_addr_t vgic_cpu_base;
82
83 /* Distributor enabled */
84 u32 enabled;
85
86 /* Interrupt enabled (one bit per IRQ) */
87 struct vgic_bitmap irq_enabled;
88
89 /* Interrupt 'pin' level */
90 struct vgic_bitmap irq_state;
91
92 /* Level-triggered interrupt in progress */
93 struct vgic_bitmap irq_active;
94
95 /* Interrupt priority. Not used yet. */
96 struct vgic_bytemap irq_priority;
97
98 /* Level/edge triggered */
99 struct vgic_bitmap irq_cfg;
100
101 /* Source CPU per SGI and target CPU */
102 u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
103
104 /* Target CPU for each IRQ */
105 u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS];
106 struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS];
107
108 /* Bitmap indicating which CPU has something pending */
109 unsigned long irq_pending_on_cpu;
110#endif
111};
112
113struct vgic_cpu {
114#ifdef CONFIG_KVM_ARM_VGIC
115 /* per IRQ to LR mapping */
116 u8 vgic_irq_lr_map[VGIC_NR_IRQS];
117
118 /* Pending interrupts on this VCPU */
119 DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
120 DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
121
122 /* Bitmap of used/free list registers */
123 DECLARE_BITMAP( lr_used, VGIC_MAX_LRS);
124
125 /* Number of list registers on this CPU */
126 int nr_lr;
127
128 /* CPU vif control registers for world switch */
129 u32 vgic_hcr;
130 u32 vgic_vmcr;
131 u32 vgic_misr; /* Saved only */
132 u32 vgic_eisr[2]; /* Saved only */
133 u32 vgic_elrsr[2]; /* Saved only */
134 u32 vgic_apr;
135 u32 vgic_lr[VGIC_MAX_LRS];
136#endif
137};
138
139#define LR_EMPTY 0xff
140
141struct kvm;
142struct kvm_vcpu;
143struct kvm_run;
144struct kvm_exit_mmio;
145
146#ifdef CONFIG_KVM_ARM_VGIC
147int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
148int kvm_vgic_hyp_init(void);
149int kvm_vgic_init(struct kvm *kvm);
150int kvm_vgic_create(struct kvm *kvm);
151int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
152void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
153void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
154int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
155 bool level);
156int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
157bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
158 struct kvm_exit_mmio *mmio);
159
160#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
161#define vgic_initialized(k) ((k)->arch.vgic.ready)
162
163#else
164static inline int kvm_vgic_hyp_init(void)
165{
166 return 0;
167}
168
169static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
170{
171 return 0;
172}
173
174static inline int kvm_vgic_init(struct kvm *kvm)
175{
176 return 0;
177}
178
179static inline int kvm_vgic_create(struct kvm *kvm)
180{
181 return 0;
182}
183
184static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
185{
186 return 0;
187}
188
189static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
190static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
191
192static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
193 unsigned int irq_num, bool level)
194{
195 return 0;
196}
197
198static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
199{
200 return 0;
201}
202
203static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
204 struct kvm_exit_mmio *mmio)
205{
206 return false;
207}
208
209static inline int irqchip_in_kernel(struct kvm *kvm)
210{
211 return 0;
212}
213
214static inline bool vgic_initialized(struct kvm *kvm)
215{
216 return true;
217}
218#endif
219
220#endif
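
As with the timer header, this file moves rather than dies: the declarations now live in the generic tree and are pulled in by the kvm_host.h hunk above as:

    #include <kvm/arm_vgic.h>    /* replacement include path */
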
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 370e1a8af6ac..ebf5015508b5 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -41,9 +41,9 @@ config KVM_ARM_HOST
41 Provides host support for ARM processors. 41 Provides host support for ARM processors.
42 42
43config KVM_ARM_MAX_VCPUS 43config KVM_ARM_MAX_VCPUS
 44 int "Maximum number of supported virtual CPUs per VM" if KVM_ARM_HOST 44 int "Maximum number of supported virtual CPUs per VM"
 45 default 4 if KVM_ARM_HOST 45 depends on KVM_ARM_HOST
 46 default 0 46 default 4
47 help 47 help
48 Static number of max supported virtual CPUs per VM. 48 Static number of max supported virtual CPUs per VM.
49 49
@@ -67,6 +67,4 @@ config KVM_ARM_TIMER
67 ---help--- 67 ---help---
68 Adds support for the Architected Timers in virtual machines 68 Adds support for the Architected Timers in virtual machines
69 69
70source drivers/virtio/Kconfig
71
72endif # VIRTUALIZATION 70endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 53c5ed83d16f..d99bee4950e5 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -14,10 +14,11 @@ CFLAGS_mmu.o := -I.
14AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) 14AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
15AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) 15AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
16 16
17kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 17KVM := ../../../virt/kvm
18kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
18 19
19obj-y += kvm-arm.o init.o interrupts.o 20obj-y += kvm-arm.o init.o interrupts.o
20obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o 21obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
21obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o 22obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o
22obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o 23obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
23obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o 24obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
deleted file mode 100644
index 49a7516d81c7..000000000000
--- a/arch/arm/kvm/arch_timer.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/cpu.h>
20#include <linux/of_irq.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/interrupt.h>
24
25#include <clocksource/arm_arch_timer.h>
26#include <asm/arch_timer.h>
27
28#include <asm/kvm_vgic.h>
29#include <asm/kvm_arch_timer.h>
30
31static struct timecounter *timecounter;
32static struct workqueue_struct *wqueue;
33static struct kvm_irq_level timer_irq = {
34 .level = 1,
35};
36
37static cycle_t kvm_phys_timer_read(void)
38{
39 return timecounter->cc->read(timecounter->cc);
40}
41
42static bool timer_is_armed(struct arch_timer_cpu *timer)
43{
44 return timer->armed;
45}
46
47/* timer_arm: as in "arm the timer", not as in ARM the company */
48static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
49{
50 timer->armed = true;
51 hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
52 HRTIMER_MODE_ABS);
53}
54
55static void timer_disarm(struct arch_timer_cpu *timer)
56{
57 if (timer_is_armed(timer)) {
58 hrtimer_cancel(&timer->timer);
59 cancel_work_sync(&timer->expired);
60 timer->armed = false;
61 }
62}
63
64static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
65{
66 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
67
68 timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
69 kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
70 vcpu->arch.timer_cpu.irq->irq,
71 vcpu->arch.timer_cpu.irq->level);
72}
73
74static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
75{
76 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
77
78 /*
79 * We disable the timer in the world switch and let it be
80 * handled by kvm_timer_sync_hwstate(). Getting a timer
81 * interrupt at this point is a sure sign of some major
82 * breakage.
83 */
84 pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
85 return IRQ_HANDLED;
86}
87
88static void kvm_timer_inject_irq_work(struct work_struct *work)
89{
90 struct kvm_vcpu *vcpu;
91
92 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
93 vcpu->arch.timer_cpu.armed = false;
94 kvm_timer_inject_irq(vcpu);
95}
96
97static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
98{
99 struct arch_timer_cpu *timer;
100 timer = container_of(hrt, struct arch_timer_cpu, timer);
101 queue_work(wqueue, &timer->expired);
102 return HRTIMER_NORESTART;
103}
104
105/**
106 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
107 * @vcpu: The vcpu pointer
108 *
109 * Disarm any pending soft timers, since the world-switch code will write the
110 * virtual timer state back to the physical CPU.
111 */
112void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
113{
114 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
115
116 /*
117 * We're about to run this vcpu again, so there is no need to
118 * keep the background timer running, as we're about to
119 * populate the CPU timer again.
120 */
121 timer_disarm(timer);
122}
123
124/**
125 * kvm_timer_sync_hwstate - sync timer state from cpu
126 * @vcpu: The vcpu pointer
127 *
128 * Check if the virtual timer was armed and either schedule a corresponding
129 * soft timer or inject directly if already expired.
130 */
131void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
132{
133 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
134 cycle_t cval, now;
135 u64 ns;
136
137 if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
138 !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
139 return;
140
141 cval = timer->cntv_cval;
142 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
143
144 BUG_ON(timer_is_armed(timer));
145
146 if (cval <= now) {
147 /*
148 * Timer has already expired while we were not
149 * looking. Inject the interrupt and carry on.
150 */
151 kvm_timer_inject_irq(vcpu);
152 return;
153 }
154
155 ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
156 timer_arm(timer, ns);
157}
158
159void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
160{
161 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
162
163 INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
164 hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
165 timer->timer.function = kvm_timer_expire;
166 timer->irq = &timer_irq;
167}
168
169static void kvm_timer_init_interrupt(void *info)
170{
171 enable_percpu_irq(timer_irq.irq, 0);
172}
173
174
175static int kvm_timer_cpu_notify(struct notifier_block *self,
176 unsigned long action, void *cpu)
177{
178 switch (action) {
179 case CPU_STARTING:
180 case CPU_STARTING_FROZEN:
181 kvm_timer_init_interrupt(NULL);
182 break;
183 case CPU_DYING:
184 case CPU_DYING_FROZEN:
185 disable_percpu_irq(timer_irq.irq);
186 break;
187 }
188
189 return NOTIFY_OK;
190}
191
192static struct notifier_block kvm_timer_cpu_nb = {
193 .notifier_call = kvm_timer_cpu_notify,
194};
195
196static const struct of_device_id arch_timer_of_match[] = {
197 { .compatible = "arm,armv7-timer", },
198 { .compatible = "arm,armv8-timer", },
199 {},
200};
201
202int kvm_timer_hyp_init(void)
203{
204 struct device_node *np;
205 unsigned int ppi;
206 int err;
207
208 timecounter = arch_timer_get_timecounter();
209 if (!timecounter)
210 return -ENODEV;
211
212 np = of_find_matching_node(NULL, arch_timer_of_match);
213 if (!np) {
214 kvm_err("kvm_arch_timer: can't find DT node\n");
215 return -ENODEV;
216 }
217
218 ppi = irq_of_parse_and_map(np, 2);
219 if (!ppi) {
220 kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
221 err = -EINVAL;
222 goto out;
223 }
224
225 err = request_percpu_irq(ppi, kvm_arch_timer_handler,
226 "kvm guest timer", kvm_get_running_vcpus());
227 if (err) {
228 kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
229 ppi, err);
230 goto out;
231 }
232
233 timer_irq.irq = ppi;
234
235 err = register_cpu_notifier(&kvm_timer_cpu_nb);
236 if (err) {
237 kvm_err("Cannot register timer CPU notifier\n");
238 goto out_free;
239 }
240
241 wqueue = create_singlethread_workqueue("kvm_arch_timer");
242 if (!wqueue) {
243 err = -ENOMEM;
244 goto out_free;
245 }
246
247 kvm_info("%s IRQ%d\n", np->name, ppi);
248 on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
249
250 goto out;
251out_free:
252 free_percpu_irq(ppi, kvm_get_running_vcpus());
253out:
254 of_node_put(np);
255 return err;
256}
257
258void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
259{
260 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
261
262 timer_disarm(timer);
263}
264
265int kvm_timer_init(struct kvm *kvm)
266{
267 if (timecounter && wqueue) {
268 kvm->arch.timer.cntvoff = kvm_phys_timer_read();
269 kvm->arch.timer.enabled = 1;
270 }
271
272 return 0;
273}
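
The implementation likewise migrates to virt/kvm/arm/ (see the Makefile hunk above, which now builds $(KVM)/arm/arch_timer.o) so arm64 can share it. The core pattern of the moved code, condensed from the deleted kvm_timer_sync_hwstate(): if the guest's compare value still lies in the future, arm a host hrtimer for the remaining nanoseconds, otherwise inject the interrupt immediately:

    cycle_t cval = timer->cntv_cval;
    cycle_t now  = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

    if (cval <= now)
            kvm_timer_inject_irq(vcpu);     /* already expired */
    else
            timer_arm(timer, cyclecounter_cyc2ns(timecounter->cc,
                                                 cval - now));
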
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ef1703b9587b..741f66a2edbd 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -800,8 +800,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
800 800
801static void cpu_init_hyp_mode(void *dummy) 801static void cpu_init_hyp_mode(void *dummy)
802{ 802{
803 unsigned long long boot_pgd_ptr; 803 phys_addr_t boot_pgd_ptr;
804 unsigned long long pgd_ptr; 804 phys_addr_t pgd_ptr;
805 unsigned long hyp_stack_ptr; 805 unsigned long hyp_stack_ptr;
806 unsigned long stack_page; 806 unsigned long stack_page;
807 unsigned long vector_ptr; 807 unsigned long vector_ptr;
@@ -809,8 +809,8 @@ static void cpu_init_hyp_mode(void *dummy)
809 /* Switch from the HYP stub to our own HYP init vector */ 809 /* Switch from the HYP stub to our own HYP init vector */
810 __hyp_set_vectors(kvm_get_idmap_vector()); 810 __hyp_set_vectors(kvm_get_idmap_vector());
811 811
812 boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); 812 boot_pgd_ptr = kvm_mmu_get_boot_httbr();
813 pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); 813 pgd_ptr = kvm_mmu_get_httbr();
814 stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); 814 stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
815 hyp_stack_ptr = stack_page + PAGE_SIZE; 815 hyp_stack_ptr = stack_page + PAGE_SIZE;
816 vector_ptr = (unsigned long)__kvm_hyp_vector; 816 vector_ptr = (unsigned long)__kvm_hyp_vector;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 8eea97be1ed5..4a5199070430 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -180,6 +180,10 @@ static const struct coproc_reg cp15_regs[] = {
180 NULL, reset_unknown, c6_DFAR }, 180 NULL, reset_unknown, c6_DFAR },
181 { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, 181 { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
182 NULL, reset_unknown, c6_IFAR }, 182 NULL, reset_unknown, c6_IFAR },
183
184 /* PAR swapped by interrupt.S */
185 { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
186
183 /* 187 /*
184 * DC{C,I,CI}SW operations: 188 * DC{C,I,CI}SW operations:
185 */ 189 */
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3d74a0be47db..df4c82d47ad7 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -52,9 +52,6 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
52 52
53static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) 53static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
54{ 54{
55 if (kvm_psci_call(vcpu))
56 return 1;
57
58 kvm_inject_undefined(vcpu); 55 kvm_inject_undefined(vcpu);
59 return 1; 56 return 1;
60} 57}
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index f7793df62f58..16cd4ba5d7fd 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -49,6 +49,7 @@ __kvm_hyp_code_start:
49ENTRY(__kvm_tlb_flush_vmid_ipa) 49ENTRY(__kvm_tlb_flush_vmid_ipa)
50 push {r2, r3} 50 push {r2, r3}
51 51
52 dsb ishst
52 add r0, r0, #KVM_VTTBR 53 add r0, r0, #KVM_VTTBR
53 ldrd r2, r3, [r0] 54 ldrd r2, r3, [r0]
54 mcrr p15, 6, r2, r3, c2 @ Write VTTBR 55 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
@@ -291,6 +292,7 @@ THUMB( orr r2, r2, #PSR_T_BIT )
291 ldr r2, =BSYM(panic) 292 ldr r2, =BSYM(panic)
292 msr ELR_hyp, r2 293 msr ELR_hyp, r2
293 ldr r0, =\panic_str 294 ldr r0, =\panic_str
295 clrex @ Clear exclusive monitor
294 eret 296 eret
295.endm 297.endm
296 298
@@ -414,6 +416,10 @@ guest_trap:
414 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR 416 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
415 bne 3f 417 bne 3f
416 418
419 /* Preserve PAR */
420 mrrc p15, 0, r0, r1, c7 @ PAR
421 push {r0, r1}
422
417 /* Resolve IPA using the xFAR */ 423 /* Resolve IPA using the xFAR */
418 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR 424 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
419 isb 425 isb
@@ -424,13 +430,20 @@ guest_trap:
424 lsl r2, r2, #4 430 lsl r2, r2, #4
425 orr r2, r2, r1, lsl #24 431 orr r2, r2, r1, lsl #24
426 432
433 /* Restore PAR */
434 pop {r0, r1}
435 mcrr p15, 0, r0, r1, c7 @ PAR
436
4273: load_vcpu @ Load VCPU pointer to r0 4373: load_vcpu @ Load VCPU pointer to r0
428 str r2, [r0, #VCPU_HPFAR] 438 str r2, [r0, #VCPU_HPFAR]
429 439
4301: mov r1, #ARM_EXCEPTION_HVC 4401: mov r1, #ARM_EXCEPTION_HVC
431 b __kvm_vcpu_return 441 b __kvm_vcpu_return
432 442
4334: pop {r0, r1, r2} @ Failed translation, return to guest 4434: pop {r0, r1} @ Failed translation, return to guest
444 mcrr p15, 0, r0, r1, c7 @ PAR
445 clrex
446 pop {r0, r1, r2}
434 eret 447 eret
435 448
436/* 449/*
@@ -456,6 +469,7 @@ switch_to_guest_vfp:
456 469
457 pop {r3-r7} 470 pop {r3-r7}
458 pop {r0-r2} 471 pop {r0-r2}
472 clrex
459 eret 473 eret
460#endif 474#endif
461 475
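
The clrex additions plug a subtle hole: a guest can execute ldrex and exit before the matching strex, leaving the CPU's exclusive monitor open across the world switch, after which the next strex on the other side could spuriously succeed and corrupt an atomic. In C terms, the code being protected is the standard ARM atomic loop, whose strex must fail unless the monitor is still held from the matching ldrex (sketch modeled on the kernel's ldrex/strex atomics):

    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            __asm__ __volatile__(
    "1:     ldrex   %0, [%3]\n"     /* opens the exclusive monitor */
    "       add     %0, %0, %4\n"
    "       strex   %1, %0, [%3]\n" /* succeeds only if still exclusive */
    "       teq     %1, #0\n"
    "       bne     1b"
            : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
            : "r" (&v->counter), "Ir" (i)
            : "cc");
    }
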
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index d43cfb5b37c4..6f18695a09cb 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -302,11 +302,14 @@ vcpu .req r0 @ vcpu pointer always in r0
302 .endif 302 .endif
303 303
304 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL 304 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
305 mrrc p15, 0, r4, r5, c7 @ PAR
305 306
306 .if \store_to_vcpu == 0 307 .if \store_to_vcpu == 0
307 push {r2} 308 push {r2,r4-r5}
308 .else 309 .else
309 str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] 310 str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
311 add r12, vcpu, #CP15_OFFSET(c7_PAR)
312 strd r4, r5, [r12]
310 .endif 313 .endif
311.endm 314.endm
312 315
@@ -319,12 +322,15 @@ vcpu .req r0 @ vcpu pointer always in r0
319 */ 322 */
320.macro write_cp15_state read_from_vcpu 323.macro write_cp15_state read_from_vcpu
321 .if \read_from_vcpu == 0 324 .if \read_from_vcpu == 0
322 pop {r2} 325 pop {r2,r4-r5}
323 .else 326 .else
324 ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] 327 ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
328 add r12, vcpu, #CP15_OFFSET(c7_PAR)
329 ldrd r4, r5, [r12]
325 .endif 330 .endif
326 331
327 mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL 332 mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
333 mcrr p15, 0, r4, r5, c7 @ PAR
328 334
329 .if \read_from_vcpu == 0 335 .if \read_from_vcpu == 0
330 pop {r2-r12} 336 pop {r2-r12}
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 72a12f2171b2..b8e06b7a2833 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -86,12 +86,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
86 sign_extend = kvm_vcpu_dabt_issext(vcpu); 86 sign_extend = kvm_vcpu_dabt_issext(vcpu);
87 rt = kvm_vcpu_dabt_get_rd(vcpu); 87 rt = kvm_vcpu_dabt_get_rd(vcpu);
88 88
89 if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
90 /* IO memory trying to read/write pc */
91 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
92 return 1;
93 }
94
95 mmio->is_write = is_write; 89 mmio->is_write = is_write;
96 mmio->phys_addr = fault_ipa; 90 mmio->phys_addr = fault_ipa;
97 mmio->len = len; 91 mmio->len = len;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 84ba67b982c0..ca6bea4859b4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -382,9 +382,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
382 if (!pgd) 382 if (!pgd)
383 return -ENOMEM; 383 return -ENOMEM;
384 384
385 /* stage-2 pgd must be aligned to its size */
386 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
387
388 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); 385 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
389 kvm_clean_pgd(pgd); 386 kvm_clean_pgd(pgd);
390 kvm->arch.pgd = pgd; 387 kvm->arch.pgd = pgd;
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 7ee5bb7a3667..86a693a02ba3 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
75 * kvm_psci_call - handle PSCI call if r0 value is in range 75 * kvm_psci_call - handle PSCI call if r0 value is in range
76 * @vcpu: Pointer to the VCPU struct 76 * @vcpu: Pointer to the VCPU struct
77 * 77 *
78 * Handle PSCI calls from guests through traps from HVC or SMC instructions. 78 * Handle PSCI calls from guests through traps from HVC instructions.
79 * The calling convention is similar to SMC calls to the secure world where 79 * The calling convention is similar to SMC calls to the secure world where
80 * the function number is placed in r0 and this function returns true if the 80 * the function number is placed in r0 and this function returns true if the
81 * function number specified in r0 is withing the PSCI range, and false 81 * function number specified in r0 is withing the PSCI range, and false
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index b80256b554cd..b7840e7aa452 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -27,6 +27,8 @@
27#include <asm/kvm_arm.h> 27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h> 28#include <asm/kvm_coproc.h>
29 29
30#include <kvm/arm_arch_timer.h>
31
30/****************************************************************************** 32/******************************************************************************
31 * Cortex-A15 Reset Values 33 * Cortex-A15 Reset Values
32 */ 34 */
@@ -37,6 +39,11 @@ static struct kvm_regs a15_regs_reset = {
37 .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, 39 .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
38}; 40};
39 41
42static const struct kvm_irq_level a15_vtimer_irq = {
43 .irq = 27,
44 .level = 1,
45};
46
40 47
41/******************************************************************************* 48/*******************************************************************************
42 * Exported reset function 49 * Exported reset function
@@ -52,6 +59,7 @@ static struct kvm_regs a15_regs_reset = {
52int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 59int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
53{ 60{
54 struct kvm_regs *cpu_reset; 61 struct kvm_regs *cpu_reset;
62 const struct kvm_irq_level *cpu_vtimer_irq;
55 63
56 switch (vcpu->arch.target) { 64 switch (vcpu->arch.target) {
57 case KVM_ARM_TARGET_CORTEX_A15: 65 case KVM_ARM_TARGET_CORTEX_A15:
@@ -59,6 +67,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
59 return -EINVAL; 67 return -EINVAL;
60 cpu_reset = &a15_regs_reset; 68 cpu_reset = &a15_regs_reset;
61 vcpu->arch.midr = read_cpuid_id(); 69 vcpu->arch.midr = read_cpuid_id();
70 cpu_vtimer_irq = &a15_vtimer_irq;
62 break; 71 break;
63 default: 72 default:
64 return -ENODEV; 73 return -ENODEV;
@@ -70,5 +79,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
70 /* Reset CP15 registers */ 79 /* Reset CP15 registers */
71 kvm_reset_coprocs(vcpu); 80 kvm_reset_coprocs(vcpu);
72 81
82 /* Reset arch_timer context */
83 kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
84
73 return 0; 85 return 0;
74} 86}
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
deleted file mode 100644
index 17c5ac7d10ed..000000000000
--- a/arch/arm/kvm/vgic.c
+++ /dev/null
@@ -1,1499 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/cpu.h>
20#include <linux/kvm.h>
21#include <linux/kvm_host.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27
28#include <linux/irqchip/arm-gic.h>
29
30#include <asm/kvm_emulate.h>
31#include <asm/kvm_arm.h>
32#include <asm/kvm_mmu.h>
33
34/*
35 * How the whole thing works (courtesy of Christoffer Dall):
36 *
37 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
38 * something is pending
39 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
40 * bitmap (this bitmap is updated by both user land ioctls and guest
41 * mmio ops, and other in-kernel peripherals such as the
42 * arch. timers) and indicate the 'wire' state.
43 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
44 * recalculated
45 * - To calculate the oracle, we need info for each cpu from
46 * compute_pending_for_cpu, which considers:
47 * - PPI: dist->irq_state & dist->irq_enable
48 * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
49 * - irq_spi_target is a 'formatted' version of the GICD_ICFGR
50 * registers, stored on each vcpu. We only keep one bit of
51 * information per interrupt, making sure that only one vcpu can
52 * accept the interrupt.
53 * - The same is true when injecting an interrupt, except that we only
54 * consider a single interrupt at a time. The irq_spi_cpu array
55 * contains the target CPU for each SPI.
56 *
57 * The handling of level interrupts adds some extra complexity. We
58 * need to track when the interrupt has been EOIed, so we can sample
59 * the 'line' again. This is achieved as such:
60 *
61 * - When a level interrupt is moved onto a vcpu, the corresponding
62 * bit in irq_active is set. As long as this bit is set, the line
63 * will be ignored for further interrupts. The interrupt is injected
64 * into the vcpu with the GICH_LR_EOI bit set (generate a
65 * maintenance interrupt on EOI).
66 * - When the interrupt is EOIed, the maintenance interrupt fires,
67 * and clears the corresponding bit in irq_active. This allow the
68 * interrupt line to be sampled again.
69 */
70
71#define VGIC_ADDR_UNDEF (-1)
72#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
73
74/* Physical address of vgic virtual cpu interface */
75static phys_addr_t vgic_vcpu_base;
76
77/* Virtual control interface base address */
78static void __iomem *vgic_vctrl_base;
79
80static struct device_node *vgic_node;
81
82#define ACCESS_READ_VALUE (1 << 0)
83#define ACCESS_READ_RAZ (0 << 0)
84#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
85#define ACCESS_WRITE_IGNORED (0 << 1)
86#define ACCESS_WRITE_SETBIT (1 << 1)
87#define ACCESS_WRITE_CLEARBIT (2 << 1)
88#define ACCESS_WRITE_VALUE (3 << 1)
89#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
90
91static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
92static void vgic_update_state(struct kvm *kvm);
93static void vgic_kick_vcpus(struct kvm *kvm);
94static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
95static u32 vgic_nr_lr;
96
97static unsigned int vgic_maint_irq;
98
99static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
100 int cpuid, u32 offset)
101{
102 offset >>= 2;
103 if (!offset)
104 return x->percpu[cpuid].reg;
105 else
106 return x->shared.reg + offset - 1;
107}
108
109static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
110 int cpuid, int irq)
111{
112 if (irq < VGIC_NR_PRIVATE_IRQS)
113 return test_bit(irq, x->percpu[cpuid].reg_ul);
114
115 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
116}
117
118static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
119 int irq, int val)
120{
121 unsigned long *reg;
122
123 if (irq < VGIC_NR_PRIVATE_IRQS) {
124 reg = x->percpu[cpuid].reg_ul;
125 } else {
126 reg = x->shared.reg_ul;
127 irq -= VGIC_NR_PRIVATE_IRQS;
128 }
129
130 if (val)
131 set_bit(irq, reg);
132 else
133 clear_bit(irq, reg);
134}
135
136static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
137{
138 if (unlikely(cpuid >= VGIC_MAX_CPUS))
139 return NULL;
140 return x->percpu[cpuid].reg_ul;
141}
142
143static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
144{
145 return x->shared.reg_ul;
146}
147
148static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
149{
150 offset >>= 2;
151 BUG_ON(offset > (VGIC_NR_IRQS / 4));
152 if (offset < 4)
153 return x->percpu[cpuid] + offset;
154 else
155 return x->shared + offset - 8;
156}
157
158#define VGIC_CFG_LEVEL 0
159#define VGIC_CFG_EDGE 1
160
161static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
162{
163 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
164 int irq_val;
165
166 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
167 return irq_val == VGIC_CFG_EDGE;
168}
169
170static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
171{
172 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
173
174 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
175}
176
177static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
178{
179 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
180
181 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
182}
183
184static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
185{
186 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
187
188 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
189}
190
191static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
192{
193 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
194
195 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
196}
197
198static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
199{
200 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
201
202 return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
203}
204
205static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
206{
207 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
208
209 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
210}
211
212static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
213{
214 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
215
216 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
217}
218
219static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
220{
221 if (irq < VGIC_NR_PRIVATE_IRQS)
222 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
223 else
224 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
225 vcpu->arch.vgic_cpu.pending_shared);
226}
227
228static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
229{
230 if (irq < VGIC_NR_PRIVATE_IRQS)
231 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
232 else
233 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
234 vcpu->arch.vgic_cpu.pending_shared);
235}
236
237static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
238{
239 return *((u32 *)mmio->data) & mask;
240}
241
242static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
243{
244 *((u32 *)mmio->data) = value & mask;
245}
246
247/**
248 * vgic_reg_access - access vgic register
249 * @mmio: pointer to the data describing the mmio access
250 * @reg: pointer to the virtual backing of vgic distributor data
251 * @offset: least significant 2 bits used for word offset
252 * @mode: ACCESS_ mode (see defines above)
253 *
254 * Helper to make vgic register access easier using one of the access
255 * modes defined for vgic register access
256 * (read,raz,write-ignored,setbit,clearbit,write)
257 */
258static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
259 phys_addr_t offset, int mode)
260{
261 int word_offset = (offset & 3) * 8;
262 u32 mask = (1UL << (mmio->len * 8)) - 1;
263 u32 regval;
264
265 /*
266 * Any alignment fault should have been delivered to the guest
267 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
268 */
269
270 if (reg) {
271 regval = *reg;
272 } else {
273 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
274 regval = 0;
275 }
276
277 if (mmio->is_write) {
278 u32 data = mmio_data_read(mmio, mask) << word_offset;
279 switch (ACCESS_WRITE_MASK(mode)) {
280 case ACCESS_WRITE_IGNORED:
281 return;
282
283 case ACCESS_WRITE_SETBIT:
284 regval |= data;
285 break;
286
287 case ACCESS_WRITE_CLEARBIT:
288 regval &= ~data;
289 break;
290
291 case ACCESS_WRITE_VALUE:
292 regval = (regval & ~(mask << word_offset)) | data;
293 break;
294 }
295 *reg = regval;
296 } else {
297 switch (ACCESS_READ_MASK(mode)) {
298 case ACCESS_READ_RAZ:
299 regval = 0;
300 /* fall through */
301
302 case ACCESS_READ_VALUE:
303 mmio_data_write(mmio, mask, regval >> word_offset);
304 }
305 }
306}
307
308static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
309 struct kvm_exit_mmio *mmio, phys_addr_t offset)
310{
311 u32 reg;
312 u32 word_offset = offset & 3;
313
314 switch (offset & ~3) {
315 case 0: /* CTLR */
316 reg = vcpu->kvm->arch.vgic.enabled;
317 vgic_reg_access(mmio, &reg, word_offset,
318 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
319 if (mmio->is_write) {
320 vcpu->kvm->arch.vgic.enabled = reg & 1;
321 vgic_update_state(vcpu->kvm);
322 return true;
323 }
324 break;
325
326 case 4: /* TYPER */
327 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
328 reg |= (VGIC_NR_IRQS >> 5) - 1;
329 vgic_reg_access(mmio, &reg, word_offset,
330 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
331 break;
332
333 case 8: /* IIDR */
334 reg = 0x4B00043B;
335 vgic_reg_access(mmio, &reg, word_offset,
336 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
337 break;
338 }
339
340 return false;
341}
342
343static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
344 struct kvm_exit_mmio *mmio, phys_addr_t offset)
345{
346 vgic_reg_access(mmio, NULL, offset,
347 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
348 return false;
349}
350
351static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
352 struct kvm_exit_mmio *mmio,
353 phys_addr_t offset)
354{
355 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
356 vcpu->vcpu_id, offset);
357 vgic_reg_access(mmio, reg, offset,
358 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
359 if (mmio->is_write) {
360 vgic_update_state(vcpu->kvm);
361 return true;
362 }
363
364 return false;
365}
366
367static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
368 struct kvm_exit_mmio *mmio,
369 phys_addr_t offset)
370{
371 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
372 vcpu->vcpu_id, offset);
373 vgic_reg_access(mmio, reg, offset,
374 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
375 if (mmio->is_write) {
376 if (offset < 4) /* Force SGI enabled */
377 *reg |= 0xffff;
378 vgic_retire_disabled_irqs(vcpu);
379 vgic_update_state(vcpu->kvm);
380 return true;
381 }
382
383 return false;
384}
385
386static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
387 struct kvm_exit_mmio *mmio,
388 phys_addr_t offset)
389{
390 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
391 vcpu->vcpu_id, offset);
392 vgic_reg_access(mmio, reg, offset,
393 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
394 if (mmio->is_write) {
395 vgic_update_state(vcpu->kvm);
396 return true;
397 }
398
399 return false;
400}
401
402static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
403 struct kvm_exit_mmio *mmio,
404 phys_addr_t offset)
405{
406 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
407 vcpu->vcpu_id, offset);
408 vgic_reg_access(mmio, reg, offset,
409 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
410 if (mmio->is_write) {
411 vgic_update_state(vcpu->kvm);
412 return true;
413 }
414
415 return false;
416}
417
418static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
419 struct kvm_exit_mmio *mmio,
420 phys_addr_t offset)
421{
422 u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
423 vcpu->vcpu_id, offset);
424 vgic_reg_access(mmio, reg, offset,
425 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
426 return false;
427}
428
429#define GICD_ITARGETSR_SIZE 32
430#define GICD_CPUTARGETS_BITS 8
431#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
432static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
433{
434 struct vgic_dist *dist = &kvm->arch.vgic;
435 struct kvm_vcpu *vcpu;
436 int i, c;
437 unsigned long *bmap;
438 u32 val = 0;
439
440 irq -= VGIC_NR_PRIVATE_IRQS;
441
442 kvm_for_each_vcpu(c, vcpu, kvm) {
443 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
444 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
445 if (test_bit(irq + i, bmap))
446 val |= 1 << (c + i * 8);
447 }
448
449 return val;
450}
451
452static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
453{
454 struct vgic_dist *dist = &kvm->arch.vgic;
455 struct kvm_vcpu *vcpu;
456 int i, c;
457 unsigned long *bmap;
458 u32 target;
459
460 irq -= VGIC_NR_PRIVATE_IRQS;
461
462 /*
463 * Pick the LSB in each byte. This ensures we target exactly
464 * one vcpu per IRQ. If the byte is null, assume we target
465 * CPU0.
466 */
467 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
468 int shift = i * GICD_CPUTARGETS_BITS;
469 target = ffs((val >> shift) & 0xffU);
470 target = target ? (target - 1) : 0;
471 dist->irq_spi_cpu[irq + i] = target;
472 kvm_for_each_vcpu(c, vcpu, kvm) {
473 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
474 if (c == target)
475 set_bit(irq + i, bmap);
476 else
477 clear_bit(irq + i, bmap);
478 }
479 }
480}
481
482static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
483 struct kvm_exit_mmio *mmio,
484 phys_addr_t offset)
485{
486 u32 reg;
487
488 /* We treat the banked interrupts targets as read-only */
489 if (offset < 32) {
490 u32 roreg = 1 << vcpu->vcpu_id;
491 roreg |= roreg << 8;
492 roreg |= roreg << 16;
493
494 vgic_reg_access(mmio, &roreg, offset,
495 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
496 return false;
497 }
498
499 reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
500 vgic_reg_access(mmio, &reg, offset,
501 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
502 if (mmio->is_write) {
503 vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
504 vgic_update_state(vcpu->kvm);
505 return true;
506 }
507
508 return false;
509}
510
511static u32 vgic_cfg_expand(u16 val)
512{
513 u32 res = 0;
514 int i;
515
516 /*
517 * Turn a 16bit value like abcd...mnop into a 32bit word
518 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
519 */
520 for (i = 0; i < 16; i++)
521 res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
522
523 return res;
524}
525
526static u16 vgic_cfg_compress(u32 val)
527{
528 u16 res = 0;
529 int i;
530
531 /*
532 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
533 * abcd...mnop which is what we really care about.
534 */
535 for (i = 0; i < 16; i++)
536 res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
537
538 return res;
539}
540
541/*
542 * The distributor uses 2 bits per IRQ for the CFG register, but the
543 * LSB is always 0. As such, we only keep the upper bit, and use the
544 * two above functions to compress/expand the bits
545 */
546static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
547 struct kvm_exit_mmio *mmio, phys_addr_t offset)
548{
549 u32 val;
550 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
551 vcpu->vcpu_id, offset >> 1);
552 if (offset & 2)
553 val = *reg >> 16;
554 else
555 val = *reg & 0xffff;
556
557 val = vgic_cfg_expand(val);
558 vgic_reg_access(mmio, &val, offset,
559 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
560 if (mmio->is_write) {
561 if (offset < 4) {
562 *reg = ~0U; /* Force PPIs/SGIs to 1 */
563 return false;
564 }
565
566 val = vgic_cfg_compress(val);
567 if (offset & 2) {
568 *reg &= 0xffff;
569 *reg |= val << 16;
570 } else {
571 *reg &= 0xffff << 16;
572 *reg |= val;
573 }
574 }
575
576 return false;
577}
578
579static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
580 struct kvm_exit_mmio *mmio, phys_addr_t offset)
581{
582 u32 reg;
583 vgic_reg_access(mmio, &reg, offset,
584 ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
585 if (mmio->is_write) {
586 vgic_dispatch_sgi(vcpu, reg);
587 vgic_update_state(vcpu->kvm);
588 return true;
589 }
590
591 return false;
592}
593
594/*
595 * I would have liked to use the kvm_bus_io_*() API instead, but it
596 * cannot cope with banked registers (only the VM pointer is passed
597 * around, and we need the vcpu). One of these days, someone please
598 * fix it!
599 */
600struct mmio_range {
601 phys_addr_t base;
602 unsigned long len;
603 bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
604 phys_addr_t offset);
605};
606
607static const struct mmio_range vgic_ranges[] = {
608 {
609 .base = GIC_DIST_CTRL,
610 .len = 12,
611 .handle_mmio = handle_mmio_misc,
612 },
613 {
614 .base = GIC_DIST_IGROUP,
615 .len = VGIC_NR_IRQS / 8,
616 .handle_mmio = handle_mmio_raz_wi,
617 },
618 {
619 .base = GIC_DIST_ENABLE_SET,
620 .len = VGIC_NR_IRQS / 8,
621 .handle_mmio = handle_mmio_set_enable_reg,
622 },
623 {
624 .base = GIC_DIST_ENABLE_CLEAR,
625 .len = VGIC_NR_IRQS / 8,
626 .handle_mmio = handle_mmio_clear_enable_reg,
627 },
628 {
629 .base = GIC_DIST_PENDING_SET,
630 .len = VGIC_NR_IRQS / 8,
631 .handle_mmio = handle_mmio_set_pending_reg,
632 },
633 {
634 .base = GIC_DIST_PENDING_CLEAR,
635 .len = VGIC_NR_IRQS / 8,
636 .handle_mmio = handle_mmio_clear_pending_reg,
637 },
638 {
639 .base = GIC_DIST_ACTIVE_SET,
640 .len = VGIC_NR_IRQS / 8,
641 .handle_mmio = handle_mmio_raz_wi,
642 },
643 {
644 .base = GIC_DIST_ACTIVE_CLEAR,
645 .len = VGIC_NR_IRQS / 8,
646 .handle_mmio = handle_mmio_raz_wi,
647 },
648 {
649 .base = GIC_DIST_PRI,
650 .len = VGIC_NR_IRQS,
651 .handle_mmio = handle_mmio_priority_reg,
652 },
653 {
654 .base = GIC_DIST_TARGET,
655 .len = VGIC_NR_IRQS,
656 .handle_mmio = handle_mmio_target_reg,
657 },
658 {
659 .base = GIC_DIST_CONFIG,
660 .len = VGIC_NR_IRQS / 4,
661 .handle_mmio = handle_mmio_cfg_reg,
662 },
663 {
664 .base = GIC_DIST_SOFTINT,
665 .len = 4,
666 .handle_mmio = handle_mmio_sgi_reg,
667 },
668 {}
669};
670
671static const
672struct mmio_range *find_matching_range(const struct mmio_range *ranges,
673 struct kvm_exit_mmio *mmio,
674 phys_addr_t base)
675{
676 const struct mmio_range *r = ranges;
677 phys_addr_t addr = mmio->phys_addr - base;
678
679 while (r->len) {
680 if (addr >= r->base &&
681 (addr + mmio->len) <= (r->base + r->len))
682 return r;
683 r++;
684 }
685
686 return NULL;
687}
688
689/**
690 * vgic_handle_mmio - handle an in-kernel MMIO access
691 * @vcpu: pointer to the vcpu performing the access
692 * @run: pointer to the kvm_run structure
693 * @mmio: pointer to the data describing the access
694 *
695 * returns true if the MMIO access has been performed in kernel space,
696 * and false if it needs to be emulated in user space.
697 */
698bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
699 struct kvm_exit_mmio *mmio)
700{
701 const struct mmio_range *range;
702 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
703 unsigned long base = dist->vgic_dist_base;
704 bool updated_state;
705 unsigned long offset;
706
707 if (!irqchip_in_kernel(vcpu->kvm) ||
708 mmio->phys_addr < base ||
709 (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
710 return false;
711
712 /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
713 if (mmio->len > 4) {
714 kvm_inject_dabt(vcpu, mmio->phys_addr);
715 return true;
716 }
717
718 range = find_matching_range(vgic_ranges, mmio, base);
719 if (unlikely(!range || !range->handle_mmio)) {
720 pr_warn("Unhandled access %d %08llx %d\n",
721 mmio->is_write, mmio->phys_addr, mmio->len);
722 return false;
723 }
724
725 spin_lock(&vcpu->kvm->arch.vgic.lock);
726 offset = mmio->phys_addr - range->base - base;
727 updated_state = range->handle_mmio(vcpu, mmio, offset);
728 spin_unlock(&vcpu->kvm->arch.vgic.lock);
729 kvm_prepare_mmio(run, mmio);
730 kvm_handle_mmio_return(vcpu, run);
731
732 if (updated_state)
733 vgic_kick_vcpus(vcpu->kvm);
734
735 return true;
736}
737
738static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
739{
740 struct kvm *kvm = vcpu->kvm;
741 struct vgic_dist *dist = &kvm->arch.vgic;
742 int nrcpus = atomic_read(&kvm->online_vcpus);
743 u8 target_cpus;
744 int sgi, mode, c, vcpu_id;
745
746 vcpu_id = vcpu->vcpu_id;
747
748 sgi = reg & 0xf;
749 target_cpus = (reg >> 16) & 0xff;
750 mode = (reg >> 24) & 3;
751
752 switch (mode) {
753 case 0:
754 if (!target_cpus)
755 return;
756
757 case 1:
758 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
759 break;
760
761 case 2:
762 target_cpus = 1 << vcpu_id;
763 break;
764 }
765
766 kvm_for_each_vcpu(c, vcpu, kvm) {
767 if (target_cpus & 1) {
768 /* Flag the SGI as pending */
769 vgic_dist_irq_set(vcpu, sgi);
770 dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
771 kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
772 }
773
774 target_cpus >>= 1;
775 }
776}
777
778static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
779{
780 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
781 unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
782 unsigned long pending_private, pending_shared;
783 int vcpu_id;
784
785 vcpu_id = vcpu->vcpu_id;
786 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
787 pend_shared = vcpu->arch.vgic_cpu.pending_shared;
788
789 pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
790 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
791 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
792
793 pending = vgic_bitmap_get_shared_map(&dist->irq_state);
794 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
795 bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
796 bitmap_and(pend_shared, pend_shared,
797 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
798 VGIC_NR_SHARED_IRQS);
799
800 pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
801 pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
802 return (pending_private < VGIC_NR_PRIVATE_IRQS ||
803 pending_shared < VGIC_NR_SHARED_IRQS);
804}
805
806/*
807 * Update the interrupt state and determine which CPUs have pending
808 * interrupts. Must be called with distributor lock held.
809 */
810static void vgic_update_state(struct kvm *kvm)
811{
812 struct vgic_dist *dist = &kvm->arch.vgic;
813 struct kvm_vcpu *vcpu;
814 int c;
815
816 if (!dist->enabled) {
817 set_bit(0, &dist->irq_pending_on_cpu);
818 return;
819 }
820
821 kvm_for_each_vcpu(c, vcpu, kvm) {
822 if (compute_pending_for_cpu(vcpu)) {
823 pr_debug("CPU%d has pending interrupts\n", c);
824 set_bit(c, &dist->irq_pending_on_cpu);
825 }
826 }
827}
828
829#define LR_CPUID(lr) \
830 (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
831#define MK_LR_PEND(src, irq) \
832 (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
833
834/*
835 * An interrupt may have been disabled after being made pending on the
836 * CPU interface (the classic case is a timer running while we're
837 * rebooting the guest - the interrupt would kick as soon as the CPU
838 * interface gets enabled, with deadly consequences).
839 *
840 * The solution is to examine already active LRs, and check the
841 * interrupt is still enabled. If not, just retire it.
842 */
843static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
844{
845 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
846 int lr;
847
848 for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
849 int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
850
851 if (!vgic_irq_is_enabled(vcpu, irq)) {
852 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
853 clear_bit(lr, vgic_cpu->lr_used);
854 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
855 if (vgic_irq_is_active(vcpu, irq))
856 vgic_irq_clear_active(vcpu, irq);
857 }
858 }
859}
860
861/*
862 * Queue an interrupt to a CPU virtual interface. Return true on success,
863 * or false if it wasn't possible to queue it.
864 */
865static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
866{
867 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
868 int lr;
869
870 /* Sanitize the input... */
871 BUG_ON(sgi_source_id & ~7);
872 BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
873 BUG_ON(irq >= VGIC_NR_IRQS);
874
875 kvm_debug("Queue IRQ%d\n", irq);
876
877 lr = vgic_cpu->vgic_irq_lr_map[irq];
878
879 /* Do we have an active interrupt for the same CPUID? */
880 if (lr != LR_EMPTY &&
881 (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
882 kvm_debug("LR%d piggyback for IRQ%d %x\n",
883 lr, irq, vgic_cpu->vgic_lr[lr]);
884 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
885 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
886 return true;
887 }
888
889 /* Try to use another LR for this interrupt */
890 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
891 vgic_cpu->nr_lr);
892 if (lr >= vgic_cpu->nr_lr)
893 return false;
894
895 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
896 vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
897 vgic_cpu->vgic_irq_lr_map[irq] = lr;
898 set_bit(lr, vgic_cpu->lr_used);
899
900 if (!vgic_irq_is_edge(vcpu, irq))
901 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
902
903 return true;
904}
905
906static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
907{
908 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
909 unsigned long sources;
910 int vcpu_id = vcpu->vcpu_id;
911 int c;
912
913 sources = dist->irq_sgi_sources[vcpu_id][irq];
914
915 for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
916 if (vgic_queue_irq(vcpu, c, irq))
917 clear_bit(c, &sources);
918 }
919
920 dist->irq_sgi_sources[vcpu_id][irq] = sources;
921
922 /*
923 * If the sources bitmap has been cleared it means that we
924 * could queue all the SGIs onto link registers (see the
925 * clear_bit above), and therefore we are done with them in
926 * our emulated gic and can get rid of them.
927 */
928 if (!sources) {
929 vgic_dist_irq_clear(vcpu, irq);
930 vgic_cpu_irq_clear(vcpu, irq);
931 return true;
932 }
933
934 return false;
935}
936
937static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
938{
939 if (vgic_irq_is_active(vcpu, irq))
940 return true; /* level interrupt, already queued */
941
942 if (vgic_queue_irq(vcpu, 0, irq)) {
943 if (vgic_irq_is_edge(vcpu, irq)) {
944 vgic_dist_irq_clear(vcpu, irq);
945 vgic_cpu_irq_clear(vcpu, irq);
946 } else {
947 vgic_irq_set_active(vcpu, irq);
948 }
949
950 return true;
951 }
952
953 return false;
954}
955
956/*
957 * Fill the list registers with pending interrupts before running the
958 * guest.
959 */
960static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
961{
962 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
963 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
964 int i, vcpu_id;
965 int overflow = 0;
966
967 vcpu_id = vcpu->vcpu_id;
968
969 /*
970 * We may not have any pending interrupt, or the interrupts
971 * may have been serviced from another vcpu. In all cases,
972 * move along.
973 */
974 if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
975 pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
976 goto epilog;
977 }
978
979 /* SGIs */
980 for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
981 if (!vgic_queue_sgi(vcpu, i))
982 overflow = 1;
983 }
984
985 /* PPIs */
986 for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
987 if (!vgic_queue_hwirq(vcpu, i))
988 overflow = 1;
989 }
990
991 /* SPIs */
992 for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
993 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
994 overflow = 1;
995 }
996
997epilog:
998 if (overflow) {
999 vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
1000 } else {
1001 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1002 /*
1003 * We're about to run this VCPU, and we've consumed
1004 * everything the distributor had in store for
1005 * us. Claim we don't have anything pending. We'll
1006 * adjust that if needed while exiting.
1007 */
1008 clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
1009 }
1010}
1011
1012static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1013{
1014 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1015 bool level_pending = false;
1016
1017 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
1018
1019 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
1020 /*
1021 * Some level interrupts have been EOIed. Clear their
1022 * active bit.
1023 */
1024 int lr, irq;
1025
1026 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
1027 vgic_cpu->nr_lr) {
1028 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1029
1030 vgic_irq_clear_active(vcpu, irq);
1031 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
1032
1033 /* Any additional pending interrupt? */
1034 if (vgic_dist_irq_is_pending(vcpu, irq)) {
1035 vgic_cpu_irq_set(vcpu, irq);
1036 level_pending = true;
1037 } else {
1038 vgic_cpu_irq_clear(vcpu, irq);
1039 }
1040
1041 /*
1042 * Despite being EOIed, the LR may not have
1043 * been marked as empty.
1044 */
1045 set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
1046 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
1047 }
1048 }
1049
1050 if (vgic_cpu->vgic_misr & GICH_MISR_U)
1051 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1052
1053 return level_pending;
1054}
1055
1056/*
1057 * Sync back the VGIC state after a guest run. The distributor lock is
1058 * needed so we don't get preempted in the middle of the state processing.
1059 */
1060static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1061{
1062 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1063 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1064 int lr, pending;
1065 bool level_pending;
1066
1067 level_pending = vgic_process_maintenance(vcpu);
1068
1069 /* Clear mappings for empty LRs */
1070 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
1071 vgic_cpu->nr_lr) {
1072 int irq;
1073
1074 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1075 continue;
1076
1077 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1078
1079 BUG_ON(irq >= VGIC_NR_IRQS);
1080 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1081 }
1082
1083 /* Check if we still have something up our sleeve... */
1084 pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
1085 vgic_cpu->nr_lr);
1086 if (level_pending || pending < vgic_cpu->nr_lr)
1087 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1088}
1089
1090void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1091{
1092 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1093
1094 if (!irqchip_in_kernel(vcpu->kvm))
1095 return;
1096
1097 spin_lock(&dist->lock);
1098 __kvm_vgic_flush_hwstate(vcpu);
1099 spin_unlock(&dist->lock);
1100}
1101
1102void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1103{
1104 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1105
1106 if (!irqchip_in_kernel(vcpu->kvm))
1107 return;
1108
1109 spin_lock(&dist->lock);
1110 __kvm_vgic_sync_hwstate(vcpu);
1111 spin_unlock(&dist->lock);
1112}
1113
1114int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1115{
1116 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1117
1118 if (!irqchip_in_kernel(vcpu->kvm))
1119 return 0;
1120
1121 return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1122}
1123
1124static void vgic_kick_vcpus(struct kvm *kvm)
1125{
1126 struct kvm_vcpu *vcpu;
1127 int c;
1128
1129 /*
1130 * We've injected an interrupt, time to find out who deserves
1131 * a good kick...
1132 */
1133 kvm_for_each_vcpu(c, vcpu, kvm) {
1134 if (kvm_vgic_vcpu_pending_irq(vcpu))
1135 kvm_vcpu_kick(vcpu);
1136 }
1137}
1138
1139static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1140{
1141 int is_edge = vgic_irq_is_edge(vcpu, irq);
1142 int state = vgic_dist_irq_is_pending(vcpu, irq);
1143
1144 /*
1145 * Only inject an interrupt if:
1146 * - edge triggered and we have a rising edge
1147 * - level triggered and we change level
1148 */
1149 if (is_edge)
1150 return level > state;
1151 else
1152 return level != state;
1153}
1154
1155static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
1156 unsigned int irq_num, bool level)
1157{
1158 struct vgic_dist *dist = &kvm->arch.vgic;
1159 struct kvm_vcpu *vcpu;
1160 int is_edge, is_level;
1161 int enabled;
1162 bool ret = true;
1163
1164 spin_lock(&dist->lock);
1165
1166 vcpu = kvm_get_vcpu(kvm, cpuid);
1167 is_edge = vgic_irq_is_edge(vcpu, irq_num);
1168 is_level = !is_edge;
1169
1170 if (!vgic_validate_injection(vcpu, irq_num, level)) {
1171 ret = false;
1172 goto out;
1173 }
1174
1175 if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1176 cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1177 vcpu = kvm_get_vcpu(kvm, cpuid);
1178 }
1179
1180 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1181
1182 if (level)
1183 vgic_dist_irq_set(vcpu, irq_num);
1184 else
1185 vgic_dist_irq_clear(vcpu, irq_num);
1186
1187 enabled = vgic_irq_is_enabled(vcpu, irq_num);
1188
1189 if (!enabled) {
1190 ret = false;
1191 goto out;
1192 }
1193
1194 if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
1195 /*
1196 * Level interrupt in progress, will be picked up
1197 * when EOId.
1198 */
1199 ret = false;
1200 goto out;
1201 }
1202
1203 if (level) {
1204 vgic_cpu_irq_set(vcpu, irq_num);
1205 set_bit(cpuid, &dist->irq_pending_on_cpu);
1206 }
1207
1208out:
1209 spin_unlock(&dist->lock);
1210
1211 return ret;
1212}
1213
1214/**
1215 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1216 * @kvm: The VM structure pointer
1217 * @cpuid: The CPU for PPIs
1218 * @irq_num: The IRQ number that is assigned to the device
1219 * @level: Edge-triggered: true: to trigger the interrupt
1220 * false: to ignore the call
1221 * Level-sensitive true: activates an interrupt
1222 * false: deactivates an interrupt
1223 *
1224 * The GIC is not concerned with devices being active-LOW or active-HIGH for
1225 * level-sensitive interrupts. You can think of the level parameter as 1
1226 * being HIGH and 0 being LOW and all devices being active-HIGH.
1227 */
1228int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1229 bool level)
1230{
1231 if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
1232 vgic_kick_vcpus(kvm);
1233
1234 return 0;
1235}
1236
1237static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1238{
1239 /*
1240 * We cannot rely on the vgic maintenance interrupt to be
1241 * delivered synchronously. This means we can only use it to
1242 * exit the VM, and we perform the handling of EOIed
1243 * interrupts on the exit path (see vgic_process_maintenance).
1244 */
1245 return IRQ_HANDLED;
1246}
1247
1248int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
1249{
1250 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1251 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1252 int i;
1253
1254 if (!irqchip_in_kernel(vcpu->kvm))
1255 return 0;
1256
1257 if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
1258 return -EBUSY;
1259
1260 for (i = 0; i < VGIC_NR_IRQS; i++) {
1261 if (i < VGIC_NR_PPIS)
1262 vgic_bitmap_set_irq_val(&dist->irq_enabled,
1263 vcpu->vcpu_id, i, 1);
1264 if (i < VGIC_NR_PRIVATE_IRQS)
1265 vgic_bitmap_set_irq_val(&dist->irq_cfg,
1266 vcpu->vcpu_id, i, VGIC_CFG_EDGE);
1267
1268 vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
1269 }
1270
1271 /*
1272 * By forcing VMCR to zero, the GIC will restore the binary
1273 * points to their reset values. Anything else resets to zero
1274 * anyway.
1275 */
1276 vgic_cpu->vgic_vmcr = 0;
1277
1278 vgic_cpu->nr_lr = vgic_nr_lr;
1279 vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
1280
1281 return 0;
1282}
1283
1284static void vgic_init_maintenance_interrupt(void *info)
1285{
1286 enable_percpu_irq(vgic_maint_irq, 0);
1287}
1288
1289static int vgic_cpu_notify(struct notifier_block *self,
1290 unsigned long action, void *cpu)
1291{
1292 switch (action) {
1293 case CPU_STARTING:
1294 case CPU_STARTING_FROZEN:
1295 vgic_init_maintenance_interrupt(NULL);
1296 break;
1297 case CPU_DYING:
1298 case CPU_DYING_FROZEN:
1299 disable_percpu_irq(vgic_maint_irq);
1300 break;
1301 }
1302
1303 return NOTIFY_OK;
1304}
1305
1306static struct notifier_block vgic_cpu_nb = {
1307 .notifier_call = vgic_cpu_notify,
1308};
1309
1310int kvm_vgic_hyp_init(void)
1311{
1312 int ret;
1313 struct resource vctrl_res;
1314 struct resource vcpu_res;
1315
1316 vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
1317 if (!vgic_node) {
1318 kvm_err("error: no compatible vgic node in DT\n");
1319 return -ENODEV;
1320 }
1321
1322 vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
1323 if (!vgic_maint_irq) {
1324 kvm_err("error getting vgic maintenance irq from DT\n");
1325 ret = -ENXIO;
1326 goto out;
1327 }
1328
1329 ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
1330 "vgic", kvm_get_running_vcpus());
1331 if (ret) {
1332 kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
1333 goto out;
1334 }
1335
1336 ret = register_cpu_notifier(&vgic_cpu_nb);
1337 if (ret) {
1338 kvm_err("Cannot register vgic CPU notifier\n");
1339 goto out_free_irq;
1340 }
1341
1342 ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
1343 if (ret) {
1344 kvm_err("Cannot obtain VCTRL resource\n");
1345 goto out_free_irq;
1346 }
1347
1348 vgic_vctrl_base = of_iomap(vgic_node, 2);
1349 if (!vgic_vctrl_base) {
1350 kvm_err("Cannot ioremap VCTRL\n");
1351 ret = -ENOMEM;
1352 goto out_free_irq;
1353 }
1354
1355 vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1356 vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1357
1358 ret = create_hyp_io_mappings(vgic_vctrl_base,
1359 vgic_vctrl_base + resource_size(&vctrl_res),
1360 vctrl_res.start);
1361 if (ret) {
1362 kvm_err("Cannot map VCTRL into hyp\n");
1363 goto out_unmap;
1364 }
1365
1366 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1367 vctrl_res.start, vgic_maint_irq);
1368 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1369
1370 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1371 kvm_err("Cannot obtain VCPU resource\n");
1372 ret = -ENXIO;
1373 goto out_unmap;
1374 }
1375 vgic_vcpu_base = vcpu_res.start;
1376
1377 goto out;
1378
1379out_unmap:
1380 iounmap(vgic_vctrl_base);
1381out_free_irq:
1382 free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
1383out:
1384 of_node_put(vgic_node);
1385 return ret;
1386}
1387
1388int kvm_vgic_init(struct kvm *kvm)
1389{
1390 int ret = 0, i;
1391
1392 mutex_lock(&kvm->lock);
1393
1394 if (vgic_initialized(kvm))
1395 goto out;
1396
1397 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
1398 IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
1399 kvm_err("Need to set vgic cpu and dist addresses first\n");
1400 ret = -ENXIO;
1401 goto out;
1402 }
1403
1404 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
1405 vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
1406 if (ret) {
1407 kvm_err("Unable to remap VGIC CPU to VCPU\n");
1408 goto out;
1409 }
1410
1411 for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
1412 vgic_set_target_reg(kvm, 0, i);
1413
1414 kvm_timer_init(kvm);
1415 kvm->arch.vgic.ready = true;
1416out:
1417 mutex_unlock(&kvm->lock);
1418 return ret;
1419}
1420
1421int kvm_vgic_create(struct kvm *kvm)
1422{
1423 int ret = 0;
1424
1425 mutex_lock(&kvm->lock);
1426
1427 if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
1428 ret = -EEXIST;
1429 goto out;
1430 }
1431
1432 spin_lock_init(&kvm->arch.vgic.lock);
1433 kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
1434 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
1435 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
1436
1437out:
1438 mutex_unlock(&kvm->lock);
1439 return ret;
1440}
1441
1442static bool vgic_ioaddr_overlap(struct kvm *kvm)
1443{
1444 phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
1445 phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
1446
1447 if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
1448 return 0;
1449 if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
1450 (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
1451 return -EBUSY;
1452 return 0;
1453}
1454
1455static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1456 phys_addr_t addr, phys_addr_t size)
1457{
1458 int ret;
1459
1460 if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
1461 return -EEXIST;
1462 if (addr + size < addr)
1463 return -EINVAL;
1464
1465 ret = vgic_ioaddr_overlap(kvm);
1466 if (ret)
1467 return ret;
1468 *ioaddr = addr;
1469 return ret;
1470}
1471
1472int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1473{
1474 int r = 0;
1475 struct vgic_dist *vgic = &kvm->arch.vgic;
1476
1477 if (addr & ~KVM_PHYS_MASK)
1478 return -E2BIG;
1479
1480 if (addr & (SZ_4K - 1))
1481 return -EINVAL;
1482
1483 mutex_lock(&kvm->lock);
1484 switch (type) {
1485 case KVM_VGIC_V2_ADDR_TYPE_DIST:
1486 r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
1487 addr, KVM_VGIC_V2_DIST_SIZE);
1488 break;
1489 case KVM_VGIC_V2_ADDR_TYPE_CPU:
1490 r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
1491 addr, KVM_VGIC_V2_CPU_SIZE);
1492 break;
1493 default:
1494 r = -ENODEV;
1495 }
1496
1497 mutex_unlock(&kvm->lock);
1498 return r;
1499}
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index a075b3e0c5c7..e026b19b23ea 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -40,6 +40,7 @@ config ARCH_DAVINCI_DA850
40 bool "DA850/OMAP-L138/AM18x based system" 40 bool "DA850/OMAP-L138/AM18x based system"
41 select ARCH_DAVINCI_DA8XX 41 select ARCH_DAVINCI_DA8XX
42 select ARCH_HAS_CPUFREQ 42 select ARCH_HAS_CPUFREQ
43 select CPU_FREQ_TABLE
43 select CP_INTC 44 select CP_INTC
44 45
45config ARCH_DAVINCI_DA8XX 46config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 4d6933848abf..a0d4f6038b60 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1004,7 +1004,7 @@ static const struct da850_opp da850_opp_96 = {
1004 1004
1005#define OPP(freq) \ 1005#define OPP(freq) \
1006 { \ 1006 { \
1007 .index = (unsigned int) &da850_opp_##freq, \ 1007 .driver_data = (unsigned int) &da850_opp_##freq, \
1008 .frequency = freq * 1000, \ 1008 .frequency = freq * 1000, \
1009 } 1009 }
1010 1010
@@ -1016,7 +1016,7 @@ static struct cpufreq_frequency_table da850_freq_table[] = {
1016 OPP(200), 1016 OPP(200),
1017 OPP(96), 1017 OPP(96),
1018 { 1018 {
1019 .index = 0, 1019 .driver_data = 0,
1020 .frequency = CPUFREQ_TABLE_END, 1020 .frequency = CPUFREQ_TABLE_END,
1021 }, 1021 },
1022}; 1022};
@@ -1044,7 +1044,7 @@ static int da850_set_voltage(unsigned int index)
1044 if (!cvdd) 1044 if (!cvdd)
1045 return -ENODEV; 1045 return -ENODEV;
1046 1046
1047 opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; 1047 opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data;
1048 1048
1049 return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); 1049 return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max);
1050} 1050}
@@ -1125,7 +1125,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
1125 struct pll_data *pll = clk->pll_data; 1125 struct pll_data *pll = clk->pll_data;
1126 int ret; 1126 int ret;
1127 1127
1128 opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; 1128 opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data;
1129 prediv = opp->prediv; 1129 prediv = opp->prediv;
1130 mult = opp->mult; 1130 mult = opp->mult;
1131 postdiv = opp->postdiv; 1131 postdiv = opp->postdiv;
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 68be532f8688..5cc92874be7e 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -588,11 +588,6 @@ static int _od_runtime_suspend(struct device *dev)
588 return ret; 588 return ret;
589} 589}
590 590
591static int _od_runtime_idle(struct device *dev)
592{
593 return pm_generic_runtime_idle(dev);
594}
595
596static int _od_runtime_resume(struct device *dev) 591static int _od_runtime_resume(struct device *dev)
597{ 592{
598 struct platform_device *pdev = to_platform_device(dev); 593 struct platform_device *pdev = to_platform_device(dev);
@@ -648,7 +643,7 @@ static int _od_resume_noirq(struct device *dev)
648struct dev_pm_domain omap_device_pm_domain = { 643struct dev_pm_domain omap_device_pm_domain = {
649 .ops = { 644 .ops = {
650 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 645 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
651 _od_runtime_idle) 646 NULL)
652 USE_PLATFORM_PM_SLEEP_OPS 647 USE_PLATFORM_PM_SLEEP_OPS
653 .suspend_noirq = _od_suspend_noirq, 648 .suspend_noirq = _od_suspend_noirq,
654 .resume_noirq = _od_resume_noirq, 649 .resume_noirq = _od_resume_noirq,
diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c
index aee3c8940a30..7a42e1960c3b 100644
--- a/arch/arm/mach-omap2/smartreflex-class3.c
+++ b/arch/arm/mach-omap2/smartreflex-class3.c
@@ -26,14 +26,14 @@ static int sr_class3_enable(struct omap_sr *sr)
26 } 26 }
27 27
28 omap_vp_enable(sr->voltdm); 28 omap_vp_enable(sr->voltdm);
29 return sr_enable(sr->voltdm, volt); 29 return sr_enable(sr, volt);
30} 30}
31 31
32static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) 32static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset)
33{ 33{
34 sr_disable_errgen(sr->voltdm); 34 sr_disable_errgen(sr);
35 omap_vp_disable(sr->voltdm); 35 omap_vp_disable(sr->voltdm);
36 sr_disable(sr->voltdm); 36 sr_disable(sr);
37 if (is_volt_reset) 37 if (is_volt_reset)
38 voltdm_reset(sr->voltdm); 38 voltdm_reset(sr->voltdm);
39 39
@@ -42,7 +42,7 @@ static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset)
42 42
43static int sr_class3_configure(struct omap_sr *sr) 43static int sr_class3_configure(struct omap_sr *sr)
44{ 44{
45 return sr_configure_errgen(sr->voltdm); 45 return sr_configure_errgen(sr);
46} 46}
47 47
48/* SR class3 structure */ 48/* SR class3 structure */
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 96100dbf5a2e..a8427115ee07 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -615,12 +615,14 @@ endmenu
615config PXA25x 615config PXA25x
616 bool 616 bool
617 select CPU_XSCALE 617 select CPU_XSCALE
618 select CPU_FREQ_TABLE if CPU_FREQ
618 help 619 help
619 Select code specific to PXA21x/25x/26x variants 620 Select code specific to PXA21x/25x/26x variants
620 621
621config PXA27x 622config PXA27x
622 bool 623 bool
623 select CPU_XSCALE 624 select CPU_XSCALE
625 select CPU_FREQ_TABLE if CPU_FREQ
624 help 626 help
625 Select code specific to PXA27x variants 627 Select code specific to PXA27x variants
626 628
@@ -633,6 +635,7 @@ config CPU_PXA26x
633config PXA3xx 635config PXA3xx
634 bool 636 bool
635 select CPU_XSC3 637 select CPU_XSC3
638 select CPU_FREQ_TABLE if CPU_FREQ
636 help 639 help
637 Select code specific to PXA3xx variants 640 Select code specific to PXA3xx variants
638 641
diff --git a/arch/arm/mach-s3c24xx/cpufreq-utils.c b/arch/arm/mach-s3c24xx/cpufreq-utils.c
index ddd8280e3875..2a0aa5684e72 100644
--- a/arch/arm/mach-s3c24xx/cpufreq-utils.c
+++ b/arch/arm/mach-s3c24xx/cpufreq-utils.c
@@ -60,5 +60,5 @@ void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
60 */ 60 */
61void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg) 61void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg)
62{ 62{
63 __raw_writel(cfg->pll.index, S3C2410_MPLLCON); 63 __raw_writel(cfg->pll.driver_data, S3C2410_MPLLCON);
64} 64}
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2410.c b/arch/arm/mach-s3c24xx/pll-s3c2410.c
index dcf3420a3271..5e37d368594b 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2410.c
@@ -33,36 +33,36 @@
33#include <plat/cpu-freq-core.h> 33#include <plat/cpu-freq-core.h>
34 34
35static struct cpufreq_frequency_table pll_vals_12MHz[] = { 35static struct cpufreq_frequency_table pll_vals_12MHz[] = {
36 { .frequency = 34000000, .index = PLLVAL(82, 2, 3), }, 36 { .frequency = 34000000, .driver_data = PLLVAL(82, 2, 3), },
37 { .frequency = 45000000, .index = PLLVAL(82, 1, 3), }, 37 { .frequency = 45000000, .driver_data = PLLVAL(82, 1, 3), },
38 { .frequency = 51000000, .index = PLLVAL(161, 3, 3), }, 38 { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), },
39 { .frequency = 48000000, .index = PLLVAL(120, 2, 3), }, 39 { .frequency = 48000000, .driver_data = PLLVAL(120, 2, 3), },
40 { .frequency = 56000000, .index = PLLVAL(142, 2, 3), }, 40 { .frequency = 56000000, .driver_data = PLLVAL(142, 2, 3), },
41 { .frequency = 68000000, .index = PLLVAL(82, 2, 2), }, 41 { .frequency = 68000000, .driver_data = PLLVAL(82, 2, 2), },
42 { .frequency = 79000000, .index = PLLVAL(71, 1, 2), }, 42 { .frequency = 79000000, .driver_data = PLLVAL(71, 1, 2), },
43 { .frequency = 85000000, .index = PLLVAL(105, 2, 2), }, 43 { .frequency = 85000000, .driver_data = PLLVAL(105, 2, 2), },
44 { .frequency = 90000000, .index = PLLVAL(112, 2, 2), }, 44 { .frequency = 90000000, .driver_data = PLLVAL(112, 2, 2), },
45 { .frequency = 101000000, .index = PLLVAL(127, 2, 2), }, 45 { .frequency = 101000000, .driver_data = PLLVAL(127, 2, 2), },
46 { .frequency = 113000000, .index = PLLVAL(105, 1, 2), }, 46 { .frequency = 113000000, .driver_data = PLLVAL(105, 1, 2), },
47 { .frequency = 118000000, .index = PLLVAL(150, 2, 2), }, 47 { .frequency = 118000000, .driver_data = PLLVAL(150, 2, 2), },
48 { .frequency = 124000000, .index = PLLVAL(116, 1, 2), }, 48 { .frequency = 124000000, .driver_data = PLLVAL(116, 1, 2), },
49 { .frequency = 135000000, .index = PLLVAL(82, 2, 1), }, 49 { .frequency = 135000000, .driver_data = PLLVAL(82, 2, 1), },
50 { .frequency = 147000000, .index = PLLVAL(90, 2, 1), }, 50 { .frequency = 147000000, .driver_data = PLLVAL(90, 2, 1), },
51 { .frequency = 152000000, .index = PLLVAL(68, 1, 1), }, 51 { .frequency = 152000000, .driver_data = PLLVAL(68, 1, 1), },
52 { .frequency = 158000000, .index = PLLVAL(71, 1, 1), }, 52 { .frequency = 158000000, .driver_data = PLLVAL(71, 1, 1), },
53 { .frequency = 170000000, .index = PLLVAL(77, 1, 1), }, 53 { .frequency = 170000000, .driver_data = PLLVAL(77, 1, 1), },
54 { .frequency = 180000000, .index = PLLVAL(82, 1, 1), }, 54 { .frequency = 180000000, .driver_data = PLLVAL(82, 1, 1), },
55 { .frequency = 186000000, .index = PLLVAL(85, 1, 1), }, 55 { .frequency = 186000000, .driver_data = PLLVAL(85, 1, 1), },
56 { .frequency = 192000000, .index = PLLVAL(88, 1, 1), }, 56 { .frequency = 192000000, .driver_data = PLLVAL(88, 1, 1), },
57 { .frequency = 203000000, .index = PLLVAL(161, 3, 1), }, 57 { .frequency = 203000000, .driver_data = PLLVAL(161, 3, 1), },
58 58
59 /* 2410A extras */ 59 /* 2410A extras */
60 60
61 { .frequency = 210000000, .index = PLLVAL(132, 2, 1), }, 61 { .frequency = 210000000, .driver_data = PLLVAL(132, 2, 1), },
62 { .frequency = 226000000, .index = PLLVAL(105, 1, 1), }, 62 { .frequency = 226000000, .driver_data = PLLVAL(105, 1, 1), },
63 { .frequency = 266000000, .index = PLLVAL(125, 1, 1), }, 63 { .frequency = 266000000, .driver_data = PLLVAL(125, 1, 1), },
64 { .frequency = 268000000, .index = PLLVAL(126, 1, 1), }, 64 { .frequency = 268000000, .driver_data = PLLVAL(126, 1, 1), },
65 { .frequency = 270000000, .index = PLLVAL(127, 1, 1), }, 65 { .frequency = 270000000, .driver_data = PLLVAL(127, 1, 1), },
66}; 66};
67 67
68static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif) 68static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif)
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index 673781758319..a19460e6e7b0 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -21,33 +21,33 @@
 #include <plat/cpu-freq-core.h>
 
 static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
-	{ .frequency = 75000000,	.index = PLLVAL(0x75, 3, 3), },	/* FVco 600.000000 */
-	{ .frequency = 80000000,	.index = PLLVAL(0x98, 4, 3), },	/* FVco 640.000000 */
-	{ .frequency = 90000000,	.index = PLLVAL(0x70, 2, 3), },	/* FVco 720.000000 */
-	{ .frequency = 100000000,	.index = PLLVAL(0x5c, 1, 3), },	/* FVco 800.000000 */
-	{ .frequency = 110000000,	.index = PLLVAL(0x66, 1, 3), },	/* FVco 880.000000 */
-	{ .frequency = 120000000,	.index = PLLVAL(0x70, 1, 3), },	/* FVco 960.000000 */
-	{ .frequency = 150000000,	.index = PLLVAL(0x75, 3, 2), },	/* FVco 600.000000 */
-	{ .frequency = 160000000,	.index = PLLVAL(0x98, 4, 2), },	/* FVco 640.000000 */
-	{ .frequency = 170000000,	.index = PLLVAL(0x4d, 1, 2), },	/* FVco 680.000000 */
-	{ .frequency = 180000000,	.index = PLLVAL(0x70, 2, 2), },	/* FVco 720.000000 */
-	{ .frequency = 190000000,	.index = PLLVAL(0x57, 1, 2), },	/* FVco 760.000000 */
-	{ .frequency = 200000000,	.index = PLLVAL(0x5c, 1, 2), },	/* FVco 800.000000 */
-	{ .frequency = 210000000,	.index = PLLVAL(0x84, 2, 2), },	/* FVco 840.000000 */
-	{ .frequency = 220000000,	.index = PLLVAL(0x66, 1, 2), },	/* FVco 880.000000 */
-	{ .frequency = 230000000,	.index = PLLVAL(0x6b, 1, 2), },	/* FVco 920.000000 */
-	{ .frequency = 240000000,	.index = PLLVAL(0x70, 1, 2), },	/* FVco 960.000000 */
-	{ .frequency = 300000000,	.index = PLLVAL(0x75, 3, 1), },	/* FVco 600.000000 */
-	{ .frequency = 310000000,	.index = PLLVAL(0x93, 4, 1), },	/* FVco 620.000000 */
-	{ .frequency = 320000000,	.index = PLLVAL(0x98, 4, 1), },	/* FVco 640.000000 */
-	{ .frequency = 330000000,	.index = PLLVAL(0x66, 2, 1), },	/* FVco 660.000000 */
-	{ .frequency = 340000000,	.index = PLLVAL(0x4d, 1, 1), },	/* FVco 680.000000 */
-	{ .frequency = 350000000,	.index = PLLVAL(0xa7, 4, 1), },	/* FVco 700.000000 */
-	{ .frequency = 360000000,	.index = PLLVAL(0x70, 2, 1), },	/* FVco 720.000000 */
-	{ .frequency = 370000000,	.index = PLLVAL(0xb1, 4, 1), },	/* FVco 740.000000 */
-	{ .frequency = 380000000,	.index = PLLVAL(0x57, 1, 1), },	/* FVco 760.000000 */
-	{ .frequency = 390000000,	.index = PLLVAL(0x7a, 2, 1), },	/* FVco 780.000000 */
-	{ .frequency = 400000000,	.index = PLLVAL(0x5c, 1, 1), },	/* FVco 800.000000 */
+	{ .frequency = 75000000,	.driver_data = PLLVAL(0x75, 3, 3), },	/* FVco 600.000000 */
+	{ .frequency = 80000000,	.driver_data = PLLVAL(0x98, 4, 3), },	/* FVco 640.000000 */
+	{ .frequency = 90000000,	.driver_data = PLLVAL(0x70, 2, 3), },	/* FVco 720.000000 */
+	{ .frequency = 100000000,	.driver_data = PLLVAL(0x5c, 1, 3), },	/* FVco 800.000000 */
+	{ .frequency = 110000000,	.driver_data = PLLVAL(0x66, 1, 3), },	/* FVco 880.000000 */
+	{ .frequency = 120000000,	.driver_data = PLLVAL(0x70, 1, 3), },	/* FVco 960.000000 */
+	{ .frequency = 150000000,	.driver_data = PLLVAL(0x75, 3, 2), },	/* FVco 600.000000 */
+	{ .frequency = 160000000,	.driver_data = PLLVAL(0x98, 4, 2), },	/* FVco 640.000000 */
+	{ .frequency = 170000000,	.driver_data = PLLVAL(0x4d, 1, 2), },	/* FVco 680.000000 */
+	{ .frequency = 180000000,	.driver_data = PLLVAL(0x70, 2, 2), },	/* FVco 720.000000 */
+	{ .frequency = 190000000,	.driver_data = PLLVAL(0x57, 1, 2), },	/* FVco 760.000000 */
+	{ .frequency = 200000000,	.driver_data = PLLVAL(0x5c, 1, 2), },	/* FVco 800.000000 */
+	{ .frequency = 210000000,	.driver_data = PLLVAL(0x84, 2, 2), },	/* FVco 840.000000 */
+	{ .frequency = 220000000,	.driver_data = PLLVAL(0x66, 1, 2), },	/* FVco 880.000000 */
+	{ .frequency = 230000000,	.driver_data = PLLVAL(0x6b, 1, 2), },	/* FVco 920.000000 */
+	{ .frequency = 240000000,	.driver_data = PLLVAL(0x70, 1, 2), },	/* FVco 960.000000 */
+	{ .frequency = 300000000,	.driver_data = PLLVAL(0x75, 3, 1), },	/* FVco 600.000000 */
+	{ .frequency = 310000000,	.driver_data = PLLVAL(0x93, 4, 1), },	/* FVco 620.000000 */
+	{ .frequency = 320000000,	.driver_data = PLLVAL(0x98, 4, 1), },	/* FVco 640.000000 */
+	{ .frequency = 330000000,	.driver_data = PLLVAL(0x66, 2, 1), },	/* FVco 660.000000 */
+	{ .frequency = 340000000,	.driver_data = PLLVAL(0x4d, 1, 1), },	/* FVco 680.000000 */
+	{ .frequency = 350000000,	.driver_data = PLLVAL(0xa7, 4, 1), },	/* FVco 700.000000 */
+	{ .frequency = 360000000,	.driver_data = PLLVAL(0x70, 2, 1), },	/* FVco 720.000000 */
+	{ .frequency = 370000000,	.driver_data = PLLVAL(0xb1, 4, 1), },	/* FVco 740.000000 */
+	{ .frequency = 380000000,	.driver_data = PLLVAL(0x57, 1, 1), },	/* FVco 760.000000 */
+	{ .frequency = 390000000,	.driver_data = PLLVAL(0x7a, 2, 1), },	/* FVco 780.000000 */
+	{ .frequency = 400000000,	.driver_data = PLLVAL(0x5c, 1, 1), },	/* FVco 800.000000 */
 };
 
 static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif)
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index debfa106289b..1191b2905625 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -21,61 +21,61 @@
 #include <plat/cpu-freq-core.h>
 
 static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
-	{ .frequency = 78019200,	.index = PLLVAL(121, 5, 3), },	/* FVco 624.153600 */
-	{ .frequency = 84067200,	.index = PLLVAL(131, 5, 3), },	/* FVco 672.537600 */
-	{ .frequency = 90115200,	.index = PLLVAL(141, 5, 3), },	/* FVco 720.921600 */
-	{ .frequency = 96163200,	.index = PLLVAL(151, 5, 3), },	/* FVco 769.305600 */
-	{ .frequency = 102135600,	.index = PLLVAL(185, 6, 3), },	/* FVco 817.084800 */
-	{ .frequency = 108259200,	.index = PLLVAL(171, 5, 3), },	/* FVco 866.073600 */
-	{ .frequency = 114307200,	.index = PLLVAL(127, 3, 3), },	/* FVco 914.457600 */
-	{ .frequency = 120234240,	.index = PLLVAL(134, 3, 3), },	/* FVco 961.873920 */
-	{ .frequency = 126161280,	.index = PLLVAL(141, 3, 3), },	/* FVco 1009.290240 */
-	{ .frequency = 132088320,	.index = PLLVAL(148, 3, 3), },	/* FVco 1056.706560 */
-	{ .frequency = 138015360,	.index = PLLVAL(155, 3, 3), },	/* FVco 1104.122880 */
-	{ .frequency = 144789120,	.index = PLLVAL(163, 3, 3), },	/* FVco 1158.312960 */
-	{ .frequency = 150100363,	.index = PLLVAL(187, 9, 2), },	/* FVco 600.401454 */
-	{ .frequency = 156038400,	.index = PLLVAL(121, 5, 2), },	/* FVco 624.153600 */
-	{ .frequency = 162086400,	.index = PLLVAL(126, 5, 2), },	/* FVco 648.345600 */
-	{ .frequency = 168134400,	.index = PLLVAL(131, 5, 2), },	/* FVco 672.537600 */
-	{ .frequency = 174048000,	.index = PLLVAL(177, 7, 2), },	/* FVco 696.192000 */
-	{ .frequency = 180230400,	.index = PLLVAL(141, 5, 2), },	/* FVco 720.921600 */
-	{ .frequency = 186278400,	.index = PLLVAL(124, 4, 2), },	/* FVco 745.113600 */
-	{ .frequency = 192326400,	.index = PLLVAL(151, 5, 2), },	/* FVco 769.305600 */
-	{ .frequency = 198132480,	.index = PLLVAL(109, 3, 2), },	/* FVco 792.529920 */
-	{ .frequency = 204271200,	.index = PLLVAL(185, 6, 2), },	/* FVco 817.084800 */
-	{ .frequency = 210268800,	.index = PLLVAL(141, 4, 2), },	/* FVco 841.075200 */
-	{ .frequency = 216518400,	.index = PLLVAL(171, 5, 2), },	/* FVco 866.073600 */
-	{ .frequency = 222264000,	.index = PLLVAL(97, 2, 2), },	/* FVco 889.056000 */
-	{ .frequency = 228614400,	.index = PLLVAL(127, 3, 2), },	/* FVco 914.457600 */
-	{ .frequency = 234259200,	.index = PLLVAL(158, 4, 2), },	/* FVco 937.036800 */
-	{ .frequency = 240468480,	.index = PLLVAL(134, 3, 2), },	/* FVco 961.873920 */
-	{ .frequency = 246960000,	.index = PLLVAL(167, 4, 2), },	/* FVco 987.840000 */
-	{ .frequency = 252322560,	.index = PLLVAL(141, 3, 2), },	/* FVco 1009.290240 */
-	{ .frequency = 258249600,	.index = PLLVAL(114, 2, 2), },	/* FVco 1032.998400 */
-	{ .frequency = 264176640,	.index = PLLVAL(148, 3, 2), },	/* FVco 1056.706560 */
-	{ .frequency = 270950400,	.index = PLLVAL(120, 2, 2), },	/* FVco 1083.801600 */
-	{ .frequency = 276030720,	.index = PLLVAL(155, 3, 2), },	/* FVco 1104.122880 */
-	{ .frequency = 282240000,	.index = PLLVAL(92, 1, 2), },	/* FVco 1128.960000 */
-	{ .frequency = 289578240,	.index = PLLVAL(163, 3, 2), },	/* FVco 1158.312960 */
-	{ .frequency = 294235200,	.index = PLLVAL(131, 2, 2), },	/* FVco 1176.940800 */
-	{ .frequency = 300200727,	.index = PLLVAL(187, 9, 1), },	/* FVco 600.401454 */
-	{ .frequency = 306358690,	.index = PLLVAL(191, 9, 1), },	/* FVco 612.717380 */
-	{ .frequency = 312076800,	.index = PLLVAL(121, 5, 1), },	/* FVco 624.153600 */
-	{ .frequency = 318366720,	.index = PLLVAL(86, 3, 1), },	/* FVco 636.733440 */
-	{ .frequency = 324172800,	.index = PLLVAL(126, 5, 1), },	/* FVco 648.345600 */
-	{ .frequency = 330220800,	.index = PLLVAL(109, 4, 1), },	/* FVco 660.441600 */
-	{ .frequency = 336268800,	.index = PLLVAL(131, 5, 1), },	/* FVco 672.537600 */
-	{ .frequency = 342074880,	.index = PLLVAL(93, 3, 1), },	/* FVco 684.149760 */
-	{ .frequency = 348096000,	.index = PLLVAL(177, 7, 1), },	/* FVco 696.192000 */
-	{ .frequency = 355622400,	.index = PLLVAL(118, 4, 1), },	/* FVco 711.244800 */
-	{ .frequency = 360460800,	.index = PLLVAL(141, 5, 1), },	/* FVco 720.921600 */
-	{ .frequency = 366206400,	.index = PLLVAL(165, 6, 1), },	/* FVco 732.412800 */
-	{ .frequency = 372556800,	.index = PLLVAL(124, 4, 1), },	/* FVco 745.113600 */
-	{ .frequency = 378201600,	.index = PLLVAL(126, 4, 1), },	/* FVco 756.403200 */
-	{ .frequency = 384652800,	.index = PLLVAL(151, 5, 1), },	/* FVco 769.305600 */
-	{ .frequency = 391608000,	.index = PLLVAL(177, 6, 1), },	/* FVco 783.216000 */
-	{ .frequency = 396264960,	.index = PLLVAL(109, 3, 1), },	/* FVco 792.529920 */
-	{ .frequency = 402192000,	.index = PLLVAL(87, 2, 1), },	/* FVco 804.384000 */
+	{ .frequency = 78019200,	.driver_data = PLLVAL(121, 5, 3), },	/* FVco 624.153600 */
+	{ .frequency = 84067200,	.driver_data = PLLVAL(131, 5, 3), },	/* FVco 672.537600 */
+	{ .frequency = 90115200,	.driver_data = PLLVAL(141, 5, 3), },	/* FVco 720.921600 */
+	{ .frequency = 96163200,	.driver_data = PLLVAL(151, 5, 3), },	/* FVco 769.305600 */
+	{ .frequency = 102135600,	.driver_data = PLLVAL(185, 6, 3), },	/* FVco 817.084800 */
+	{ .frequency = 108259200,	.driver_data = PLLVAL(171, 5, 3), },	/* FVco 866.073600 */
+	{ .frequency = 114307200,	.driver_data = PLLVAL(127, 3, 3), },	/* FVco 914.457600 */
+	{ .frequency = 120234240,	.driver_data = PLLVAL(134, 3, 3), },	/* FVco 961.873920 */
+	{ .frequency = 126161280,	.driver_data = PLLVAL(141, 3, 3), },	/* FVco 1009.290240 */
+	{ .frequency = 132088320,	.driver_data = PLLVAL(148, 3, 3), },	/* FVco 1056.706560 */
+	{ .frequency = 138015360,	.driver_data = PLLVAL(155, 3, 3), },	/* FVco 1104.122880 */
+	{ .frequency = 144789120,	.driver_data = PLLVAL(163, 3, 3), },	/* FVco 1158.312960 */
+	{ .frequency = 150100363,	.driver_data = PLLVAL(187, 9, 2), },	/* FVco 600.401454 */
+	{ .frequency = 156038400,	.driver_data = PLLVAL(121, 5, 2), },	/* FVco 624.153600 */
+	{ .frequency = 162086400,	.driver_data = PLLVAL(126, 5, 2), },	/* FVco 648.345600 */
+	{ .frequency = 168134400,	.driver_data = PLLVAL(131, 5, 2), },	/* FVco 672.537600 */
+	{ .frequency = 174048000,	.driver_data = PLLVAL(177, 7, 2), },	/* FVco 696.192000 */
+	{ .frequency = 180230400,	.driver_data = PLLVAL(141, 5, 2), },	/* FVco 720.921600 */
+	{ .frequency = 186278400,	.driver_data = PLLVAL(124, 4, 2), },	/* FVco 745.113600 */
+	{ .frequency = 192326400,	.driver_data = PLLVAL(151, 5, 2), },	/* FVco 769.305600 */
+	{ .frequency = 198132480,	.driver_data = PLLVAL(109, 3, 2), },	/* FVco 792.529920 */
+	{ .frequency = 204271200,	.driver_data = PLLVAL(185, 6, 2), },	/* FVco 817.084800 */
+	{ .frequency = 210268800,	.driver_data = PLLVAL(141, 4, 2), },	/* FVco 841.075200 */
+	{ .frequency = 216518400,	.driver_data = PLLVAL(171, 5, 2), },	/* FVco 866.073600 */
+	{ .frequency = 222264000,	.driver_data = PLLVAL(97, 2, 2), },	/* FVco 889.056000 */
+	{ .frequency = 228614400,	.driver_data = PLLVAL(127, 3, 2), },	/* FVco 914.457600 */
+	{ .frequency = 234259200,	.driver_data = PLLVAL(158, 4, 2), },	/* FVco 937.036800 */
+	{ .frequency = 240468480,	.driver_data = PLLVAL(134, 3, 2), },	/* FVco 961.873920 */
+	{ .frequency = 246960000,	.driver_data = PLLVAL(167, 4, 2), },	/* FVco 987.840000 */
+	{ .frequency = 252322560,	.driver_data = PLLVAL(141, 3, 2), },	/* FVco 1009.290240 */
+	{ .frequency = 258249600,	.driver_data = PLLVAL(114, 2, 2), },	/* FVco 1032.998400 */
+	{ .frequency = 264176640,	.driver_data = PLLVAL(148, 3, 2), },	/* FVco 1056.706560 */
+	{ .frequency = 270950400,	.driver_data = PLLVAL(120, 2, 2), },	/* FVco 1083.801600 */
+	{ .frequency = 276030720,	.driver_data = PLLVAL(155, 3, 2), },	/* FVco 1104.122880 */
+	{ .frequency = 282240000,	.driver_data = PLLVAL(92, 1, 2), },	/* FVco 1128.960000 */
+	{ .frequency = 289578240,	.driver_data = PLLVAL(163, 3, 2), },	/* FVco 1158.312960 */
+	{ .frequency = 294235200,	.driver_data = PLLVAL(131, 2, 2), },	/* FVco 1176.940800 */
+	{ .frequency = 300200727,	.driver_data = PLLVAL(187, 9, 1), },	/* FVco 600.401454 */
+	{ .frequency = 306358690,	.driver_data = PLLVAL(191, 9, 1), },	/* FVco 612.717380 */
+	{ .frequency = 312076800,	.driver_data = PLLVAL(121, 5, 1), },	/* FVco 624.153600 */
+	{ .frequency = 318366720,	.driver_data = PLLVAL(86, 3, 1), },	/* FVco 636.733440 */
+	{ .frequency = 324172800,	.driver_data = PLLVAL(126, 5, 1), },	/* FVco 648.345600 */
+	{ .frequency = 330220800,	.driver_data = PLLVAL(109, 4, 1), },	/* FVco 660.441600 */
+	{ .frequency = 336268800,	.driver_data = PLLVAL(131, 5, 1), },	/* FVco 672.537600 */
+	{ .frequency = 342074880,	.driver_data = PLLVAL(93, 3, 1), },	/* FVco 684.149760 */
+	{ .frequency = 348096000,	.driver_data = PLLVAL(177, 7, 1), },	/* FVco 696.192000 */
+	{ .frequency = 355622400,	.driver_data = PLLVAL(118, 4, 1), },	/* FVco 711.244800 */
+	{ .frequency = 360460800,	.driver_data = PLLVAL(141, 5, 1), },	/* FVco 720.921600 */
+	{ .frequency = 366206400,	.driver_data = PLLVAL(165, 6, 1), },	/* FVco 732.412800 */
+	{ .frequency = 372556800,	.driver_data = PLLVAL(124, 4, 1), },	/* FVco 745.113600 */
+	{ .frequency = 378201600,	.driver_data = PLLVAL(126, 4, 1), },	/* FVco 756.403200 */
+	{ .frequency = 384652800,	.driver_data = PLLVAL(151, 5, 1), },	/* FVco 769.305600 */
+	{ .frequency = 391608000,	.driver_data = PLLVAL(177, 6, 1), },	/* FVco 783.216000 */
+	{ .frequency = 396264960,	.driver_data = PLLVAL(109, 3, 1), },	/* FVco 792.529920 */
+	{ .frequency = 402192000,	.driver_data = PLLVAL(87, 2, 1), },	/* FVco 804.384000 */
 };
 
 static int s3c2440_plls169344_add(struct device *dev,
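
The three PLL tables above change nothing but a field name. As a sketch (struct layout assumed from the cpufreq core of this period, not quoted from the patch itself), the rename swaps which member of a frequency-table entry carries the driver-private word:

/* Sketch only: the cpufreq frequency-table entry after the rename.
 * "driver_data" is opaque to the cpufreq core; these s3c24xx drivers
 * use it to stash the PLLVAL() register encoding for each rate. */
struct cpufreq_frequency_table {
	unsigned int	driver_data;	/* formerly ".index"; driver-private */
	unsigned int	frequency;	/* kHz, or the CPUFREQ_TABLE_END sentinel */
};
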
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 7e105932c09d..5390c6bbbc02 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -142,15 +142,15 @@ static void pllc2_table_rebuild(struct clk *clk)
 	/* Initialise PLLC2 frequency table */
 	for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) {
 		pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2;
-		pllc2_freq_table[i].index = i;
+		pllc2_freq_table[i].driver_data = i;
 	}
 
 	/* This is a special entry - switching PLL off makes it a repeater */
 	pllc2_freq_table[i].frequency = clk->parent->rate;
-	pllc2_freq_table[i].index = i;
+	pllc2_freq_table[i].driver_data = i;
 
 	pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END;
-	pllc2_freq_table[i].index = i;
+	pllc2_freq_table[i].driver_data = i;
 }
 
 static unsigned long pllc2_recalc(struct clk *clk)
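
The sh7372 hunk applies the same rename to a table that is rebuilt at runtime rather than declared statically. A minimal sketch of that pattern (names here are illustrative, not the driver's):

/* Sketch: fill a frequency table at runtime and close it with the
 * sentinel entry that stops the cpufreq core's table walk. */
static struct cpufreq_frequency_table example_table[8];

static void example_rebuild(unsigned long parent_rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_table) - 1; i++) {
		example_table[i].frequency = parent_rate * (i + 20) * 2;
		example_table[i].driver_data = i;	/* private slot index */
	}
	example_table[i].frequency = CPUFREQ_TABLE_END;	/* terminator */
}
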
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 65c5ae6fa386..ef3a8da49b2d 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -28,7 +28,6 @@ config ARCH_TEGRA_2x_SOC
 	select ARM_ERRATA_754327 if SMP
 	select ARM_ERRATA_764369 if SMP
 	select ARM_GIC
-	select CPU_FREQ_TABLE if CPU_FREQ
 	select CPU_V7
 	select PINCTRL
 	select PINCTRL_TEGRA20
@@ -46,7 +45,6 @@ config ARCH_TEGRA_3x_SOC
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_764369 if SMP
 	select ARM_GIC
-	select CPU_FREQ_TABLE if CPU_FREQ
 	select CPU_V7
 	select PINCTRL
 	select PINCTRL_TEGRA30
@@ -63,7 +61,6 @@ config ARCH_TEGRA_114_SOC
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_GIC
 	select ARM_L1_CACHE_SHIFT_6
-	select CPU_FREQ_TABLE if CPU_FREQ
 	select CPU_V7
 	select PINCTRL
 	select PINCTRL_TEGRA114
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index ec5836b1e713..b25153e2ebaa 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -23,7 +23,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/irqchip.h>
-#include <linux/clk/tegra.h>
+#include <linux/clk-provider.h>
 
 #include <asm/hardware/cache-l2x0.h>
 
@@ -60,7 +60,7 @@ u32 tegra_uart_config[4] = {
 #ifdef CONFIG_OF
 void __init tegra_dt_init_irq(void)
 {
-	tegra_clocks_init();
+	of_clk_init(NULL);
 	tegra_pmc_init();
 	tegra_init_irq();
 	irqchip_init();
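
The common.c hunk swaps a Tegra-private clock bring-up call for the generic common-clock entry point. Sketched below (behavior as documented for of_clk_init() of this era; the wrapper name is illustrative):

/* Sketch: with a NULL match table, of_clk_init() walks the kernel's
 * built-in list of devicetree clock providers (those registered via
 * CLK_OF_DECLARE) and initialises each one found in the DT. */
#include <linux/clk-provider.h>

static void __init example_init_clocks(void)
{
	of_clk_init(NULL);
}
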
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index b6145ea51641..e6fb0239151b 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -76,13 +76,15 @@ void __init ux500_init_irq(void)
 	} else if (cpu_is_u9540()) {
 		prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1);
 		ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1);
-		u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
+		u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
 			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
 			       U8500_CLKRST6_BASE);
 	} else if (cpu_is_u8540()) {
 		prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1);
 		ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1);
-		u8540_clk_init();
+		u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
+			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
+			       U8500_CLKRST6_BASE);
 	}
 }
 
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
index d7e17150028a..7231c8e4975e 100644
--- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
+++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
@@ -285,7 +285,7 @@ static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table,
285 s3c_freq_dbg("%s: { %d = %u kHz }\n", 285 s3c_freq_dbg("%s: { %d = %u kHz }\n",
286 __func__, index, freq); 286 __func__, index, freq);
287 287
288 table[index].index = index; 288 table[index].driver_data = index;
289 table[index].frequency = freq; 289 table[index].frequency = freq;
290 } 290 }
291 291
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 7c2f6685bf43..7f8759a8a92a 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1060,7 +1060,9 @@ struct platform_device *__init at32_add_device_usart(unsigned int id)
 
 void __init at32_setup_serial_console(unsigned int usart_id)
 {
+#ifdef CONFIG_SERIAL_ATMEL
 	atmel_default_console_device = at32_usarts[usart_id];
+#endif
 }
 
 /* --------------------------------------------------------------------
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 4258b088aa93..e49f918531ad 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -55,3 +55,4 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 8769a9045a54..3201ddb8da6a 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -134,11 +134,13 @@ config SVINTO_SIM
 
 config ETRAXFS
 	bool "ETRAX-FS-V32"
+	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Support CRIS V32.
 
 config CRIS_MACH_ARTPEC3
 	bool "ARTPEC-3"
+	select CPU_FREQ_TABLE if CPU_FREQ
 	help
 	  Support Axis ARTPEC-3.
 
@@ -637,40 +639,10 @@ endchoice
 
 endmenu
 
-source "drivers/base/Kconfig"
-
-# standard linux drivers
-source "drivers/mtd/Kconfig"
-
-source "drivers/parport/Kconfig"
-
-source "drivers/pnp/Kconfig"
-
-source "drivers/block/Kconfig"
-
-source "drivers/ide/Kconfig"
-
-source "drivers/net/Kconfig"
-
-source "drivers/i2c/Kconfig"
-
-source "drivers/rtc/Kconfig"
-
-#
-# input before char - char/joystick depends on it. As does USB.
-#
-source "drivers/input/Kconfig"
-
-source "drivers/char/Kconfig"
+source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
-source "drivers/usb/Kconfig"
-
-source "drivers/uwb/Kconfig"
-
-source "drivers/staging/Kconfig"
-
 source "arch/cris/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index 37e6d2c50b76..22d846bfc570 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -230,46 +230,6 @@ struct register_image
 	unsigned int usp;	/* 0x66 User mode stack pointer */
 } registers;
 
-/************** Prototypes for local library functions ***********************/
-
-/* Copy of strcpy from libc. */
-static char *gdb_cris_strcpy (char *s1, const char *s2);
-
-/* Copy of strlen from libc. */
-static int gdb_cris_strlen (const char *s);
-
-/* Copy of memchr from libc. */
-static void *gdb_cris_memchr (const void *s, int c, int n);
-
-/* Copy of strtol from libc. Does only support base 16. */
-static int gdb_cris_strtol (const char *s, char **endptr, int base);
-
-/********************** Prototypes for local functions. **********************/
-/* Copy the content of a register image into another. The size n is
-   the size of the register image. Due to struct assignment generation of
-   memcpy in libc. */
-static void copy_registers (registers *dptr, registers *sptr, int n);
-
-/* Copy the stored registers from the stack. Put the register contents
-   of thread thread_id in the struct reg. */
-static void copy_registers_from_stack (int thread_id, registers *reg);
-
-/* Copy the registers to the stack. Put the register contents of thread
-   thread_id from struct reg to the stack. */
-static void copy_registers_to_stack (int thread_id, registers *reg);
-
-/* Write a value to a specified register regno in the register image
-   of the current thread. */
-static int write_register (int regno, char *val);
-
-/* Write a value to a specified register in the stack of a thread other
-   than the current thread. */
-static int write_stack_register(int thread_id, int regno, char *valptr);
-
-/* Read a value from a specified register in the register image. Returns the
-   status of the read operation. The register value is returned in valptr. */
-static int read_register (char regno, unsigned int *valptr);
-
 /* Serial port, reads one character. ETRAX 100 specific. from debugport.c */
 int getDebugChar (void);
 
@@ -278,42 +238,6 @@ void putDebugChar (int val);
 
 void enableDebugIRQ (void);
 
-/* Returns the integer equivalent of a hexadecimal character. */
-static int hex (char ch);
-
-/* Convert the memory, pointed to by mem into hexadecimal representation.
-   Put the result in buf, and return a pointer to the last character
-   in buf (null). */
-static char *mem2hex (char *buf, unsigned char *mem, int count);
-
-/* Convert the array, in hexadecimal representation, pointed to by buf into
-   binary representation. Put the result in mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char *hex2mem (unsigned char *mem, char *buf, int count);
-
-/* Put the content of the array, in binary representation, pointed to by buf
-   into memory pointed to by mem, and return a pointer to
-   the character after the last byte written. */
-static unsigned char *bin2mem (unsigned char *mem, unsigned char *buf, int count);
-
-/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
-   returned. */
-static void getpacket (char *buffer);
-
-/* Send $<data>#<checksum> from the <data> in the array buffer. */
-static void putpacket (char *buffer);
-
-/* Build and send a response packet in order to inform the host the
-   stub is stopped. */
-static void stub_is_stopped (int sigval);
-
-/* All expected commands are sent from remote.c. Send a response according
-   to the description in remote.c. */
-static void handle_exception (int sigval);
-
-/* Performs a complete re-start from scratch. ETRAX specific. */
-static void kill_restart (void);
-
 /******************** Prototypes for global functions. ***********************/
 
 /* The string str is prepended with the GDB printout token and sent. */
@@ -336,10 +260,6 @@ extern unsigned char executing_task;
 /* The number of characters used for a 64 bit thread identifier. */
 #define HEXCHARS_IN_THREAD_ID 16
 
-/* Avoid warning as the internal_stack is not used in the C-code. */
-#define USEDVAR(name)	{ if (name) { ; } }
-#define USEDFUN(name)	{ void (*pf)(void) = (void *)name; USEDVAR(pf) }
-
 /********************************** Packet I/O ******************************/
 /* BUFMAX defines the maximum number of characters in
    inbound/outbound buffers */
@@ -405,7 +325,7 @@ static int register_size[] =
 
 /* Contains the register image of the executing thread in the assembler
    part of the code in order to avoid horrible addressing modes. */
-static registers reg;
+registers cris_reg;
 
 /* FIXME: Should this be used? Delete otherwise. */
 /* Contains the assumed consistency state of the register image. Uses the
@@ -413,7 +333,7 @@ static registers reg;
 static int consistency_status = SUCCESS;
 
 /********************************** Handle exceptions ************************/
-/* The variable reg contains the register image associated with the
+/* The variable cris_reg contains the register image associated with the
    current_thread_c variable. It is a complete register image created at
    entry. The reg_g contains a register image of a task where the general
    registers are taken from the stack and all special registers are taken
@@ -421,18 +341,10 @@ static int consistency_status = SUCCESS;
    in order to provide access mainly for 'g', 'G' and 'P'.
 */
 
-/* Need two task id pointers in order to handle Hct and Hgt commands. */
-static int current_thread_c = 0;
-static int current_thread_g = 0;
-
-/* Need two register images in order to handle Hct and Hgt commands. The
-   variable reg_g is in addition to reg above. */
-static registers reg_g;
-
 /********************************** Breakpoint *******************************/
 /* Use an internal stack in the breakpoint and interrupt response routines */
 #define INTERNAL_STACK_SIZE 1024
-static char internal_stack[INTERNAL_STACK_SIZE];
+char internal_stack[INTERNAL_STACK_SIZE];
 
 /* Due to the breakpoint return pointer, a state variable is needed to keep
    track of whether it is a static (compiled) or dynamic (gdb-invoked)
@@ -500,164 +412,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base)
 	return x;
 }
 
-/********************************* Register image ****************************/
-/* Copy the content of a register image into another. The size n is
-   the size of the register image. Due to struct assignment generation of
-   memcpy in libc. */
-static void
-copy_registers (registers *dptr, registers *sptr, int n)
-{
-	unsigned char *dreg;
-	unsigned char *sreg;
-
-	for (dreg = (unsigned char*)dptr, sreg = (unsigned char*)sptr; n > 0; n--)
-		*dreg++ = *sreg++;
-}
-
-#ifdef PROCESS_SUPPORT
-/* Copy the stored registers from the stack. Put the register contents
-   of thread thread_id in the struct reg. */
-static void
-copy_registers_from_stack (int thread_id, registers *regptr)
-{
-	int j;
-	stack_registers *s = (stack_registers *)stack_list[thread_id];
-	unsigned int *d = (unsigned int *)regptr;
-
-	for (j = 13; j >= 0; j--)
-		*d++ = s->r[j];
-	regptr->sp = (unsigned int)stack_list[thread_id];
-	regptr->pc = s->pc;
-	regptr->dccr = s->dccr;
-	regptr->srp = s->srp;
-}
-
-/* Copy the registers to the stack. Put the register contents of thread
-   thread_id from struct reg to the stack. */
-static void
-copy_registers_to_stack (int thread_id, registers *regptr)
-{
-	int i;
-	stack_registers *d = (stack_registers *)stack_list[thread_id];
-	unsigned int *s = (unsigned int *)regptr;
-
-	for (i = 0; i < 14; i++) {
-		d->r[i] = *s++;
-	}
-	d->pc = regptr->pc;
-	d->dccr = regptr->dccr;
-	d->srp = regptr->srp;
-}
-#endif
-
-/* Write a value to a specified register in the register image of the current
-   thread. Returns status code SUCCESS, E02 or E05. */
-static int
-write_register (int regno, char *val)
-{
-	int status = SUCCESS;
-	registers *current_reg = &reg;
-
-	if (regno >= R0 && regno <= PC) {
-		/* 32-bit register with simple offset. */
-		hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int),
-			 val, sizeof(unsigned int));
-	}
-	else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
-		/* Do not support read-only registers. */
-		status = E02;
-	}
-	else if (regno == CCR) {
-		/* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented,
-		   and P7 (MOF) is 32 bits in ETRAX 100LX. */
-		hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
-			 val, sizeof(unsigned short));
-	}
-	else if (regno >= MOF && regno <= USP) {
-		/* 32 bit register with complex offset. (P8 has been taken care of.) */
-		hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
-			 val, sizeof(unsigned int));
-	}
-	else {
-		/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
-		status = E05;
-	}
-	return status;
-}
-
-#ifdef PROCESS_SUPPORT
-/* Write a value to a specified register in the stack of a thread other
-   than the current thread. Returns status code SUCCESS or E07. */
-static int
-write_stack_register (int thread_id, int regno, char *valptr)
-{
-	int status = SUCCESS;
-	stack_registers *d = (stack_registers *)stack_list[thread_id];
-	unsigned int val;
-
-	hex2mem ((unsigned char *)&val, valptr, sizeof(unsigned int));
-	if (regno >= R0 && regno < SP) {
-		d->r[regno] = val;
-	}
-	else if (regno == SP) {
-		stack_list[thread_id] = val;
-	}
-	else if (regno == PC) {
-		d->pc = val;
-	}
-	else if (regno == SRP) {
-		d->srp = val;
-	}
-	else if (regno == DCCR) {
-		d->dccr = val;
-	}
-	else {
-		/* Do not support registers in the current thread. */
-		status = E07;
-	}
-	return status;
-}
-#endif
-
-/* Read a value from a specified register in the register image. Returns the
-   value in the register or -1 for non-implemented registers.
-   Should check consistency_status after a call which may be E05 after changes
-   in the implementation. */
-static int
-read_register (char regno, unsigned int *valptr)
-{
-	registers *current_reg = &reg;
-
-	if (regno >= R0 && regno <= PC) {
-		/* 32-bit register with simple offset. */
-		*valptr = *(unsigned int *)((char *)current_reg + regno * sizeof(unsigned int));
-		return SUCCESS;
-	}
-	else if (regno == P0 || regno == VR) {
-		/* 8 bit register with complex offset. */
-		*valptr = (unsigned int)(*(unsigned char *)
-					 ((char *)&(current_reg->p0) + (regno-P0) * sizeof(char)));
-		return SUCCESS;
-	}
-	else if (regno == P4 || regno == CCR) {
-		/* 16 bit register with complex offset. */
-		*valptr = (unsigned int)(*(unsigned short *)
-					 ((char *)&(current_reg->p4) + (regno-P4) * sizeof(unsigned short)));
-		return SUCCESS;
-	}
-	else if (regno >= MOF && regno <= USP) {
-		/* 32 bit register with complex offset. */
-		*valptr = *(unsigned int *)((char *)&(current_reg->p8)
-					    + (regno-P8) * sizeof(unsigned int));
-		return SUCCESS;
-	}
-	else {
-		/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
-		consistency_status = E05;
-		return E05;
-	}
-}
-
 /********************************** Packet I/O ******************************/
 /* Returns the integer equivalent of a hexadecimal character. */
 static int
@@ -676,8 +430,6 @@ hex (char ch)
    Put the result in buf, and return a pointer to the last character
    in buf (null). */
 
-static int do_printk = 0;
-
 static char *
 mem2hex(char *buf, unsigned char *mem, int count)
 {
@@ -761,7 +513,7 @@ getpacket (char *buffer)
 	xmitcsum = -1;
 	count = 0;
 	/* Read until a # or the end of the buffer is reached */
-	while (count < BUFMAX) {
+	while (count < BUFMAX - 1) {
 		ch = getDebugChar ();
 		if (ch == '#')
 			break;
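
The getpacket() change is an off-by-one fix: the loop could previously run count up to BUFMAX, while the code after the loop still stores a terminating NUL at buffer[count], one past the end of the array. A sketch of the corrected invariant (simplified from the stub's actual loop):

/* Sketch: stop one short of the buffer size so the terminator written
 * after the loop always lands inside the array. */
char buffer[BUFMAX];
int count = 0;
char ch;

while (count < BUFMAX - 1) {	/* "< BUFMAX" allowed buffer[BUFMAX] below */
	ch = getDebugChar();
	if (ch == '#')
		break;
	buffer[count] = ch;
	count = count + 1;
}
buffer[count] = 0;		/* now always in range */
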
@@ -845,6 +597,81 @@ putDebugString (const unsigned char *str, int length)
 	putpacket(remcomOutBuffer);
 }
 
+/********************************* Register image ****************************/
+/* Write a value to a specified register in the register image of the current
+   thread. Returns status code SUCCESS, E02 or E05. */
+static int
+write_register (int regno, char *val)
+{
+	int status = SUCCESS;
+	registers *current_reg = &cris_reg;
+
+	if (regno >= R0 && regno <= PC) {
+		/* 32-bit register with simple offset. */
+		hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int),
+			 val, sizeof(unsigned int));
+	}
+	else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
+		/* Do not support read-only registers. */
+		status = E02;
+	}
+	else if (regno == CCR) {
+		/* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented,
+		   and P7 (MOF) is 32 bits in ETRAX 100LX. */
+		hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
+			 val, sizeof(unsigned short));
+	}
+	else if (regno >= MOF && regno <= USP) {
+		/* 32 bit register with complex offset. (P8 has been taken care of.) */
+		hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
+			 val, sizeof(unsigned int));
+	}
+	else {
+		/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
+		status = E05;
+	}
+	return status;
+}
+
+/* Read a value from a specified register in the register image. Returns the
+   value in the register or -1 for non-implemented registers.
+   Should check consistency_status after a call which may be E05 after changes
+   in the implementation. */
+static int
+read_register (char regno, unsigned int *valptr)
+{
+	registers *current_reg = &cris_reg;
+
+	if (regno >= R0 && regno <= PC) {
+		/* 32-bit register with simple offset. */
+		*valptr = *(unsigned int *)((char *)current_reg + regno * sizeof(unsigned int));
+		return SUCCESS;
+	}
+	else if (regno == P0 || regno == VR) {
+		/* 8 bit register with complex offset. */
+		*valptr = (unsigned int)(*(unsigned char *)
+					 ((char *)&(current_reg->p0) + (regno-P0) * sizeof(char)));
+		return SUCCESS;
+	}
+	else if (regno == P4 || regno == CCR) {
+		/* 16 bit register with complex offset. */
+		*valptr = (unsigned int)(*(unsigned short *)
+					 ((char *)&(current_reg->p4) + (regno-P4) * sizeof(unsigned short)));
+		return SUCCESS;
+	}
+	else if (regno >= MOF && regno <= USP) {
+		/* 32 bit register with complex offset. */
+		*valptr = *(unsigned int *)((char *)&(current_reg->p8)
+					    + (regno-P8) * sizeof(unsigned int));
+		return SUCCESS;
+	}
+	else {
+		/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
+		consistency_status = E05;
+		return E05;
+	}
+}
+
 /********************************** Handle exceptions ************************/
 /* Build and send a response packet in order to inform the host the
    stub is stopped. TAAn...:r...;n...:r...;n...:r...;
@@ -891,26 +718,6 @@ stub_is_stopped(int sigval)
 
 	}
 
-#ifdef PROCESS_SUPPORT
-	/* Store the registers of the executing thread. Assume that both step,
-	   continue, and register content requests are with respect to this
-	   thread. The executing task is from the operating system scheduler. */
-
-	current_thread_c = executing_task;
-	current_thread_g = executing_task;
-
-	/* A struct assignment translates into a libc memcpy call. Avoid
-	   all libc functions in order to prevent recursive break points. */
-	copy_registers (&reg_g, &reg, sizeof(registers));
-
-	/* Store thread:r...; with the executing task TID. */
-	gdb_cris_strcpy (&remcomOutBuffer[pos], "thread:");
-	pos += gdb_cris_strlen ("thread:");
-	remcomOutBuffer[pos++] = hex_asc_hi(executing_task);
-	remcomOutBuffer[pos++] = hex_asc_lo(executing_task);
-	gdb_cris_strcpy (&remcomOutBuffer[pos], ";");
-#endif
-
 	/* null-terminate and send it off */
 
 	putpacket (remcomOutBuffer);
@@ -918,16 +725,18 @@ stub_is_stopped(int sigval)
 	putpacket (remcomOutBuffer);
 }
 
+/* Performs a complete re-start from scratch. */
+static void
+kill_restart (void)
+{
+	machine_restart("");
+}
+
 /* All expected commands are sent from remote.c. Send a response according
    to the description in remote.c. */
-static void
+void
 handle_exception (int sigval)
 {
-	/* Avoid warning of not used. */
-
-	USEDFUN(handle_exception);
-	USEDVAR(internal_stack[0]);
-
 	/* Send response. */
 
 	stub_is_stopped (sigval);
@@ -943,19 +752,7 @@ handle_exception (int sigval)
 	   in a register are in the same order the machine uses.
 	   Failure: void. */
 
-			{
-#ifdef PROCESS_SUPPORT
-			/* Use the special register content in the executing thread. */
-			copy_registers (&reg_g, &reg, sizeof(registers));
-			/* Replace the content available on the stack. */
-			if (current_thread_g != executing_task) {
-				copy_registers_from_stack (current_thread_g, &reg_g);
-			}
-			mem2hex ((unsigned char *)remcomOutBuffer, (unsigned char *)&reg_g, sizeof(registers));
-#else
-			mem2hex(remcomOutBuffer, (char *)&reg, sizeof(registers));
-#endif
-			}
+			mem2hex(remcomOutBuffer, (char *)&cris_reg, sizeof(registers));
 			break;
 
 		case 'G':
@@ -963,17 +760,7 @@ handle_exception (int sigval)
 		   Each byte of register data is described by two hex digits.
 		   Success: OK
 		   Failure: void. */
-#ifdef PROCESS_SUPPORT
-			hex2mem ((unsigned char *)&reg_g, &remcomInBuffer[1], sizeof(registers));
-			if (current_thread_g == executing_task) {
-				copy_registers (&reg, &reg_g, sizeof(registers));
-			}
-			else {
-				copy_registers_to_stack(current_thread_g, &reg_g);
-			}
-#else
-			hex2mem((char *)&reg, &remcomInBuffer[1], sizeof(registers));
-#endif
+			hex2mem((char *)&cris_reg, &remcomInBuffer[1], sizeof(registers));
 			gdb_cris_strcpy (remcomOutBuffer, "OK");
 			break;
 
@@ -989,12 +776,7 @@ handle_exception (int sigval)
 				char *suffix;
 				int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16);
 				int status;
-#ifdef PROCESS_SUPPORT
-				if (current_thread_g != executing_task)
-					status = write_stack_register (current_thread_g, regno, suffix+1);
-				else
-#endif
-					status = write_register (regno, suffix+1);
+				status = write_register (regno, suffix+1);
 
 				switch (status) {
 					case E02:
@@ -1073,7 +855,7 @@ handle_exception (int sigval)
 		   Success: return to the executing thread.
 		   Failure: will never know. */
 			if (remcomInBuffer[1] != '\0') {
-				reg.pc = gdb_cris_strtol (&remcomInBuffer[1], 0, 16);
+				cris_reg.pc = gdb_cris_strtol (&remcomInBuffer[1], 0, 16);
 			}
 			enableDebugIRQ();
 			return;
@@ -1129,119 +911,6 @@ handle_exception (int sigval)
 		   Not supported: E04 */
 			gdb_cris_strcpy (remcomOutBuffer, error_message[E04]);
 			break;
-#ifdef PROCESS_SUPPORT
-
-		case 'T':
-		/* Thread alive. TXX
-		   Is thread XX alive?
-		   Success: OK, thread XX is alive.
-		   Failure: E03, thread XX is dead. */
-			{
-				int thread_id = (int)gdb_cris_strtol (&remcomInBuffer[1], 0, 16);
-				/* Cannot tell whether it is alive or not. */
-				if (thread_id >= 0 && thread_id < number_of_tasks)
-					gdb_cris_strcpy (remcomOutBuffer, "OK");
-			}
-			break;
-
-		case 'H':
-		/* Set thread for subsequent operations: Hct
-		   c = 'c' for thread used in step and continue;
-		   t can be -1 for all threads.
-		   c = 'g' for thread used in other operations.
-		   t = 0 means pick any thread.
-		   Success: OK
-		   Failure: E01 */
-			{
-				int thread_id = gdb_cris_strtol (&remcomInBuffer[2], 0, 16);
-				if (remcomInBuffer[1] == 'c') {
-					/* c = 'c' for thread used in step and continue */
-					/* Do not change current_thread_c here. It would create a mess in
-					   the scheduler. */
-					gdb_cris_strcpy (remcomOutBuffer, "OK");
-				}
-				else if (remcomInBuffer[1] == 'g') {
-					/* c = 'g' for thread used in other operations.
-					   t = 0 means pick any thread. Impossible since the scheduler does
-					   not allow that. */
-					if (thread_id >= 0 && thread_id < number_of_tasks) {
-						current_thread_g = thread_id;
-						gdb_cris_strcpy (remcomOutBuffer, "OK");
-					}
-					else {
-						/* Not expected - send an error message. */
-						gdb_cris_strcpy (remcomOutBuffer, error_message[E01]);
-					}
-				}
-				else {
-					/* Not expected - send an error message. */
-					gdb_cris_strcpy (remcomOutBuffer, error_message[E01]);
-				}
-			}
-			break;
-
-		case 'q':
-		case 'Q':
-		/* Query of general interest. qXXXX
-		   Set general value XXXX. QXXXX=yyyy */
-			{
-				int pos;
-				int nextpos;
-				int thread_id;
-
-				switch (remcomInBuffer[1]) {
-					case 'C':
-						/* Identify the remote current thread. */
-						gdb_cris_strcpy (&remcomOutBuffer[0], "QC");
-						remcomOutBuffer[2] = hex_asc_hi(current_thread_c);
-						remcomOutBuffer[3] = hex_asc_lo(current_thread_c);
-						remcomOutBuffer[4] = '\0';
-						break;
-					case 'L':
-						gdb_cris_strcpy (&remcomOutBuffer[0], "QM");
-						/* Reply with number of threads. */
-						if (os_is_started()) {
-							remcomOutBuffer[2] = hex_asc_hi(number_of_tasks);
-							remcomOutBuffer[3] = hex_asc_lo(number_of_tasks);
-						}
-						else {
-							remcomOutBuffer[2] = hex_asc_hi(0);
-							remcomOutBuffer[3] = hex_asc_lo(1);
-						}
-						/* Done with the reply. */
-						remcomOutBuffer[4] = hex_asc_lo(1);
-						pos = 5;
-						/* Expects the argument thread id. */
-						for (; pos < (5 + HEXCHARS_IN_THREAD_ID); pos++)
-							remcomOutBuffer[pos] = remcomInBuffer[pos];
-						/* Reply with the thread identifiers. */
-						if (os_is_started()) {
-							/* Store the thread identifiers of all tasks. */
-							for (thread_id = 0; thread_id < number_of_tasks; thread_id++) {
-								nextpos = pos + HEXCHARS_IN_THREAD_ID - 1;
-								for (; pos < nextpos; pos ++)
-									remcomOutBuffer[pos] = hex_asc_lo(0);
-								remcomOutBuffer[pos++] = hex_asc_lo(thread_id);
-							}
-						}
-						else {
-							/* Store the thread identifier of the boot task. */
-							nextpos = pos + HEXCHARS_IN_THREAD_ID - 1;
-							for (; pos < nextpos; pos ++)
-								remcomOutBuffer[pos] = hex_asc_lo(0);
-							remcomOutBuffer[pos++] = hex_asc_lo(current_thread_c);
-						}
-						remcomOutBuffer[pos] = '\0';
-						break;
-					default:
-						/* Not supported: "" */
-						/* Request information about section offsets: qOffsets. */
-						remcomOutBuffer[0] = 0;
-						break;
-				}
-			}
-			break;
-#endif /* PROCESS_SUPPORT */
 
 		default:
 			/* The stub should ignore other request and send an empty
@@ -1254,13 +923,6 @@ handle_exception (int sigval)
 	}
 }
 
-/* Performs a complete re-start from scratch. */
-static void
-kill_restart ()
-{
-	machine_restart("");
-}
-
 /********************************** Breakpoint *******************************/
 /* The hook for both a static (compiled) and a dynamic breakpoint set by GDB.
    An internal stack is used by the stub. The register image of the caller is
@@ -1270,93 +932,93 @@ kill_restart ()
1270 932
1271void kgdb_handle_breakpoint(void); 933void kgdb_handle_breakpoint(void);
1272 934
1273asm (" 935asm ("\n"
1274 .global kgdb_handle_breakpoint 936" .global kgdb_handle_breakpoint\n"
1275kgdb_handle_breakpoint: 937"kgdb_handle_breakpoint:\n"
1276;; 938";;\n"
1277;; Response to the break-instruction 939";; Response to the break-instruction\n"
1278;; 940";;\n"
1279;; Create a register image of the caller 941";; Create a register image of the caller\n"
1280;; 942";;\n"
1281 move $dccr,[reg+0x5E] ; Save the flags in DCCR before disable interrupts 943" move $dccr,[cris_reg+0x5E] ; Save the flags in DCCR before disable interrupts\n"
1282 di ; Disable interrupts 944" di ; Disable interrupts\n"
1283 move.d $r0,[reg] ; Save R0 945" move.d $r0,[cris_reg] ; Save R0\n"
1284 move.d $r1,[reg+0x04] ; Save R1 946" move.d $r1,[cris_reg+0x04] ; Save R1\n"
1285 move.d $r2,[reg+0x08] ; Save R2 947" move.d $r2,[cris_reg+0x08] ; Save R2\n"
1286 move.d $r3,[reg+0x0C] ; Save R3 948" move.d $r3,[cris_reg+0x0C] ; Save R3\n"
1287 move.d $r4,[reg+0x10] ; Save R4 949" move.d $r4,[cris_reg+0x10] ; Save R4\n"
1288 move.d $r5,[reg+0x14] ; Save R5 950" move.d $r5,[cris_reg+0x14] ; Save R5\n"
1289 move.d $r6,[reg+0x18] ; Save R6 951" move.d $r6,[cris_reg+0x18] ; Save R6\n"
1290 move.d $r7,[reg+0x1C] ; Save R7 952" move.d $r7,[cris_reg+0x1C] ; Save R7\n"
1291 move.d $r8,[reg+0x20] ; Save R8 953" move.d $r8,[cris_reg+0x20] ; Save R8\n"
1292 move.d $r9,[reg+0x24] ; Save R9 954" move.d $r9,[cris_reg+0x24] ; Save R9\n"
1293 move.d $r10,[reg+0x28] ; Save R10 955" move.d $r10,[cris_reg+0x28] ; Save R10\n"
1294 move.d $r11,[reg+0x2C] ; Save R11 956" move.d $r11,[cris_reg+0x2C] ; Save R11\n"
1295 move.d $r12,[reg+0x30] ; Save R12 957" move.d $r12,[cris_reg+0x30] ; Save R12\n"
1296 move.d $r13,[reg+0x34] ; Save R13 958" move.d $r13,[cris_reg+0x34] ; Save R13\n"
1297 move.d $sp,[reg+0x38] ; Save SP (R14) 959" move.d $sp,[cris_reg+0x38] ; Save SP (R14)\n"
1298;; Due to the old assembler-versions BRP might not be recognized 960";; Due to the old assembler-versions BRP might not be recognized\n"
1299 .word 0xE670 ; move brp,$r0 961" .word 0xE670 ; move brp,$r0\n"
1300 subq 2,$r0 ; Set to address of previous instruction. 962" subq 2,$r0 ; Set to address of previous instruction.\n"
1301 move.d $r0,[reg+0x3c] ; Save the address in PC (R15) 963" move.d $r0,[cris_reg+0x3c] ; Save the address in PC (R15)\n"
1302 clear.b [reg+0x40] ; Clear P0 964" clear.b [cris_reg+0x40] ; Clear P0\n"
1303 move $vr,[reg+0x41] ; Save special register P1 965" move $vr,[cris_reg+0x41] ; Save special register P1\n"
1304 clear.w [reg+0x42] ; Clear P4 966" clear.w [cris_reg+0x42] ; Clear P4\n"
1305 move $ccr,[reg+0x44] ; Save special register CCR 967" move $ccr,[cris_reg+0x44] ; Save special register CCR\n"
1306 move $mof,[reg+0x46] ; P7 968" move $mof,[cris_reg+0x46] ; P7\n"
1307 clear.d [reg+0x4A] ; Clear P8 969" clear.d [cris_reg+0x4A] ; Clear P8\n"
1308 move $ibr,[reg+0x4E] ; P9, 970" move $ibr,[cris_reg+0x4E] ; P9,\n"
-	move	$irp,[reg+0x52]	; P10,
-	move	$srp,[reg+0x56]	; P11,
-	move	$dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR
-			; P13, register DCCR already saved
-;; Due to the old assembler-versions BRP might not be recognized
-	.word	0xE670		; move brp,r0
-;; Static (compiled) breakpoints must return to the next instruction in order
-;; to avoid infinite loops. Dynamic (gdb-invoked) must restore the instruction
-;; in order to execute it when execution is continued.
-	test.b	[is_dyn_brkp]	; Is this a dynamic breakpoint?
-	beq	is_static	; No, a static breakpoint
-	nop
-	subq	2,$r0		; rerun the instruction the break replaced
-is_static:
-	moveq	1,$r1
-	move.b	$r1,[is_dyn_brkp] ; Set the state variable to dynamic breakpoint
-	move.d	$r0,[reg+0x62]	; Save the return address in BRP
-	move	$usp,[reg+0x66]	; USP
-;;
-;; Handle the communication
-;;
-	move.d	internal_stack+1020,$sp ; Use the internal stack which grows upward
-	moveq	5,$r10		; SIGTRAP
-	jsr	handle_exception ; Interactive routine
-;;
-;; Return to the caller
-;;
-	move.d	[reg],$r0	; Restore R0
-	move.d	[reg+0x04],$r1	; Restore R1
-	move.d	[reg+0x08],$r2	; Restore R2
-	move.d	[reg+0x0C],$r3	; Restore R3
-	move.d	[reg+0x10],$r4	; Restore R4
-	move.d	[reg+0x14],$r5	; Restore R5
-	move.d	[reg+0x18],$r6	; Restore R6
-	move.d	[reg+0x1C],$r7	; Restore R7
-	move.d	[reg+0x20],$r8	; Restore R8
-	move.d	[reg+0x24],$r9	; Restore R9
-	move.d	[reg+0x28],$r10	; Restore R10
-	move.d	[reg+0x2C],$r11	; Restore R11
-	move.d	[reg+0x30],$r12	; Restore R12
-	move.d	[reg+0x34],$r13	; Restore R13
-;;
-;; FIXME: Which registers should be restored?
-;;
-	move.d	[reg+0x38],$sp	; Restore SP (R14)
-	move	[reg+0x56],$srp	; Restore the subroutine return pointer.
-	move	[reg+0x5E],$dccr ; Restore DCCR
-	move	[reg+0x66],$usp	; Restore USP
-	jump	[reg+0x62]	; A jump to the content in register BRP works.
-	nop	;
-");
+"  move    $irp,[cris_reg+0x52]  ; P10,\n"
+"  move    $srp,[cris_reg+0x56]  ; P11,\n"
+"  move    $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n"
+"          ; P13, register DCCR already saved\n"
+";; Due to the old assembler-versions BRP might not be recognized\n"
+"  .word   0xE670 ; move brp,r0\n"
+";; Static (compiled) breakpoints must return to the next instruction in order\n"
+";; to avoid infinite loops. Dynamic (gdb-invoked) must restore the instruction\n"
+";; in order to execute it when execution is continued.\n"
+"  test.b  [is_dyn_brkp] ; Is this a dynamic breakpoint?\n"
+"  beq     is_static ; No, a static breakpoint\n"
+"  nop\n"
+"  subq    2,$r0 ; rerun the instruction the break replaced\n"
+"is_static:\n"
+"  moveq   1,$r1\n"
+"  move.b  $r1,[is_dyn_brkp] ; Set the state variable to dynamic breakpoint\n"
+"  move.d  $r0,[cris_reg+0x62] ; Save the return address in BRP\n"
+"  move    $usp,[cris_reg+0x66] ; USP\n"
+";;\n"
+";; Handle the communication\n"
+";;\n"
+"  move.d  internal_stack+1020,$sp ; Use the internal stack which grows upward\n"
+"  moveq   5,$r10 ; SIGTRAP\n"
+"  jsr     handle_exception ; Interactive routine\n"
+";;\n"
+";; Return to the caller\n"
+";;\n"
+"  move.d  [cris_reg],$r0 ; Restore R0\n"
+"  move.d  [cris_reg+0x04],$r1 ; Restore R1\n"
+"  move.d  [cris_reg+0x08],$r2 ; Restore R2\n"
+"  move.d  [cris_reg+0x0C],$r3 ; Restore R3\n"
+"  move.d  [cris_reg+0x10],$r4 ; Restore R4\n"
+"  move.d  [cris_reg+0x14],$r5 ; Restore R5\n"
+"  move.d  [cris_reg+0x18],$r6 ; Restore R6\n"
+"  move.d  [cris_reg+0x1C],$r7 ; Restore R7\n"
+"  move.d  [cris_reg+0x20],$r8 ; Restore R8\n"
+"  move.d  [cris_reg+0x24],$r9 ; Restore R9\n"
+"  move.d  [cris_reg+0x28],$r10 ; Restore R10\n"
+"  move.d  [cris_reg+0x2C],$r11 ; Restore R11\n"
+"  move.d  [cris_reg+0x30],$r12 ; Restore R12\n"
+"  move.d  [cris_reg+0x34],$r13 ; Restore R13\n"
+";;\n"
+";; FIXME: Which registers should be restored?\n"
+";;\n"
+"  move.d  [cris_reg+0x38],$sp ; Restore SP (R14)\n"
+"  move    [cris_reg+0x56],$srp ; Restore the subroutine return pointer.\n"
+"  move    [cris_reg+0x5E],$dccr ; Restore DCCR\n"
+"  move    [cris_reg+0x66],$usp ; Restore USP\n"
+"  jump    [cris_reg+0x62] ; A jump to the content in register BRP works.\n"
+"  nop ;\n"
+"\n");
 
 /* The hook for an interrupt generated by GDB. An internal stack is used
    by the stub. The register image of the caller is stored in the structure
@@ -1367,94 +1029,94 @@ is_static:
 
 void kgdb_handle_serial(void);
 
-asm ("
-  .global kgdb_handle_serial
-kgdb_handle_serial:
-;;
-;; Response to a serial interrupt
-;;
-
-	move	$dccr,[reg+0x5E] ; Save the flags in DCCR
-	di			; Disable interrupts
-	move.d	$r0,[reg]	; Save R0
-	move.d	$r1,[reg+0x04]	; Save R1
-	move.d	$r2,[reg+0x08]	; Save R2
-	move.d	$r3,[reg+0x0C]	; Save R3
-	move.d	$r4,[reg+0x10]	; Save R4
-	move.d	$r5,[reg+0x14]	; Save R5
-	move.d	$r6,[reg+0x18]	; Save R6
-	move.d	$r7,[reg+0x1C]	; Save R7
-	move.d	$r8,[reg+0x20]	; Save R8
-	move.d	$r9,[reg+0x24]	; Save R9
-	move.d	$r10,[reg+0x28]	; Save R10
-	move.d	$r11,[reg+0x2C]	; Save R11
-	move.d	$r12,[reg+0x30]	; Save R12
-	move.d	$r13,[reg+0x34]	; Save R13
-	move.d	$sp,[reg+0x38]	; Save SP (R14)
-	move	$irp,[reg+0x3c]	; Save the address in PC (R15)
-	clear.b	[reg+0x40]	; Clear P0
-	move	$vr,[reg+0x41]	; Save special register P1,
-	clear.w	[reg+0x42]	; Clear P4
-	move	$ccr,[reg+0x44]	; Save special register CCR
-	move	$mof,[reg+0x46]	; P7
-	clear.d	[reg+0x4A]	; Clear P8
-	move	$ibr,[reg+0x4E]	; P9,
-	move	$irp,[reg+0x52]	; P10,
-	move	$srp,[reg+0x56]	; P11,
-	move	$dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR
-			; P13, register DCCR already saved
-;; Due to the old assembler-versions BRP might not be recognized
-	.word	0xE670		; move brp,r0
-	move.d	$r0,[reg+0x62]	; Save the return address in BRP
-	move	$usp,[reg+0x66]	; USP
-
-;; get the serial character (from debugport.c) and check if it is a ctrl-c
-
-	jsr	getDebugChar
-	cmp.b	3, $r10
-	bne	goback
-	nop
-
-	move.d	[reg+0x5E], $r10 ; Get DCCR
-	btstq	8, $r10		; Test the U-flag.
-	bmi	goback
-	nop
-
-;;
-;; Handle the communication
-;;
-	move.d	internal_stack+1020,$sp ; Use the internal stack
-	moveq	2,$r10		; SIGINT
-	jsr	handle_exception ; Interactive routine
-
-goback:
-;;
-;; Return to the caller
-;;
-	move.d	[reg],$r0	; Restore R0
-	move.d	[reg+0x04],$r1	; Restore R1
-	move.d	[reg+0x08],$r2	; Restore R2
-	move.d	[reg+0x0C],$r3	; Restore R3
-	move.d	[reg+0x10],$r4	; Restore R4
-	move.d	[reg+0x14],$r5	; Restore R5
-	move.d	[reg+0x18],$r6	; Restore R6
-	move.d	[reg+0x1C],$r7	; Restore R7
-	move.d	[reg+0x20],$r8	; Restore R8
-	move.d	[reg+0x24],$r9	; Restore R9
-	move.d	[reg+0x28],$r10	; Restore R10
-	move.d	[reg+0x2C],$r11	; Restore R11
-	move.d	[reg+0x30],$r12	; Restore R12
-	move.d	[reg+0x34],$r13	; Restore R13
-;;
-;; FIXME: Which registers should be restored?
-;;
-	move.d	[reg+0x38],$sp	; Restore SP (R14)
-	move	[reg+0x56],$srp	; Restore the subroutine return pointer.
-	move	[reg+0x5E],$dccr ; Restore DCCR
-	move	[reg+0x66],$usp	; Restore USP
-	reti			; Return from the interrupt routine
-	nop
-");
+asm ("\n"
+"  .global kgdb_handle_serial\n"
+"kgdb_handle_serial:\n"
+";;\n"
+";; Response to a serial interrupt\n"
+";;\n"
+"\n"
+"  move    $dccr,[cris_reg+0x5E] ; Save the flags in DCCR\n"
+"  di      ; Disable interrupts\n"
+"  move.d  $r0,[cris_reg] ; Save R0\n"
+"  move.d  $r1,[cris_reg+0x04] ; Save R1\n"
+"  move.d  $r2,[cris_reg+0x08] ; Save R2\n"
+"  move.d  $r3,[cris_reg+0x0C] ; Save R3\n"
+"  move.d  $r4,[cris_reg+0x10] ; Save R4\n"
+"  move.d  $r5,[cris_reg+0x14] ; Save R5\n"
+"  move.d  $r6,[cris_reg+0x18] ; Save R6\n"
+"  move.d  $r7,[cris_reg+0x1C] ; Save R7\n"
+"  move.d  $r8,[cris_reg+0x20] ; Save R8\n"
+"  move.d  $r9,[cris_reg+0x24] ; Save R9\n"
+"  move.d  $r10,[cris_reg+0x28] ; Save R10\n"
+"  move.d  $r11,[cris_reg+0x2C] ; Save R11\n"
+"  move.d  $r12,[cris_reg+0x30] ; Save R12\n"
+"  move.d  $r13,[cris_reg+0x34] ; Save R13\n"
+"  move.d  $sp,[cris_reg+0x38] ; Save SP (R14)\n"
+"  move    $irp,[cris_reg+0x3c] ; Save the address in PC (R15)\n"
+"  clear.b [cris_reg+0x40] ; Clear P0\n"
+"  move    $vr,[cris_reg+0x41] ; Save special register P1,\n"
+"  clear.w [cris_reg+0x42] ; Clear P4\n"
+"  move    $ccr,[cris_reg+0x44] ; Save special register CCR\n"
+"  move    $mof,[cris_reg+0x46] ; P7\n"
+"  clear.d [cris_reg+0x4A] ; Clear P8\n"
+"  move    $ibr,[cris_reg+0x4E] ; P9,\n"
+"  move    $irp,[cris_reg+0x52] ; P10,\n"
+"  move    $srp,[cris_reg+0x56] ; P11,\n"
+"  move    $dtp0,[cris_reg+0x5A] ; P12, register BAR, assembler might not know BAR\n"
+"          ; P13, register DCCR already saved\n"
+";; Due to the old assembler-versions BRP might not be recognized\n"
+"  .word   0xE670 ; move brp,r0\n"
+"  move.d  $r0,[cris_reg+0x62] ; Save the return address in BRP\n"
+"  move    $usp,[cris_reg+0x66] ; USP\n"
+"\n"
+";; get the serial character (from debugport.c) and check if it is a ctrl-c\n"
+"\n"
+"  jsr     getDebugChar\n"
+"  cmp.b   3, $r10\n"
+"  bne     goback\n"
+"  nop\n"
+"\n"
+"  move.d  [cris_reg+0x5E], $r10 ; Get DCCR\n"
+"  btstq   8, $r10 ; Test the U-flag.\n"
+"  bmi     goback\n"
+"  nop\n"
+"\n"
+";;\n"
+";; Handle the communication\n"
+";;\n"
+"  move.d  internal_stack+1020,$sp ; Use the internal stack\n"
+"  moveq   2,$r10 ; SIGINT\n"
+"  jsr     handle_exception ; Interactive routine\n"
+"\n"
+"goback:\n"
+";;\n"
+";; Return to the caller\n"
+";;\n"
+"  move.d  [cris_reg],$r0 ; Restore R0\n"
+"  move.d  [cris_reg+0x04],$r1 ; Restore R1\n"
+"  move.d  [cris_reg+0x08],$r2 ; Restore R2\n"
+"  move.d  [cris_reg+0x0C],$r3 ; Restore R3\n"
+"  move.d  [cris_reg+0x10],$r4 ; Restore R4\n"
+"  move.d  [cris_reg+0x14],$r5 ; Restore R5\n"
+"  move.d  [cris_reg+0x18],$r6 ; Restore R6\n"
+"  move.d  [cris_reg+0x1C],$r7 ; Restore R7\n"
+"  move.d  [cris_reg+0x20],$r8 ; Restore R8\n"
+"  move.d  [cris_reg+0x24],$r9 ; Restore R9\n"
+"  move.d  [cris_reg+0x28],$r10 ; Restore R10\n"
+"  move.d  [cris_reg+0x2C],$r11 ; Restore R11\n"
+"  move.d  [cris_reg+0x30],$r12 ; Restore R12\n"
+"  move.d  [cris_reg+0x34],$r13 ; Restore R13\n"
+";;\n"
+";; FIXME: Which registers should be restored?\n"
+";;\n"
+"  move.d  [cris_reg+0x38],$sp ; Restore SP (R14)\n"
+"  move    [cris_reg+0x56],$srp ; Restore the subroutine return pointer.\n"
+"  move    [cris_reg+0x5E],$dccr ; Restore DCCR\n"
+"  move    [cris_reg+0x66],$usp ; Restore USP\n"
+"  reti    ; Return from the interrupt routine\n"
+"  nop\n"
+"\n");
 
 /* Use this static breakpoint in the start-up only. */
 
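A note on the two rewritten asm() blocks above: the original kgdb.c relied on an old GCC extension that let a single string literal span many source lines, which modern compilers reject. The fix gives every assembly line its own literal ending in "\n" and lets the compiler concatenate the adjacent strings. A minimal sketch of the idiom, using two save instructions taken from the sequence above:

```c
/* Adjacent string literals are merged at compile time, so each assembly
 * line becomes its own quoted literal with an explicit "\n" terminator. */
asm("  move.d $r0,[cris_reg]\n"       /* save R0 at offset 0x00 */
    "  move.d $r1,[cris_reg+0x04]\n"  /* save R1 at offset 0x04 */
);
```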
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index ab725edbc680..acff3df8c43f 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -640,8 +640,6 @@ config ETRAX_STREAMCOPROC
 	  This option enables a driver for the stream co-processor
 	  for cryptographic operations.
 
-source drivers/mmc/Kconfig
-
 config ETRAX_MMC_IOP
 	tristate "MMC/SD host driver using IO-processor"
 	depends on ETRAX_ARCH_V32 && MMC
@@ -833,9 +831,4 @@ config ETRAX_SPI_MMC_WP_GPIO_PIN
 	  The pin to use for the SD/MMC write-protect signal for a memory
 	  card. If defined as " " (space), the card is considered writable.
 
-# Avoid choices causing non-working configs by conditionalizing the inclusion.
-if ETRAX_SPI_MMC
-source drivers/spi/Kconfig
-endif
-
 endif
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index f1e79edc9dd2..c8325455520e 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -5,5 +5,9 @@ header-y += arch-v32/
 
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += kvm_para.h
+generic-y += linkage.h
 generic-y += module.h
 generic-y += trace_clock.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index ac12ae2b9286..5d3047e5563b 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -167,6 +167,9 @@ static inline void outsl(unsigned int port, const void *addr,
 		cris_iops->write_io(port, (void *)addr, 4, count);
 }
 
+#define inb_p(port)		inb(port)
+#define outb_p(val, port)	outb((val), (port))
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
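The two added defines make the ISA-style "pause" accessors available on CRIS: on old x86 hardware inb_p()/outb_p() inserted a short delay after the port access, but CRIS has no such requirement, so aliasing them to the plain inb()/outb() is enough to let portable drivers compile. A hedged illustration (read_status() is a made-up helper, not from this tree):

```c
/* Hypothetical driver fragment: with the aliases above, the legacy
 * "pause" accessor compiles on CRIS exactly like the plain one. */
static inline unsigned char read_status(unsigned int port)
{
	return inb_p(port);	/* expands to inb(port) on CRIS */
}
```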
diff --git a/arch/cris/include/asm/linkage.h b/arch/cris/include/asm/linkage.h
deleted file mode 100644
index 291c2d01c44f..000000000000
--- a/arch/cris/include/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
index e9a8cc63ac94..a7d0bea9c036 100644
--- a/arch/frv/kernel/head.S
+++ b/arch/frv/kernel/head.S
@@ -479,11 +479,6 @@ __head_mmu_enabled:
 
 	LEDS		0x000c
 
-	# initialise the processor and the peripherals
-	#call	SYMBOL_NAME(processor_init)
-	#call	SYMBOL_NAME(unit_init)
-	#LEDS	0x0aff
-
 	sethi.p		#0xe5e5,gr3
 	setlo		#0xe5e5,gr3
 	or.p		gr3,gr0,gr4
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 303e4f9a79d1..3d6759ee382f 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -94,126 +94,10 @@ endmenu
94 94
95source "net/Kconfig" 95source "net/Kconfig"
96 96
97source "drivers/base/Kconfig" 97source "drivers/Kconfig"
98
99source "drivers/mtd/Kconfig"
100
101source "drivers/block/Kconfig"
102
103source "drivers/ide/Kconfig"
104 98
105source "arch/h8300/Kconfig.ide" 99source "arch/h8300/Kconfig.ide"
106 100
107source "drivers/net/Kconfig"
108
109#
110# input - input/joystick depends on it. As does USB.
111#
112source "drivers/input/Kconfig"
113
114menu "Character devices"
115
116config VT
117 bool "Virtual terminal"
118 ---help---
119 If you say Y here, you will get support for terminal devices with
120 display and keyboard devices. These are called "virtual" because you
121 can run several virtual terminals (also called virtual consoles) on
122 one physical terminal. This is rather useful, for example one
123 virtual terminal can collect system messages and warnings, another
124 one can be used for a text-mode user session, and a third could run
125 an X session, all in parallel. Switching between virtual terminals
126 is done with certain key combinations, usually Alt-<function key>.
127
128 The setterm command ("man setterm") can be used to change the
129 properties (such as colors or beeping) of a virtual terminal. The
130 man page console_codes(4) ("man console_codes") contains the special
131 character sequences that can be used to change those properties
132 directly. The fonts used on virtual terminals can be changed with
133 the setfont ("man setfont") command and the key bindings are defined
134 with the loadkeys ("man loadkeys") command.
135
136 You need at least one virtual terminal device in order to make use
137 of your keyboard and monitor. Therefore, only people configuring an
138 embedded system would want to say N here in order to save some
139 memory; the only way to log into such a system is then via a serial
140 or network connection.
141
142 If unsure, say Y, or else you won't be able to do much with your new
143 shiny Linux system :-)
144
145config VT_CONSOLE
146 bool "Support for console on virtual terminal"
147 depends on VT
148 ---help---
149 The system console is the device which receives all kernel messages
150 and warnings and which allows logins in single user mode. If you
151 answer Y here, a virtual terminal (the device used to interact with
152 a physical terminal) can be used as system console. This is the most
153 common mode of operations, so you should say Y here unless you want
154 the kernel messages be output only to a serial port (in which case
155 you should say Y to "Console on serial port", below).
156
157 If you do say Y here, by default the currently visible virtual
158 terminal (/dev/tty0) will be used as system console. You can change
159 that with a kernel command line option such as "console=tty3" which
160 would use the third virtual terminal as system console. (Try "man
161 bootparam" or see the documentation of your boot loader (lilo or
162 loadlin) about how to pass options to the kernel at boot time.)
163
164 If unsure, say Y.
165
166config HW_CONSOLE
167 bool
168 depends on VT
169 default y
170
171comment "Unix98 PTY support"
172
173config UNIX98_PTYS
174 bool "Unix98 PTY support"
175 ---help---
176 A pseudo terminal (PTY) is a software device consisting of two
177 halves: a master and a slave. The slave device behaves identical to
178 a physical terminal; the master device is used by a process to
179 read data from and write data to the slave, thereby emulating a
180 terminal. Typical programs for the master side are telnet servers
181 and xterms.
182
183 Linux has traditionally used the BSD-like names /dev/ptyxx for
184 masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
185 has a number of problems. The GNU C library glibc 2.1 and later,
186 however, supports the Unix98 naming standard: in order to acquire a
187 pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
188 terminal is then made available to the process and the pseudo
189 terminal slave can be accessed as /dev/pts/<number>. What was
190 traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
191
192 The entries in /dev/pts/ are created on the fly by a virtual
193 file system; therefore, if you say Y here you should say Y to
194 "/dev/pts file system for Unix98 PTYs" as well.
195
196 If you want to say Y here, you need to have the C library glibc 2.1
197 or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
198 Read the instructions in <file:Documentation/Changes> pertaining to
199 pseudo terminals. It's safe to say N.
200
201source "drivers/char/pcmcia/Kconfig"
202
203source "drivers/tty/serial/Kconfig"
204
205source "drivers/i2c/Kconfig"
206
207source "drivers/hwmon/Kconfig"
208
209source "drivers/usb/Kconfig"
210
211source "drivers/uwb/Kconfig"
212
213endmenu
214
215source "drivers/staging/Kconfig"
216
217source "fs/Kconfig" 101source "fs/Kconfig"
218 102
219source "arch/h8300/Kconfig.debug" 103source "arch/h8300/Kconfig.debug"
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index 321f3922728b..cdee771460ed 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -64,6 +64,7 @@ choice
 
 config H83002
 	bool "H8/3001,3002,3003"
+	depends on BROKEN
 	select CPU_H8300H
 
 config H83007
@@ -72,6 +73,7 @@ config H83007
 
 config H83048
 	bool "H8/3044,3045,3046,3047,3048,3052"
+	depends on BROKEN
 	select CPU_H8300H
 
 config H83068
@@ -155,10 +157,12 @@ config H8300_TIMER16_CH
 config H8300_ITU_CH
 	int "ITU channel"
 	depends on H8300_ITU
+	range 0 4
 
 config H8300_TPU_CH
 	int "TPU channel"
	depends on H8300_TPU
+	range 0 4
 
 source "kernel/Kconfig.preempt"
 
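The added `range 0 4` lines make Kconfig reject channel numbers outside the hardware's range at configuration time instead of producing a broken build. The general pattern, sketched with a made-up symbol:

```
config EXAMPLE_CH
	int "Example channel"
	range 0 4	# menuconfig refuses values outside 0..4
	default 0
```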
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 6745cb1ffb4f..a6c98fe3bbc3 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -16,7 +16,7 @@ OBJECTS = $(obj)/head.o $(obj)/misc.o
 #
 CONFIG_MEMORY_START ?= 0x00400000
 CONFIG_BOOT_LINK_OFFSET ?= 0x00140000
-IMAGE_OFFSET := $(shell printf "0x%08x" $$[$(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET)])
+IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
 
 LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup $(obj)/vmlinux.lds
 
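The recipe fix replaces `$[...]`, an obsolete bash-only arithmetic form, with the POSIX `$((...))` expansion, so the build also works when /bin/sh is dash; the doubled `$$` simply escapes the dollar sign from make. A small sketch using the default values above (the `demo` target is hypothetical):

```make
# Prints 0x00540000: POSIX shell arithmetic invoked from a make recipe.
demo:
	@printf "0x%08x\n" $$((0x00400000 + 0x00140000))
```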
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 995eb47e01bb..8ada3cf0c98d 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,6 +1,8 @@
 
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += mmu.h
 generic-y += module.h
 generic-y += trace_clock.h
+generic-y += xor.h
diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h
index c7283c343c55..9e0aa9fc195d 100644
--- a/arch/h8300/include/asm/barrier.h
+++ b/arch/h8300/include/asm/barrier.h
@@ -12,6 +12,8 @@
 #define wmb()   asm volatile (""   : : :"memory")
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
+#define read_barrier_depends()	do { } while (0)
+
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
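read_barrier_depends() is the barrier the generic code expects between loading a pointer and dereferencing it; only DEC Alpha reorders such dependent loads, so on h8300 an empty statement is correct. A hedged sketch of the consumer pattern it exists for (struct item and consume() are illustrative, not from this tree):

```c
struct item { int value; };

int consume(struct item **shared)
{
	struct item *p = *shared;	/* load the pointer */
	read_barrier_depends();		/* no-op here; a real barrier only on Alpha */
	return p->value;		/* dependent load through p */
}
```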
diff --git a/arch/h8300/include/asm/linkage.h b/arch/h8300/include/asm/linkage.h
deleted file mode 100644
index 1d81604fb0ad..000000000000
--- a/arch/h8300/include/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_LINKAGE_H
-#define _H8300_LINKAGE_H
-
-#undef SYMBOL_NAME_LABEL
-#define SYMBOL_NAME_LABEL(_name_) _##_name_##:
-#endif
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
index 3dea80ad9e6f..7f0743051ad5 100644
--- a/arch/h8300/include/asm/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
@@ -1,16 +1,3 @@
-/*
-	include/asm-h8300/tlb.h
-*/
-
-#ifndef __H8300_TLB_H__
-#define __H8300_TLB_H__
-
-#define tlb_flush(tlb)	do { } while(0)
-
-/*
-	include/asm-h8300/tlb.h
-*/
-
 #ifndef __H8300_TLB_H__
 #define __H8300_TLB_H__
 
@@ -19,5 +6,3 @@
 #include <asm-generic/tlb.h>
 
 #endif
-
-#endif
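The old header carried two pasted copies of itself; because both copies reused the same guard macro, everything after the second `#ifndef __H8300_TLB_H__` appears to have been compiled out, and the cleanup keeps a single guarded copy. The canonical shape, sketched with a placeholder guard name:

```c
/* One guard, wrapping the whole header exactly once. */
#ifndef __EXAMPLE_TLB_H__
#define __EXAMPLE_TLB_H__

#define tlb_flush(tlb) do { } while (0)

#include <asm-generic/tlb.h>

#endif /* __EXAMPLE_TLB_H__ */
```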
diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S
index 617a6878787f..94bd30f11df6 100644
--- a/arch/h8300/kernel/entry.S
+++ b/arch/h8300/kernel/entry.S
@@ -87,13 +87,13 @@ INTERRUPTS = 128
87 bne 5f 87 bne 5f
88 88
89 /* user mode */ 89 /* user mode */
90 mov.l sp,@SYMBOL_NAME(sw_usp) 90 mov.l sp,@_sw_usp
91 mov.l @sp,er0 /* restore saved er0 */ 91 mov.l @sp,er0 /* restore saved er0 */
92 orc #0x10,ccr /* switch kernel stack */ 92 orc #0x10,ccr /* switch kernel stack */
93 mov.l @SYMBOL_NAME(sw_ksp),sp 93 mov.l @_sw_ksp,sp
94 sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */ 94 sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */
95 SAVEREGS 95 SAVEREGS
96 mov.l @SYMBOL_NAME(sw_usp),er0 96 mov.l @_sw_usp,er0
97 mov.l @(USERRET:16,er0),er1 /* copy the RET addr */ 97 mov.l @(USERRET:16,er0),er1 /* copy the RET addr */
98 mov.l er1,@(LRET-LER3:16,sp) 98 mov.l er1,@(LRET-LER3:16,sp)
99 SAVEEXR 99 SAVEEXR
@@ -128,7 +128,7 @@ INTERRUPTS = 128
128 bne 7f 128 bne 7f
129 129
130 orc #0x80,ccr 130 orc #0x80,ccr
131 mov.l @SYMBOL_NAME(sw_usp),er0 131 mov.l @_sw_usp,er0
132 mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */ 132 mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */
133 mov.l er1,@er0 133 mov.l er1,@er0
134 RESTOREEXR 134 RESTOREEXR
@@ -141,7 +141,7 @@ INTERRUPTS = 128
141 141
142 mov.l @sp+,er1 142 mov.l @sp+,er1
143 add.l #(LRET-LER1),sp /* remove LORIG - LRET */ 143 add.l #(LRET-LER1),sp /* remove LORIG - LRET */
144 mov.l sp,@SYMBOL_NAME(sw_ksp) 144 mov.l sp,@_sw_ksp
145 andc #0xef,ccr /* switch to user mode */ 145 andc #0xef,ccr /* switch to user mode */
146 mov.l er0,sp 146 mov.l er0,sp
147 bra 8f 147 bra 8f
@@ -155,20 +155,20 @@ INTERRUPTS = 128
155 rte 155 rte
156 .endm 156 .endm
157 157
158.globl SYMBOL_NAME(system_call) 158.globl _system_call
159.globl SYMBOL_NAME(ret_from_exception) 159.globl _ret_from_exception
160.globl SYMBOL_NAME(ret_from_fork) 160.globl _ret_from_fork
161.globl SYMBOL_NAME(ret_from_kernel_thread) 161.globl _ret_from_kernel_thread
162.globl SYMBOL_NAME(ret_from_interrupt) 162.globl _ret_from_interrupt
163.globl SYMBOL_NAME(interrupt_redirect_table) 163.globl _interrupt_redirect_table
164.globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp) 164.globl _sw_ksp,_sw_usp
165.globl SYMBOL_NAME(resume) 165.globl _resume
166.globl SYMBOL_NAME(interrupt_entry) 166.globl _interrupt_entry
167.globl SYMBOL_NAME(trace_break) 167.globl _trace_break
168 168
169#if defined(CONFIG_ROMKERNEL) 169#if defined(CONFIG_ROMKERNEL)
170 .section .int_redirect,"ax" 170 .section .int_redirect,"ax"
171SYMBOL_NAME_LABEL(interrupt_redirect_table) 171_interrupt_redirect_table:
172#if defined(CONFIG_CPU_H8300H) 172#if defined(CONFIG_CPU_H8300H)
173 .rept 7 173 .rept 7
174 .long 0 174 .long 0
@@ -178,54 +178,54 @@ SYMBOL_NAME_LABEL(interrupt_redirect_table)
178 .rept 5 178 .rept 5
179 .long 0 179 .long 0
180 .endr 180 .endr
181 jmp @SYMBOL_NAME(trace_break) 181 jmp @_trace_break
182 .long 0 182 .long 0
183#endif 183#endif
184 184
185 jsr @SYMBOL_NAME(interrupt_entry) /* NMI */ 185 jsr @_interrupt_entry /* NMI */
186 jmp @SYMBOL_NAME(system_call) /* TRAPA #0 (System call) */ 186 jmp @_system_call /* TRAPA #0 (System call) */
187 .long 0 187 .long 0
188 .long 0 188 .long 0
189 jmp @SYMBOL_NAME(trace_break) /* TRAPA #3 (breakpoint) */ 189 jmp @_trace_break /* TRAPA #3 (breakpoint) */
190 .rept INTERRUPTS-12 190 .rept INTERRUPTS-12
191 jsr @SYMBOL_NAME(interrupt_entry) 191 jsr @_interrupt_entry
192 .endr 192 .endr
193#endif 193#endif
194#if defined(CONFIG_RAMKERNEL) 194#if defined(CONFIG_RAMKERNEL)
195.globl SYMBOL_NAME(interrupt_redirect_table) 195.globl _interrupt_redirect_table
196 .section .bss 196 .section .bss
197SYMBOL_NAME_LABEL(interrupt_redirect_table) 197_interrupt_redirect_table:
198 .space 4 198 .space 4
199#endif 199#endif
200 200
201 .section .text 201 .section .text
202 .align 2 202 .align 2
203SYMBOL_NAME_LABEL(interrupt_entry) 203_interrupt_entry:
204 SAVE_ALL 204 SAVE_ALL
205 mov.l sp,er0 205 mov.l sp,er0
206 add.l #LVEC,er0 206 add.l #LVEC,er0
207 btst #4,r1l 207 btst #4,r1l
208 bne 1f 208 bne 1f
209 /* user LVEC */ 209 /* user LVEC */
210 mov.l @SYMBOL_NAME(sw_usp),er0 210 mov.l @_sw_usp,er0
211 adds #4,er0 211 adds #4,er0
2121: 2121:
213 mov.l @er0,er0 /* LVEC address */ 213 mov.l @er0,er0 /* LVEC address */
214#if defined(CONFIG_ROMKERNEL) 214#if defined(CONFIG_ROMKERNEL)
215 sub.l #SYMBOL_NAME(interrupt_redirect_table),er0 215 sub.l #_interrupt_redirect_table,er0
216#endif 216#endif
217#if defined(CONFIG_RAMKERNEL) 217#if defined(CONFIG_RAMKERNEL)
218 mov.l @SYMBOL_NAME(interrupt_redirect_table),er1 218 mov.l @_interrupt_redirect_table,er1
219 sub.l er1,er0 219 sub.l er1,er0
220#endif 220#endif
221 SHLR2 er0 221 SHLR2 er0
222 dec.l #1,er0 222 dec.l #1,er0
223 mov.l sp,er1 223 mov.l sp,er1
224 subs #4,er1 /* adjust ret_pc */ 224 subs #4,er1 /* adjust ret_pc */
225 jsr @SYMBOL_NAME(do_IRQ) 225 jsr @_do_IRQ
226 jmp @SYMBOL_NAME(ret_from_interrupt) 226 jmp @_ret_from_interrupt
227 227
228SYMBOL_NAME_LABEL(system_call) 228_system_call:
229 subs #4,sp /* dummy LVEC */ 229 subs #4,sp /* dummy LVEC */
230 SAVE_ALL 230 SAVE_ALL
231 andc #0x7f,ccr 231 andc #0x7f,ccr
@@ -233,21 +233,21 @@ SYMBOL_NAME_LABEL(system_call)
233 233
234 /* save top of frame */ 234 /* save top of frame */
235 mov.l sp,er0 235 mov.l sp,er0
236 jsr @SYMBOL_NAME(set_esp0) 236 jsr @_set_esp0
237 mov.l sp,er2 237 mov.l sp,er2
238 and.w #0xe000,r2 238 and.w #0xe000,r2
239 mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l 239 mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
240 btst #(TIF_SYSCALL_TRACE & 7),r2l 240 btst #(TIF_SYSCALL_TRACE & 7),r2l
241 beq 1f 241 beq 1f
242 jsr @SYMBOL_NAME(do_syscall_trace) 242 jsr @_do_syscall_trace
2431: 2431:
244 cmp.l #NR_syscalls,er4 244 cmp.l #NR_syscalls,er4
245 bcc badsys 245 bcc badsys
246 SHLL2 er4 246 SHLL2 er4
247 mov.l #SYMBOL_NAME(sys_call_table),er0 247 mov.l #_sys_call_table,er0
248 add.l er4,er0 248 add.l er4,er0
249 mov.l @er0,er4 249 mov.l @er0,er4
250 beq SYMBOL_NAME(ret_from_exception):16 250 beq _ret_from_exception:16
251 mov.l @(LER1:16,sp),er0 251 mov.l @(LER1:16,sp),er0
252 mov.l @(LER2:16,sp),er1 252 mov.l @(LER2:16,sp),er1
253 mov.l @(LER3:16,sp),er2 253 mov.l @(LER3:16,sp),er2
@@ -258,10 +258,10 @@ SYMBOL_NAME_LABEL(system_call)
258 mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l 258 mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
259 btst #(TIF_SYSCALL_TRACE & 7),r2l 259 btst #(TIF_SYSCALL_TRACE & 7),r2l
260 beq 2f 260 beq 2f
261 jsr @SYMBOL_NAME(do_syscall_trace) 261 jsr @_do_syscall_trace
2622: 2622:
263#if defined(CONFIG_SYSCALL_PRINT) 263#if defined(CONFIG_SYSCALL_PRINT)
264 jsr @SYMBOL_NAME(syscall_print) 264 jsr @_syscall_print
265#endif 265#endif
266 orc #0x80,ccr 266 orc #0x80,ccr
267 bra resume_userspace 267 bra resume_userspace
@@ -275,11 +275,11 @@ badsys:
275#define resume_kernel restore_all 275#define resume_kernel restore_all
276#endif 276#endif
277 277
278SYMBOL_NAME_LABEL(ret_from_exception) 278_ret_from_exception:
279#if defined(CONFIG_PREEMPT) 279#if defined(CONFIG_PREEMPT)
280 orc #0x80,ccr 280 orc #0x80,ccr
281#endif 281#endif
282SYMBOL_NAME_LABEL(ret_from_interrupt) 282_ret_from_interrupt:
283 mov.b @(LCCR+1:16,sp),r0l 283 mov.b @(LCCR+1:16,sp),r0l
284 btst #4,r0l 284 btst #4,r0l
285 bne resume_kernel:8 /* return from kernel */ 285 bne resume_kernel:8 /* return from kernel */
@@ -296,12 +296,12 @@ work_pending:
296 /* work notifysig */ 296 /* work notifysig */
297 mov.l sp,er0 297 mov.l sp,er0
298 subs #4,er0 /* er0: pt_regs */ 298 subs #4,er0 /* er0: pt_regs */
299 jsr @SYMBOL_NAME(do_notify_resume) 299 jsr @_do_notify_resume
300 bra restore_all:8 300 bra restore_all:8
301work_resched: 301work_resched:
302 mov.l sp,er0 302 mov.l sp,er0
303 jsr @SYMBOL_NAME(set_esp0) 303 jsr @_set_esp0
304 jsr @SYMBOL_NAME(schedule) 304 jsr @_schedule
305 bra resume_userspace:8 305 bra resume_userspace:8
306restore_all: 306restore_all:
307 RESTORE_ALL /* Does RTE */ 307 RESTORE_ALL /* Does RTE */
@@ -320,26 +320,26 @@ need_resched:
320 mov.l er0,@(TI_PRE_COUNT:16,er4) 320 mov.l er0,@(TI_PRE_COUNT:16,er4)
321 andc #0x7f,ccr 321 andc #0x7f,ccr
322 mov.l sp,er0 322 mov.l sp,er0
323 jsr @SYMBOL_NAME(set_esp0) 323 jsr @_set_esp0
324 jsr @SYMBOL_NAME(schedule) 324 jsr @_schedule
325 orc #0x80,ccr 325 orc #0x80,ccr
326 bra need_resched:8 326 bra need_resched:8
327#endif 327#endif
328 328
329SYMBOL_NAME_LABEL(ret_from_fork) 329_ret_from_fork:
330 mov.l er2,er0 330 mov.l er2,er0
331 jsr @SYMBOL_NAME(schedule_tail) 331 jsr @_schedule_tail
332 jmp @SYMBOL_NAME(ret_from_exception) 332 jmp @_ret_from_exception
333 333
334SYMBOL_NAME_LABEL(ret_from_kernel_thread) 334_ret_from_kernel_thread:
335 mov.l er2,er0 335 mov.l er2,er0
336 jsr @SYMBOL_NAME(schedule_tail) 336 jsr @_schedule_tail
337 mov.l @(LER4:16,sp),er0 337 mov.l @(LER4:16,sp),er0
338 mov.l @(LER5:16,sp),er1 338 mov.l @(LER5:16,sp),er1
339 jsr @er1 339 jsr @er1
340 jmp @SYMBOL_NAME(ret_from_exception) 340 jmp @_ret_from_exception
341 341
342SYMBOL_NAME_LABEL(resume) 342_resume:
343 /* 343 /*
344 * Beware - when entering resume, offset of tss is in d1, 344 * Beware - when entering resume, offset of tss is in d1,
345 * prev (the current task) is in a0, next (the new task) 345 * prev (the current task) is in a0, next (the new task)
@@ -355,7 +355,7 @@ SYMBOL_NAME_LABEL(resume)
355 355
356 /* disable interrupts */ 356 /* disable interrupts */
357 orc #0x80,ccr 357 orc #0x80,ccr
358 mov.l @SYMBOL_NAME(sw_usp),er3 358 mov.l @_sw_usp,er3
359 mov.l er3,@(THREAD_USP:16,er0) 359 mov.l er3,@(THREAD_USP:16,er0)
360 mov.l sp,@(THREAD_KSP:16,er0) 360 mov.l sp,@(THREAD_KSP:16,er0)
361 361
@@ -363,7 +363,7 @@ SYMBOL_NAME_LABEL(resume)
363 /* FIXME: what did we hack out of here, this does nothing! */ 363 /* FIXME: what did we hack out of here, this does nothing! */
364 364
365 mov.l @(THREAD_USP:16,er1),er0 365 mov.l @(THREAD_USP:16,er1),er0
366 mov.l er0,@SYMBOL_NAME(sw_usp) 366 mov.l er0,@_sw_usp
367 mov.l @(THREAD_KSP:16,er1),sp 367 mov.l @(THREAD_KSP:16,er1),sp
368 368
369 /* restore status register */ 369 /* restore status register */
@@ -372,15 +372,15 @@ SYMBOL_NAME_LABEL(resume)
372 ldc r3l,ccr 372 ldc r3l,ccr
373 rts 373 rts
374 374
375SYMBOL_NAME_LABEL(trace_break) 375_trace_break:
376 subs #4,sp 376 subs #4,sp
377 SAVE_ALL 377 SAVE_ALL
378 sub.l er1,er1 378 sub.l er1,er1
379 dec.l #1,er1 379 dec.l #1,er1
380 mov.l er1,@(LORIG,sp) 380 mov.l er1,@(LORIG,sp)
381 mov.l sp,er0 381 mov.l sp,er0
382 jsr @SYMBOL_NAME(set_esp0) 382 jsr @_set_esp0
383 mov.l @SYMBOL_NAME(sw_usp),er0 383 mov.l @_sw_usp,er0
384 mov.l @er0,er1 384 mov.l @er0,er1
385 mov.w @(-2:16,er1),r2 385 mov.w @(-2:16,er1),r2
386 cmp.w #0x5730,r2 386 cmp.w #0x5730,r2
@@ -390,13 +390,13 @@ SYMBOL_NAME_LABEL(trace_break)
3901: 3901:
391 and.w #0xff,e1 391 and.w #0xff,e1
392 mov.l er1,er0 392 mov.l er1,er0
393 jsr @SYMBOL_NAME(trace_trap) 393 jsr @_trace_trap
394 jmp @SYMBOL_NAME(ret_from_exception) 394 jmp @_ret_from_exception
395 395
396 .section .bss 396 .section .bss
397SYMBOL_NAME_LABEL(sw_ksp) 397_sw_ksp:
398 .space 4 398 .space 4
399SYMBOL_NAME_LABEL(sw_usp) 399_sw_usp:
400 .space 4 400 .space 4
401 401
402 .end 402 .end
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index 5c2168fb9b9e..c55e0ed270d5 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -2,8 +2,10 @@
2#include <linux/sys.h> 2#include <linux/sys.h>
3#include <asm/linkage.h> 3#include <asm/linkage.h>
4#include <asm/unistd.h> 4#include <asm/unistd.h>
5 5
6.globl SYMBOL_NAME(sys_call_table) 6#define CALL(x) .long _ ## x
7
8.globl _sys_call_table
7 9
8#if defined(CONFIG_CPU_H8300H) 10#if defined(CONFIG_CPU_H8300H)
9 .h8300h 11 .h8300h
@@ -13,324 +15,324 @@
13#endif 15#endif
14 .section .text 16 .section .text
15 .align 2 17 .align 2
16SYMBOL_NAME_LABEL(sys_call_table) 18_sys_call_table:
17 .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/ 19 CALL(sys_ni_syscall) /* 0 - old "setup()" system call*/
18 .long SYMBOL_NAME(sys_exit) 20 CALL(sys_exit)
19 .long SYMBOL_NAME(sys_fork) 21 CALL(sys_fork)
20 .long SYMBOL_NAME(sys_read) 22 CALL(sys_read)
21 .long SYMBOL_NAME(sys_write) 23 CALL(sys_write)
22 .long SYMBOL_NAME(sys_open) /* 5 */ 24 CALL(sys_open) /* 5 */
23 .long SYMBOL_NAME(sys_close) 25 CALL(sys_close)
24 .long SYMBOL_NAME(sys_waitpid) 26 CALL(sys_waitpid)
25 .long SYMBOL_NAME(sys_creat) 27 CALL(sys_creat)
26 .long SYMBOL_NAME(sys_link) 28 CALL(sys_link)
27 .long SYMBOL_NAME(sys_unlink) /* 10 */ 29 CALL(sys_unlink) /* 10 */
28 .long SYMBOL_NAME(sys_execve) 30 CALL(sys_execve)
29 .long SYMBOL_NAME(sys_chdir) 31 CALL(sys_chdir)
30 .long SYMBOL_NAME(sys_time) 32 CALL(sys_time)
31 .long SYMBOL_NAME(sys_mknod) 33 CALL(sys_mknod)
32 .long SYMBOL_NAME(sys_chmod) /* 15 */ 34 CALL(sys_chmod) /* 15 */
33 .long SYMBOL_NAME(sys_chown16) 35 CALL(sys_chown16)
34 .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */ 36 CALL(sys_ni_syscall) /* old break syscall holder */
35 .long SYMBOL_NAME(sys_stat) 37 CALL(sys_stat)
36 .long SYMBOL_NAME(sys_lseek) 38 CALL(sys_lseek)
37 .long SYMBOL_NAME(sys_getpid) /* 20 */ 39 CALL(sys_getpid) /* 20 */
38 .long SYMBOL_NAME(sys_mount) 40 CALL(sys_mount)
39 .long SYMBOL_NAME(sys_oldumount) 41 CALL(sys_oldumount)
40 .long SYMBOL_NAME(sys_setuid16) 42 CALL(sys_setuid16)
41 .long SYMBOL_NAME(sys_getuid16) 43 CALL(sys_getuid16)
42 .long SYMBOL_NAME(sys_stime) /* 25 */ 44 CALL(sys_stime) /* 25 */
43 .long SYMBOL_NAME(sys_ptrace) 45 CALL(sys_ptrace)
44 .long SYMBOL_NAME(sys_alarm) 46 CALL(sys_alarm)
45 .long SYMBOL_NAME(sys_fstat) 47 CALL(sys_fstat)
46 .long SYMBOL_NAME(sys_pause) 48 CALL(sys_pause)
47 .long SYMBOL_NAME(sys_utime) /* 30 */ 49 CALL(sys_utime) /* 30 */
48 .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */ 50 CALL(sys_ni_syscall) /* old stty syscall holder */
49 .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */ 51 CALL(sys_ni_syscall) /* old gtty syscall holder */
50 .long SYMBOL_NAME(sys_access) 52 CALL(sys_access)
51 .long SYMBOL_NAME(sys_nice) 53 CALL(sys_nice)
52 .long SYMBOL_NAME(sys_ni_syscall) /* 35 old ftime syscall holder */ 54 CALL(sys_ni_syscall) /* 35 old ftime syscall holder */
53 .long SYMBOL_NAME(sys_sync) 55 CALL(sys_sync)
54 .long SYMBOL_NAME(sys_kill) 56 CALL(sys_kill)
55 .long SYMBOL_NAME(sys_rename) 57 CALL(sys_rename)
56 .long SYMBOL_NAME(sys_mkdir) 58 CALL(sys_mkdir)
57 .long SYMBOL_NAME(sys_rmdir) /* 40 */ 59 CALL(sys_rmdir) /* 40 */
58 .long SYMBOL_NAME(sys_dup) 60 CALL(sys_dup)
59 .long SYMBOL_NAME(sys_pipe) 61 CALL(sys_pipe)
60 .long SYMBOL_NAME(sys_times) 62 CALL(sys_times)
61 .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */ 63 CALL(sys_ni_syscall) /* old prof syscall holder */
62 .long SYMBOL_NAME(sys_brk) /* 45 */ 64 CALL(sys_brk) /* 45 */
63 .long SYMBOL_NAME(sys_setgid16) 65 CALL(sys_setgid16)
64 .long SYMBOL_NAME(sys_getgid16) 66 CALL(sys_getgid16)
65 .long SYMBOL_NAME(sys_signal) 67 CALL(sys_signal)
66 .long SYMBOL_NAME(sys_geteuid16) 68 CALL(sys_geteuid16)
67 .long SYMBOL_NAME(sys_getegid16) /* 50 */ 69 CALL(sys_getegid16) /* 50 */
68 .long SYMBOL_NAME(sys_acct) 70 CALL(sys_acct)
69 .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */ 71 CALL(sys_umount) /* recycled never used phys() */
70 .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */ 72 CALL(sys_ni_syscall) /* old lock syscall holder */
71 .long SYMBOL_NAME(sys_ioctl) 73 CALL(sys_ioctl)
72 .long SYMBOL_NAME(sys_fcntl) /* 55 */ 74 CALL(sys_fcntl) /* 55 */
73 .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */ 75 CALL(sys_ni_syscall) /* old mpx syscall holder */
74 .long SYMBOL_NAME(sys_setpgid) 76 CALL(sys_setpgid)
75 .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */ 77 CALL(sys_ni_syscall) /* old ulimit syscall holder */
76 .long SYMBOL_NAME(sys_ni_syscall) 78 CALL(sys_ni_syscall)
77 .long SYMBOL_NAME(sys_umask) /* 60 */ 79 CALL(sys_umask) /* 60 */
78 .long SYMBOL_NAME(sys_chroot) 80 CALL(sys_chroot)
79 .long SYMBOL_NAME(sys_ustat) 81 CALL(sys_ustat)
80 .long SYMBOL_NAME(sys_dup2) 82 CALL(sys_dup2)
81 .long SYMBOL_NAME(sys_getppid) 83 CALL(sys_getppid)
82 .long SYMBOL_NAME(sys_getpgrp) /* 65 */ 84 CALL(sys_getpgrp) /* 65 */
83 .long SYMBOL_NAME(sys_setsid) 85 CALL(sys_setsid)
84 .long SYMBOL_NAME(sys_sigaction) 86 CALL(sys_sigaction)
85 .long SYMBOL_NAME(sys_sgetmask) 87 CALL(sys_sgetmask)
86 .long SYMBOL_NAME(sys_ssetmask) 88 CALL(sys_ssetmask)
87 .long SYMBOL_NAME(sys_setreuid16) /* 70 */ 89 CALL(sys_setreuid16) /* 70 */
88 .long SYMBOL_NAME(sys_setregid16) 90 CALL(sys_setregid16)
89 .long SYMBOL_NAME(sys_sigsuspend) 91 CALL(sys_sigsuspend)
90 .long SYMBOL_NAME(sys_sigpending) 92 CALL(sys_sigpending)
91 .long SYMBOL_NAME(sys_sethostname) 93 CALL(sys_sethostname)
92 .long SYMBOL_NAME(sys_setrlimit) /* 75 */ 94 CALL(sys_setrlimit) /* 75 */
93 .long SYMBOL_NAME(sys_old_getrlimit) 95 CALL(sys_old_getrlimit)
94 .long SYMBOL_NAME(sys_getrusage) 96 CALL(sys_getrusage)
95 .long SYMBOL_NAME(sys_gettimeofday) 97 CALL(sys_gettimeofday)
96 .long SYMBOL_NAME(sys_settimeofday) 98 CALL(sys_settimeofday)
97 .long SYMBOL_NAME(sys_getgroups16) /* 80 */ 99 CALL(sys_getgroups16) /* 80 */
98 .long SYMBOL_NAME(sys_setgroups16) 100 CALL(sys_setgroups16)
99 .long SYMBOL_NAME(sys_old_select) 101 CALL(sys_old_select)
100 .long SYMBOL_NAME(sys_symlink) 102 CALL(sys_symlink)
101 .long SYMBOL_NAME(sys_lstat) 103 CALL(sys_lstat)
102 .long SYMBOL_NAME(sys_readlink) /* 85 */ 104 CALL(sys_readlink) /* 85 */
103 .long SYMBOL_NAME(sys_uselib) 105 CALL(sys_uselib)
104 .long SYMBOL_NAME(sys_swapon) 106 CALL(sys_swapon)
105 .long SYMBOL_NAME(sys_reboot) 107 CALL(sys_reboot)
106 .long SYMBOL_NAME(sys_old_readdir) 108 CALL(sys_old_readdir)
107 .long SYMBOL_NAME(sys_old_mmap) /* 90 */ 109 CALL(sys_old_mmap) /* 90 */
108 .long SYMBOL_NAME(sys_munmap) 110 CALL(sys_munmap)
109 .long SYMBOL_NAME(sys_truncate) 111 CALL(sys_truncate)
110 .long SYMBOL_NAME(sys_ftruncate) 112 CALL(sys_ftruncate)
111 .long SYMBOL_NAME(sys_fchmod) 113 CALL(sys_fchmod)
112 .long SYMBOL_NAME(sys_fchown16) /* 95 */ 114 CALL(sys_fchown16) /* 95 */
113 .long SYMBOL_NAME(sys_getpriority) 115 CALL(sys_getpriority)
114 .long SYMBOL_NAME(sys_setpriority) 116 CALL(sys_setpriority)
115 .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */ 117 CALL(sys_ni_syscall) /* old profil syscall holder */
116 .long SYMBOL_NAME(sys_statfs) 118 CALL(sys_statfs)
117 .long SYMBOL_NAME(sys_fstatfs) /* 100 */ 119 CALL(sys_fstatfs) /* 100 */
118 .long SYMBOL_NAME(sys_ni_syscall) /* ioperm for i386 */ 120 CALL(sys_ni_syscall) /* ioperm for i386 */
119 .long SYMBOL_NAME(sys_socketcall) 121 CALL(sys_socketcall)
120 .long SYMBOL_NAME(sys_syslog) 122 CALL(sys_syslog)
121 .long SYMBOL_NAME(sys_setitimer) 123 CALL(sys_setitimer)
122 .long SYMBOL_NAME(sys_getitimer) /* 105 */ 124 CALL(sys_getitimer) /* 105 */
123 .long SYMBOL_NAME(sys_newstat) 125 CALL(sys_newstat)
124 .long SYMBOL_NAME(sys_newlstat) 126 CALL(sys_newlstat)
125 .long SYMBOL_NAME(sys_newfstat) 127 CALL(sys_newfstat)
126 .long SYMBOL_NAME(sys_ni_syscall) 128 CALL(sys_ni_syscall)
127 .long SYMBOL_NAME(sys_ni_syscall) /* iopl for i386 */ /* 110 */ 129 CALL(sys_ni_syscall) /* iopl for i386 */ /* 110 */
128 .long SYMBOL_NAME(sys_vhangup) 130 CALL(sys_vhangup)
129 .long SYMBOL_NAME(sys_ni_syscall) /* obsolete idle() syscall */ 131 CALL(sys_ni_syscall) /* obsolete idle() syscall */
130 .long SYMBOL_NAME(sys_ni_syscall) /* vm86old for i386 */ 132 CALL(sys_ni_syscall) /* vm86old for i386 */
131 .long SYMBOL_NAME(sys_wait4) 133 CALL(sys_wait4)
132 .long SYMBOL_NAME(sys_swapoff) /* 115 */ 134 CALL(sys_swapoff) /* 115 */
133 .long SYMBOL_NAME(sys_sysinfo) 135 CALL(sys_sysinfo)
134 .long SYMBOL_NAME(sys_ipc) 136 CALL(sys_ipc)
135 .long SYMBOL_NAME(sys_fsync) 137 CALL(sys_fsync)
136 .long SYMBOL_NAME(sys_sigreturn) 138 CALL(sys_sigreturn)
137 .long SYMBOL_NAME(sys_clone) /* 120 */ 139 CALL(sys_clone) /* 120 */
138 .long SYMBOL_NAME(sys_setdomainname) 140 CALL(sys_setdomainname)
139 .long SYMBOL_NAME(sys_newuname) 141 CALL(sys_newuname)
140 .long SYMBOL_NAME(sys_cacheflush) /* modify_ldt for i386 */ 142 CALL(sys_cacheflush) /* modify_ldt for i386 */
141 .long SYMBOL_NAME(sys_adjtimex) 143 CALL(sys_adjtimex)
142 .long SYMBOL_NAME(sys_ni_syscall) /* 125 sys_mprotect */ 144 CALL(sys_ni_syscall) /* 125 sys_mprotect */
143 .long SYMBOL_NAME(sys_sigprocmask) 145 CALL(sys_sigprocmask)
144 .long SYMBOL_NAME(sys_ni_syscall) /* sys_create_module */ 146 CALL(sys_ni_syscall) /* sys_create_module */
145 .long SYMBOL_NAME(sys_init_module) 147 CALL(sys_init_module)
146 .long SYMBOL_NAME(sys_delete_module) 148 CALL(sys_delete_module)
147 .long SYMBOL_NAME(sys_ni_syscall) /* 130 sys_get_kernel_syms */ 149 CALL(sys_ni_syscall) /* 130 sys_get_kernel_syms */
148 .long SYMBOL_NAME(sys_quotactl) 150 CALL(sys_quotactl)
149 .long SYMBOL_NAME(sys_getpgid) 151 CALL(sys_getpgid)
150 .long SYMBOL_NAME(sys_fchdir) 152 CALL(sys_fchdir)
151 .long SYMBOL_NAME(sys_bdflush) 153 CALL(sys_bdflush)
152 .long SYMBOL_NAME(sys_sysfs) /* 135 */ 154 CALL(sys_sysfs) /* 135 */
153 .long SYMBOL_NAME(sys_personality) 155 CALL(sys_personality)
154 .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */ 156 CALL(sys_ni_syscall) /* for afs_syscall */
155 .long SYMBOL_NAME(sys_setfsuid16) 157 CALL(sys_setfsuid16)
156 .long SYMBOL_NAME(sys_setfsgid16) 158 CALL(sys_setfsgid16)
157 .long SYMBOL_NAME(sys_llseek) /* 140 */ 159 CALL(sys_llseek) /* 140 */
158 .long SYMBOL_NAME(sys_getdents) 160 CALL(sys_getdents)
159 .long SYMBOL_NAME(sys_select) 161 CALL(sys_select)
160 .long SYMBOL_NAME(sys_flock) 162 CALL(sys_flock)
161 .long SYMBOL_NAME(sys_ni_syscall) /* sys_msync */ 163 CALL(sys_ni_syscall) /* sys_msync */
162 .long SYMBOL_NAME(sys_readv) /* 145 */ 164 CALL(sys_readv) /* 145 */
163 .long SYMBOL_NAME(sys_writev) 165 CALL(sys_writev)
164 .long SYMBOL_NAME(sys_getsid) 166 CALL(sys_getsid)
165 .long SYMBOL_NAME(sys_fdatasync) 167 CALL(sys_fdatasync)
166 .long SYMBOL_NAME(sys_sysctl) 168 CALL(sys_sysctl)
167 .long SYMBOL_NAME(sys_ni_syscall) /* 150 sys_mlock */ 169 CALL(sys_ni_syscall) /* 150 sys_mlock */
168 .long SYMBOL_NAME(sys_ni_syscall) /* sys_munlock */ 170 CALL(sys_ni_syscall) /* sys_munlock */
169 .long SYMBOL_NAME(sys_ni_syscall) /* sys_mlockall */ 171 CALL(sys_ni_syscall) /* sys_mlockall */
170 .long SYMBOL_NAME(sys_ni_syscall) /* sys_munlockall */ 172 CALL(sys_ni_syscall) /* sys_munlockall */
171 .long SYMBOL_NAME(sys_sched_setparam) 173 CALL(sys_sched_setparam)
172 .long SYMBOL_NAME(sys_sched_getparam) /* 155 */ 174 CALL(sys_sched_getparam) /* 155 */
173 .long SYMBOL_NAME(sys_sched_setscheduler) 175 CALL(sys_sched_setscheduler)
174 .long SYMBOL_NAME(sys_sched_getscheduler) 176 CALL(sys_sched_getscheduler)
175 .long SYMBOL_NAME(sys_sched_yield) 177 CALL(sys_sched_yield)
176 .long SYMBOL_NAME(sys_sched_get_priority_max) 178 CALL(sys_sched_get_priority_max)
177 .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */ 179 CALL(sys_sched_get_priority_min) /* 160 */
178 .long SYMBOL_NAME(sys_sched_rr_get_interval) 180 CALL(sys_sched_rr_get_interval)
179 .long SYMBOL_NAME(sys_nanosleep) 181 CALL(sys_nanosleep)
180 .long SYMBOL_NAME(sys_ni_syscall) /* sys_mremap */ 182 CALL(sys_ni_syscall) /* sys_mremap */
181 .long SYMBOL_NAME(sys_setresuid16) 183 CALL(sys_setresuid16)
182 .long SYMBOL_NAME(sys_getresuid16) /* 165 */ 184 CALL(sys_getresuid16) /* 165 */
183 .long SYMBOL_NAME(sys_ni_syscall) /* for vm86 */ 185 CALL(sys_ni_syscall) /* for vm86 */
184 .long SYMBOL_NAME(sys_ni_syscall) /* sys_query_module */ 186 CALL(sys_ni_syscall) /* sys_query_module */
185 .long SYMBOL_NAME(sys_poll) 187 CALL(sys_poll)
186 .long SYMBOL_NAME(sys_ni_syscall) /* old nfsservctl */ 188 CALL(sys_ni_syscall) /* old nfsservctl */
187 .long SYMBOL_NAME(sys_setresgid16) /* 170 */ 189 CALL(sys_setresgid16) /* 170 */
188 .long SYMBOL_NAME(sys_getresgid16) 190 CALL(sys_getresgid16)
189 .long SYMBOL_NAME(sys_prctl) 191 CALL(sys_prctl)
190 .long SYMBOL_NAME(sys_rt_sigreturn) 192 CALL(sys_rt_sigreturn)
191 .long SYMBOL_NAME(sys_rt_sigaction) 193 CALL(sys_rt_sigaction)
192 .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */ 194 CALL(sys_rt_sigprocmask) /* 175 */
193 .long SYMBOL_NAME(sys_rt_sigpending) 195 CALL(sys_rt_sigpending)
194 .long SYMBOL_NAME(sys_rt_sigtimedwait) 196 CALL(sys_rt_sigtimedwait)
195 .long SYMBOL_NAME(sys_rt_sigqueueinfo) 197 CALL(sys_rt_sigqueueinfo)
196 .long SYMBOL_NAME(sys_rt_sigsuspend) 198 CALL(sys_rt_sigsuspend)
197 .long SYMBOL_NAME(sys_pread64) /* 180 */ 199 CALL(sys_pread64) /* 180 */
198 .long SYMBOL_NAME(sys_pwrite64) 200 CALL(sys_pwrite64)
199 .long SYMBOL_NAME(sys_lchown16); 201 CALL(sys_lchown16);
200 .long SYMBOL_NAME(sys_getcwd) 202 CALL(sys_getcwd)
201 .long SYMBOL_NAME(sys_capget) 203 CALL(sys_capget)
202 .long SYMBOL_NAME(sys_capset) /* 185 */ 204 CALL(sys_capset) /* 185 */
203 .long SYMBOL_NAME(sys_sigaltstack) 205 CALL(sys_sigaltstack)
204 .long SYMBOL_NAME(sys_sendfile) 206 CALL(sys_sendfile)
205 .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */ 207 CALL(sys_ni_syscall) /* streams1 */
206 .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ 208 CALL(sys_ni_syscall) /* streams2 */
207 .long SYMBOL_NAME(sys_vfork) /* 190 */ 209 CALL(sys_vfork) /* 190 */
208 .long SYMBOL_NAME(sys_getrlimit) 210 CALL(sys_getrlimit)
209 .long SYMBOL_NAME(sys_mmap_pgoff) 211 CALL(sys_mmap_pgoff)
210 .long SYMBOL_NAME(sys_truncate64) 212 CALL(sys_truncate64)
211 .long SYMBOL_NAME(sys_ftruncate64) 213 CALL(sys_ftruncate64)
212 .long SYMBOL_NAME(sys_stat64) /* 195 */ 214 CALL(sys_stat64) /* 195 */
213 .long SYMBOL_NAME(sys_lstat64) 215 CALL(sys_lstat64)
214 .long SYMBOL_NAME(sys_fstat64) 216 CALL(sys_fstat64)
215 .long SYMBOL_NAME(sys_chown) 217 CALL(sys_chown)
216 .long SYMBOL_NAME(sys_getuid) 218 CALL(sys_getuid)
217 .long SYMBOL_NAME(sys_getgid) /* 200 */ 219 CALL(sys_getgid) /* 200 */
218 .long SYMBOL_NAME(sys_geteuid) 220 CALL(sys_geteuid)
219 .long SYMBOL_NAME(sys_getegid) 221 CALL(sys_getegid)
220 .long SYMBOL_NAME(sys_setreuid) 222 CALL(sys_setreuid)
221 .long SYMBOL_NAME(sys_setregid) 223 CALL(sys_setregid)
222 .long SYMBOL_NAME(sys_getgroups) /* 205 */ 224 CALL(sys_getgroups) /* 205 */
223 .long SYMBOL_NAME(sys_setgroups) 225 CALL(sys_setgroups)
224 .long SYMBOL_NAME(sys_fchown) 226 CALL(sys_fchown)
225 .long SYMBOL_NAME(sys_setresuid) 227 CALL(sys_setresuid)
226 .long SYMBOL_NAME(sys_getresuid) 228 CALL(sys_getresuid)
227 .long SYMBOL_NAME(sys_setresgid) /* 210 */ 229 CALL(sys_setresgid) /* 210 */
228 .long SYMBOL_NAME(sys_getresgid) 230 CALL(sys_getresgid)
229 .long SYMBOL_NAME(sys_lchown) 231 CALL(sys_lchown)
230 .long SYMBOL_NAME(sys_setuid) 232 CALL(sys_setuid)
231 .long SYMBOL_NAME(sys_setgid) 233 CALL(sys_setgid)
232 .long SYMBOL_NAME(sys_setfsuid) /* 215 */ 234 CALL(sys_setfsuid) /* 215 */
233 .long SYMBOL_NAME(sys_setfsgid) 235 CALL(sys_setfsgid)
234 .long SYMBOL_NAME(sys_pivot_root) 236 CALL(sys_pivot_root)
235 .long SYMBOL_NAME(sys_ni_syscall) 237 CALL(sys_ni_syscall)
236 .long SYMBOL_NAME(sys_ni_syscall) 238 CALL(sys_ni_syscall)
237 .long SYMBOL_NAME(sys_getdents64) /* 220 */ 239 CALL(sys_getdents64) /* 220 */
238 .long SYMBOL_NAME(sys_fcntl64) 240 CALL(sys_fcntl64)
239 .long SYMBOL_NAME(sys_ni_syscall) /* reserved TUX */ 241 CALL(sys_ni_syscall) /* reserved TUX */
240 .long SYMBOL_NAME(sys_ni_syscall) /* reserved Security */ 242 CALL(sys_ni_syscall) /* reserved Security */
241 .long SYMBOL_NAME(sys_gettid) 243 CALL(sys_gettid)
242 .long SYMBOL_NAME(sys_readahead) /* 225 */ 244 CALL(sys_readahead) /* 225 */
243 .long SYMBOL_NAME(sys_setxattr) 245 CALL(sys_setxattr)
244 .long SYMBOL_NAME(sys_lsetxattr) 246 CALL(sys_lsetxattr)
245 .long SYMBOL_NAME(sys_fsetxattr) 247 CALL(sys_fsetxattr)
246 .long SYMBOL_NAME(sys_getxattr) 248 CALL(sys_getxattr)
247 .long SYMBOL_NAME(sys_lgetxattr) /* 230 */ 249 CALL(sys_lgetxattr) /* 230 */
248 .long SYMBOL_NAME(sys_fgetxattr) 250 CALL(sys_fgetxattr)
249 .long SYMBOL_NAME(sys_listxattr) 251 CALL(sys_listxattr)
250 .long SYMBOL_NAME(sys_llistxattr) 252 CALL(sys_llistxattr)
251 .long SYMBOL_NAME(sys_flistxattr) 253 CALL(sys_flistxattr)
252 .long SYMBOL_NAME(sys_removexattr) /* 235 */ 254 CALL(sys_removexattr) /* 235 */
253 .long SYMBOL_NAME(sys_lremovexattr) 255 CALL(sys_lremovexattr)
254 .long SYMBOL_NAME(sys_fremovexattr) 256 CALL(sys_fremovexattr)
255 .long SYMBOL_NAME(sys_tkill) 257 CALL(sys_tkill)
256 .long SYMBOL_NAME(sys_sendfile64) 258 CALL(sys_sendfile64)
257 .long SYMBOL_NAME(sys_futex) /* 240 */ 259 CALL(sys_futex) /* 240 */
258 .long SYMBOL_NAME(sys_sched_setaffinity) 260 CALL(sys_sched_setaffinity)
259 .long SYMBOL_NAME(sys_sched_getaffinity) 261 CALL(sys_sched_getaffinity)
260 .long SYMBOL_NAME(sys_ni_syscall) 262 CALL(sys_ni_syscall)
261 .long SYMBOL_NAME(sys_ni_syscall) 263 CALL(sys_ni_syscall)
262 .long SYMBOL_NAME(sys_io_setup) /* 245 */ 264 CALL(sys_io_setup) /* 245 */
263 .long SYMBOL_NAME(sys_io_destroy) 265 CALL(sys_io_destroy)
264 .long SYMBOL_NAME(sys_io_getevents) 266 CALL(sys_io_getevents)
265 .long SYMBOL_NAME(sys_io_submit) 267 CALL(sys_io_submit)
266 .long SYMBOL_NAME(sys_io_cancel) 268 CALL(sys_io_cancel)
267 .long SYMBOL_NAME(sys_fadvise64) /* 250 */ 269 CALL(sys_fadvise64) /* 250 */
268 .long SYMBOL_NAME(sys_ni_syscall) 270 CALL(sys_ni_syscall)
269 .long SYMBOL_NAME(sys_exit_group) 271 CALL(sys_exit_group)
270 .long SYMBOL_NAME(sys_lookup_dcookie) 272 CALL(sys_lookup_dcookie)
271 .long SYMBOL_NAME(sys_epoll_create) 273 CALL(sys_epoll_create)
272 .long SYMBOL_NAME(sys_epoll_ctl) /* 255 */ 274 CALL(sys_epoll_ctl) /* 255 */
273 .long SYMBOL_NAME(sys_epoll_wait) 275 CALL(sys_epoll_wait)
274 .long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */ 276 CALL(sys_ni_syscall) /* sys_remap_file_pages */
275 .long SYMBOL_NAME(sys_set_tid_address) 277 CALL(sys_set_tid_address)
276 .long SYMBOL_NAME(sys_timer_create) 278 CALL(sys_timer_create)
277 .long SYMBOL_NAME(sys_timer_settime) /* 260 */ 279 CALL(sys_timer_settime) /* 260 */
278 .long SYMBOL_NAME(sys_timer_gettime) 280 CALL(sys_timer_gettime)
279 .long SYMBOL_NAME(sys_timer_getoverrun) 281 CALL(sys_timer_getoverrun)
280 .long SYMBOL_NAME(sys_timer_delete) 282 CALL(sys_timer_delete)
281 .long SYMBOL_NAME(sys_clock_settime) 283 CALL(sys_clock_settime)
282 .long SYMBOL_NAME(sys_clock_gettime) /* 265 */ 284 CALL(sys_clock_gettime) /* 265 */
283 .long SYMBOL_NAME(sys_clock_getres) 285 CALL(sys_clock_getres)
284 .long SYMBOL_NAME(sys_clock_nanosleep) 286 CALL(sys_clock_nanosleep)
285 .long SYMBOL_NAME(sys_statfs64) 287 CALL(sys_statfs64)
286 .long SYMBOL_NAME(sys_fstatfs64) 288 CALL(sys_fstatfs64)
287 .long SYMBOL_NAME(sys_tgkill) /* 270 */ 289 CALL(sys_tgkill) /* 270 */
288 .long SYMBOL_NAME(sys_utimes) 290 CALL(sys_utimes)
289 .long SYMBOL_NAME(sys_fadvise64_64) 291 CALL(sys_fadvise64_64)
290 .long SYMBOL_NAME(sys_ni_syscall) /* sys_vserver */ 292 CALL(sys_ni_syscall) /* sys_vserver */
291 .long SYMBOL_NAME(sys_ni_syscall) 293 CALL(sys_ni_syscall)
292 .long SYMBOL_NAME(sys_get_mempolicy) /* 275 */ 294 CALL(sys_get_mempolicy) /* 275 */
293 .long SYMBOL_NAME(sys_set_mempolicy) 295 CALL(sys_set_mempolicy)
294 .long SYMBOL_NAME(sys_mq_open) 296 CALL(sys_mq_open)
295 .long SYMBOL_NAME(sys_mq_unlink) 297 CALL(sys_mq_unlink)
296 .long SYMBOL_NAME(sys_mq_timedsend) 298 CALL(sys_mq_timedsend)
297 .long SYMBOL_NAME(sys_mq_timedreceive) /* 280 */ 299 CALL(sys_mq_timedreceive) /* 280 */
298 .long SYMBOL_NAME(sys_mq_notify) 300 CALL(sys_mq_notify)
299 .long SYMBOL_NAME(sys_mq_getsetattr) 301 CALL(sys_mq_getsetattr)
300 .long SYMBOL_NAME(sys_waitid) 302 CALL(sys_waitid)
301 .long SYMBOL_NAME(sys_ni_syscall) /* sys_kexec_load */ 303 CALL(sys_ni_syscall) /* sys_kexec_load */
302 .long SYMBOL_NAME(sys_add_key) /* 285 */ 304 CALL(sys_add_key) /* 285 */
303 .long SYMBOL_NAME(sys_request_key) 305 CALL(sys_request_key)
304 .long SYMBOL_NAME(sys_keyctl) 306 CALL(sys_keyctl)
305 .long SYMBOL_NAME(sys_ioprio_set) 307 CALL(sys_ioprio_set)
306 .long SYMBOL_NAME(sys_ioprio_get) /* 290 */ 308 CALL(sys_ioprio_get) /* 290 */
307 .long SYMBOL_NAME(sys_inotify_init) 309 CALL(sys_inotify_init)
308 .long SYMBOL_NAME(sys_inotify_add_watch) 310 CALL(sys_inotify_add_watch)
309 .long SYMBOL_NAME(sys_inotify_rm_watch) 311 CALL(sys_inotify_rm_watch)
310 .long SYMBOL_NAME(sys_migrate_pages) 312 CALL(sys_migrate_pages)
311 .long SYMBOL_NAME(sys_openat) /* 295 */ 313 CALL(sys_openat) /* 295 */
312 .long SYMBOL_NAME(sys_mkdirat) 314 CALL(sys_mkdirat)
313 .long SYMBOL_NAME(sys_mknodat) 315 CALL(sys_mknodat)
314 .long SYMBOL_NAME(sys_fchownat) 316 CALL(sys_fchownat)
315 .long SYMBOL_NAME(sys_futimesat) 317 CALL(sys_futimesat)
316 .long SYMBOL_NAME(sys_fstatat64) /* 300 */ 318 CALL(sys_fstatat64) /* 300 */
317 .long SYMBOL_NAME(sys_unlinkat) 319 CALL(sys_unlinkat)
318 .long SYMBOL_NAME(sys_renameat) 320 CALL(sys_renameat)
319 .long SYMBOL_NAME(sys_linkat) 321 CALL(sys_linkat)
320 .long SYMBOL_NAME(sys_symlinkat) 322 CALL(sys_symlinkat)
321 .long SYMBOL_NAME(sys_readlinkat) /* 305 */ 323 CALL(sys_readlinkat) /* 305 */
322 .long SYMBOL_NAME(sys_fchmodat) 324 CALL(sys_fchmodat)
323 .long SYMBOL_NAME(sys_faccessat) 325 CALL(sys_faccessat)
324 .long SYMBOL_NAME(sys_ni_syscall) /* sys_pselect6 */ 326 CALL(sys_ni_syscall) /* sys_pselect6 */
325 .long SYMBOL_NAME(sys_ni_syscall) /* sys_ppoll */ 327 CALL(sys_ni_syscall) /* sys_ppoll */
326 .long SYMBOL_NAME(sys_unshare) /* 310 */ 328 CALL(sys_unshare) /* 310 */
327 .long SYMBOL_NAME(sys_set_robust_list) 329 CALL(sys_set_robust_list)
328 .long SYMBOL_NAME(sys_get_robust_list) 330 CALL(sys_get_robust_list)
329 .long SYMBOL_NAME(sys_splice) 331 CALL(sys_splice)
330 .long SYMBOL_NAME(sys_sync_file_range) 332 CALL(sys_sync_file_range)
331 .long SYMBOL_NAME(sys_tee) /* 315 */ 333 CALL(sys_tee) /* 315 */
332 .long SYMBOL_NAME(sys_vmsplice) 334 CALL(sys_vmsplice)
333 .long SYMBOL_NAME(sys_ni_syscall) /* sys_move_pages */ 335 CALL(sys_ni_syscall) /* sys_move_pages */
334 .long SYMBOL_NAME(sys_getcpu) 336 CALL(sys_getcpu)
335 .long SYMBOL_NAME(sys_ni_syscall) /* sys_epoll_pwait */ 337 CALL(sys_ni_syscall) /* sys_epoll_pwait */
336 .long SYMBOL_NAME(sys_setns) /* 320 */ 338 CALL(sys_setns) /* 320 */
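On h8300 the toolchain prefixes every C-level symbol with an underscore; the removed SYMBOL_NAME()/SYMBOL_NAME_LABEL() macros from <asm/linkage.h> existed only to hide that, and the commit now writes the prefix directly. The local CALL() macro added at the top of this file keeps the 300-plus entry table readable by token-pasting the underscore onto each handler name:

```c
#define CALL(x) .long _ ## x	/* token-paste the ELF-level underscore prefix */

CALL(sys_read)	/* preprocesses to: .long _sys_read */
```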
diff --git a/arch/h8300/lib/abs.S b/arch/h8300/lib/abs.S
index cabdd46b41db..ddd1fb3d01ad 100644
--- a/arch/h8300/lib/abs.S
+++ b/arch/h8300/lib/abs.S
@@ -9,10 +9,10 @@
 	.h8300s
 #endif
 	.text
-.global SYMBOL_NAME(abs)
+.global _abs
 
 ;;; int abs(int n)
-SYMBOL_NAME_LABEL(abs)
+_abs:
 	mov.l	er0,er0
 	bpl	1f
 	neg.l	er0
diff --git a/arch/h8300/lib/memcpy.S b/arch/h8300/lib/memcpy.S
index fdcbc1ee673c..cad325e2c0e8 100644
--- a/arch/h8300/lib/memcpy.S
+++ b/arch/h8300/lib/memcpy.S
@@ -10,10 +10,10 @@
 #endif
 
 	.text
-.global SYMBOL_NAME(memcpy)
+.global _memcpy
 
 ;;; void *memcpy(void *to, void *from, size_t n)
-SYMBOL_NAME_LABEL(memcpy)
+_memcpy:
 	mov.l	er2,er2
 	bne	1f
 	rts
diff --git a/arch/h8300/lib/memset.S b/arch/h8300/lib/memset.S
index 59abdf9485a5..4549a64c5b79 100644
--- a/arch/h8300/lib/memset.S
+++ b/arch/h8300/lib/memset.S
@@ -10,13 +10,13 @@
 #endif
 	.text
 
-.global SYMBOL_NAME(memset)
+.global _memset
 
 ;;void *memset(*ptr, int c, size_t count)
 ;; ptr = er0
 ;; c = er1(r1l)
 ;; count = er2
-SYMBOL_NAME_LABEL(memset)
+_memset:
 	btst	#0,r0l
 	beq	2f
 
diff --git a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
index ecaeb31ae9a4..b2ad0f2d0417 100644
--- a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
+++ b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
@@ -22,10 +22,10 @@
22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS 22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
23#endif 23#endif
24 24
25 .global SYMBOL_NAME(_start) 25 .global __start
26 .global SYMBOL_NAME(command_line) 26 .global _command_line
27 .global SYMBOL_NAME(_platform_gpio_table) 27 .global __platform_gpio_table
28 .global SYMBOL_NAME(_target_name) 28 .global __target_name
29 29
30 .h8300h 30 .h8300h
31 31
@@ -33,7 +33,7 @@
33 .file "crt0_ram.S" 33 .file "crt0_ram.S"
34 34
35 /* CPU Reset entry */ 35 /* CPU Reset entry */
36SYMBOL_NAME_LABEL(_start) 36__start:
37 mov.l #RAMEND,sp 37 mov.l #RAMEND,sp
38 ldc #0x80,ccr 38 ldc #0x80,ccr
39 39
@@ -59,13 +59,13 @@ SYMBOL_NAME_LABEL(_start)
59 59
60 /* copy kernel commandline */ 60 /* copy kernel commandline */
61 mov.l #COMMAND_START,er5 61 mov.l #COMMAND_START,er5
62 mov.l #SYMBOL_NAME(command_line),er6 62 mov.l #_command_line,er6
63 mov.w #512,r4 63 mov.w #512,r4
64 eepmov.w 64 eepmov.w
65 65
66 /* uClinux kernel start */ 66 /* uClinux kernel start */
67 ldc #0x90,ccr /* running kernel */ 67 ldc #0x90,ccr /* running kernel */
68 mov.l #SYMBOL_NAME(init_thread_union),sp 68 mov.l #_init_thread_union,sp
69 add.l #0x2000,sp 69 add.l #0x2000,sp
70 jsr @_start_kernel 70 jsr @_start_kernel
71_exit: 71_exit:
@@ -107,4 +107,4 @@ __target_name:
107 .asciz "AE-3068" 107 .asciz "AE-3068"
108 108
109 .section .bootvec,"ax" 109 .section .bootvec,"ax"
110 jmp @SYMBOL_NAME(_start) 110 jmp @__start
diff --git a/arch/h8300/platform/h8300h/generic/crt0_ram.S b/arch/h8300/platform/h8300h/generic/crt0_ram.S
index 80d0e16a4499..5ab7d9c12910 100644
--- a/arch/h8300/platform/h8300h/generic/crt0_ram.S
+++ b/arch/h8300/platform/h8300h/generic/crt0_ram.S
@@ -22,10 +22,10 @@
22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS 22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
23#endif 23#endif
24 24
25 .global SYMBOL_NAME(_start) 25 .global __start
26 .global SYMBOL_NAME(command_line) 26 .global _command_line
27 .global SYMBOL_NAME(_platform_gpio_table) 27 .global __platform_gpio_table
28 .global SYMBOL_NAME(_target_name) 28 .global __target_name
29 29
30 .h8300h 30 .h8300h
31 31
@@ -33,7 +33,7 @@
33 .file "crt0_ram.S" 33 .file "crt0_ram.S"
34 34
35 /* CPU Reset entry */ 35 /* CPU Reset entry */
36SYMBOL_NAME_LABEL(_start) 36__start:
37 mov.l #RAMEND,sp 37 mov.l #RAMEND,sp
38 ldc #0x80,ccr 38 ldc #0x80,ccr
39 39
@@ -59,13 +59,13 @@ SYMBOL_NAME_LABEL(_start)
59 59
60 /* copy kernel commandline */ 60 /* copy kernel commandline */
61 mov.l #COMMAND_START,er5 61 mov.l #COMMAND_START,er5
62 mov.l #SYMBOL_NAME(command_line),er6 62 mov.l #_command_line,er6
63 mov.w #512,r4 63 mov.w #512,r4
64 eepmov.w 64 eepmov.w
65 65
66 /* uClinux kernel start */ 66 /* uClinux kernel start */
67 ldc #0x90,ccr /* running kernel */ 67 ldc #0x90,ccr /* running kernel */
68 mov.l #SYMBOL_NAME(init_thread_union),sp 68 mov.l #_init_thread_union,sp
69 add.l #0x2000,sp 69 add.l #0x2000,sp
70 jsr @_start_kernel 70 jsr @_start_kernel
71_exit: 71_exit:
diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S
index 120add7ca832..dda1dfa15a5e 100644
--- a/arch/h8300/platform/h8300h/generic/crt0_rom.S
+++ b/arch/h8300/platform/h8300h/generic/crt0_rom.S
@@ -12,17 +12,17 @@
12 12
13#include <asm/linkage.h> 13#include <asm/linkage.h>
14 14
15 .global SYMBOL_NAME(_start) 15 .global __start
16 .global SYMBOL_NAME(_command_line) 16 .global __command_line
17 .global SYMBOL_NAME(_platform_gpio_table) 17 .global __platform_gpio_table
18 .global SYMBOL_NAME(_target_name) 18 .global __target_name
19 19
20 .h8300h 20 .h8300h
21 .section .text 21 .section .text
22 .file "crt0_rom.S" 22 .file "crt0_rom.S"
23 23
24 /* CPU Reset entry */ 24 /* CPU Reset entry */
25SYMBOL_NAME_LABEL(_start) 25__start:
26 mov.l #__ramend,sp 26 mov.l #__ramend,sp
27 ldc #0x80,ccr 27 ldc #0x80,ccr
28 28
@@ -60,13 +60,13 @@ SYMBOL_NAME_LABEL(_start)
60 60
61 /* copy kernel commandline */ 61 /* copy kernel commandline */
62 mov.l #COMMAND_START,er5 62 mov.l #COMMAND_START,er5
63 mov.l #SYMBOL_NAME(_command_line),er6 63 mov.l #__command_line,er6
64 mov.w #512,r4 64 mov.w #512,r4
65 eepmov.w 65 eepmov.w
66 66
67 /* linux kernel start */ 67 /* linux kernel start */
68 ldc #0x90,ccr /* running kernel */ 68 ldc #0x90,ccr /* running kernel */
69 mov.l #SYMBOL_NAME(init_thread_union),sp 69 mov.l #_init_thread_union,sp
70 add.l #0x2000,sp 70 add.l #0x2000,sp
71 jsr @_start_kernel 71 jsr @_start_kernel
72_exit: 72_exit:
diff --git a/arch/h8300/platform/h8300h/h8max/crt0_ram.S b/arch/h8300/platform/h8300h/h8max/crt0_ram.S
index efcbefb91b67..6a0d4e2d9ec6 100644
--- a/arch/h8300/platform/h8300h/h8max/crt0_ram.S
+++ b/arch/h8300/platform/h8300h/h8max/crt0_ram.S
@@ -22,10 +22,10 @@
22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS 22#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
23#endif 23#endif
24 24
25 .global SYMBOL_NAME(_start) 25 .global __start
26 .global SYMBOL_NAME(command_line) 26 .global _command_line
27 .global SYMBOL_NAME(_platform_gpio_table) 27 .global __platform_gpio_table
28 .global SYMBOL_NAME(_target_name) 28 .global __target_name
29 29
30 .h8300h 30 .h8300h
31 31
@@ -33,7 +33,7 @@
33 .file "crt0_ram.S" 33 .file "crt0_ram.S"
34 34
35 /* CPU Reset entry */ 35 /* CPU Reset entry */
36SYMBOL_NAME_LABEL(_start) 36__start:
37 mov.l #RAMEND,sp 37 mov.l #RAMEND,sp
38 ldc #0x80,ccr 38 ldc #0x80,ccr
39 39
@@ -59,13 +59,13 @@ SYMBOL_NAME_LABEL(_start)
59 59
60 /* copy kernel commandline */ 60 /* copy kernel commandline */
61 mov.l #COMMAND_START,er5 61 mov.l #COMMAND_START,er5
62 mov.l #SYMBOL_NAME(command_line),er6 62 mov.l #_command_line,er6
63 mov.w #512,r4 63 mov.w #512,r4
64 eepmov.w 64 eepmov.w
65 65
66 /* uClinux kernel start */ 66 /* uClinux kernel start */
67 ldc #0x90,ccr /* running kernel */ 67 ldc #0x90,ccr /* running kernel */
68 mov.l #SYMBOL_NAME(init_thread_union),sp 68 mov.l #_init_thread_union,sp
69 add.l #0x2000,sp 69 add.l #0x2000,sp
70 jsr @_start_kernel 70 jsr @_start_kernel
71_exit: 71_exit:
@@ -107,4 +107,4 @@ __target_name:
107 .asciz "H8MAX" 107 .asciz "H8MAX"
108 108
109 .section .bootvec,"ax" 109 .section .bootvec,"ax"
110 jmp @SYMBOL_NAME(_start) 110 jmp @__start
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
index d12b0debe478..5ed191b37cde 100644
--- a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
+++ b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
@@ -23,10 +23,10 @@
23#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS 23#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
24#endif 24#endif
25 25
26 .global SYMBOL_NAME(_start) 26 .global __start
27 .global SYMBOL_NAME(_command_line) 27 .global __command_line
28 .global SYMBOL_NAME(_platform_gpio_table) 28 .global __platform_gpio_table
29 .global SYMBOL_NAME(_target_name) 29 .global __target_name
30 30
31 .h8300s 31 .h8300s
32 32
@@ -34,7 +34,7 @@
34 .file "crt0_ram.S" 34 .file "crt0_ram.S"
35 35
36 /* CPU Reset entry */ 36 /* CPU Reset entry */
37SYMBOL_NAME_LABEL(_start) 37__start:
38 mov.l #RAMEND,sp 38 mov.l #RAMEND,sp
39 ldc #0x80,ccr 39 ldc #0x80,ccr
40 ldc #0x00,exr 40 ldc #0x00,exr
@@ -66,13 +66,13 @@ SYMBOL_NAME_LABEL(_start)
66 66
67 /* copy kernel commandline */ 67 /* copy kernel commandline */
68 mov.l #COMMAND_START,er5 68 mov.l #COMMAND_START,er5
69 mov.l #SYMBOL_NAME(command_line),er6 69 mov.l #_command_line,er6
70 mov.w #512,r4 70 mov.w #512,r4
71 eepmov.w 71 eepmov.w
72 72
73 /* uClinux kernel start */ 73 /* uClinux kernel start */
74 ldc #0x90,ccr /* running kernel */ 74 ldc #0x90,ccr /* running kernel */
75 mov.l #SYMBOL_NAME(init_thread_union),sp 75 mov.l #_init_thread_union,sp
76 add.l #0x2000,sp 76 add.l #0x2000,sp
77 jsr @_start_kernel 77 jsr @_start_kernel
78_exit: 78_exit:
@@ -127,4 +127,4 @@ __target_name:
127 .asciz "EDOSK-2674" 127 .asciz "EDOSK-2674"
128 128
129 .section .bootvec,"ax" 129 .section .bootvec,"ax"
130 jmp @SYMBOL_NAME(_start) 130 jmp @__start
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
index c03d23c6fe12..06d1d7f324ca 100644
--- a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
+++ b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
@@ -13,17 +13,17 @@
13#include <asm/linkage.h> 13#include <asm/linkage.h>
14#include <asm/regs267x.h> 14#include <asm/regs267x.h>
15 15
16 .global SYMBOL_NAME(_start) 16 .global __start
17 .global SYMBOL_NAME(_command_line) 17 .global __command_line
18 .global SYMBOL_NAME(_platform_gpio_table) 18 .global __platform_gpio_table
19 .global SYMBOL_NAME(_target_name) 19 .global __target_name
20 20
21 .h8300s 21 .h8300s
22 .section .text 22 .section .text
23 .file "crt0_rom.S" 23 .file "crt0_rom.S"
24 24
25 /* CPU Reset entry */ 25 /* CPU Reset entry */
26SYMBOL_NAME_LABEL(_start) 26__start:
27 mov.l #__ramend,sp 27 mov.l #__ramend,sp
28 ldc #0x80,ccr 28 ldc #0x80,ccr
29 ldc #0,exr 29 ldc #0,exr
@@ -82,13 +82,13 @@ SYMBOL_NAME_LABEL(_start)
82 82
83 /* copy kernel commandline */ 83 /* copy kernel commandline */
84 mov.l #COMMAND_START,er5 84 mov.l #COMMAND_START,er5
85 mov.l #SYMBOL_NAME(_command_line),er6 85 mov.l #__command_line,er6
86 mov.w #512,r4 86 mov.w #512,r4
87 eepmov.w 87 eepmov.w
88 88
89 /* linux kernel start */ 89 /* linux kernel start */
90 ldc #0x90,ccr /* running kernel */ 90 ldc #0x90,ccr /* running kernel */
91 mov.l #SYMBOL_NAME(init_thread_union),sp 91 mov.l #_init_thread_union,sp
92 add.l #0x2000,sp 92 add.l #0x2000,sp
93 jsr @_start_kernel 93 jsr @_start_kernel
94_exit: 94_exit:
diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S
index b04541069976..7018915de74f 100644
--- a/arch/h8300/platform/h8s/generic/crt0_ram.S
+++ b/arch/h8300/platform/h8s/generic/crt0_ram.S
@@ -23,10 +23,10 @@
23#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS 23#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
24#endif 24#endif
25 25
26 .global SYMBOL_NAME(_start) 26 .global __start
27 .global SYMBOL_NAME(_command_line) 27 .global __command_line
28 .global SYMBOL_NAME(_platform_gpio_table) 28 .global __platform_gpio_table
29 .global SYMBOL_NAME(_target_name) 29 .global __target_name
30 30
31 .h8300s 31 .h8300s
32 32
@@ -34,7 +34,7 @@
34 .file "crt0_ram.S" 34 .file "crt0_ram.S"
35 35
36 /* CPU Reset entry */ 36 /* CPU Reset entry */
37SYMBOL_NAME_LABEL(_start) 37__start:
38 mov.l #RAMEND,sp 38 mov.l #RAMEND,sp
39 ldc #0x80,ccr 39 ldc #0x80,ccr
40 ldc #0x00,exr 40 ldc #0x00,exr
@@ -63,13 +63,13 @@ SYMBOL_NAME_LABEL(_start)
63 63
64 /* copy kernel commandline */ 64 /* copy kernel commandline */
65 mov.l #COMMAND_START,er5 65 mov.l #COMMAND_START,er5
66 mov.l #SYMBOL_NAME(command_line),er6 66 mov.l #_command_line,er6
67 mov.w #512,r4 67 mov.w #512,r4
68 eepmov.w 68 eepmov.w
69 69
70 /* uClinux kernel start */ 70 /* uClinux kernel start */
71 ldc #0x90,ccr /* running kernel */ 71 ldc #0x90,ccr /* running kernel */
72 mov.l #SYMBOL_NAME(init_thread_union),sp 72 mov.l #_init_thread_union,sp
73 add.l #0x2000,sp 73 add.l #0x2000,sp
74 jsr @_start_kernel 74 jsr @_start_kernel
75_exit: 75_exit:
@@ -124,4 +124,4 @@ __target_name:
124 .asciz "generic" 124 .asciz "generic"
125 125
126 .section .bootvec,"ax" 126 .section .bootvec,"ax"
127 jmp @SYMBOL_NAME(_start) 127 jmp @__start
diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S
index 95b6f2898f52..623ba7828193 100644
--- a/arch/h8300/platform/h8s/generic/crt0_rom.S
+++ b/arch/h8300/platform/h8s/generic/crt0_rom.S
@@ -13,17 +13,17 @@
13#include <asm/linkage.h> 13#include <asm/linkage.h>
14#include <asm/regs267x.h> 14#include <asm/regs267x.h>
15 15
16 .global SYMBOL_NAME(_start) 16 .global __start
17 .global SYMBOL_NAME(_command_line) 17 .global __command_line
18 .global SYMBOL_NAME(_platform_gpio_table) 18 .global __platform_gpio_table
19 .global SYMBOL_NAME(_target_name) 19 .global __target_name
20 20
21 .h8300s 21 .h8300s
22 .section .text 22 .section .text
23 .file "crt0_rom.S" 23 .file "crt0_rom.S"
24 24
25 /* CPU Reset entry */ 25 /* CPU Reset entry */
26SYMBOL_NAME_LABEL(_start) 26__start:
27 mov.l #__ramend,sp 27 mov.l #__ramend,sp
28 ldc #0x80,ccr 28 ldc #0x80,ccr
29 ldc #0,exr 29 ldc #0,exr
@@ -61,7 +61,7 @@ SYMBOL_NAME_LABEL(_start)
61 61
62 /* linux kernel start */ 62 /* linux kernel start */
63 ldc #0x90,ccr /* running kernel */ 63 ldc #0x90,ccr /* running kernel */
64 mov.l #SYMBOL_NAME(init_thread_union),sp 64 mov.l #_init_thread_union,sp
65 add.l #0x2000,sp 65 add.l #0x2000,sp
66 jsr @_start_kernel 66 jsr @_start_kernel
67_exit: 67_exit:
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bcda5b2d121a..d43daf192b21 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2042,7 +2042,8 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2042#endif 2042#endif
2043 2043
2044static int __init 2044static int __init
2045acpi_sba_ioc_add(struct acpi_device *device) 2045acpi_sba_ioc_add(struct acpi_device *device,
2046 const struct acpi_device_id *not_used)
2046{ 2047{
2047 struct ioc *ioc; 2048 struct ioc *ioc;
2048 acpi_status status; 2049 acpi_status status;
@@ -2090,14 +2091,18 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2090 {"HWP0004", 0}, 2091 {"HWP0004", 0},
2091 {"", 0}, 2092 {"", 0},
2092}; 2093};
2093static struct acpi_driver acpi_sba_ioc_driver = { 2094static struct acpi_scan_handler acpi_sba_ioc_handler = {
2094 .name = "IOC IOMMU Driver", 2095 .ids = hp_ioc_iommu_device_ids,
2095 .ids = hp_ioc_iommu_device_ids, 2096 .attach = acpi_sba_ioc_add,
2096 .ops = {
2097 .add = acpi_sba_ioc_add,
2098 },
2099}; 2097};
2100 2098
2099static int __init acpi_sba_ioc_init_acpi(void)
2100{
2101 return acpi_scan_add_handler(&acpi_sba_ioc_handler);
2102}
2103/* This has to run before acpi_scan_init(). */
2104arch_initcall(acpi_sba_ioc_init_acpi);
2105
2101extern struct dma_map_ops swiotlb_dma_ops; 2106extern struct dma_map_ops swiotlb_dma_ops;
2102 2107
2103static int __init 2108static int __init
@@ -2122,7 +2127,10 @@ sba_init(void)
2122 } 2127 }
2123#endif 2128#endif
2124 2129
2125 acpi_bus_register_driver(&acpi_sba_ioc_driver); 2130 /*
2131 * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
2132 * routine, but that only happens if acpi_scan_init() has already run.
2133 */
2126 if (!ioc_list) { 2134 if (!ioc_list) {
2127#ifdef CONFIG_IA64_GENERIC 2135#ifdef CONFIG_IA64_GENERIC
2128 /* 2136 /*
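The sba_iommu hunk above converts an acpi_driver into an acpi_scan_handler, so the IOC is claimed during the ACPI namespace scan rather than by a separately probed bus driver; the arch_initcall() ordering matters because the handler must be registered before acpi_scan_init() runs. A minimal sketch of the same registration pattern (the my_* names are placeholders, not kernel symbols):

    /* minimal sketch of registering an ACPI scan handler; my_ids and
     * my_attach stand in for the driver-specific pieces */
    static int my_attach(struct acpi_device *adev,
                         const struct acpi_device_id *id)
    {
            /* set up the device here; a positive return claims it */
            return 1;
    }

    static struct acpi_scan_handler my_handler = {
            .ids    = my_ids,
            .attach = my_attach,
    };

    static int __init my_init(void)
    {
            return acpi_scan_add_handler(&my_handler);
    }
    arch_initcall(my_init);         /* must precede acpi_scan_init() */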
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 331de723c676..3a428f19a001 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -88,8 +88,8 @@ simscsi_setup (char *s)
88 if (strlen(s) > MAX_ROOT_LEN) { 88 if (strlen(s) > MAX_ROOT_LEN) {
89 printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n", 89 printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n",
90 simscsi_root); 90 simscsi_root);
91 } 91 } else
92 simscsi_root = s; 92 simscsi_root = s;
93 return 1; 93 return 1;
94} 94}
95 95
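The simscsi hunk is a straight logic fix: previously an over-long prefix was rejected with an error message but then assigned to simscsi_root anyway; the added else makes the rejection take effect, so the advertised default root is actually kept.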
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 5e04b591e423..80775f55f03f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -89,9 +89,9 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
89#define pci_legacy_read platform_pci_legacy_read 89#define pci_legacy_read platform_pci_legacy_read
90#define pci_legacy_write platform_pci_legacy_write 90#define pci_legacy_write platform_pci_legacy_write
91 91
92struct pci_window { 92struct iospace_resource {
93 struct resource resource; 93 struct list_head list;
94 u64 offset; 94 struct resource res;
95}; 95};
96 96
97struct pci_controller { 97struct pci_controller {
@@ -100,12 +100,10 @@ struct pci_controller {
100 int segment; 100 int segment;
101 int node; /* nearest node with memory or -1 for global allocation */ 101 int node; /* nearest node with memory or -1 for global allocation */
102 102
103 unsigned int windows;
104 struct pci_window *window;
105
106 void *platform_data; 103 void *platform_data;
107}; 104};
108 105
106
109#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata) 107#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
110#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment) 108#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
111 109
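The pci.h hunk replaces the windows/window array inside pci_controller with list-based iospace_resource bookkeeping. The list_head-plus-resource layout lets each entry and its name string come from a single allocation, as in this sketch condensed from the pci.c hunk later in the series:

    /* condensed from the pci.c hunk below: one allocation holds the
     * iospace_resource and its name buffer, so a single kfree() frees
     * both (len is sized from the bridge name, as in the diff) */
    iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
    if (!iospace)
            goto out;
    name = (char *)(iospace + 1);           /* trailing name storage */
    iospace->res.name = name;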
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 335eb07480fe..5eb71d22c3d5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -807,7 +807,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
807 * ACPI based hotplug CPU support 807 * ACPI based hotplug CPU support
808 */ 808 */
809#ifdef CONFIG_ACPI_HOTPLUG_CPU 809#ifdef CONFIG_ACPI_HOTPLUG_CPU
810static __cpuinit 810static
811int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 811int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
812{ 812{
813#ifdef CONFIG_ACPI_NUMA 813#ifdef CONFIG_ACPI_NUMA
@@ -882,7 +882,7 @@ __init void prefill_possible_map(void)
882 set_cpu_possible(i, true); 882 set_cpu_possible(i, true);
883} 883}
884 884
885static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) 885static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
886{ 886{
887 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 887 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
888 union acpi_object *obj; 888 union acpi_object *obj;
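The __cpuinit removals in acpi.c above recur throughout the ia64 hunks that follow. For context: __cpuinit/__cpuinitdata used to move CPU-bringup code and data into a discardable section when CPU hotplug was disabled, roughly as in this simplified reconstruction of the old <linux/init.h> definitions:

    /* simplified reconstruction of the pre-removal annotations; the
     * real definitions carried extra attributes such as __cold */
    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit
    #define __cpuinitdata
    #else
    #define __cpuinit       __section(.cpuinit.text)
    #define __cpuinitdata   __section(.cpuinit.data)
    #endif

With hotplug enabled in practically every configuration the annotations saved nothing and were a steady source of section-mismatch warnings, so they were defined away tree-wide and then deleted, which is what these hunks do.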
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317a1ec2..f59c0b844e88 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -225,17 +225,17 @@ static struct attribute_group err_inject_attr_group = {
225 .name = "err_inject" 225 .name = "err_inject"
226}; 226};
227/* Add/Remove err_inject interface for CPU device */ 227/* Add/Remove err_inject interface for CPU device */
228static int __cpuinit err_inject_add_dev(struct device * sys_dev) 228static int err_inject_add_dev(struct device *sys_dev)
229{ 229{
230 return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group); 230 return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
231} 231}
232 232
233static int __cpuinit err_inject_remove_dev(struct device * sys_dev) 233static int err_inject_remove_dev(struct device *sys_dev)
234{ 234{
235 sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); 235 sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
236 return 0; 236 return 0;
237} 237}
238static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, 238static int err_inject_cpu_callback(struct notifier_block *nfb,
239 unsigned long action, void *hcpu) 239 unsigned long action, void *hcpu)
240{ 240{
241 unsigned int cpu = (unsigned long)hcpu; 241 unsigned int cpu = (unsigned long)hcpu;
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
256 return NOTIFY_OK; 256 return NOTIFY_OK;
257} 257}
258 258
259static struct notifier_block __cpuinitdata err_inject_cpu_notifier = 259static struct notifier_block err_inject_cpu_notifier =
260{ 260{
261 .notifier_call = err_inject_cpu_callback, 261 .notifier_call = err_inject_cpu_callback,
262}; 262};
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d7396dbb07bb..b8edfa75a83f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -631,7 +631,7 @@ ia64_mca_register_cpev (int cpev)
631 * Outputs 631 * Outputs
632 * None 632 * None
633 */ 633 */
634void __cpuinit 634void
635ia64_mca_cmc_vector_setup (void) 635ia64_mca_cmc_vector_setup (void)
636{ 636{
637 cmcv_reg_t cmcv; 637 cmcv_reg_t cmcv;
@@ -1814,7 +1814,7 @@ static struct irqaction mca_cpep_irqaction = {
1814 * format most of the fields. 1814 * format most of the fields.
1815 */ 1815 */
1816 1816
1817static void __cpuinit 1817static void
1818format_mca_init_stack(void *mca_data, unsigned long offset, 1818format_mca_init_stack(void *mca_data, unsigned long offset,
1819 const char *type, int cpu) 1819 const char *type, int cpu)
1820{ 1820{
@@ -1844,7 +1844,7 @@ static void * __init_refok mca_bootmem(void)
1844} 1844}
1845 1845
1846/* Do per-CPU MCA-related initialization. */ 1846/* Do per-CPU MCA-related initialization. */
1847void __cpuinit 1847void
1848ia64_mca_cpu_init(void *cpu_data) 1848ia64_mca_cpu_init(void *cpu_data)
1849{ 1849{
1850 void *pal_vaddr; 1850 void *pal_vaddr;
@@ -1896,7 +1896,7 @@ ia64_mca_cpu_init(void *cpu_data)
1896 PAGE_KERNEL)); 1896 PAGE_KERNEL));
1897} 1897}
1898 1898
1899static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy) 1899static void ia64_mca_cmc_vector_adjust(void *dummy)
1900{ 1900{
1901 unsigned long flags; 1901 unsigned long flags;
1902 1902
@@ -1906,7 +1906,7 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
1906 local_irq_restore(flags); 1906 local_irq_restore(flags);
1907} 1907}
1908 1908
1909static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, 1909static int mca_cpu_callback(struct notifier_block *nfb,
1910 unsigned long action, 1910 unsigned long action,
1911 void *hcpu) 1911 void *hcpu)
1912{ 1912{
@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
1922 return NOTIFY_OK; 1922 return NOTIFY_OK;
1923} 1923}
1924 1924
1925static struct notifier_block mca_cpu_notifier __cpuinitdata = { 1925static struct notifier_block mca_cpu_notifier = {
1926 .notifier_call = mca_cpu_callback 1926 .notifier_call = mca_cpu_callback
1927}; 1927};
1928 1928
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index c93420c97409..d288cde93606 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
30cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; 30cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
31EXPORT_SYMBOL(node_to_cpu_mask); 31EXPORT_SYMBOL(node_to_cpu_mask);
32 32
33void __cpuinit map_cpu_to_node(int cpu, int nid) 33void map_cpu_to_node(int cpu, int nid)
34{ 34{
35 int oldnid; 35 int oldnid;
36 if (nid < 0) { /* just initialize by zero */ 36 if (nid < 0) { /* just initialize by zero */
@@ -51,7 +51,7 @@ void __cpuinit map_cpu_to_node(int cpu, int nid)
51 return; 51 return;
52} 52}
53 53
54void __cpuinit unmap_cpu_from_node(int cpu, int nid) 54void unmap_cpu_from_node(int cpu, int nid)
55{ 55{
56 WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid])); 56 WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
57 WARN_ON(cpu_to_node_map[cpu] != nid); 57 WARN_ON(cpu_to_node_map[cpu] != nid);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 2b3c2d79256f..ab333284f4b2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,7 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
932 .release = single_release, 932 .release = single_release,
933}; 933};
934 934
935static void __cpuinit 935static void
936create_palinfo_proc_entries(unsigned int cpu) 936create_palinfo_proc_entries(unsigned int cpu)
937{ 937{
938 pal_func_cpu_u_t f; 938 pal_func_cpu_u_t f;
@@ -962,7 +962,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
962 remove_proc_subtree(cpustr, palinfo_dir); 962 remove_proc_subtree(cpustr, palinfo_dir);
963} 963}
964 964
965static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, 965static int palinfo_cpu_callback(struct notifier_block *nfb,
966 unsigned long action, void *hcpu) 966 unsigned long action, void *hcpu)
967{ 967{
968 unsigned int hotcpu = (unsigned long)hcpu; 968 unsigned int hotcpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1ddcfe5ef353..992c1098c522 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -33,15 +33,6 @@ int force_iommu __read_mostly;
33 33
34int iommu_pass_through; 34int iommu_pass_through;
35 35
36/* Dummy device used for NULL arguments (normally ISA). Better would
37 be probably a smaller DMA mask, but this is bug-to-bug compatible
38 to i386. */
39struct device fallback_dev = {
40 .init_name = "fallback device",
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 .dma_mask = &fallback_dev.coherent_dma_mask,
43};
44
45extern struct dma_map_ops intel_dma_ops; 36extern struct dma_map_ops intel_dma_ops;
46 37
47static int __init pci_iommu_init(void) 38static int __init pci_iommu_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ea25fce06d5..5a9ff1c3c3e9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5647,24 +5647,8 @@ pfm_proc_show_header(struct seq_file *m)
5647 5647
5648 list_for_each(pos, &pfm_buffer_fmt_list) { 5648 list_for_each(pos, &pfm_buffer_fmt_list) {
5649 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); 5649 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5650 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n", 5650 seq_printf(m, "format : %16phD %s\n",
5651 entry->fmt_uuid[0], 5651 entry->fmt_uuid, entry->fmt_name);
5652 entry->fmt_uuid[1],
5653 entry->fmt_uuid[2],
5654 entry->fmt_uuid[3],
5655 entry->fmt_uuid[4],
5656 entry->fmt_uuid[5],
5657 entry->fmt_uuid[6],
5658 entry->fmt_uuid[7],
5659 entry->fmt_uuid[8],
5660 entry->fmt_uuid[9],
5661 entry->fmt_uuid[10],
5662 entry->fmt_uuid[11],
5663 entry->fmt_uuid[12],
5664 entry->fmt_uuid[13],
5665 entry->fmt_uuid[14],
5666 entry->fmt_uuid[15],
5667 entry->fmt_name);
5668 } 5652 }
5669 spin_unlock(&pfm_buffer_fmt_lock); 5653 spin_unlock(&pfm_buffer_fmt_lock);
5670 5654
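The perfmon hunk above collapses sixteen seq_printf() arguments into the kernel's %*ph extension: %16phD prints a 16-byte buffer as hex with '-' separators (the trailing letter selects the separator: C for colons, N for none, plain %*ph for spaces). Condensed illustration:

    /* %*ph dumps a small buffer as hex bytes; %16phD renders the
     * 16-byte UUID as "de-ad-be-ef-..." in one argument */
    u8 uuid[16];
    seq_printf(m, "format : %16phD %s\n", uuid, fmt_name);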
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 4bc580af67b3..960a396f5929 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -568,7 +568,7 @@ static const struct file_operations salinfo_data_fops = {
568 .llseek = default_llseek, 568 .llseek = default_llseek,
569}; 569};
570 570
571static int __cpuinit 571static int
572salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 572salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
573{ 573{
574 unsigned int i, cpu = (unsigned long)hcpu; 574 unsigned int i, cpu = (unsigned long)hcpu;
@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
609 return NOTIFY_OK; 609 return NOTIFY_OK;
610} 610}
611 611
612static struct notifier_block salinfo_cpu_notifier __cpuinitdata = 612static struct notifier_block salinfo_cpu_notifier =
613{ 613{
614 .notifier_call = salinfo_cpu_callback, 614 .notifier_call = salinfo_cpu_callback,
615 .priority = 0, 615 .priority = 0,
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 13bfdd22afc8..4fc2e9569bb2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -748,7 +748,7 @@ const struct seq_operations cpuinfo_op = {
748#define MAX_BRANDS 8 748#define MAX_BRANDS 8
749static char brandname[MAX_BRANDS][128]; 749static char brandname[MAX_BRANDS][128];
750 750
751static char * __cpuinit 751static char *
752get_model_name(__u8 family, __u8 model) 752get_model_name(__u8 family, __u8 model)
753{ 753{
754 static int overflow; 754 static int overflow;
@@ -778,7 +778,7 @@ get_model_name(__u8 family, __u8 model)
778 return "Unknown"; 778 return "Unknown";
779} 779}
780 780
781static void __cpuinit 781static void
782identify_cpu (struct cpuinfo_ia64 *c) 782identify_cpu (struct cpuinfo_ia64 *c)
783{ 783{
784 union { 784 union {
@@ -850,7 +850,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
850 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". 850 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
851 * 3. the minimum of the cache stride sizes for "clflush_cache_range()". 851 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
852 */ 852 */
853static void __cpuinit 853static void
854get_cache_info(void) 854get_cache_info(void)
855{ 855{
856 unsigned long line_size, max = 1; 856 unsigned long line_size, max = 1;
@@ -915,10 +915,10 @@ get_cache_info(void)
915 * cpu_init() initializes state that is per-CPU. This function acts 915 * cpu_init() initializes state that is per-CPU. This function acts
916 * as a 'CPU state barrier', nothing should get across. 916 * as a 'CPU state barrier', nothing should get across.
917 */ 917 */
918void __cpuinit 918void
919cpu_init (void) 919cpu_init (void)
920{ 920{
921 extern void __cpuinit ia64_mmu_init (void *); 921 extern void ia64_mmu_init(void *);
922 static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; 922 static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
923 unsigned long num_phys_stacked; 923 unsigned long num_phys_stacked;
924 pal_vm_info_2_u_t vmi; 924 pal_vm_info_2_u_t vmi;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8d87168d218d..547a48d78bd7 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -351,7 +351,7 @@ static inline void smp_setup_percpu_timer(void)
351{ 351{
352} 352}
353 353
354static void __cpuinit 354static void
355smp_callin (void) 355smp_callin (void)
356{ 356{
357 int cpuid, phys_id, itc_master; 357 int cpuid, phys_id, itc_master;
@@ -442,7 +442,7 @@ smp_callin (void)
442/* 442/*
443 * Activate a secondary processor. head.S calls this. 443 * Activate a secondary processor. head.S calls this.
444 */ 444 */
445int __cpuinit 445int
446start_secondary (void *unused) 446start_secondary (void *unused)
447{ 447{
448 /* Early console may use I/O ports */ 448 /* Early console may use I/O ports */
@@ -459,7 +459,7 @@ start_secondary (void *unused)
459 return 0; 459 return 0;
460} 460}
461 461
462static int __cpuinit 462static int
463do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) 463do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
464{ 464{
465 int timeout; 465 int timeout;
@@ -728,7 +728,7 @@ static inline void set_cpu_sibling_map(int cpu)
728 } 728 }
729} 729}
730 730
731int __cpuinit 731int
732__cpu_up(unsigned int cpu, struct task_struct *tidle) 732__cpu_up(unsigned int cpu, struct task_struct *tidle)
733{ 733{
734 int ret; 734 int ret;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index dc00b2c1b42a..ca69a5a96dcc 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -135,11 +135,11 @@ struct cpu_cache_info {
135 struct kobject kobj; 135 struct kobject kobj;
136}; 136};
137 137
138static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata; 138static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
139#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y]) 139#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
140 140
141#ifdef CONFIG_SMP 141#ifdef CONFIG_SMP
142static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu, 142static void cache_shared_cpu_map_setup(unsigned int cpu,
143 struct cache_info * this_leaf) 143 struct cache_info * this_leaf)
144{ 144{
145 pal_cache_shared_info_t csi; 145 pal_cache_shared_info_t csi;
@@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
174 &csi) == PAL_STATUS_SUCCESS); 174 &csi) == PAL_STATUS_SUCCESS);
175} 175}
176#else 176#else
177static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, 177static void cache_shared_cpu_map_setup(unsigned int cpu,
178 struct cache_info * this_leaf) 178 struct cache_info * this_leaf)
179{ 179{
180 cpu_set(cpu, this_leaf->shared_cpu_map); 180 cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
298 .sysfs_ops = &cache_sysfs_ops, 298 .sysfs_ops = &cache_sysfs_ops,
299}; 299};
300 300
301static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) 301static void cpu_cache_sysfs_exit(unsigned int cpu)
302{ 302{
303 kfree(all_cpu_cache_info[cpu].cache_leaves); 303 kfree(all_cpu_cache_info[cpu].cache_leaves);
304 all_cpu_cache_info[cpu].cache_leaves = NULL; 304 all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
307 return; 307 return;
308} 308}
309 309
310static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu) 310static int cpu_cache_sysfs_init(unsigned int cpu)
311{ 311{
312 unsigned long i, levels, unique_caches; 312 unsigned long i, levels, unique_caches;
313 pal_cache_config_info_t cci; 313 pal_cache_config_info_t cci;
@@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
351} 351}
352 352
353/* Add cache interface for CPU device */ 353/* Add cache interface for CPU device */
354static int __cpuinit cache_add_dev(struct device * sys_dev) 354static int cache_add_dev(struct device *sys_dev)
355{ 355{
356 unsigned int cpu = sys_dev->id; 356 unsigned int cpu = sys_dev->id;
357 unsigned long i, j; 357 unsigned long i, j;
@@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev)
401} 401}
402 402
403/* Remove cache interface for CPU device */ 403/* Remove cache interface for CPU device */
404static int __cpuinit cache_remove_dev(struct device * sys_dev) 404static int cache_remove_dev(struct device *sys_dev)
405{ 405{
406 unsigned int cpu = sys_dev->id; 406 unsigned int cpu = sys_dev->id;
407 unsigned long i; 407 unsigned long i;
@@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev)
425 * When a cpu is hot-plugged, do a check and initiate 425 * When a cpu is hot-plugged, do a check and initiate
426 * cache kobject if necessary 426 * cache kobject if necessary
427 */ 427 */
428static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, 428static int cache_cpu_callback(struct notifier_block *nfb,
429 unsigned long action, void *hcpu) 429 unsigned long action, void *hcpu)
430{ 430{
431 unsigned int cpu = (unsigned long)hcpu; 431 unsigned int cpu = (unsigned long)hcpu;
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
445 return NOTIFY_OK; 445 return NOTIFY_OK;
446} 446}
447 447
448static struct notifier_block __cpuinitdata cache_cpu_notifier = 448static struct notifier_block cache_cpu_notifier =
449{ 449{
450 .notifier_call = cache_cpu_callback 450 .notifier_call = cache_cpu_callback
451}; 451};
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f7f9f9c6caf0..d3636e67a98e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -630,7 +630,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
630 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", 630 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
631 iip, ifa, isr); 631 iip, ifa, isr);
632 force_sig(SIGSEGV, current); 632 force_sig(SIGSEGV, current);
633 break; 633 return;
634 634
635 case 46: 635 case 46:
636 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); 636 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 1a4053789d01..18e45ec49bbf 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -47,12 +47,13 @@ FORCE : $(obj)/$(offsets-file)
47 47
48ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ 48ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
49asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/ 49asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
50KVM := ../../../virt/kvm
50 51
51common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ 52common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
52 coalesced_mmio.o irq_comm.o) 53 $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
53 54
54ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y) 55ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
55common-objs += $(addprefix ../../../virt/kvm/, assigned-dev.o iommu.o) 56common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
56endif 57endif
57 58
58kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o 59kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e4a6a5366dea..da5237d636d6 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -156,8 +156,7 @@ static void *cpu_data;
156 * 156 *
157 * Allocate and setup per-cpu data areas. 157 * Allocate and setup per-cpu data areas.
158 */ 158 */
159void * __cpuinit 159void *per_cpu_init(void)
160per_cpu_init (void)
161{ 160{
162 static bool first_time = true; 161 static bool first_time = true;
163 void *cpu0_data = __cpu0_per_cpu; 162 void *cpu0_data = __cpu0_per_cpu;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 58550b8f7d40..2de08f4d9930 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -592,7 +592,7 @@ void __init find_memory(void)
592 * find_pernode_space() does most of this already, we just need to set 592 * find_pernode_space() does most of this already, we just need to set
593 * local_per_cpu_offset 593 * local_per_cpu_offset
594 */ 594 */
595void __cpuinit *per_cpu_init(void) 595void *per_cpu_init(void)
596{ 596{
597 int cpu; 597 int cpu;
598 static int first_time = 1; 598 static int first_time = 1;
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 4248492b9321..ea21d4cad540 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -86,7 +86,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
86 return -1; 86 return -1;
87} 87}
88 88
89void __cpuinit numa_clear_node(int cpu) 89void numa_clear_node(int cpu)
90{ 90{
91 unmap_cpu_from_node(cpu, NUMA_NO_NODE); 91 unmap_cpu_from_node(cpu, NUMA_NO_NODE);
92} 92}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index de1474ff0bc5..2326790b7d8b 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -134,6 +134,10 @@ struct pci_root_info {
134 struct acpi_device *bridge; 134 struct acpi_device *bridge;
135 struct pci_controller *controller; 135 struct pci_controller *controller;
136 struct list_head resources; 136 struct list_head resources;
137 struct resource *res;
138 resource_size_t *res_offset;
139 unsigned int res_num;
140 struct list_head io_resources;
137 char *name; 141 char *name;
138}; 142};
139 143
@@ -153,7 +157,7 @@ new_space (u64 phys_base, int sparse)
153 return i; 157 return i;
154 158
155 if (num_io_spaces == MAX_IO_SPACES) { 159 if (num_io_spaces == MAX_IO_SPACES) {
156 printk(KERN_ERR "PCI: Too many IO port spaces " 160 pr_err("PCI: Too many IO port spaces "
157 "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES); 161 "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
158 return ~0; 162 return ~0;
159 } 163 }
@@ -168,25 +172,22 @@ new_space (u64 phys_base, int sparse)
168static u64 add_io_space(struct pci_root_info *info, 172static u64 add_io_space(struct pci_root_info *info,
169 struct acpi_resource_address64 *addr) 173 struct acpi_resource_address64 *addr)
170{ 174{
175 struct iospace_resource *iospace;
171 struct resource *resource; 176 struct resource *resource;
172 char *name; 177 char *name;
173 unsigned long base, min, max, base_port; 178 unsigned long base, min, max, base_port;
174 unsigned int sparse = 0, space_nr, len; 179 unsigned int sparse = 0, space_nr, len;
175 180
176 resource = kzalloc(sizeof(*resource), GFP_KERNEL); 181 len = strlen(info->name) + 32;
177 if (!resource) { 182 iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
178 printk(KERN_ERR "PCI: No memory for %s I/O port space\n", 183 if (!iospace) {
179 info->name); 184 dev_err(&info->bridge->dev,
185 "PCI: No memory for %s I/O port space\n",
186 info->name);
180 goto out; 187 goto out;
181 } 188 }
182 189
183 len = strlen(info->name) + 32; 190 name = (char *)(iospace + 1);
184 name = kzalloc(len, GFP_KERNEL);
185 if (!name) {
186 printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
187 info->name);
188 goto free_resource;
189 }
190 191
191 min = addr->minimum; 192 min = addr->minimum;
192 max = min + addr->address_length - 1; 193 max = min + addr->address_length - 1;
@@ -195,7 +196,7 @@ static u64 add_io_space(struct pci_root_info *info,
195 196
196 space_nr = new_space(addr->translation_offset, sparse); 197 space_nr = new_space(addr->translation_offset, sparse);
197 if (space_nr == ~0) 198 if (space_nr == ~0)
198 goto free_name; 199 goto free_resource;
199 200
200 base = __pa(io_space[space_nr].mmio_base); 201 base = __pa(io_space[space_nr].mmio_base);
201 base_port = IO_SPACE_BASE(space_nr); 202 base_port = IO_SPACE_BASE(space_nr);
@@ -210,18 +211,23 @@ static u64 add_io_space(struct pci_root_info *info,
210 if (space_nr == 0) 211 if (space_nr == 0)
211 sparse = 1; 212 sparse = 1;
212 213
214 resource = &iospace->res;
213 resource->name = name; 215 resource->name = name;
214 resource->flags = IORESOURCE_MEM; 216 resource->flags = IORESOURCE_MEM;
215 resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min); 217 resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
216 resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max); 218 resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
217 insert_resource(&iomem_resource, resource); 219 if (insert_resource(&iomem_resource, resource)) {
220 dev_err(&info->bridge->dev,
221 "can't allocate host bridge io space resource %pR\n",
222 resource);
223 goto free_resource;
224 }
218 225
226 list_add_tail(&iospace->list, &info->io_resources);
219 return base_port; 227 return base_port;
220 228
221free_name:
222 kfree(name);
223free_resource: 229free_resource:
224 kfree(resource); 230 kfree(iospace);
225out: 231out:
226 return ~0; 232 return ~0;
227} 233}
@@ -265,7 +271,7 @@ static acpi_status count_window(struct acpi_resource *resource, void *data)
265static acpi_status add_window(struct acpi_resource *res, void *data) 271static acpi_status add_window(struct acpi_resource *res, void *data)
266{ 272{
267 struct pci_root_info *info = data; 273 struct pci_root_info *info = data;
268 struct pci_window *window; 274 struct resource *resource;
269 struct acpi_resource_address64 addr; 275 struct acpi_resource_address64 addr;
270 acpi_status status; 276 acpi_status status;
271 unsigned long flags, offset = 0; 277 unsigned long flags, offset = 0;
@@ -289,55 +295,146 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
289 } else 295 } else
290 return AE_OK; 296 return AE_OK;
291 297
292 window = &info->controller->window[info->controller->windows++]; 298 resource = &info->res[info->res_num];
293 window->resource.name = info->name; 299 resource->name = info->name;
294 window->resource.flags = flags; 300 resource->flags = flags;
295 window->resource.start = addr.minimum + offset; 301 resource->start = addr.minimum + offset;
296 window->resource.end = window->resource.start + addr.address_length - 1; 302 resource->end = resource->start + addr.address_length - 1;
297 window->offset = offset; 303 info->res_offset[info->res_num] = offset;
298 304
299 if (insert_resource(root, &window->resource)) { 305 if (insert_resource(root, resource)) {
300 dev_err(&info->bridge->dev, 306 dev_err(&info->bridge->dev,
301 "can't allocate host bridge window %pR\n", 307 "can't allocate host bridge window %pR\n",
302 &window->resource); 308 resource);
303 } else { 309 } else {
304 if (offset) 310 if (offset)
305 dev_info(&info->bridge->dev, "host bridge window %pR " 311 dev_info(&info->bridge->dev, "host bridge window %pR "
306 "(PCI address [%#llx-%#llx])\n", 312 "(PCI address [%#llx-%#llx])\n",
307 &window->resource, 313 resource,
308 window->resource.start - offset, 314 resource->start - offset,
309 window->resource.end - offset); 315 resource->end - offset);
310 else 316 else
311 dev_info(&info->bridge->dev, 317 dev_info(&info->bridge->dev,
312 "host bridge window %pR\n", 318 "host bridge window %pR\n", resource);
313 &window->resource);
314 } 319 }
315
316 /* HP's firmware has a hack to work around a Windows bug. 320 /* HP's firmware has a hack to work around a Windows bug.
317 * Ignore these tiny memory ranges */ 321 * Ignore these tiny memory ranges */
318 if (!((window->resource.flags & IORESOURCE_MEM) && 322 if (!((resource->flags & IORESOURCE_MEM) &&
319 (window->resource.end - window->resource.start < 16))) 323 (resource->end - resource->start < 16)))
320 pci_add_resource_offset(&info->resources, &window->resource, 324 pci_add_resource_offset(&info->resources, resource,
321 window->offset); 325 info->res_offset[info->res_num]);
322 326
327 info->res_num++;
323 return AE_OK; 328 return AE_OK;
324} 329}
325 330
331static void free_pci_root_info_res(struct pci_root_info *info)
332{
333 struct iospace_resource *iospace, *tmp;
334
335 list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)
336 kfree(iospace);
337
338 kfree(info->name);
339 kfree(info->res);
340 info->res = NULL;
341 kfree(info->res_offset);
342 info->res_offset = NULL;
343 info->res_num = 0;
344 kfree(info->controller);
345 info->controller = NULL;
346}
347
348static void __release_pci_root_info(struct pci_root_info *info)
349{
350 int i;
351 struct resource *res;
352 struct iospace_resource *iospace;
353
354 list_for_each_entry(iospace, &info->io_resources, list)
355 release_resource(&iospace->res);
356
357 for (i = 0; i < info->res_num; i++) {
358 res = &info->res[i];
359
360 if (!res->parent)
361 continue;
362
363 if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
364 continue;
365
366 release_resource(res);
367 }
368
369 free_pci_root_info_res(info);
370 kfree(info);
371}
372
373static void release_pci_root_info(struct pci_host_bridge *bridge)
374{
375 struct pci_root_info *info = bridge->release_data;
376
377 __release_pci_root_info(info);
378}
379
380static int
381probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
382 int busnum, int domain)
383{
384 char *name;
385
386 name = kmalloc(16, GFP_KERNEL);
387 if (!name)
388 return -ENOMEM;
389
390 sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
391 info->bridge = device;
392 info->name = name;
393
394 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
395 &info->res_num);
396 if (info->res_num) {
397 info->res =
398 kzalloc_node(sizeof(*info->res) * info->res_num,
399 GFP_KERNEL, info->controller->node);
400 if (!info->res) {
401 kfree(name);
402 return -ENOMEM;
403 }
404
405 info->res_offset =
406 kzalloc_node(sizeof(*info->res_offset) * info->res_num,
407 GFP_KERNEL, info->controller->node);
408 if (!info->res_offset) {
409 kfree(name);
410 kfree(info->res);
411 info->res = NULL;
412 return -ENOMEM;
413 }
414
415 info->res_num = 0;
416 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
417 add_window, info);
418 } else
419 kfree(name);
420
421 return 0;
422}
423
326struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) 424struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
327{ 425{
328 struct acpi_device *device = root->device; 426 struct acpi_device *device = root->device;
329 int domain = root->segment; 427 int domain = root->segment;
330 int bus = root->secondary.start; 428 int bus = root->secondary.start;
331 struct pci_controller *controller; 429 struct pci_controller *controller;
332 unsigned int windows = 0; 430 struct pci_root_info *info = NULL;
333 struct pci_root_info info; 431 int busnum = root->secondary.start;
334 struct pci_bus *pbus; 432 struct pci_bus *pbus;
335 char *name; 433 int pxm, ret;
336 int pxm;
337 434
338 controller = alloc_pci_controller(domain); 435 controller = alloc_pci_controller(domain);
339 if (!controller) 436 if (!controller)
340 goto out1; 437 return NULL;
341 438
342 controller->acpi_handle = device->handle; 439 controller->acpi_handle = device->handle;
343 440
@@ -347,29 +444,27 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
347 controller->node = pxm_to_node(pxm); 444 controller->node = pxm_to_node(pxm);
348#endif 445#endif
349 446
350 INIT_LIST_HEAD(&info.resources); 447 info = kzalloc(sizeof(*info), GFP_KERNEL);
351 /* insert busn resource at first */ 448 if (!info) {
352 pci_add_resource(&info.resources, &root->secondary); 449 dev_err(&device->dev,
353 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window, 450 "pci_bus %04x:%02x: ignored (out of memory)\n",
354 &windows); 451 domain, busnum);
355 if (windows) { 452 kfree(controller);
356 controller->window = 453 return NULL;
357 kzalloc_node(sizeof(*controller->window) * windows,
358 GFP_KERNEL, controller->node);
359 if (!controller->window)
360 goto out2;
361
362 name = kmalloc(16, GFP_KERNEL);
363 if (!name)
364 goto out3;
365
366 sprintf(name, "PCI Bus %04x:%02x", domain, bus);
367 info.bridge = device;
368 info.controller = controller;
369 info.name = name;
370 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
371 add_window, &info);
372 } 454 }
455
456 info->controller = controller;
457 INIT_LIST_HEAD(&info->io_resources);
458 INIT_LIST_HEAD(&info->resources);
459
460 ret = probe_pci_root_info(info, device, busnum, domain);
461 if (ret) {
462 kfree(info->controller);
463 kfree(info);
464 return NULL;
465 }
466 /* insert busn resource at first */
467 pci_add_resource(&info->resources, &root->secondary);
373 /* 468 /*
374 * See arch/x86/pci/acpi.c. 469 * See arch/x86/pci/acpi.c.
375 * The desired pci bus might already be scanned in a quirk. We 470 * The desired pci bus might already be scanned in a quirk. We
@@ -377,21 +472,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
377 * such quirk. So we just ignore the case now. 472 * such quirk. So we just ignore the case now.
378 */ 473 */
379 pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller, 474 pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
380 &info.resources); 475 &info->resources);
381 if (!pbus) { 476 if (!pbus) {
382 pci_free_resource_list(&info.resources); 477 pci_free_resource_list(&info->resources);
478 __release_pci_root_info(info);
383 return NULL; 479 return NULL;
384 } 480 }
385 481
482 pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
483 release_pci_root_info, info);
386 pci_scan_child_bus(pbus); 484 pci_scan_child_bus(pbus);
387 return pbus; 485 return pbus;
388
389out3:
390 kfree(controller->window);
391out2:
392 kfree(controller);
393out1:
394 return NULL;
395} 486}
396 487
397int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) 488int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
@@ -691,7 +782,7 @@ static void __init set_pci_dfl_cacheline_size(void)
691 782
692 status = ia64_pal_cache_summary(&levels, &unique_caches); 783 status = ia64_pal_cache_summary(&levels, &unique_caches);
693 if (status != 0) { 784 if (status != 0) {
694 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " 785 pr_err("%s: ia64_pal_cache_summary() failed "
695 "(status=%ld)\n", __func__, status); 786 "(status=%ld)\n", __func__, status);
696 return; 787 return;
697 } 788 }
@@ -699,7 +790,7 @@ static void __init set_pci_dfl_cacheline_size(void)
699 status = ia64_pal_cache_config_info(levels - 1, 790 status = ia64_pal_cache_config_info(levels - 1,
700 /* cache_type (data_or_unified)= */ 2, &cci); 791 /* cache_type (data_or_unified)= */ 2, &cci);
701 if (status != 0) { 792 if (status != 0) {
702 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " 793 pr_err("%s: ia64_pal_cache_config_info() failed "
703 "(status=%ld)\n", __func__, status); 794 "(status=%ld)\n", __func__, status);
704 return; 795 return;
705 } 796 }
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 238e2c511d94..0b5ce82d203d 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -118,76 +118,26 @@ static void __init sn_fixup_ionodes(void)
118} 118}
119 119
120/* 120/*
121 * sn_pci_legacy_window_fixup - Create PCI controller windows for 121 * sn_pci_legacy_window_fixup - Setup PCI resources for
122 * legacy IO and MEM space. This needs to 122 * legacy IO and MEM space. This needs to
123 * be done here, as the PROM does not have 123 * be done here, as the PROM does not have
124 * ACPI support defining the root buses 124 * ACPI support defining the root buses
125 * and their resources (_CRS), 125 * and their resources (_CRS),
126 */ 126 */
127static void 127static void
128sn_legacy_pci_window_fixup(struct pci_controller *controller, 128sn_legacy_pci_window_fixup(struct resource *res,
129 u64 legacy_io, u64 legacy_mem) 129 u64 legacy_io, u64 legacy_mem)
130{ 130{
131 controller->window = kcalloc(2, sizeof(struct pci_window), 131 res[0].name = "legacy_io";
132 GFP_KERNEL); 132 res[0].flags = IORESOURCE_IO;
133 BUG_ON(controller->window == NULL); 133 res[0].start = legacy_io;
134 controller->window[0].offset = legacy_io; 134 res[0].end = res[0].start + 0xffff;
135 controller->window[0].resource.name = "legacy_io"; 135 res[0].parent = &ioport_resource;
136 controller->window[0].resource.flags = IORESOURCE_IO; 136 res[1].name = "legacy_mem";
137 controller->window[0].resource.start = legacy_io; 137 res[1].flags = IORESOURCE_MEM;
138 controller->window[0].resource.end = 138 res[1].start = legacy_mem;
139 controller->window[0].resource.start + 0xffff; 139 res[1].end = res[1].start + (1024 * 1024) - 1;
140 controller->window[0].resource.parent = &ioport_resource; 140 res[1].parent = &iomem_resource;
141 controller->window[1].offset = legacy_mem;
142 controller->window[1].resource.name = "legacy_mem";
143 controller->window[1].resource.flags = IORESOURCE_MEM;
144 controller->window[1].resource.start = legacy_mem;
145 controller->window[1].resource.end =
146 controller->window[1].resource.start + (1024 * 1024) - 1;
147 controller->window[1].resource.parent = &iomem_resource;
148 controller->windows = 2;
149}
150
151/*
152 * sn_pci_window_fixup() - Create a pci_window for each device resource.
153 * It will setup pci_windows for use by
154 * pcibios_bus_to_resource(), pcibios_resource_to_bus(),
155 * etc.
156 */
157static void
158sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
159 s64 * pci_addrs)
160{
161 struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
162 unsigned int i;
163 unsigned int idx;
164 unsigned int new_count;
165 struct pci_window *new_window;
166
167 if (count == 0)
168 return;
169 idx = controller->windows;
170 new_count = controller->windows + count;
171 new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
172 BUG_ON(new_window == NULL);
173 if (controller->window) {
174 memcpy(new_window, controller->window,
175 sizeof(struct pci_window) * controller->windows);
176 kfree(controller->window);
177 }
178
179 /* Setup a pci_window for each device resource. */
180 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
181 if (pci_addrs[i] == -1)
182 continue;
183
184 new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
185 new_window[idx].resource = dev->resource[i];
186 idx++;
187 }
188
189 controller->windows = new_count;
190 controller->window = new_window;
191} 141}
192 142
193/* 143/*
@@ -199,9 +149,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
199void 149void
200sn_io_slot_fixup(struct pci_dev *dev) 150sn_io_slot_fixup(struct pci_dev *dev)
201{ 151{
202 unsigned int count = 0;
203 int idx; 152 int idx;
204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
205 unsigned long addr, end, size, start; 153 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info; 154 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info; 155 struct sn_irq_info *sn_irq_info;
@@ -229,7 +177,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
229 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 177 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
230 178
231 if (!pcidev_info->pdi_pio_mapped_addr[idx]) { 179 if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
232 pci_addrs[idx] = -1;
233 continue; 180 continue;
234 } 181 }
235 182
@@ -237,11 +184,8 @@ sn_io_slot_fixup(struct pci_dev *dev)
237 end = dev->resource[idx].end; 184 end = dev->resource[idx].end;
238 size = end - start; 185 size = end - start;
239 if (size == 0) { 186 if (size == 0) {
240 pci_addrs[idx] = -1;
241 continue; 187 continue;
242 } 188 }
243 pci_addrs[idx] = start;
244 count++;
245 addr = pcidev_info->pdi_pio_mapped_addr[idx]; 189 addr = pcidev_info->pdi_pio_mapped_addr[idx];
246 addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET; 190 addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
247 dev->resource[idx].start = addr; 191 dev->resource[idx].start = addr;
@@ -276,11 +220,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
276 IORESOURCE_ROM_BIOS_COPY; 220 IORESOURCE_ROM_BIOS_COPY;
277 } 221 }
278 } 222 }
279 /* Create a pci_window in the pci_controller struct for
280 * each device resource.
281 */
282 if (count > 0)
283 sn_pci_window_fixup(dev, count, pci_addrs);
284 223
285 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info); 224 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
286} 225}
@@ -297,8 +236,8 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
297 s64 status = 0; 236 s64 status = 0;
298 struct pci_controller *controller; 237 struct pci_controller *controller;
299 struct pcibus_bussoft *prom_bussoft_ptr; 238 struct pcibus_bussoft *prom_bussoft_ptr;
239 struct resource *res;
300 LIST_HEAD(resources); 240 LIST_HEAD(resources);
301 int i;
302 241
303 status = sal_get_pcibus_info((u64) segment, (u64) busnum, 242 status = sal_get_pcibus_info((u64) segment, (u64) busnum,
304 (u64) ia64_tpa(&prom_bussoft_ptr)); 243 (u64) ia64_tpa(&prom_bussoft_ptr));
@@ -310,32 +249,29 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
310 BUG_ON(!controller); 249 BUG_ON(!controller);
311 controller->segment = segment; 250 controller->segment = segment;
312 251
252 res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
253 BUG_ON(!res);
254
313 /* 255 /*
314 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup(). 256 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
315 * (platform_data will be overwritten later in sn_common_bus_fixup()) 257 * (platform_data will be overwritten later in sn_common_bus_fixup())
316 */ 258 */
317 controller->platform_data = prom_bussoft_ptr; 259 controller->platform_data = prom_bussoft_ptr;
318 260
319 sn_legacy_pci_window_fixup(controller, 261 sn_legacy_pci_window_fixup(res,
320 prom_bussoft_ptr->bs_legacy_io, 262 prom_bussoft_ptr->bs_legacy_io,
321 prom_bussoft_ptr->bs_legacy_mem); 263 prom_bussoft_ptr->bs_legacy_mem);
322 for (i = 0; i < controller->windows; i++) 264 pci_add_resource_offset(&resources, &res[0],
323 pci_add_resource_offset(&resources, 265 prom_bussoft_ptr->bs_legacy_io);
324 &controller->window[i].resource, 266 pci_add_resource_offset(&resources, &res[1],
325 controller->window[i].offset); 267 prom_bussoft_ptr->bs_legacy_mem);
268
326 bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller, 269 bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
327 &resources); 270 &resources);
328 if (bus == NULL) 271 if (bus == NULL) {
329 goto error_return; /* error, or bus already scanned */ 272 kfree(res);
330 273 kfree(controller);
331 bus->sysdata = controller; 274 }
332
333 return;
334
335error_return:
336
337 kfree(controller);
338 return;
339} 275}
340 276
341/* 277/*
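
[Editor's note] The rewrite above replaces the per-device pci_window bookkeeping with two preallocated legacy I/O and memory windows handed straight to the PCI core. A minimal sketch of the resulting pattern, assuming the 3.11-era pci_add_resource_offset()/pci_scan_root_bus() API (everything other than those two calls is illustrative):

	/* Sketch, not the verbatim driver code: register both windows with
	 * their CPU-to-bus offsets, then scan; on failure the resources were
	 * never handed over and the caller must free them. */
	static void example_scan_root(int busnum, struct pci_ops *ops,
				      void *sysdata, struct resource *res,
				      resource_size_t io_off,
				      resource_size_t mem_off)
	{
		LIST_HEAD(resources);

		pci_add_resource_offset(&resources, &res[0], io_off);
		pci_add_resource_offset(&resources, &res[1], mem_off);

		if (pci_scan_root_bus(NULL, busnum, ops, sysdata,
				      &resources) == NULL)
			kfree(res);	/* error, or bus already scanned */
	}
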
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f82e7b462b7b..53b01b8e2f19 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -192,7 +192,7 @@ void __init early_sn_setup(void)
192} 192}
193 193
194extern int platform_intr_list[]; 194extern int platform_intr_list[];
195static int __cpuinitdata shub_1_1_found; 195static int shub_1_1_found;
196 196
197/* 197/*
198 * sn_check_for_wars 198 * sn_check_for_wars
@@ -200,7 +200,7 @@ static int __cpuinitdata shub_1_1_found;
200 * Set flag for enabling shub specific wars 200 * Set flag for enabling shub specific wars
201 */ 201 */
202 202
203static inline int __cpuinit is_shub_1_1(int nasid) 203static inline int is_shub_1_1(int nasid)
204{ 204{
205 unsigned long id; 205 unsigned long id;
206 int rev; 206 int rev;
@@ -212,7 +212,7 @@ static inline int __cpuinit is_shub_1_1(int nasid)
212 return rev <= 2; 212 return rev <= 2;
213} 213}
214 214
215static void __cpuinit sn_check_for_wars(void) 215static void sn_check_for_wars(void)
216{ 216{
217 int cnode; 217 int cnode;
218 218
@@ -558,7 +558,7 @@ static void __init sn_init_pdas(char **cmdline_p)
558 * Also sets up a few fields in the nodepda. Also known as 558 * Also sets up a few fields in the nodepda. Also known as
559 * platform_cpu_init() by the ia64 machvec code. 559 * platform_cpu_init() by the ia64 machvec code.
560 */ 560 */
561void __cpuinit sn_cpu_init(void) 561void sn_cpu_init(void)
562{ 562{
563 int cpuid; 563 int cpuid;
564 int cpuphyid; 564 int cpuphyid;
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
index 52172eee8591..fab62528a80b 100644
--- a/arch/ia64/xen/hypervisor.c
+++ b/arch/ia64/xen/hypervisor.c
@@ -74,7 +74,7 @@ void __init xen_setup_vcpu_info_placement(void)
74 xen_vcpu_setup(cpu); 74 xen_vcpu_setup(cpu);
75} 75}
76 76
77void __cpuinit 77void
78xen_cpu_init(void) 78xen_cpu_init(void)
79{ 79{
80 xen_smp_intr_init(); 80 xen_smp_intr_init();
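
[Editor's note] These hunks belong to the tree-wide removal of the __cpuinit annotations. Before the removal, the markers tagged code and data into .cpuinit.* sections, roughly as below (a from-memory approximation; the exact attribute lists varied across kernel versions), and the linker script discarded those sections when CPU hotplug was configured out:

	/* Approximate pre-removal definitions from <linux/init.h> */
	#define __cpuinit	__section(.cpuinit.text) __cold notrace
	#define __cpuinitdata	__section(.cpuinit.data)

With the markers gone, the functions simply live in .text and the distinction disappears.
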
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index fa12283d58fc..229682721240 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -11,9 +11,8 @@ config BOOTPARAM_STRING
11 depends on BOOTPARAM 11 depends on BOOTPARAM
12 12
13config EARLY_PRINTK 13config EARLY_PRINTK
14 bool "Early printk" if EMBEDDED 14 bool "Early printk"
15 depends on MVME16x || MAC 15 depends on MVME16x || MAC
16 default y
17 help 16 help
18 Write kernel log output directly to a serial port. 17 Write kernel log output directly to a serial port.
19 18
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 0f795d8e65fa..b17a8837f0e1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -214,6 +214,7 @@ CONFIG_DEVTMPFS=y
214# CONFIG_FW_LOADER_USER_HELPER is not set 214# CONFIG_FW_LOADER_USER_HELPER is not set
215CONFIG_CONNECTOR=m 215CONFIG_CONNECTOR=m
216CONFIG_PARPORT=m 216CONFIG_PARPORT=m
217CONFIG_PARPORT_PC=m
217CONFIG_PARPORT_AMIGA=m 218CONFIG_PARPORT_AMIGA=m
218CONFIG_PARPORT_MFC3=m 219CONFIG_PARPORT_MFC3=m
219CONFIG_PARPORT_ATARI=m 220CONFIG_PARPORT_ATARI=m
@@ -325,6 +326,7 @@ CONFIG_ZORRO8390=y
325# CONFIG_NET_VENDOR_SEEQ is not set 326# CONFIG_NET_VENDOR_SEEQ is not set
326# CONFIG_NET_VENDOR_STMICRO is not set 327# CONFIG_NET_VENDOR_STMICRO is not set
327# CONFIG_NET_VENDOR_WIZNET is not set 328# CONFIG_NET_VENDOR_WIZNET is not set
329CONFIG_PLIP=m
328CONFIG_PPP=m 330CONFIG_PPP=m
329CONFIG_PPP_BSDCOMP=m 331CONFIG_PPP_BSDCOMP=m
330CONFIG_PPP_DEFLATE=m 332CONFIG_PPP_DEFLATE=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 8982370e8b42..be1496ed9b66 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -199,6 +199,9 @@ CONFIG_DEVTMPFS=y
199# CONFIG_FIRMWARE_IN_KERNEL is not set 199# CONFIG_FIRMWARE_IN_KERNEL is not set
200# CONFIG_FW_LOADER_USER_HELPER is not set 200# CONFIG_FW_LOADER_USER_HELPER is not set
201CONFIG_CONNECTOR=m 201CONFIG_CONNECTOR=m
202CONFIG_PARPORT=m
203CONFIG_PARPORT_PC=m
204CONFIG_PARPORT_1284=y
202CONFIG_BLK_DEV_LOOP=y 205CONFIG_BLK_DEV_LOOP=y
203CONFIG_BLK_DEV_CRYPTOLOOP=m 206CONFIG_BLK_DEV_CRYPTOLOOP=m
204CONFIG_BLK_DEV_DRBD=m 207CONFIG_BLK_DEV_DRBD=m
@@ -267,6 +270,7 @@ CONFIG_NE2000=m
267# CONFIG_NET_VENDOR_SMSC is not set 270# CONFIG_NET_VENDOR_SMSC is not set
268# CONFIG_NET_VENDOR_STMICRO is not set 271# CONFIG_NET_VENDOR_STMICRO is not set
269# CONFIG_NET_VENDOR_WIZNET is not set 272# CONFIG_NET_VENDOR_WIZNET is not set
273CONFIG_PLIP=m
270CONFIG_PPP=m 274CONFIG_PPP=m
271CONFIG_PPP_BSDCOMP=m 275CONFIG_PPP_BSDCOMP=m
272CONFIG_PPP_DEFLATE=m 276CONFIG_PPP_DEFLATE=m
@@ -292,9 +296,11 @@ CONFIG_SERIO_Q40KBD=y
292CONFIG_VT_HW_CONSOLE_BINDING=y 296CONFIG_VT_HW_CONSOLE_BINDING=y
293# CONFIG_LEGACY_PTYS is not set 297# CONFIG_LEGACY_PTYS is not set
294# CONFIG_DEVKMEM is not set 298# CONFIG_DEVKMEM is not set
299CONFIG_PRINTER=m
295# CONFIG_HW_RANDOM is not set 300# CONFIG_HW_RANDOM is not set
296CONFIG_NTP_PPS=y 301CONFIG_NTP_PPS=y
297CONFIG_PPS_CLIENT_LDISC=m 302CONFIG_PPS_CLIENT_LDISC=m
303CONFIG_PPS_CLIENT_PARPORT=m
298CONFIG_PTP_1588_CLOCK=m 304CONFIG_PTP_1588_CLOCK=m
299# CONFIG_HWMON is not set 305# CONFIG_HWMON is not set
300CONFIG_FB=y 306CONFIG_FB=y
diff --git a/arch/m68k/include/asm/parport.h b/arch/m68k/include/asm/parport.h
index 5ea75e6a7399..c85cece778e8 100644
--- a/arch/m68k/include/asm/parport.h
+++ b/arch/m68k/include/asm/parport.h
@@ -11,6 +11,8 @@
11#ifndef _ASM_M68K_PARPORT_H 11#ifndef _ASM_M68K_PARPORT_H
12#define _ASM_M68K_PARPORT_H 1 12#define _ASM_M68K_PARPORT_H 1
13 13
14#undef insl
15#undef outsl
14#define insl(port,buf,len) isa_insb(port,buf,(len)<<2) 16#define insl(port,buf,len) isa_insb(port,buf,(len)<<2)
15#define outsl(port,buf,len) isa_outsb(port,buf,(len)<<2) 17#define outsl(port,buf,len) isa_outsb(port,buf,(len)<<2)
16 18
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 9aea9f11fa25..c30c03d98581 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -4,20 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6 6
7static inline char *__kernel_strcpy(char *dest, const char *src)
8{
9 char *xdest = dest;
10
11 asm volatile ("\n"
12 "1: move.b (%1)+,(%0)+\n"
13 " jne 1b"
14 : "+a" (dest), "+a" (src)
15 : : "memory");
16 return xdest;
17}
18
19#ifndef __IN_STRING_C
20
21#define __HAVE_ARCH_STRNLEN 7#define __HAVE_ARCH_STRNLEN
22static inline size_t strnlen(const char *s, size_t count) 8static inline size_t strnlen(const char *s, size_t count)
23{ 9{
@@ -34,16 +20,6 @@ static inline size_t strnlen(const char *s, size_t count)
34 return sc - s; 20 return sc - s;
35} 21}
36 22
37#define __HAVE_ARCH_STRCPY
38#if __GNUC__ >= 4
39#define strcpy(d, s) (__builtin_constant_p(s) && \
40 __builtin_strlen(s) <= 32 ? \
41 __builtin_strcpy(d, s) : \
42 __kernel_strcpy(d, s))
43#else
44#define strcpy(d, s) __kernel_strcpy(d, s)
45#endif
46
47#define __HAVE_ARCH_STRNCPY 23#define __HAVE_ARCH_STRNCPY
48static inline char *strncpy(char *dest, const char *src, size_t n) 24static inline char *strncpy(char *dest, const char *src, size_t n)
49{ 25{
@@ -61,12 +37,6 @@ static inline char *strncpy(char *dest, const char *src, size_t n)
61 return xdest; 37 return xdest;
62} 38}
63 39
64#define __HAVE_ARCH_STRCAT
65#define strcat(d, s) ({ \
66 char *__d = (d); \
67 strcpy(__d + strlen(__d), (s)); \
68})
69
70#ifndef CONFIG_COLDFIRE 40#ifndef CONFIG_COLDFIRE
71#define __HAVE_ARCH_STRCMP 41#define __HAVE_ARCH_STRCMP
72static inline int strcmp(const char *cs, const char *ct) 42static inline int strcmp(const char *cs, const char *ct)
@@ -100,6 +70,4 @@ extern void *memset(void *, int, __kernel_size_t);
100extern void *memcpy(void *, const void *, __kernel_size_t); 70extern void *memcpy(void *, const void *, __kernel_size_t);
101#define memcpy(d, s, n) __builtin_memcpy(d, s, n) 71#define memcpy(d, s, n) __builtin_memcpy(d, s, n)
102 72
103#endif
104
105#endif /* _M68K_STRING_H_ */ 73#endif /* _M68K_STRING_H_ */
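
[Editor's note] With __HAVE_ARCH_STRCPY and __HAVE_ARCH_STRCAT gone, m68k falls back to the generic implementations in lib/string.c, which are essentially the textbook byte loops:

	char *strcpy(char *dest, const char *src)
	{
		char *tmp = dest;

		while ((*dest++ = *src++) != '\0')
			/* nothing */;
		return tmp;
	}

	char *strcat(char *dest, const char *src)
	{
		char *tmp = dest;

		while (*dest)
			dest++;
		while ((*dest++ = *src++) != '\0')
			;
		return tmp;
	}
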
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 472c891a4aee..15901db435b9 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -90,7 +90,7 @@ asm volatile ("\n" \
90 __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ 90 __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
91 break; \ 91 break; \
92 case 2: \ 92 case 2: \
93 __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \ 93 __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
94 break; \ 94 break; \
95 case 4: \ 95 case 4: \
96 __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ 96 __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
@@ -158,7 +158,7 @@ asm volatile ("\n" \
158 __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ 158 __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
159 break; \ 159 break; \
160 case 2: \ 160 case 2: \
161 __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \ 161 __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \
162 break; \ 162 break; \
163 case 4: \ 163 case 4: \
164 __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ 164 __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
@@ -245,7 +245,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
245 __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); 245 __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
246 break; 246 break;
247 case 2: 247 case 2:
248 __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2); 248 __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
249 break; 249 break;
250 case 3: 250 case 3:
251 __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); 251 __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
@@ -326,7 +326,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
326 __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); 326 __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
327 break; 327 break;
328 case 2: 328 case 2:
329 __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2); 329 __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
330 break; 330 break;
331 case 3: 331 case 3:
332 __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); 332 __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
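
[Editor's note] The constraint change relaxes the 2-byte cases from "d" (data register only) to "r" (any general register): on m68k, word and long moves accept an address register as the register operand, while byte-sized operations do not, which is presumably why the 1-byte cases keep "d". A hedged illustration of the difference (not the kernel macros themselves):

	/* Word store: "r" lets the compiler pick Dn or An. */
	static inline void store16(unsigned short val, unsigned short *p)
	{
		asm volatile ("move.w %1,%0" : "=m" (*p) : "r" (val));
	}

	/* Byte store: address registers are not valid here, hence "d". */
	static inline void store8(unsigned char val, unsigned char *p)
	{
		asm volatile ("move.b %1,%0" : "=m" (*p) : "d" (val));
	}
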
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index a972b00cd77d..8b7b22846366 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -77,7 +77,7 @@ int main(void)
77 DEFINE(BIR_SIZE, offsetof(struct bi_record, size)); 77 DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
78 DEFINE(BIR_DATA, offsetof(struct bi_record, data)); 78 DEFINE(BIR_DATA, offsetof(struct bi_record, data));
79 79
80 /* offsets into font_desc (drivers/video/console/font.h) */ 80 /* offsets into the font_desc struct */
81 DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx)); 81 DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
82 DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name)); 82 DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
83 DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width)); 83 DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 6b32b64bac35..4d7da384eea0 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -101,7 +101,7 @@ void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
101 BUG_ON(IRQ_USER + cnt > NR_IRQS); 101 BUG_ON(IRQ_USER + cnt > NR_IRQS);
102 m68k_first_user_vec = vec; 102 m68k_first_user_vec = vec;
103 for (i = 0; i < cnt; i++) 103 for (i = 0; i < cnt; i++)
104 irq_set_chip(IRQ_USER + i, &user_irq_chip); 104 irq_set_chip_and_handler(IRQ_USER + i, &user_irq_chip, handle_simple_irq);
105 *user_irqvec_fixup = vec - IRQ_USER; 105 *user_irqvec_fixup = vec - IRQ_USER;
106 flush_icache(); 106 flush_icache();
107} 107}
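
[Editor's note] irq_set_chip() alone leaves the descriptor's flow handler at its handle_bad_irq default, so the user vectors would be rejected at dispatch time; installing handle_simple_irq fixes that. The combined helper is shorthand for setting both pieces:

	/* irq_set_chip_and_handler(irq, chip, handle) is equivalent to: */
	irq_set_chip(IRQ_USER + i, &user_irq_chip);
	irq_set_handler(IRQ_USER + i, handle_simple_irq);
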
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index a9d782d34276..fcd8eb1d7c7d 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -6,7 +6,7 @@
6lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ 6lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
7 memcpy.o memset.o memmove.o 7 memcpy.o memset.o memmove.o
8 8
9lib-$(CONFIG_MMU) += string.o uaccess.o 9lib-$(CONFIG_MMU) += uaccess.o
10lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o 10lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o
11lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o 11lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o
12 12
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
deleted file mode 100644
index 4d61fa8a112c..000000000000
--- a/arch/m68k/lib/string.c
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#define __IN_STRING_C
8
9#include <linux/module.h>
10#include <linux/string.h>
11
12char *strcpy(char *dest, const char *src)
13{
14 return __kernel_strcpy(dest, src);
15}
16EXPORT_SYMBOL(strcpy);
17
18char *strcat(char *dest, const char *src)
19{
20 return __kernel_strcpy(dest + strlen(dest), src);
21}
22EXPORT_SYMBOL(strcat);
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 5e97f2ee7c11..35d1442dee89 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -52,7 +52,7 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
52 " .long 3b,30b\n" 52 " .long 3b,30b\n"
53 " .long 5b,50b\n" 53 " .long 5b,50b\n"
54 " .previous" 54 " .previous"
55 : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp) 55 : "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
56 : "0" (n / 4), "d" (n & 3)); 56 : "0" (n / 4), "d" (n & 3));
57 57
58 return res; 58 return res;
@@ -96,7 +96,7 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
96 " .long 7b,50b\n" 96 " .long 7b,50b\n"
97 " .long 8b,50b\n" 97 " .long 8b,50b\n"
98 " .previous" 98 " .previous"
99 : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp) 99 : "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
100 : "0" (n / 4), "d" (n & 3)); 100 : "0" (n / 4), "d" (n & 3));
101 101
102 return res; 102 return res;
@@ -141,7 +141,7 @@ unsigned long __clear_user(void __user *to, unsigned long n)
141 " .long 7b,40b\n" 141 " .long 7b,40b\n"
142 " .previous" 142 " .previous"
143 : "=d" (res), "+a" (to) 143 : "=d" (res), "+a" (to)
144 : "r" (0), "0" (n / 4), "d" (n & 3)); 144 : "d" (0), "0" (n / 4), "d" (n & 3));
145 145
146 return res; 146 return res;
147} 147}
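
[Editor's note] Here the constraints move the other way: the temporaries (and the zero source in __clear_user) feed byte- and word-sized moves inside the asm bodies, where address registers are not universally legal, so the wider "r" class is pinned down to "d". A hypothetical reduction of the hazard:

	/* Illustration only: if the temporary were allocated to an address
	 * register, the byte-sized tail move could be invalid. */
	static inline void copy_tail_byte(void *to, const void *from)
	{
		unsigned long tmp;

		asm volatile (
			"	moves.b	(%1)+,%2\n"	/* wants Dn */
			"	move.b	%2,(%0)+\n"
			: "+a" (to), "+a" (from), "=&d" (tmp)
			: : "memory");
	}
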
diff --git a/arch/m68k/math-emu/fp_arith.c b/arch/m68k/math-emu/fp_arith.c
index 08f286db3c5a..239eb1990184 100644
--- a/arch/m68k/math-emu/fp_arith.c
+++ b/arch/m68k/math-emu/fp_arith.c
@@ -519,7 +519,7 @@ static void fp_roundint(struct fp_ext *dest, int mode)
519 return; 519 return;
520 break; 520 break;
521 case 0x401e: 521 case 0x401e:
522 if (!(oldmant.m32[1] >= 0)) 522 if (oldmant.m32[1] & 0x80000000)
523 return; 523 return;
524 if (oldmant.m32[0] & 1) 524 if (oldmant.m32[0] & 1)
525 break; 525 break;
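
[Editor's note] The old test was a classic unsigned-comparison bug: the mantissa words are unsigned, so m32[1] >= 0 is always true and !(m32[1] >= 0) can never fire. Testing the top bit directly recovers the intended sign check, as this small demonstration shows:

	#include <stdio.h>

	int main(void)
	{
		unsigned int m = 0x80000001u;

		printf("%d\n", !(m >= 0));		/* always 0 */
		printf("%d\n", !!(m & 0x80000000u));	/* 1: sign bit set */
		return 0;
	}
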
diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c
index 8572246db84d..b33f97a13e6d 100644
--- a/arch/m68k/platform/coldfire/pci.c
+++ b/arch/m68k/platform/coldfire/pci.c
@@ -320,7 +320,6 @@ static int __init mcf_pci_init(void)
320 pci_bus_size_bridges(rootbus); 320 pci_bus_size_bridges(rootbus);
321 pci_bus_assign_resources(rootbus); 321 pci_bus_assign_resources(rootbus);
322 pci_enable_bridges(rootbus); 322 pci_enable_bridges(rootbus);
323 pci_bus_add_devices(rootbus);
324 return 0; 323 return 0;
325} 324}
326 325
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index ca0966cac72a..cab54482ca34 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -275,7 +275,7 @@ void dvma_init(void)
275 275
276} 276}
277 277
278inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align) 278unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
279{ 279{
280 280
281 unsigned long baddr; 281 unsigned long baddr;
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index bc739d4bab2e..4dc2f5fa3f67 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -121,7 +121,8 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
121 clk->rate = rate; 121 clk->rate = rate;
122 122
123 regval = LOONGSON_CHIPCFG0; 123 regval = LOONGSON_CHIPCFG0;
124 regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1); 124 regval = (regval & ~0x7) |
125 (loongson2_clockmod_table[i].driver_data - 1);
125 LOONGSON_CHIPCFG0 = regval; 126 LOONGSON_CHIPCFG0 = regval;
126 127
127 return ret; 128 return ret;
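
[Editor's note] This tracks the cpufreq core's rename of the driver-private field in struct cpufreq_frequency_table from index to driver_data; the value was never a table index, which the new name makes explicit. The 3.11-era layout was roughly:

	/* Approximate 3.11-era definition from <linux/cpufreq.h> */
	struct cpufreq_frequency_table {
		unsigned int driver_data;  /* was 'index': driver-private */
		unsigned int frequency;    /* kHz, or CPUFREQ_ENTRY_INVALID
					    * / CPUFREQ_TABLE_END */
	};
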
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index f20d01d9aaf9..195653e851da 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,4 @@ generic-y += types.h
66generic-y += ucontext.h 66generic-y += ucontext.h
67generic-y += user.h 67generic-y += user.h
68generic-y += word-at-a-time.h 68generic-y += word-at-a-time.h
69generic-y += xor.h
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 349ed85c7d61..08891d07aeb6 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -107,8 +107,9 @@ struct kvmppc_vcpu_book3s {
107#define CONTEXT_GUEST 1 107#define CONTEXT_GUEST 1
108#define CONTEXT_GUEST_END 2 108#define CONTEXT_GUEST_END 2
109 109
110#define VSID_REAL 0x1fffffffffc00000ULL 110#define VSID_REAL 0x0fffffffffc00000ULL
111#define VSID_BAT 0x1fffffffffb00000ULL 111#define VSID_BAT 0x0fffffffffb00000ULL
112#define VSID_1T 0x1000000000000000ULL
112#define VSID_REAL_DR 0x2000000000000000ULL 113#define VSID_REAL_DR 0x2000000000000000ULL
113#define VSID_REAL_IR 0x4000000000000000ULL 114#define VSID_REAL_IR 0x4000000000000000ULL
114#define VSID_PR 0x8000000000000000ULL 115#define VSID_PR 0x8000000000000000ULL
@@ -123,6 +124,7 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
123extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); 124extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
124extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); 125extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
125extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); 126extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
127extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
126extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); 128extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
127extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run, 129extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
128 struct kvm_vcpu *vcpu, unsigned long addr, 130 struct kvm_vcpu *vcpu, unsigned long addr,
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 2a67e9baa59f..6b0ba5854d99 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -128,7 +128,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
128 const char *type; 128 const char *type;
129 struct pci_slot *slot; 129 struct pci_slot *slot;
130 130
131 dev = alloc_pci_dev(); 131 dev = pci_alloc_dev(bus);
132 if (!dev) 132 if (!dev)
133 return NULL; 133 return NULL;
134 type = of_get_property(node, "device_type", NULL); 134 type = of_get_property(node, "device_type", NULL);
@@ -137,7 +137,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
137 137
138 pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); 138 pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
139 139
140 dev->bus = bus;
141 dev->dev.of_node = of_node_get(node); 140 dev->dev.of_node = of_node_get(node);
142 dev->dev.parent = bus->bridge; 141 dev->dev.parent = bus->bridge;
143 dev->dev.bus = &pci_bus_type; 142 dev->dev.bus = &pci_bus_type;
@@ -165,7 +164,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
165 pr_debug(" class: 0x%x\n", dev->class); 164 pr_debug(" class: 0x%x\n", dev->class);
166 pr_debug(" revision: 0x%x\n", dev->revision); 165 pr_debug(" revision: 0x%x\n", dev->revision);
167 166
168 dev->current_state = 4; /* unknown power state */ 167 dev->current_state = PCI_UNKNOWN; /* unknown power state */
169 dev->error_state = pci_channel_io_normal; 168 dev->error_state = pci_channel_io_normal;
170 dev->dma_mask = 0xffffffff; 169 dev->dma_mask = 0xffffffff;
171 170
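
[Editor's note] pci_alloc_dev(bus) supersedes alloc_pci_dev(): it takes the bus up front, grabs a reference on it, and fills in dev->bus itself, which is why the manual assignment is dropped above. The usage pattern, hedged to the 3.11-era API:

	struct pci_dev *dev;

	dev = pci_alloc_dev(bus);	/* refs 'bus' and sets dev->bus */
	if (!dev)
		return NULL;
	/* ... fill in devfn, vendor, class, resources ... */
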
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 422de3f4d46c..008cd856c5b5 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -5,9 +5,10 @@
5subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 5subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
6 6
7ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm 7ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
8KVM := ../../../virt/kvm
8 9
9common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \ 10common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
10 eventfd.o) 11 $(KVM)/eventfd.o
11 12
12CFLAGS_44x_tlb.o := -I. 13CFLAGS_44x_tlb.o := -I.
13CFLAGS_e500_mmu.o := -I. 14CFLAGS_e500_mmu.o := -I.
@@ -53,7 +54,7 @@ kvm-e500mc-objs := \
53kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) 54kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
54 55
55kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ 56kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
56 ../../../virt/kvm/coalesced_mmio.o \ 57 $(KVM)/coalesced_mmio.o \
57 fpu.o \ 58 fpu.o \
58 book3s_paired_singles.o \ 59 book3s_paired_singles.o \
59 book3s_pr.o \ 60 book3s_pr.o \
@@ -86,8 +87,8 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
86 book3s_xics.o 87 book3s_xics.o
87 88
88kvm-book3s_64-module-objs := \ 89kvm-book3s_64-module-objs := \
89 ../../../virt/kvm/kvm_main.o \ 90 $(KVM)/kvm_main.o \
90 ../../../virt/kvm/eventfd.o \ 91 $(KVM)/eventfd.o \
91 powerpc.o \ 92 powerpc.o \
92 emulate.o \ 93 emulate.o \
93 book3s.o \ 94 book3s.o \
@@ -111,7 +112,7 @@ kvm-book3s_32-objs := \
111kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) 112kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
112 113
113kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o 114kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
114kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(addprefix ../../../virt/kvm/, irqchip.o) 115kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
115 116
116kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 117kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
117 118
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index b871721c0050..739bfbadb85e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -26,6 +26,7 @@
26#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
27#include <asm/kvm_ppc.h> 27#include <asm/kvm_ppc.h>
28#include <asm/kvm_book3s.h> 28#include <asm/kvm_book3s.h>
29#include <asm/mmu-hash64.h>
29 30
30/* #define DEBUG_MMU */ 31/* #define DEBUG_MMU */
31 32
@@ -76,6 +77,24 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
76 return NULL; 77 return NULL;
77} 78}
78 79
80static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
81{
82 return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
83}
84
85static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
86{
87 return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
88}
89
90static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
91{
92 eaddr &= kvmppc_slb_offset_mask(slb);
93
94 return (eaddr >> VPN_SHIFT) |
95 ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
96}
97
79static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, 98static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
80 bool data) 99 bool data)
81{ 100{
@@ -85,11 +104,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
85 if (!slb) 104 if (!slb)
86 return 0; 105 return 0;
87 106
88 if (slb->tb) 107 return kvmppc_slb_calc_vpn(slb, eaddr);
89 return (((u64)eaddr >> 12) & 0xfffffff) |
90 (((u64)slb->vsid) << 28);
91
92 return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
93} 108}
94 109
95static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) 110static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
@@ -100,7 +115,8 @@ static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
100static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) 115static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
101{ 116{
102 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); 117 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
103 return ((eaddr & 0xfffffff) >> p); 118
119 return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
104} 120}
105 121
106static hva_t kvmppc_mmu_book3s_64_get_pteg( 122static hva_t kvmppc_mmu_book3s_64_get_pteg(
@@ -109,13 +125,15 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
109 bool second) 125 bool second)
110{ 126{
111 u64 hash, pteg, htabsize; 127 u64 hash, pteg, htabsize;
112 u32 page; 128 u32 ssize;
113 hva_t r; 129 hva_t r;
130 u64 vpn;
114 131
115 page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
116 htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1); 132 htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
117 133
118 hash = slbe->vsid ^ page; 134 vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
135 ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
136 hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
119 if (second) 137 if (second)
120 hash = ~hash; 138 hash = ~hash;
121 hash &= ((1ULL << 39ULL) - 1ULL); 139 hash &= ((1ULL << 39ULL) - 1ULL);
@@ -146,7 +164,7 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
146 u64 avpn; 164 u64 avpn;
147 165
148 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); 166 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
149 avpn |= slbe->vsid << (28 - p); 167 avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
150 168
151 if (p < 24) 169 if (p < 24)
152 avpn >>= ((80 - p) - 56) - 8; 170 avpn >>= ((80 - p) - 56) - 8;
@@ -167,7 +185,6 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
167 int i; 185 int i;
168 u8 key = 0; 186 u8 key = 0;
169 bool found = false; 187 bool found = false;
170 bool perm_err = false;
171 int second = 0; 188 int second = 0;
172 ulong mp_ea = vcpu->arch.magic_page_ea; 189 ulong mp_ea = vcpu->arch.magic_page_ea;
173 190
@@ -190,13 +207,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
190 if (!slbe) 207 if (!slbe)
191 goto no_seg_found; 208 goto no_seg_found;
192 209
210 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
211 if (slbe->tb)
212 avpn |= SLB_VSID_B_1T;
213
193do_second: 214do_second:
194 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); 215 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
195 if (kvm_is_error_hva(ptegp)) 216 if (kvm_is_error_hva(ptegp))
196 goto no_page_found; 217 goto no_page_found;
197 218
198 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
199
200 if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { 219 if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
201 printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp); 220 printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
202 goto no_page_found; 221 goto no_page_found;
@@ -219,7 +238,7 @@ do_second:
219 continue; 238 continue;
220 239
221 /* AVPN compare */ 240 /* AVPN compare */
222 if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) { 241 if (HPTE_V_COMPARE(avpn, v)) {
223 u8 pp = (r & HPTE_R_PP) | key; 242 u8 pp = (r & HPTE_R_PP) | key;
224 int eaddr_mask = 0xFFF; 243 int eaddr_mask = 0xFFF;
225 244
@@ -248,11 +267,6 @@ do_second:
248 break; 267 break;
249 } 268 }
250 269
251 if (!gpte->may_read) {
252 perm_err = true;
253 continue;
254 }
255
256 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " 270 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
257 "-> 0x%lx\n", 271 "-> 0x%lx\n",
258 eaddr, avpn, gpte->vpage, gpte->raddr); 272 eaddr, avpn, gpte->vpage, gpte->raddr);
@@ -281,6 +295,8 @@ do_second:
281 if (pteg[i+1] != oldr) 295 if (pteg[i+1] != oldr)
282 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); 296 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
283 297
298 if (!gpte->may_read)
299 return -EPERM;
284 return 0; 300 return 0;
285 } else { 301 } else {
286 dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx " 302 dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
@@ -296,13 +312,7 @@ do_second:
296 } 312 }
297 } 313 }
298 314
299
300no_page_found: 315no_page_found:
301
302
303 if (perm_err)
304 return -EPERM;
305
306 return -ENOENT; 316 return -ENOENT;
307 317
308no_seg_found: 318no_seg_found:
@@ -334,7 +344,7 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
334 slbe->large = (rs & SLB_VSID_L) ? 1 : 0; 344 slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
335 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; 345 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
336 slbe->esid = slbe->tb ? esid_1t : esid; 346 slbe->esid = slbe->tb ? esid_1t : esid;
337 slbe->vsid = rs >> 12; 347 slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
338 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; 348 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
339 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; 349 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
340 slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; 350 slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
@@ -375,6 +385,7 @@ static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
375static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) 385static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
376{ 386{
377 struct kvmppc_slb *slbe; 387 struct kvmppc_slb *slbe;
388 u64 seg_size;
378 389
379 dprintk("KVM MMU: slbie(0x%llx)\n", ea); 390 dprintk("KVM MMU: slbie(0x%llx)\n", ea);
380 391
@@ -386,8 +397,11 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
386 dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); 397 dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
387 398
388 slbe->valid = false; 399 slbe->valid = false;
400 slbe->orige = 0;
401 slbe->origv = 0;
389 402
390 kvmppc_mmu_map_segment(vcpu, ea); 403 seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
404 kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
391} 405}
392 406
393static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) 407static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
@@ -396,8 +410,11 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
396 410
397 dprintk("KVM MMU: slbia()\n"); 411 dprintk("KVM MMU: slbia()\n");
398 412
399 for (i = 1; i < vcpu->arch.slb_nr; i++) 413 for (i = 1; i < vcpu->arch.slb_nr; i++) {
400 vcpu->arch.slb[i].valid = false; 414 vcpu->arch.slb[i].valid = false;
415 vcpu->arch.slb[i].orige = 0;
416 vcpu->arch.slb[i].origv = 0;
417 }
401 418
402 if (vcpu->arch.shared->msr & MSR_IR) { 419 if (vcpu->arch.shared->msr & MSR_IR) {
403 kvmppc_mmu_flush_segments(vcpu); 420 kvmppc_mmu_flush_segments(vcpu);
@@ -467,8 +484,14 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
467 484
468 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 485 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
469 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); 486 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
470 if (slb) 487 if (slb) {
471 gvsid = slb->vsid; 488 gvsid = slb->vsid;
489 if (slb->tb) {
490 gvsid <<= SID_SHIFT_1T - SID_SHIFT;
491 gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
492 gvsid |= VSID_1T;
493 }
494 }
472 } 495 }
473 496
474 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 497 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
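
[Editor's note] The new helpers make the 256M/1T split explicit: the SID shift is 28 bits for a 256 MB segment and 40 bits for a 1 TB one, and the virtual page number splices the VSID in above the in-segment page offset. A standalone sketch with the usual constants (SID_SHIFT 28, SID_SHIFT_1T 40, VPN_SHIFT 12; hedged in case the surrounding definitions differ):

	/* vpn = (in-segment offset >> 12) | (vsid << (sid_shift - 12)) */
	static unsigned long long calc_vpn(unsigned long long vsid,
					   unsigned long long eaddr,
					   int one_t)
	{
		int shift = one_t ? 40 : 28;	/* SID_SHIFT_1T : SID_SHIFT */
		unsigned long long off = eaddr & ((1ULL << shift) - 1);

		return (off >> 12) | (vsid << (shift - 12));
	}
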
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 3a9a1aceb14f..b350d9494b26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -301,6 +301,23 @@ out:
301 return r; 301 return r;
302} 302}
303 303
304void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
305{
306 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
307 ulong seg_mask = -seg_size;
308 int i;
309
310 for (i = 1; i < svcpu->slb_max; i++) {
311 if ((svcpu->slb[i].esid & SLB_ESID_V) &&
312 (svcpu->slb[i].esid & seg_mask) == ea) {
313 /* Invalidate this entry */
314 svcpu->slb[i].esid = 0;
315 }
316 }
317
318 svcpu_put(svcpu);
319}
320
304void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) 321void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
305{ 322{
306 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); 323 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -325,9 +342,9 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
325 return -1; 342 return -1;
326 vcpu3s->context_id[0] = err; 343 vcpu3s->context_id[0] = err;
327 344
328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) 345 vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
329 << ESID_BITS) - 1; 346 << ESID_BITS) - 1;
330 vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; 347 vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; 348 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
332 349
333 kvmppc_mmu_hpte_init(vcpu); 350 kvmppc_mmu_hpte_init(vcpu);
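
[Editor's note] The flush helper leans on two's-complement arithmetic: for a power-of-two seg_size, -seg_size is exactly the mask that clears the in-segment offset, so (esid & seg_mask) == ea matches every shadow SLB entry whose segment base equals ea. For example:

	unsigned long seg_size = 1UL << 28;	/* 256 MB segment */
	unsigned long seg_mask = -seg_size;	/* 0xfffffffff0000000 */

	/* an address inside the segment based at 0x10000000 */
	unsigned long base = 0x12345678UL & seg_mask;	/* 0x10000000 */
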
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 56b983e7b738..4f0caecc0f9d 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -66,10 +66,6 @@ slb_exit_skip_ ## num:
66 66
67 ld r12, PACA_SLBSHADOWPTR(r13) 67 ld r12, PACA_SLBSHADOWPTR(r13)
68 68
69 /* Save off the first entry so we can slbie it later */
70 ld r10, SHADOW_SLB_ESID(0)(r12)
71 ld r11, SHADOW_SLB_VSID(0)(r12)
72
73 /* Remove bolted entries */ 69 /* Remove bolted entries */
74 UNBOLT_SLB_ENTRY(0) 70 UNBOLT_SLB_ENTRY(0)
75 UNBOLT_SLB_ENTRY(1) 71 UNBOLT_SLB_ENTRY(1)
@@ -81,15 +77,10 @@ slb_exit_skip_ ## num:
81 77
82 /* Flush SLB */ 78 /* Flush SLB */
83 79
80 li r10, 0
81 slbmte r10, r10
84 slbia 82 slbia
85 83
86 /* r0 = esid & ESID_MASK */
87 rldicr r10, r10, 0, 35
88 /* r0 |= CLASS_BIT(VSID) */
89 rldic r12, r11, 56 - 36, 36
90 or r10, r10, r12
91 slbie r10
92
93 /* Fill SLB with our shadow */ 84 /* Fill SLB with our shadow */
94 85
95 lbz r12, SVCPU_SLB_MAX(r3) 86 lbz r12, SVCPU_SLB_MAX(r3)
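
[Editor's note] The rationale, as the removed save/slbie dance suggests, is that slbia leaves SLB entry 0 untouched, so entry 0 is now cleared explicitly by writing an invalid (V=0) entry with slbmte before the bulk invalidate. A hedged inline-asm sketch of the sequence:

	/* Sketch of the flush: zero entry 0 first, then slbia the rest. */
	asm volatile(
		"	li	4,0\n"
		"	slbmte	4,4\n"	/* write invalid entry at index 0 */
		"	slbia\n"	/* invalidate all other entries */
		: : : "r4", "memory");
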
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bdc40b8e77d9..19498a567a81 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1239,8 +1239,7 @@ out:
1239#ifdef CONFIG_PPC64 1239#ifdef CONFIG_PPC64
1240int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) 1240int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
1241{ 1241{
1242 /* No flags */ 1242 info->flags = KVM_PPC_1T_SEGMENTS;
1243 info->flags = 0;
1244 1243
1245 /* SLB is always 64 entries */ 1244 /* SLB is always 64 entries */
1246 info->slb_size = 64; 1245 info->slb_size = 64;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1a1b51189773..dcc94f016007 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -796,7 +796,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
796 kvmppc_fill_pt_regs(&regs); 796 kvmppc_fill_pt_regs(&regs);
797 timer_interrupt(&regs); 797 timer_interrupt(&regs);
798 break; 798 break;
799#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) 799#if defined(CONFIG_PPC_DOORBELL)
800 case BOOKE_INTERRUPT_DOORBELL: 800 case BOOKE_INTERRUPT_DOORBELL:
801 kvmppc_fill_pt_regs(&regs); 801 kvmppc_fill_pt_regs(&regs);
802 doorbell_exception(&regs); 802 doorbell_exception(&regs);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 631a2650e4e4..2c52ada30775 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -169,6 +169,9 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
169 vcpu->arch.shared->sprg3 = spr_val; 169 vcpu->arch.shared->sprg3 = spr_val;
170 break; 170 break;
171 171
172 /* PIR can legally be written, but we ignore it */
173 case SPRN_PIR: break;
174
172 default: 175 default:
173 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, 176 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
174 spr_val); 177 spr_val);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index b62aab3e22ec..e17cdfc5ba40 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -193,37 +193,6 @@ config PPC_IO_WORKAROUNDS
193 193
194source "drivers/cpufreq/Kconfig" 194source "drivers/cpufreq/Kconfig"
195 195
196menu "CPU Frequency drivers"
197 depends on CPU_FREQ
198
199config CPU_FREQ_PMAC
200 bool "Support for Apple PowerBooks"
201 depends on ADB_PMU && PPC32
202 select CPU_FREQ_TABLE
203 help
204 This adds support for frequency switching on Apple PowerBooks,
205 this currently includes some models of iBook & Titanium
206 PowerBook.
207
208config CPU_FREQ_PMAC64
209 bool "Support for some Apple G5s"
210 depends on PPC_PMAC && PPC64
211 select CPU_FREQ_TABLE
212 help
213 This adds support for frequency switching on Apple iMac G5,
214 and some of the more recent desktop G5 machines as well.
215
216config PPC_PASEMI_CPUFREQ
217 bool "Support for PA Semi PWRficient"
218 depends on PPC_PASEMI
219 default y
220 select CPU_FREQ_TABLE
221 help
222 This adds the support for frequency switching on PA Semi
223 PWRficient processors.
224
225endmenu
226
227menu "CPUIdle driver" 196menu "CPUIdle driver"
228 197
229source "drivers/cpuidle/Kconfig" 198source "drivers/cpuidle/Kconfig"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 54f3936001aa..7819c40a6bc3 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -158,6 +158,7 @@ config E500
158config PPC_E500MC 158config PPC_E500MC
159 bool "e500mc Support" 159 bool "e500mc Support"
160 select PPC_FPU 160 select PPC_FPU
161 select COMMON_CLK
161 depends on E500 162 depends on E500
162 help 163 help
163 This must be enabled for running on e500mc (and derivatives 164 This must be enabled for running on e500mc (and derivatives
diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile
index ce6d789e0741..8e8d4cae5ebe 100644
--- a/arch/powerpc/platforms/pasemi/Makefile
+++ b/arch/powerpc/platforms/pasemi/Makefile
@@ -1,3 +1,2 @@
1obj-y += setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o misc.o 1obj-y += setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o misc.o
2obj-$(CONFIG_PPC_PASEMI_MDIO) += gpio_mdio.o 2obj-$(CONFIG_PPC_PASEMI_MDIO) += gpio_mdio.o
3obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += cpufreq.o
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
deleted file mode 100644
index be1e7958909e..000000000000
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * Copyright (C) 2007 PA Semi, Inc
3 *
4 * Authors: Egor Martovetsky <egor@pasemi.com>
5 * Olof Johansson <olof@lixom.net>
6 *
7 * Maintained by: Olof Johansson <olof@lixom.net>
8 *
9 * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
10 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 */
27
28#include <linux/cpufreq.h>
29#include <linux/timer.h>
30#include <linux/module.h>
31
32#include <asm/hw_irq.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/time.h>
36#include <asm/smp.h>
37
38#define SDCASR_REG 0x0100
39#define SDCASR_REG_STRIDE 0x1000
40#define SDCPWR_CFGA0_REG 0x0100
41#define SDCPWR_PWST0_REG 0x0000
42#define SDCPWR_GIZTIME_REG 0x0440
43
44/* SDCPWR_GIZTIME_REG fields */
45#define SDCPWR_GIZTIME_GR 0x80000000
46#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
47
48/* Offset of ASR registers from SDC base */
49#define SDCASR_OFFSET 0x120000
50
51static void __iomem *sdcpwr_mapbase;
52static void __iomem *sdcasr_mapbase;
53
54static DEFINE_MUTEX(pas_switch_mutex);
55
56/* Current astate, is used when waking up from power savings on
57 * one core, in case the other core has switched states during
58 * the idle time.
59 */
60static int current_astate;
61
62/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
63static struct cpufreq_frequency_table pas_freqs[] = {
64 {0, 0},
65 {1, 0},
66 {2, 0},
67 {3, 0},
68 {4, 0},
69 {0, CPUFREQ_TABLE_END},
70};
71
72static struct freq_attr *pas_cpu_freqs_attr[] = {
73 &cpufreq_freq_attr_scaling_available_freqs,
74 NULL,
75};
76
77/*
78 * hardware specific functions
79 */
80
81static int get_astate_freq(int astate)
82{
83 u32 ret;
84 ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
85
86 return ret & 0x3f;
87}
88
89static int get_cur_astate(int cpu)
90{
91 u32 ret;
92
93 ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
94 ret = (ret >> (cpu * 4)) & 0x7;
95
96 return ret;
97}
98
99static int get_gizmo_latency(void)
100{
101 u32 giztime, ret;
102
103 giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
104
105 /* just provide the upper bound */
106 if (giztime & SDCPWR_GIZTIME_GR)
107 ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
108 else
109 ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
110
111 return ret;
112}
113
114static void set_astate(int cpu, unsigned int astate)
115{
116 unsigned long flags;
117
118 /* Return if called before init has run */
119 if (unlikely(!sdcasr_mapbase))
120 return;
121
122 local_irq_save(flags);
123
124 out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
125
126 local_irq_restore(flags);
127}
128
129int check_astate(void)
130{
131 return get_cur_astate(hard_smp_processor_id());
132}
133
134void restore_astate(int cpu)
135{
136 set_astate(cpu, current_astate);
137}
138
139/*
140 * cpufreq functions
141 */
142
143static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
144{
145 const u32 *max_freqp;
146 u32 max_freq;
147 int i, cur_astate;
148 struct resource res;
149 struct device_node *cpu, *dn;
150 int err = -ENODEV;
151
152 cpu = of_get_cpu_node(policy->cpu, NULL);
153
154 if (!cpu)
155 goto out;
156
157 dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
158 if (!dn)
159 dn = of_find_compatible_node(NULL, NULL,
160 "pasemi,pwrficient-sdc");
161 if (!dn)
162 goto out;
163 err = of_address_to_resource(dn, 0, &res);
164 of_node_put(dn);
165 if (err)
166 goto out;
167 sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
168 if (!sdcasr_mapbase) {
169 err = -EINVAL;
170 goto out;
171 }
172
173 dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
174 if (!dn)
175 dn = of_find_compatible_node(NULL, NULL,
176 "pasemi,pwrficient-gizmo");
177 if (!dn) {
178 err = -ENODEV;
179 goto out_unmap_sdcasr;
180 }
181 err = of_address_to_resource(dn, 0, &res);
182 of_node_put(dn);
183 if (err)
184 goto out_unmap_sdcasr;
185 sdcpwr_mapbase = ioremap(res.start, 0x1000);
186 if (!sdcpwr_mapbase) {
187 err = -EINVAL;
188 goto out_unmap_sdcasr;
189 }
190
191 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
192
193 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
194 if (!max_freqp) {
195 err = -EINVAL;
196 goto out_unmap_sdcpwr;
197 }
198
199 /* we need the freq in kHz */
200 max_freq = *max_freqp / 1000;
201
202 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
203 pr_debug("initializing frequency table\n");
204
205 /* initialize frequency table */
206 for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
207 pas_freqs[i].frequency = get_astate_freq(pas_freqs[i].index) * 100000;
208 pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
209 }
210
211 policy->cpuinfo.transition_latency = get_gizmo_latency();
212
213 cur_astate = get_cur_astate(policy->cpu);
214 pr_debug("current astate is at %d\n",cur_astate);
215
216 policy->cur = pas_freqs[cur_astate].frequency;
217 cpumask_copy(policy->cpus, cpu_online_mask);
218
219 ppc_proc_freq = policy->cur * 1000ul;
220
221 cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
222
223 /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
224 * are set correctly
225 */
226 return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
227
228out_unmap_sdcpwr:
229 iounmap(sdcpwr_mapbase);
230
231out_unmap_sdcasr:
232 iounmap(sdcasr_mapbase);
233out:
234 return err;
235}
236
237static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
238{
239 /*
240 * We don't support CPU hotplug. Don't unmap after the system
241 * has already made it to a running state.
242 */
243 if (system_state != SYSTEM_BOOTING)
244 return 0;
245
246 if (sdcasr_mapbase)
247 iounmap(sdcasr_mapbase);
248 if (sdcpwr_mapbase)
249 iounmap(sdcpwr_mapbase);
250
251 cpufreq_frequency_table_put_attr(policy->cpu);
252 return 0;
253}
254
255static int pas_cpufreq_verify(struct cpufreq_policy *policy)
256{
257 return cpufreq_frequency_table_verify(policy, pas_freqs);
258}
259
260static int pas_cpufreq_target(struct cpufreq_policy *policy,
261 unsigned int target_freq,
262 unsigned int relation)
263{
264 struct cpufreq_freqs freqs;
265 int pas_astate_new;
266 int i;
267
268 cpufreq_frequency_table_target(policy,
269 pas_freqs,
270 target_freq,
271 relation,
272 &pas_astate_new);
273
274 freqs.old = policy->cur;
275 freqs.new = pas_freqs[pas_astate_new].frequency;
276
277 mutex_lock(&pas_switch_mutex);
278 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
279
280 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
281 policy->cpu,
282 pas_freqs[pas_astate_new].frequency,
283 pas_freqs[pas_astate_new].index);
284
285 current_astate = pas_astate_new;
286
287 for_each_online_cpu(i)
288 set_astate(i, pas_astate_new);
289
290 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
291 mutex_unlock(&pas_switch_mutex);
292
293 ppc_proc_freq = freqs.new * 1000ul;
294 return 0;
295}
296
297static struct cpufreq_driver pas_cpufreq_driver = {
298 .name = "pas-cpufreq",
299 .owner = THIS_MODULE,
300 .flags = CPUFREQ_CONST_LOOPS,
301 .init = pas_cpufreq_cpu_init,
302 .exit = pas_cpufreq_cpu_exit,
303 .verify = pas_cpufreq_verify,
304 .target = pas_cpufreq_target,
305 .attr = pas_cpu_freqs_attr,
306};
307
308/*
309 * module init and destoy
310 */
311
312static int __init pas_cpufreq_init(void)
313{
314 if (!of_machine_is_compatible("PA6T-1682M") &&
315 !of_machine_is_compatible("pasemi,pwrficient"))
316 return -ENODEV;
317
318 return cpufreq_register_driver(&pas_cpufreq_driver);
319}
320
321static void __exit pas_cpufreq_exit(void)
322{
323 cpufreq_unregister_driver(&pas_cpufreq_driver);
324}
325
326module_init(pas_cpufreq_init);
327module_exit(pas_cpufreq_exit);
328
329MODULE_LICENSE("GPL");
330MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index ea47df66fee5..52c6ce1cc985 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -9,8 +9,6 @@ obj-y += pic.o setup.o time.o feature.o pci.o \
9 sleep.o low_i2c.o cache.o pfunc_core.o \ 9 sleep.o low_i2c.o cache.o pfunc_core.o \
10 pfunc_base.o udbg_scc.o udbg_adb.o 10 pfunc_base.o udbg_scc.o udbg_adb.o
11obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o 11obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
12obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
13obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
14# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really 12# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really
15# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really 13# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
16# CONFIG_NVRAM=y 14# CONFIG_NVRAM=y
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
deleted file mode 100644
index 3104fad82480..000000000000
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ /dev/null
@@ -1,721 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
3 * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * TODO: Need a big cleanup here. Basically, we need to have different
10 * cpufreq_driver structures for the different type of HW instead of the
11 * current mess. We also need to better deal with the detection of the
12 * type of machine.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/sched.h>
22#include <linux/adb.h>
23#include <linux/pmu.h>
24#include <linux/cpufreq.h>
25#include <linux/init.h>
26#include <linux/device.h>
27#include <linux/hardirq.h>
28#include <asm/prom.h>
29#include <asm/machdep.h>
30#include <asm/irq.h>
31#include <asm/pmac_feature.h>
32#include <asm/mmu_context.h>
33#include <asm/sections.h>
34#include <asm/cputable.h>
35#include <asm/time.h>
36#include <asm/mpic.h>
37#include <asm/keylargo.h>
38#include <asm/switch_to.h>
39
40/* WARNING !!! This will cause calibrate_delay() to be called,
41 * but this is an __init function ! So you MUST go edit
42 * init/main.c to make it non-init before enabling DEBUG_FREQ
43 */
44#undef DEBUG_FREQ
45
46extern void low_choose_7447a_dfs(int dfs);
47extern void low_choose_750fx_pll(int pll);
48extern void low_sleep_handler(void);
49
50/*
51 * Currently, PowerMac cpufreq supports only high & low frequencies
52 * that are set by the firmware
53 */
54static unsigned int low_freq;
55static unsigned int hi_freq;
56static unsigned int cur_freq;
57static unsigned int sleep_freq;
58static unsigned long transition_latency;
59
60/*
61 * Different models uses different mechanisms to switch the frequency
62 */
63static int (*set_speed_proc)(int low_speed);
64static unsigned int (*get_speed_proc)(void);
65
66/*
67 * Some definitions used by the various speedprocs
68 */
69static u32 voltage_gpio;
70static u32 frequency_gpio;
71static u32 slew_done_gpio;
72static int no_schedule;
73static int has_cpu_l2lve;
74static int is_pmu_based;
75
76/* There are only two frequency states for each processor. Values
77 * are in kHz for the time being.
78 */
79#define CPUFREQ_HIGH 0
80#define CPUFREQ_LOW 1
81
82static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
83 {CPUFREQ_HIGH, 0},
84 {CPUFREQ_LOW, 0},
85 {0, CPUFREQ_TABLE_END},
86};
87
88static struct freq_attr* pmac_cpu_freqs_attr[] = {
89 &cpufreq_freq_attr_scaling_available_freqs,
90 NULL,
91};
92
93static inline void local_delay(unsigned long ms)
94{
95 if (no_schedule)
96 mdelay(ms);
97 else
98 msleep(ms);
99}
100
101#ifdef DEBUG_FREQ
102static inline void debug_calc_bogomips(void)
103{
104 /* This will cause a recalc of bogomips and display the
105 * result. We backup/restore the value to avoid affecting the
106 * core cpufreq framework's own calculation.
107 */
108 unsigned long save_lpj = loops_per_jiffy;
109 calibrate_delay();
110 loops_per_jiffy = save_lpj;
111}
112#endif /* DEBUG_FREQ */
113
114/* Switch CPU speed under 750FX CPU control
115 */
116static int cpu_750fx_cpu_speed(int low_speed)
117{
118 u32 hid2;
119
120 if (low_speed == 0) {
121 /* ramping up, set voltage first */
122 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
123 /* Make sure we sleep for at least 1ms */
124 local_delay(10);
125
126 /* tweak L2 for high voltage */
127 if (has_cpu_l2lve) {
128 hid2 = mfspr(SPRN_HID2);
129 hid2 &= ~0x2000;
130 mtspr(SPRN_HID2, hid2);
131 }
132 }
133#ifdef CONFIG_6xx
134 low_choose_750fx_pll(low_speed);
135#endif
136 if (low_speed == 1) {
137 /* tweak L2 for low voltage */
138 if (has_cpu_l2lve) {
139 hid2 = mfspr(SPRN_HID2);
140 hid2 |= 0x2000;
141 mtspr(SPRN_HID2, hid2);
142 }
143
144 /* ramping down, set voltage last */
145 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
146 local_delay(10);
147 }
148
149 return 0;
150}
151
152static unsigned int cpu_750fx_get_cpu_speed(void)
153{
154 if (mfspr(SPRN_HID1) & HID1_PS)
155 return low_freq;
156 else
157 return hi_freq;
158}
159
160/* Switch CPU speed using DFS */
161static int dfs_set_cpu_speed(int low_speed)
162{
163 if (low_speed == 0) {
164 /* ramping up, set voltage first */
165 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
166 /* Make sure we sleep for at least 1ms */
167 local_delay(1);
168 }
169
170 /* set frequency */
171#ifdef CONFIG_6xx
172 low_choose_7447a_dfs(low_speed);
173#endif
174 udelay(100);
175
176 if (low_speed == 1) {
177 /* ramping down, set voltage last */
178 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
179 local_delay(1);
180 }
181
182 return 0;
183}
184
185static unsigned int dfs_get_cpu_speed(void)
186{
187 if (mfspr(SPRN_HID1) & HID1_DFS)
188 return low_freq;
189 else
190 return hi_freq;
191}
192
193
194/* Switch CPU speed using slewing GPIOs
195 */
196static int gpios_set_cpu_speed(int low_speed)
197{
198 int gpio, timeout = 0;
199
200 /* If ramping up, set voltage first */
201 if (low_speed == 0) {
202 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
203 /* Delay is way too big but it's ok, we schedule */
204 local_delay(10);
205 }
206
207 /* Set frequency */
208 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
209 if (low_speed == ((gpio & 0x01) == 0))
210 goto skip;
211
212 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
213 low_speed ? 0x04 : 0x05);
214 udelay(200);
215 do {
216 if (++timeout > 100)
217 break;
218 local_delay(1);
219 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
220 } while((gpio & 0x02) == 0);
221 skip:
222 /* If ramping down, set voltage last */
223 if (low_speed == 1) {
224 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
225 /* Delay is way too big but it's ok, we schedule */
226 local_delay(10);
227 }
228
229#ifdef DEBUG_FREQ
230 debug_calc_bogomips();
231#endif
232
233 return 0;
234}
235
236/* Switch CPU speed under PMU control
237 */
238static int pmu_set_cpu_speed(int low_speed)
239{
240 struct adb_request req;
241 unsigned long save_l2cr;
242 unsigned long save_l3cr;
243 unsigned int pic_prio;
244 unsigned long flags;
245
246 preempt_disable();
247
248#ifdef DEBUG_FREQ
249 printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
250#endif
251 pmu_suspend();
252
253 /* Disable all interrupt sources on openpic */
254 pic_prio = mpic_cpu_get_priority();
255 mpic_cpu_set_priority(0xf);
256
257 /* Make sure the decrementer won't interrupt us */
258 asm volatile("mtdec %0" : : "r" (0x7fffffff));
259 /* Make sure any pending DEC interrupt occurring while we did
260 * the above didn't re-enable the DEC */
261 mb();
262 asm volatile("mtdec %0" : : "r" (0x7fffffff));
263
264 /* We can now disable MSR_EE */
265 local_irq_save(flags);
266
267	/* Give up the FPU & vec */
268 enable_kernel_fp();
269
270#ifdef CONFIG_ALTIVEC
271 if (cpu_has_feature(CPU_FTR_ALTIVEC))
272 enable_kernel_altivec();
273#endif /* CONFIG_ALTIVEC */
274
275 /* Save & disable L2 and L3 caches */
276 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
277 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
278
279	/* Send the new speed command. My assumption is that this command
280	 * will cause PLL_CFG[0..3] to be changed the next time the CPU sleeps
281	 */
282 pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
283 while (!req.complete)
284 pmu_poll();
285
286 /* Prepare the northbridge for the speed transition */
287	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 1);
288
289 /* Call low level code to backup CPU state and recover from
290 * hardware reset
291 */
292 low_sleep_handler();
293
294 /* Restore the northbridge */
295	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 0);
296
297 /* Restore L2 cache */
298 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
299 _set_L2CR(save_l2cr);
300 /* Restore L3 cache */
301 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
302 _set_L3CR(save_l3cr);
303
304 /* Restore userland MMU context */
305 switch_mmu_context(NULL, current->active_mm);
306
307#ifdef DEBUG_FREQ
308 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
309#endif
310
311 /* Restore low level PMU operations */
312 pmu_unlock();
313
314 /*
315 * Restore decrementer; we'll take a decrementer interrupt
316 * as soon as interrupts are re-enabled and the generic
317 * clockevents code will reprogram it with the right value.
318 */
319 set_dec(1);
320
321 /* Restore interrupts */
322 mpic_cpu_set_priority(pic_prio);
323
324 /* Let interrupts flow again ... */
325 local_irq_restore(flags);
326
327#ifdef DEBUG_FREQ
328 debug_calc_bogomips();
329#endif
330
331 pmu_resume();
332
333 preempt_enable();
334
335 return 0;
336}
337
338static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
339 int notify)
340{
341 struct cpufreq_freqs freqs;
342 unsigned long l3cr;
343 static unsigned long prev_l3cr;
344
345 freqs.old = cur_freq;
346 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
347
348 if (freqs.old == freqs.new)
349 return 0;
350
351 if (notify)
352 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
353 if (speed_mode == CPUFREQ_LOW &&
354 cpu_has_feature(CPU_FTR_L3CR)) {
355 l3cr = _get_L3CR();
356 if (l3cr & L3CR_L3E) {
357 prev_l3cr = l3cr;
358 _set_L3CR(0);
359 }
360 }
361 set_speed_proc(speed_mode == CPUFREQ_LOW);
362 if (speed_mode == CPUFREQ_HIGH &&
363 cpu_has_feature(CPU_FTR_L3CR)) {
364 l3cr = _get_L3CR();
365 if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
366 _set_L3CR(prev_l3cr);
367 }
368 if (notify)
369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
370 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
371
372 return 0;
373}
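
do_set_cpu_speed() brackets the actual switch with PRECHANGE/POSTCHANGE notifications so that governors and the loops_per_jiffy scaling code see both edges of the transition. A minimal sketch of that bracket against the 3.x-era notifier API used in this file (example_switch and new_khz are illustrative names, not part of the original):

	static int example_switch(struct cpufreq_policy *policy, unsigned int new_khz)
	{
		struct cpufreq_freqs freqs = { .old = cur_freq, .new = new_khz };

		if (freqs.old == freqs.new)
			return 0;
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		/* ... perform the hardware switch here ... */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
		return 0;
	}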
374
375static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
376{
377 return cur_freq;
378}
379
380static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
381{
382 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
383}
384
385static int pmac_cpufreq_target( struct cpufreq_policy *policy,
386 unsigned int target_freq,
387 unsigned int relation)
388{
389 unsigned int newstate = 0;
390 int rc;
391
392 if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
393 target_freq, relation, &newstate))
394 return -EINVAL;
395
396 rc = do_set_cpu_speed(policy, newstate, 1);
397
398 ppc_proc_freq = cur_freq * 1000ul;
399 return rc;
400}
401
402static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
403{
404 if (policy->cpu != 0)
405 return -ENODEV;
406
407 policy->cpuinfo.transition_latency = transition_latency;
408 policy->cur = cur_freq;
409
410 cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
411 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
412}
413
414static u32 read_gpio(struct device_node *np)
415{
416 const u32 *reg = of_get_property(np, "reg", NULL);
417 u32 offset;
418
419 if (reg == NULL)
420 return 0;
421 /* That works for all keylargos but shall be fixed properly
422 * some day... The problem is that it seems we can't rely
423 * on the "reg" property of the GPIO nodes, they are either
424 * relative to the base of KeyLargo or to the base of the
425 * GPIO space, and the device-tree doesn't help.
426 */
427 offset = *reg;
428 if (offset < KEYLARGO_GPIO_LEVELS0)
429 offset += KEYLARGO_GPIO_LEVELS0;
430 return offset;
431}
432
433static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
434{
435	/* Ok, this could be made a bit smarter, but let's be robust for now. We
436	 * always force a speed change to high speed before sleep, to make sure
437	 * we have an appropriate voltage and/or bus speed for the wakeup process,
438	 * and to make sure our loops_per_jiffy is "good enough", that is, it will
439	 * not cause too-short delays if we sleep at low speed and wake at high
440	 * speed.
441	 */
442 no_schedule = 1;
443 sleep_freq = cur_freq;
444 if (cur_freq == low_freq && !is_pmu_based)
445 do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
446 return 0;
447}
448
449static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
450{
451 /* If we resume, first check if we have a get() function */
452 if (get_speed_proc)
453 cur_freq = get_speed_proc();
454 else
455 cur_freq = 0;
456
457	/* If we don't, hrm... we don't really know our speed here; the best
458	 * we can do is force a switch back to whatever it was, which is
459	 * probably high speed due to our suspend() routine
460	 */
461 do_set_cpu_speed(policy, sleep_freq == low_freq ?
462 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
463
464 ppc_proc_freq = cur_freq * 1000ul;
465
466 no_schedule = 0;
467 return 0;
468}
469
470static struct cpufreq_driver pmac_cpufreq_driver = {
471 .verify = pmac_cpufreq_verify,
472 .target = pmac_cpufreq_target,
473 .get = pmac_cpufreq_get_speed,
474 .init = pmac_cpufreq_cpu_init,
475 .suspend = pmac_cpufreq_suspend,
476 .resume = pmac_cpufreq_resume,
477 .flags = CPUFREQ_PM_NO_WARN,
478 .attr = pmac_cpu_freqs_attr,
479 .name = "powermac",
480 .owner = THIS_MODULE,
481};
482
483
484static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
485{
486 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
487 "voltage-gpio");
488 struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
489 "frequency-gpio");
490 struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
491 "slewing-done");
492 const u32 *value;
493
494 /*
495 * Check to see if it's GPIO driven or PMU only
496 *
497 * The way we extract the GPIO address is slightly hackish, but it
498 * works well enough for now. We need to abstract the whole GPIO
499 * stuff sooner or later anyway
500 */
501
502 if (volt_gpio_np)
503 voltage_gpio = read_gpio(volt_gpio_np);
504 if (freq_gpio_np)
505 frequency_gpio = read_gpio(freq_gpio_np);
506 if (slew_done_gpio_np)
507 slew_done_gpio = read_gpio(slew_done_gpio_np);
508
509 /* If we use the frequency GPIOs, calculate the min/max speeds based
510 * on the bus frequencies
511 */
512 if (frequency_gpio && slew_done_gpio) {
513 int lenp, rc;
514 const u32 *freqs, *ratio;
515
516 freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
517 lenp /= sizeof(u32);
518 if (freqs == NULL || lenp != 2) {
519 printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
520 return 1;
521 }
522 ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
523 NULL);
524 if (ratio == NULL) {
525 printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
526 return 1;
527 }
528
529 /* Get the min/max bus frequencies */
530 low_freq = min(freqs[0], freqs[1]);
531 hi_freq = max(freqs[0], freqs[1]);
532
533		/* Grrrr.. It _seems_ that the device-tree is lying about the low bus
534		 * frequency: it claims it to be around 84 MHz on some models while
535		 * it appears to be approx. 101 MHz on all. Let's hack around here...
536		 * fortunately, we don't need to be too precise
537		 */
538 if (low_freq < 98000000)
539 low_freq = 101000000;
540
541 /* Convert those to CPU core clocks */
542 low_freq = (low_freq * (*ratio)) / 2000;
543 hi_freq = (hi_freq * (*ratio)) / 2000;
544
545		/* Now that we have the frequencies, read the GPIO to see what our
546		 * current speed is
547		 */
548 rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
549 cur_freq = (rc & 0x01) ? hi_freq : low_freq;
550
551 set_speed_proc = gpios_set_cpu_speed;
552 return 1;
553 }
554
555 /* If we use the PMU, look for the min & max frequencies in the
556 * device-tree
557 */
558 value = of_get_property(cpunode, "min-clock-frequency", NULL);
559 if (!value)
560 return 1;
561 low_freq = (*value) / 1000;
562 /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
563 * here */
564 if (low_freq < 100000)
565 low_freq *= 10;
566
567 value = of_get_property(cpunode, "max-clock-frequency", NULL);
568 if (!value)
569 return 1;
570 hi_freq = (*value) / 1000;
571 set_speed_proc = pmu_set_cpu_speed;
572 is_pmu_based = 1;
573
574 return 0;
575}
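
The /2000 in the core-clock conversion above does double duty: it divides out the *2 encoding of the ratio property and converts Hz to kHz. With illustrative numbers, a 101000000 Hz bus and processor-to-bus-ratio*2 = 15 (that is, a 7.5:1 ratio) give (101000000 * 15) / 2000 = 757500 kHz, i.e. a 757.5 MHz core clock.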
576
577static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
578{
579 struct device_node *volt_gpio_np;
580
581 if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
582 return 1;
583
584 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
585 if (volt_gpio_np)
586 voltage_gpio = read_gpio(volt_gpio_np);
587	if (!voltage_gpio) {
588 printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
589 return 1;
590 }
591
592 /* OF only reports the high frequency */
593 hi_freq = cur_freq;
594 low_freq = cur_freq/2;
595
596 /* Read actual frequency from CPU */
597 cur_freq = dfs_get_cpu_speed();
598 set_speed_proc = dfs_set_cpu_speed;
599 get_speed_proc = dfs_get_cpu_speed;
600
601 return 0;
602}
603
604static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
605{
606 struct device_node *volt_gpio_np;
607 u32 pvr;
608 const u32 *value;
609
610 if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
611 return 1;
612
613 hi_freq = cur_freq;
614 value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
615 if (!value)
616 return 1;
617 low_freq = (*value) / 1000;
618
619 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
620 if (volt_gpio_np)
621 voltage_gpio = read_gpio(volt_gpio_np);
622
623 pvr = mfspr(SPRN_PVR);
624 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
625
626 set_speed_proc = cpu_750fx_cpu_speed;
627 get_speed_proc = cpu_750fx_get_cpu_speed;
628 cur_freq = cpu_750fx_get_cpu_speed();
629
630 return 0;
631}
632
633/* Currently, we support the following machines:
634 *
635 *  - Titanium PowerBook 1GHz (PMU based, 667MHz & 1GHz)
636 *  - Titanium PowerBook 800 (PMU based, 667MHz & 800MHz)
637 *  - Titanium PowerBook 400 (PMU based, 300MHz & 400MHz)
638 *  - Titanium PowerBook 500 (PMU based, 300MHz & 500MHz)
639 *  - iBook2 500/600 (PMU based, 400MHz & 500/600MHz)
640 *  - iBook2 700 (CPU based, 400MHz & 700MHz, supports low voltage)
641 *  - Recent MacRISC3 laptops
642 *  - All new machines with 7447A CPUs
643 */
644static int __init pmac_cpufreq_setup(void)
645{
646 struct device_node *cpunode;
647 const u32 *value;
648
649 if (strstr(cmd_line, "nocpufreq"))
650 return 0;
651
652 /* Assume only one CPU */
653 cpunode = of_find_node_by_type(NULL, "cpu");
654 if (!cpunode)
655 goto out;
656
657 /* Get current cpu clock freq */
658 value = of_get_property(cpunode, "clock-frequency", NULL);
659 if (!value)
660 goto out;
661 cur_freq = (*value) / 1000;
662 transition_latency = CPUFREQ_ETERNAL;
663
664 /* Check for 7447A based MacRISC3 */
665 if (of_machine_is_compatible("MacRISC3") &&
666 of_get_property(cpunode, "dynamic-power-step", NULL) &&
667 PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
668 pmac_cpufreq_init_7447A(cpunode);
669 transition_latency = 8000000;
670 /* Check for other MacRISC3 machines */
671 } else if (of_machine_is_compatible("PowerBook3,4") ||
672 of_machine_is_compatible("PowerBook3,5") ||
673 of_machine_is_compatible("MacRISC3")) {
674 pmac_cpufreq_init_MacRISC3(cpunode);
675 /* Else check for iBook2 500/600 */
676 } else if (of_machine_is_compatible("PowerBook4,1")) {
677 hi_freq = cur_freq;
678 low_freq = 400000;
679 set_speed_proc = pmu_set_cpu_speed;
680 is_pmu_based = 1;
681 }
682 /* Else check for TiPb 550 */
683 else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
684 hi_freq = cur_freq;
685 low_freq = 500000;
686 set_speed_proc = pmu_set_cpu_speed;
687 is_pmu_based = 1;
688 }
689 /* Else check for TiPb 400 & 500 */
690 else if (of_machine_is_compatible("PowerBook3,2")) {
691		/* We only know about the 400 MHz and the 500 MHz models;
692		 * they both have 300 MHz as the low frequency
693		 */
694 if (cur_freq < 350000 || cur_freq > 550000)
695 goto out;
696 hi_freq = cur_freq;
697 low_freq = 300000;
698 set_speed_proc = pmu_set_cpu_speed;
699 is_pmu_based = 1;
700 }
701 /* Else check for 750FX */
702 else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
703 pmac_cpufreq_init_750FX(cpunode);
704out:
705 of_node_put(cpunode);
706 if (set_speed_proc == NULL)
707 return -ENODEV;
708
709 pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
710 pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
711 ppc_proc_freq = cur_freq * 1000ul;
712
713 printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
714	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Boot: %d MHz\n",
715 low_freq/1000, hi_freq/1000, cur_freq/1000);
716
717 return cpufreq_register_driver(&pmac_cpufreq_driver);
718}
719
720module_init(pmac_cpufreq_setup);
721
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
deleted file mode 100644
index 7ba423431cfe..000000000000
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ /dev/null
@@ -1,746 +0,0 @@
1/*
2 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
3 * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
10 * that is, the iMac G5 and the latest single-CPU desktops.
11 */
12
13#undef DEBUG
14
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/cpufreq.h>
22#include <linux/init.h>
23#include <linux/completion.h>
24#include <linux/mutex.h>
25#include <asm/prom.h>
26#include <asm/machdep.h>
27#include <asm/irq.h>
28#include <asm/sections.h>
29#include <asm/cputable.h>
30#include <asm/time.h>
31#include <asm/smu.h>
32#include <asm/pmac_pfunc.h>
33
34#define DBG(fmt...) pr_debug(fmt)
35
36/* see 970FX user manual */
37
38#define SCOM_PCR 0x0aa001 /* PCR scom addr */
39
40#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
41#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
42#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
43#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
44#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
45#define PCR_SPEED_SHIFT 17
46#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
47#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
48#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
49#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
50#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
51#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
52
53#define SCOM_PSR 0x408001 /* PSR scom addr */
54/* warning: PSR is a 64-bit register */
55#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
56#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
57#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
58#define PSR_CUR_SPEED_SHIFT (56)
59
60/*
61 * The G5 only supports two frequencies (Quarter speed is not supported)
62 */
63#define CPUFREQ_HIGH 0
64#define CPUFREQ_LOW 1
65
66static struct cpufreq_frequency_table g5_cpu_freqs[] = {
67 {CPUFREQ_HIGH, 0},
68 {CPUFREQ_LOW, 0},
69 {0, CPUFREQ_TABLE_END},
70};
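
Note the index ordering: CPUFREQ_HIGH is entry 0 and CPUFREQ_LOW is entry 1, so a numerically smaller speed_mode means a faster clock. The speed_mode < g5_pmode_cur comparisons in the switch routines below therefore read as "frequency going up".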
71
72static struct freq_attr* g5_cpu_freqs_attr[] = {
73 &cpufreq_freq_attr_scaling_available_freqs,
74 NULL,
75};
76
77/* Power mode data is an array of the 32-bit PCR values to use for
78 * the various frequencies, retrieved from the device-tree
79 */
80static int g5_pmode_cur;
81
82static void (*g5_switch_volt)(int speed_mode);
83static int (*g5_switch_freq)(int speed_mode);
84static int (*g5_query_freq)(void);
85
86static DEFINE_MUTEX(g5_switch_mutex);
87
88static unsigned long transition_latency;
89
90#ifdef CONFIG_PMAC_SMU
91
92static const u32 *g5_pmode_data;
93static int g5_pmode_max;
94
95static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
96static int g5_fvt_count; /* number of op. points */
97static int g5_fvt_cur; /* current op. point */
98
99/*
100 * SMU based voltage switching for Neo2 platforms
101 */
102
103static void g5_smu_switch_volt(int speed_mode)
104{
105 struct smu_simple_cmd cmd;
106
107 DECLARE_COMPLETION_ONSTACK(comp);
108 smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
109 &comp, 'V', 'S', 'L', 'E', 'W',
110 0xff, g5_fvt_cur+1, speed_mode);
111 wait_for_completion(&comp);
112}
113
114/*
115 * Platform function based voltage/vdnap switching for Neo2
116 */
117
118static struct pmf_function *pfunc_set_vdnap0;
119static struct pmf_function *pfunc_vdnap0_complete;
120
121static void g5_vdnap_switch_volt(int speed_mode)
122{
123 struct pmf_args args;
124 u32 slew, done = 0;
125 unsigned long timeout;
126
127 slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
128 args.count = 1;
129 args.u[0].p = &slew;
130
131 pmf_call_one(pfunc_set_vdnap0, &args);
132
133 /* It's an irq GPIO so we should be able to just block here,
134 * I'll do that later after I've properly tested the IRQ code for
135 * platform functions
136 */
137 timeout = jiffies + HZ/10;
138 while(!time_after(jiffies, timeout)) {
139 args.count = 1;
140 args.u[0].p = &done;
141 pmf_call_one(pfunc_vdnap0_complete, &args);
142 if (done)
143 break;
144 msleep(1);
145 }
146 if (done == 0)
147 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
148}
149
150
151/*
152 * SCOM based frequency switching for 970FX rev3
153 */
154static int g5_scom_switch_freq(int speed_mode)
155{
156 unsigned long flags;
157 int to;
158
159 /* If frequency is going up, first ramp up the voltage */
160 if (speed_mode < g5_pmode_cur)
161 g5_switch_volt(speed_mode);
162
163 local_irq_save(flags);
164
165 /* Clear PCR high */
166 scom970_write(SCOM_PCR, 0);
167 /* Clear PCR low */
168 scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
169 /* Set PCR low */
170 scom970_write(SCOM_PCR, PCR_HILO_SELECT |
171 g5_pmode_data[speed_mode]);
172
173 /* Wait for completion */
174 for (to = 0; to < 10; to++) {
175 unsigned long psr = scom970_read(SCOM_PSR);
176
177 if ((psr & PSR_CMD_RECEIVED) == 0 &&
178 (((psr >> PSR_CUR_SPEED_SHIFT) ^
179 (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
180 == 0)
181 break;
182 if (psr & PSR_CMD_COMPLETED)
183 break;
184 udelay(100);
185 }
186
187 local_irq_restore(flags);
188
189	/* If frequency is going down, ramp the voltage down last */
190 if (speed_mode > g5_pmode_cur)
191 g5_switch_volt(speed_mode);
192
193 g5_pmode_cur = speed_mode;
194 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
195
196 return 0;
197}
198
199static int g5_scom_query_freq(void)
200{
201 unsigned long psr = scom970_read(SCOM_PSR);
202 int i;
203
204 for (i = 0; i <= g5_pmode_max; i++)
205 if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
206 (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
207 break;
208 return i;
209}
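
Both the switch and the query above compare the same 2-bit speed field of PSR and PCR. As an illustrative helper (not in the original), the shared test is:

	static inline int psr_matches_pmode(unsigned long psr, u32 pcr_val)
	{
		/* current speed sits at PSR bit 56, the requested one at PCR bit 17 */
		return (((psr >> PSR_CUR_SPEED_SHIFT) ^
			 (pcr_val >> PCR_SPEED_SHIFT)) & 0x3) == 0;
	}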
210
211/*
212 * Fake voltage switching for platforms with missing support
213 */
214
215static void g5_dummy_switch_volt(int speed_mode)
216{
217}
218
219#endif /* CONFIG_PMAC_SMU */
220
221/*
222 * Platform function based voltage switching for PowerMac7,2 & 7,3
223 */
224
225static struct pmf_function *pfunc_cpu0_volt_high;
226static struct pmf_function *pfunc_cpu0_volt_low;
227static struct pmf_function *pfunc_cpu1_volt_high;
228static struct pmf_function *pfunc_cpu1_volt_low;
229
230static void g5_pfunc_switch_volt(int speed_mode)
231{
232 if (speed_mode == CPUFREQ_HIGH) {
233 if (pfunc_cpu0_volt_high)
234 pmf_call_one(pfunc_cpu0_volt_high, NULL);
235 if (pfunc_cpu1_volt_high)
236 pmf_call_one(pfunc_cpu1_volt_high, NULL);
237 } else {
238 if (pfunc_cpu0_volt_low)
239 pmf_call_one(pfunc_cpu0_volt_low, NULL);
240 if (pfunc_cpu1_volt_low)
241 pmf_call_one(pfunc_cpu1_volt_low, NULL);
242 }
243	msleep(10); /* should be faster, to fix */
244}
245
246/*
247 * Platform function based frequency switching for PowerMac7,2 & 7,3
248 */
249
250static struct pmf_function *pfunc_cpu_setfreq_high;
251static struct pmf_function *pfunc_cpu_setfreq_low;
252static struct pmf_function *pfunc_cpu_getfreq;
253static struct pmf_function *pfunc_slewing_done;
254
255static int g5_pfunc_switch_freq(int speed_mode)
256{
257 struct pmf_args args;
258 u32 done = 0;
259 unsigned long timeout;
260 int rc;
261
262 DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
263
264 /* If frequency is going up, first ramp up the voltage */
265 if (speed_mode < g5_pmode_cur)
266 g5_switch_volt(speed_mode);
267
268 /* Do it */
269 if (speed_mode == CPUFREQ_HIGH)
270 rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
271 else
272 rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
273
274 if (rc)
275 printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
276
277 /* It's an irq GPIO so we should be able to just block here,
278 * I'll do that later after I've properly tested the IRQ code for
279 * platform functions
280 */
281 timeout = jiffies + HZ/10;
282 while(!time_after(jiffies, timeout)) {
283 args.count = 1;
284 args.u[0].p = &done;
285 pmf_call_one(pfunc_slewing_done, &args);
286 if (done)
287 break;
288 msleep(1);
289 }
290 if (done == 0)
291 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
292
293	/* If frequency is going down, ramp the voltage down last */
294 if (speed_mode > g5_pmode_cur)
295 g5_switch_volt(speed_mode);
296
297 g5_pmode_cur = speed_mode;
298 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
299
300 return 0;
301}
302
303static int g5_pfunc_query_freq(void)
304{
305 struct pmf_args args;
306 u32 val = 0;
307
308 args.count = 1;
309 args.u[0].p = &val;
310 pmf_call_one(pfunc_cpu_getfreq, &args);
311 return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
312}
313
314
315/*
316 * Common interface to the cpufreq core
317 */
318
319static int g5_cpufreq_verify(struct cpufreq_policy *policy)
320{
321 return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
322}
323
324static int g5_cpufreq_target(struct cpufreq_policy *policy,
325 unsigned int target_freq, unsigned int relation)
326{
327 unsigned int newstate = 0;
328 struct cpufreq_freqs freqs;
329 int rc;
330
331 if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
332 target_freq, relation, &newstate))
333 return -EINVAL;
334
335 if (g5_pmode_cur == newstate)
336 return 0;
337
338 mutex_lock(&g5_switch_mutex);
339
340 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
341 freqs.new = g5_cpu_freqs[newstate].frequency;
342
343 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
344 rc = g5_switch_freq(newstate);
345 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
346
347 mutex_unlock(&g5_switch_mutex);
348
349 return rc;
350}
351
352static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
353{
354 return g5_cpu_freqs[g5_pmode_cur].frequency;
355}
356
357static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
358{
359 policy->cpuinfo.transition_latency = transition_latency;
360 policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
361	/* Secondary CPUs are tied to the primary one by the
362	 * cpufreq core: in the secondary policy we tell it that
363	 * it actually must be one policy together with all the others. */
364 cpumask_copy(policy->cpus, cpu_online_mask);
365 cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
366
367 return cpufreq_frequency_table_cpuinfo(policy,
368 g5_cpu_freqs);
369}
370
371
372static struct cpufreq_driver g5_cpufreq_driver = {
373 .name = "powermac",
374 .owner = THIS_MODULE,
375 .flags = CPUFREQ_CONST_LOOPS,
376 .init = g5_cpufreq_cpu_init,
377 .verify = g5_cpufreq_verify,
378 .target = g5_cpufreq_target,
379 .get = g5_cpufreq_get_speed,
380 .attr = g5_cpu_freqs_attr,
381};
382
383
384#ifdef CONFIG_PMAC_SMU
385
386static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
387{
388 struct device_node *cpunode;
389 unsigned int psize, ssize;
390 unsigned long max_freq;
391 char *freq_method, *volt_method;
392 const u32 *valp;
393 u32 pvr_hi;
394 int use_volts_vdnap = 0;
395 int use_volts_smu = 0;
396 int rc = -ENODEV;
397
398 /* Check supported platforms */
399 if (of_machine_is_compatible("PowerMac8,1") ||
400 of_machine_is_compatible("PowerMac8,2") ||
401 of_machine_is_compatible("PowerMac9,1"))
402 use_volts_smu = 1;
403 else if (of_machine_is_compatible("PowerMac11,2"))
404 use_volts_vdnap = 1;
405 else
406 return -ENODEV;
407
408 /* Get first CPU node */
409 for (cpunode = NULL;
410 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
411 const u32 *reg = of_get_property(cpunode, "reg", NULL);
412 if (reg == NULL || (*reg) != 0)
413 continue;
414 if (!strcmp(cpunode->type, "cpu"))
415 break;
416 }
417 if (cpunode == NULL) {
418 printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
419 return -ENODEV;
420 }
421
422 /* Check 970FX for now */
423 valp = of_get_property(cpunode, "cpu-version", NULL);
424 if (!valp) {
425 DBG("No cpu-version property !\n");
426 goto bail_noprops;
427 }
428 pvr_hi = (*valp) >> 16;
429 if (pvr_hi != 0x3c && pvr_hi != 0x44) {
430 printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
431 goto bail_noprops;
432 }
433
434 /* Look for the powertune data in the device-tree */
435 g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize);
436 if (!g5_pmode_data) {
437 DBG("No power-mode-data !\n");
438 goto bail_noprops;
439 }
440 g5_pmode_max = psize / sizeof(u32) - 1;
441
442 if (use_volts_smu) {
443 const struct smu_sdbp_header *shdr;
444
445 /* Look for the FVT table */
446 shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
447 if (!shdr)
448 goto bail_noprops;
449 g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
450 ssize = (shdr->len * sizeof(u32)) -
451 sizeof(struct smu_sdbp_header);
452 g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
453 g5_fvt_cur = 0;
454
455 /* Sanity checking */
456 if (g5_fvt_count < 1 || g5_pmode_max < 1)
457 goto bail_noprops;
458
459 g5_switch_volt = g5_smu_switch_volt;
460 volt_method = "SMU";
461 } else if (use_volts_vdnap) {
462 struct device_node *root;
463
464 root = of_find_node_by_path("/");
465 if (root == NULL) {
466 printk(KERN_ERR "cpufreq: Can't find root of "
467 "device tree\n");
468 goto bail_noprops;
469 }
470 pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
471 pfunc_vdnap0_complete =
472 pmf_find_function(root, "slewing-done");
473 if (pfunc_set_vdnap0 == NULL ||
474 pfunc_vdnap0_complete == NULL) {
475 printk(KERN_ERR "cpufreq: Can't find required "
476 "platform function\n");
477 goto bail_noprops;
478 }
479
480 g5_switch_volt = g5_vdnap_switch_volt;
481 volt_method = "GPIO";
482 } else {
483 g5_switch_volt = g5_dummy_switch_volt;
484 volt_method = "none";
485 }
486
487 /*
488 * From what I see, clock-frequency is always the maximal frequency.
489 * The current driver cannot slew sysclk yet, so we really only deal
490 * with powertune steps for now. We also only implement full freq and
491 * half freq in this version. So far, I haven't yet seen a machine
492 * supporting anything else.
493 */
494 valp = of_get_property(cpunode, "clock-frequency", NULL);
495 if (!valp)
496 return -ENODEV;
497 max_freq = (*valp)/1000;
498 g5_cpu_freqs[0].frequency = max_freq;
499 g5_cpu_freqs[1].frequency = max_freq/2;
500
501 /* Set callbacks */
502 transition_latency = 12000;
503 g5_switch_freq = g5_scom_switch_freq;
504 g5_query_freq = g5_scom_query_freq;
505 freq_method = "SCOM";
506
507 /* Force apply current frequency to make sure everything is in
508 * sync (voltage is right for example). Firmware may leave us with
509 * a strange setting ...
510 */
511 g5_switch_volt(CPUFREQ_HIGH);
512 msleep(10);
513 g5_pmode_cur = -1;
514 g5_switch_freq(g5_query_freq());
515
516 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
517 printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
518 freq_method, volt_method);
519	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
520 g5_cpu_freqs[1].frequency/1000,
521 g5_cpu_freqs[0].frequency/1000,
522 g5_cpu_freqs[g5_pmode_cur].frequency/1000);
523
524 rc = cpufreq_register_driver(&g5_cpufreq_driver);
525
526	/* We keep the CPU node on hold... hopefully, Apple G5s don't have
527	 * hotplug CPUs with a dynamic device-tree ...
528 */
529 return rc;
530
531 bail_noprops:
532 of_node_put(cpunode);
533
534 return rc;
535}
536
537#endif /* CONFIG_PMAC_SMU */
538
539
540static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
541{
542 struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
543 const u8 *eeprom = NULL;
544 const u32 *valp;
545 u64 max_freq, min_freq, ih, il;
546 int has_volt = 1, rc = 0;
547
548 DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
549 " RackMac3,1...\n");
550
551 /* Get first CPU node */
552 for (cpunode = NULL;
553 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
554 if (!strcmp(cpunode->type, "cpu"))
555 break;
556 }
557 if (cpunode == NULL) {
558 printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
559 return -ENODEV;
560 }
561
562 /* Lookup the cpuid eeprom node */
563 cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
564 if (cpuid != NULL)
565 eeprom = of_get_property(cpuid, "cpuid", NULL);
566 if (eeprom == NULL) {
567 printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
568 rc = -ENODEV;
569 goto bail;
570 }
571
572 /* Lookup the i2c hwclock */
573 for (hwclock = NULL;
574 (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
575 const char *loc = of_get_property(hwclock,
576 "hwctrl-location", NULL);
577 if (loc == NULL)
578 continue;
579 if (strcmp(loc, "CPU CLOCK"))
580 continue;
581 if (!of_get_property(hwclock, "platform-get-frequency", NULL))
582 continue;
583 break;
584 }
585 if (hwclock == NULL) {
586 printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
587 rc = -ENODEV;
588 goto bail;
589 }
590
591 DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
592
593 /* Now get all the platform functions */
594 pfunc_cpu_getfreq =
595 pmf_find_function(hwclock, "get-frequency");
596 pfunc_cpu_setfreq_high =
597 pmf_find_function(hwclock, "set-frequency-high");
598 pfunc_cpu_setfreq_low =
599 pmf_find_function(hwclock, "set-frequency-low");
600 pfunc_slewing_done =
601 pmf_find_function(hwclock, "slewing-done");
602 pfunc_cpu0_volt_high =
603 pmf_find_function(hwclock, "set-voltage-high-0");
604 pfunc_cpu0_volt_low =
605 pmf_find_function(hwclock, "set-voltage-low-0");
606 pfunc_cpu1_volt_high =
607 pmf_find_function(hwclock, "set-voltage-high-1");
608 pfunc_cpu1_volt_low =
609 pmf_find_function(hwclock, "set-voltage-low-1");
610
611 /* Check we have minimum requirements */
612 if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
613 pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
614 printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
615 rc = -ENODEV;
616 goto bail;
617 }
618
619 /* Check that we have complete sets */
620 if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
621 pmf_put_function(pfunc_cpu0_volt_high);
622 pmf_put_function(pfunc_cpu0_volt_low);
623 pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
624 has_volt = 0;
625 }
626 if (!has_volt ||
627 pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
628 pmf_put_function(pfunc_cpu1_volt_high);
629 pmf_put_function(pfunc_cpu1_volt_low);
630 pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
631 }
632
633 /* Note: The device tree also contains a "platform-set-values"
634 * function for which I haven't quite figured out the usage. It
635	 * might have to be called on init and/or wakeup; I'm not too sure,
636 * but things seem to work fine without it so far ...
637 */
638
639 /* Get max frequency from device-tree */
640 valp = of_get_property(cpunode, "clock-frequency", NULL);
641 if (!valp) {
642 printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
643 rc = -ENODEV;
644 goto bail;
645 }
646
647 max_freq = (*valp)/1000;
648
649	/* Now calculate the reduced frequency by using the cpuid input freq
650	 * ratio. This requires 64-bit math unless we are willing to lose
651 * some precision
652 */
653 ih = *((u32 *)(eeprom + 0x10));
654 il = *((u32 *)(eeprom + 0x20));
655
656 /* Check for machines with no useful settings */
657 if (il == ih) {
658 printk(KERN_WARNING "cpufreq: No low frequency mode available"
659 " on this model !\n");
660 rc = -ENODEV;
661 goto bail;
662 }
663
664 min_freq = 0;
665 if (ih != 0 && il != 0)
666 min_freq = (max_freq * il) / ih;
667
668 /* Sanity check */
669 if (min_freq >= max_freq || min_freq < 1000) {
670 printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
671 rc = -ENXIO;
672 goto bail;
673 }
674 g5_cpu_freqs[0].frequency = max_freq;
675 g5_cpu_freqs[1].frequency = min_freq;
676
677 /* Set callbacks */
678 transition_latency = CPUFREQ_ETERNAL;
679 g5_switch_volt = g5_pfunc_switch_volt;
680 g5_switch_freq = g5_pfunc_switch_freq;
681 g5_query_freq = g5_pfunc_query_freq;
682
683 /* Force apply current frequency to make sure everything is in
684 * sync (voltage is right for example). Firmware may leave us with
685 * a strange setting ...
686 */
687 g5_switch_volt(CPUFREQ_HIGH);
688 msleep(10);
689 g5_pmode_cur = -1;
690 g5_switch_freq(g5_query_freq());
691
692 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
693 printk(KERN_INFO "Frequency method: i2c/pfunc, "
694 "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
695	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
696 g5_cpu_freqs[1].frequency/1000,
697 g5_cpu_freqs[0].frequency/1000,
698 g5_cpu_freqs[g5_pmode_cur].frequency/1000);
699
700 rc = cpufreq_register_driver(&g5_cpufreq_driver);
701 bail:
702 if (rc != 0) {
703 pmf_put_function(pfunc_cpu_getfreq);
704 pmf_put_function(pfunc_cpu_setfreq_high);
705 pmf_put_function(pfunc_cpu_setfreq_low);
706 pmf_put_function(pfunc_slewing_done);
707 pmf_put_function(pfunc_cpu0_volt_high);
708 pmf_put_function(pfunc_cpu0_volt_low);
709 pmf_put_function(pfunc_cpu1_volt_high);
710 pmf_put_function(pfunc_cpu1_volt_low);
711 }
712 of_node_put(hwclock);
713 of_node_put(cpuid);
714 of_node_put(cpunode);
715
716 return rc;
717}
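
A worked example of why the 64-bit math above matters (the EEPROM words are illustrative): with max_freq = 2000000 kHz, ih = 0x4000 and il = 0x2000, min_freq = (2000000 * 0x2000) / 0x4000 = 1000000 kHz. The intermediate product 2000000 * 0x2000 = 16384000000 already overflows 32 bits, hence the u64 declarations for ih, il, min_freq and max_freq.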
718
719static int __init g5_cpufreq_init(void)
720{
721 struct device_node *cpus;
722 int rc = 0;
723
724 cpus = of_find_node_by_path("/cpus");
725 if (cpus == NULL) {
726 DBG("No /cpus node !\n");
727 return -ENODEV;
728 }
729
730 if (of_machine_is_compatible("PowerMac7,2") ||
731 of_machine_is_compatible("PowerMac7,3") ||
732 of_machine_is_compatible("RackMac3,1"))
733 rc = g5_pm72_cpufreq_init(cpus);
734#ifdef CONFIG_PMAC_SMU
735 else
736 rc = g5_neo2_cpufreq_init(cpus);
737#endif /* CONFIG_PMAC_SMU */
738
739 of_node_put(cpus);
740 return rc;
741}
742
743module_init(g5_cpufreq_init);
744
745
746MODULE_LICENSE("GPL");
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 7ef60b52d6e0..42be53743133 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -32,7 +32,7 @@
  * book:
  * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
  */
-static struct appldata_mem_data {
+struct appldata_mem_data {
 	u64 timestamp;
 	u32 sync_count_1;	/* after VM collected the record data, */
 	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
@@ -63,7 +63,7 @@ static struct appldata_mem_data {
 	u64 pgmajfault;		/* page faults (major only) */
 // <-- New in 2.6
 
-} __attribute__((packed)) appldata_mem_data;
+} __packed;
 
 
 /*
@@ -118,7 +118,6 @@ static struct appldata_ops ops = {
 	.record_nr = APPLDATA_RECORD_MEM_ID,
 	.size      = sizeof(struct appldata_mem_data),
 	.callback  = &appldata_get_mem_data,
-	.data      = &appldata_mem_data,
 	.owner     = THIS_MODULE,
 	.mod_lvl   = {0xF0, 0xF0},	/* EBCDIC "00" */
 };
@@ -131,7 +130,17 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_mem_init(void)
 {
-	return appldata_register_ops(&ops);
+	int ret;
+
+	ops.data = kzalloc(sizeof(struct appldata_mem_data), GFP_KERNEL);
+	if (!ops.data)
+		return -ENOMEM;
+
+	ret = appldata_register_ops(&ops);
+	if (ret)
+		kfree(ops.data);
+
+	return ret;
 }
 
 /*
@@ -142,6 +151,7 @@ static int __init appldata_mem_init(void)
 static void __exit appldata_mem_exit(void)
 {
 	appldata_unregister_ops(&ops);
+	kfree(ops.data);
 }
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 2d224b945355..66037d2622b4 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -29,7 +29,7 @@
  * book:
  * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
  */
-static struct appldata_net_sum_data {
+struct appldata_net_sum_data {
 	u64 timestamp;
 	u32 sync_count_1;	/* after VM collected the record data, */
 	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
@@ -51,7 +51,7 @@ static struct appldata_net_sum_data {
 	u64 rx_dropped;		/* no space in linux buffers */
 	u64 tx_dropped;		/* no space available in linux */
 	u64 collisions;		/* collisions while transmitting */
-} __attribute__((packed)) appldata_net_sum_data;
+} __packed;
 
 
 /*
@@ -121,7 +121,6 @@ static struct appldata_ops ops = {
 	.record_nr = APPLDATA_RECORD_NET_SUM_ID,
 	.size      = sizeof(struct appldata_net_sum_data),
 	.callback  = &appldata_get_net_sum_data,
-	.data      = &appldata_net_sum_data,
 	.owner     = THIS_MODULE,
 	.mod_lvl   = {0xF0, 0xF0},	/* EBCDIC "00" */
 };
@@ -134,7 +133,17 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_net_init(void)
 {
-	return appldata_register_ops(&ops);
+	int ret;
+
+	ops.data = kzalloc(sizeof(struct appldata_net_sum_data), GFP_KERNEL);
+	if (!ops.data)
+		return -ENOMEM;
+
+	ret = appldata_register_ops(&ops);
+	if (ret)
+		kfree(ops.data);
+
+	return ret;
 }
 
 /*
@@ -145,6 +154,7 @@ static int __init appldata_net_init(void)
 static void __exit appldata_net_exit(void)
 {
 	appldata_unregister_ops(&ops);
+	kfree(ops.data);
 }
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 7fd3690b6760..138893e5f736 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -651,9 +651,7 @@ static int hypfs_create_cpu_files(struct super_block *sb,
 	}
 	diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
 	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
-	if (IS_ERR(rc))
-		return PTR_ERR(rc);
-	return 0;
+	return PTR_RET(rc);
 }
 
 static void *hypfs_create_lpar_files(struct super_block *sb,
@@ -702,9 +700,7 @@ static int hypfs_create_phys_cpu_files(struct super_block *sb,
 		return PTR_ERR(rc);
 	diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
 	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
-	if (IS_ERR(rc))
-		return PTR_ERR(rc);
-	return 0;
+	return PTR_RET(rc);
 }
 
 static void *hypfs_create_phys_files(struct super_block *sb,
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 9819891ed7a2..4066cee0c2d2 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -9,9 +9,18 @@
 #ifndef _ASM_S390_AIRQ_H
 #define _ASM_S390_AIRQ_H
 
-typedef void (*adapter_int_handler_t)(void *, void *);
+struct airq_struct {
+	struct hlist_node list;		/* Handler queueing. */
+	void (*handler)(struct airq_struct *);	/* Thin-interrupt handler */
+	u8 *lsi_ptr;			/* Local-Summary-Indicator pointer */
+	u8 lsi_mask;			/* Local-Summary-Indicator mask */
+	u8 isc;				/* Interrupt-subclass */
+	u8 flags;
+};
 
-void *s390_register_adapter_interrupt(adapter_int_handler_t, void *, u8);
-void s390_unregister_adapter_interrupt(void *, u8);
+#define AIRQ_PTR_ALLOCATED	0x01
+
+int register_adapter_interrupt(struct airq_struct *airq);
+void unregister_adapter_interrupt(struct airq_struct *airq);
 
 #endif /* _ASM_S390_AIRQ_H */
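
The new interface replaces the (handler, data, isc) argument triple with a caller-owned structure. A hedged usage sketch against the header above (my_handler and the ISC value are illustrative, not from this patch):

	static void my_handler(struct airq_struct *airq)
	{
		/* thin-interrupt handling here */
	}

	static struct airq_struct my_airq = {
		.handler = my_handler,
		.isc     = 3,	/* illustrative interrupt subclass */
	};

	/* driver init */
	int rc = register_adapter_interrupt(&my_airq);
	if (rc)
		return rc;
	/* ... */
	/* driver exit */
	unregister_adapter_interrupt(&my_airq);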
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 2f8c1abeb086..3fbc67d9e197 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -53,7 +53,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
-	return (dma_addr == DMA_ERROR_CODE);
+	return dma_addr == DMA_ERROR_CODE;
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 2ee66a65f2d4..0aa6a7ed95a3 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,6 +13,16 @@
 
 #define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
 
+static inline int __test_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return 0;
+	ptr = (unsigned char *) facilities + (nr >> 3);
+	return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
 /*
  * The test_facility function uses the bit ordering where the MSB is bit 0.
  * That makes it easier to query facility bits with the bit number as
@@ -20,12 +30,7 @@
  */
 static inline int test_facility(unsigned long nr)
 {
-	unsigned char *ptr;
-
-	if (nr >= MAX_FACILITY_BIT)
-		return 0;
-	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
-	return (*ptr & (0x80 >> (nr & 7))) != 0;
+	return __test_facility(nr, &S390_lowcore.stfle_fac_list);
 }
 
 /**
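
A worked example of the MSB-first indexing in __test_facility(): facility nr = 10 tests byte nr >> 3 = 1 against mask 0x80 >> (10 & 7) = 0x20. As a sketch:

	unsigned char fac[256] = { [1] = 0x20 };	/* only facility 10 set */

	__test_facility(10, fac);	/* -> 1 */
	__test_facility(11, fac);	/* -> 0: mask is 0x80 >> 3 = 0x10 */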
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index fd9be010f9b2..cd6b9ee7b69c 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -13,28 +13,6 @@
 #include <asm/page.h>
 #include <asm/pci_io.h>
 
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-	unsigned long real_address;
-	asm volatile(
-		 "	lra	%0,0(%1)\n"
-		 "	jz	0f\n"
-		 "	la	%0,0\n"
-		 "0:"
-		 : "=a" (real_address) : "a" (address) : "cc");
-	return real_address;
-}
-#define virt_to_phys virt_to_phys
-
-static inline void * phys_to_virt(unsigned long address)
-{
-	return (void *) address;
-}
-
 void *xlate_dev_mem_ptr(unsigned long phys);
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 16bd5d169cdb..3238d4004e84 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -62,13 +62,20 @@ struct sca_block {
 #define CPUSTAT_MCDS	0x00000100
 #define CPUSTAT_SM	0x00000080
 #define CPUSTAT_G	0x00000008
+#define CPUSTAT_GED	0x00000004
 #define CPUSTAT_J	0x00000002
 #define CPUSTAT_P	0x00000001
 
 struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
 	__u32	prefix;			/* 0x0004 */
-	__u8	reserved8[32];		/* 0x0008 */
+	__u8	reserved08[4];		/* 0x0008 */
+#define PROG_IN_SIE (1<<0)
+	__u32	prog0c;			/* 0x000c */
+	__u8	reserved10[16];		/* 0x0010 */
+#define PROG_BLOCK_SIE 0x00000001
+	atomic_t prog20;		/* 0x0020 */
+	__u8	reserved24[4];		/* 0x0024 */
 	__u64	cputm;			/* 0x0028 */
 	__u64	ckc;			/* 0x0030 */
 	__u64	epoch;			/* 0x0038 */
@@ -90,7 +97,8 @@ struct kvm_s390_sie_block {
 	__u32	scaoh;			/* 0x005c */
 	__u8	reserved60;		/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
-	__u8	reserved62[2];		/* 0x0062 */
+	__u8	ecb2;			/* 0x0062 */
+	__u8	reserved63[1];		/* 0x0063 */
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
@@ -130,6 +138,7 @@ struct kvm_vcpu_stat {
 	u32 deliver_program_int;
 	u32 deliver_io_int;
 	u32 exit_wait_state;
+	u32 instruction_pfmf;
 	u32 instruction_stidp;
 	u32 instruction_spx;
 	u32 instruction_stpx;
@@ -166,7 +175,7 @@ struct kvm_s390_ext_info {
 };
 
 #define PGM_OPERATION            0x01
-#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_PRIVILEGED_OP        0x02
 #define PGM_EXECUTE              0x03
 #define PGM_PROTECTION           0x04
 #define PGM_ADDRESSING           0x05
@@ -219,7 +228,7 @@ struct kvm_s390_local_interrupt {
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
 	int timer_due; /* event indicator for waitqueue below */
-	wait_queue_head_t wq;
+	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
 };
@@ -266,4 +275,5 @@ struct kvm_arch{
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+extern char sie_exit;
 #endif
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6c1801235db9..6e577ba0e5da 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -120,7 +120,6 @@ struct zpci_dev {
 
 	struct dentry	*debugfs_dev;
 	struct dentry	*debugfs_perf;
-	struct dentry	*debugfs_debug;
 };
 
 struct pci_hp_callback_ops {
@@ -143,7 +142,6 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 void zpci_stop_device(struct zpci_dev *);
 void zpci_free_device(struct zpci_dev *);
-int zpci_scan_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 5f0173a31693..1141fb3e7b21 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -14,3 +14,13 @@
 /* Per-CPU flags for PMU states */
 #define PMU_F_RESERVED			0x1000
 #define PMU_F_ENABLED			0x2000
+
+#ifdef CONFIG_64BIT
+
+/* Perf callbacks */
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 590c3219c634..e1408ddb94f8 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,6 +22,9 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+			  unsigned long key, bool nq);
+
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
 	typedef struct { char _[n]; } addrtype;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9aefa3c64eb2..0ea4e591fa78 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -296,18 +296,16 @@ extern unsigned long MODULES_END;
296#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) 296#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
297 297
298/* Page status table bits for virtualization */ 298/* Page status table bits for virtualization */
299#define RCP_ACC_BITS 0xf0000000UL 299#define PGSTE_ACC_BITS 0xf0000000UL
300#define RCP_FP_BIT 0x08000000UL 300#define PGSTE_FP_BIT 0x08000000UL
301#define RCP_PCL_BIT 0x00800000UL 301#define PGSTE_PCL_BIT 0x00800000UL
302#define RCP_HR_BIT 0x00400000UL 302#define PGSTE_HR_BIT 0x00400000UL
303#define RCP_HC_BIT 0x00200000UL 303#define PGSTE_HC_BIT 0x00200000UL
304#define RCP_GR_BIT 0x00040000UL 304#define PGSTE_GR_BIT 0x00040000UL
305#define RCP_GC_BIT 0x00020000UL 305#define PGSTE_GC_BIT 0x00020000UL
306#define RCP_IN_BIT 0x00002000UL /* IPTE notify bit */ 306#define PGSTE_UR_BIT 0x00008000UL
307 307#define PGSTE_UC_BIT 0x00004000UL /* user dirty (migration) */
308/* User dirty / referenced bit for KVM's migration feature */ 308#define PGSTE_IN_BIT 0x00002000UL /* IPTE notify bit */
309#define KVM_UR_BIT 0x00008000UL
310#define KVM_UC_BIT 0x00004000UL
311 309
312#else /* CONFIG_64BIT */ 310#else /* CONFIG_64BIT */
313 311
@@ -364,18 +362,16 @@ extern unsigned long MODULES_END;
364 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO) 362 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
365 363
366/* Page status table bits for virtualization */ 364/* Page status table bits for virtualization */
367#define RCP_ACC_BITS 0xf000000000000000UL 365#define PGSTE_ACC_BITS 0xf000000000000000UL
368#define RCP_FP_BIT 0x0800000000000000UL 366#define PGSTE_FP_BIT 0x0800000000000000UL
369#define RCP_PCL_BIT 0x0080000000000000UL 367#define PGSTE_PCL_BIT 0x0080000000000000UL
370#define RCP_HR_BIT 0x0040000000000000UL 368#define PGSTE_HR_BIT 0x0040000000000000UL
371#define RCP_HC_BIT 0x0020000000000000UL 369#define PGSTE_HC_BIT 0x0020000000000000UL
372#define RCP_GR_BIT 0x0004000000000000UL 370#define PGSTE_GR_BIT 0x0004000000000000UL
373#define RCP_GC_BIT 0x0002000000000000UL 371#define PGSTE_GC_BIT 0x0002000000000000UL
374#define RCP_IN_BIT 0x0000200000000000UL /* IPTE notify bit */ 372#define PGSTE_UR_BIT 0x0000800000000000UL
375 373#define PGSTE_UC_BIT 0x0000400000000000UL /* user dirty (migration) */
376/* User dirty / referenced bit for KVM's migration feature */ 374#define PGSTE_IN_BIT 0x0000200000000000UL /* IPTE notify bit */
377#define KVM_UR_BIT 0x0000800000000000UL
378#define KVM_UC_BIT 0x0000400000000000UL
379 375
380#endif /* CONFIG_64BIT */ 376#endif /* CONFIG_64BIT */
381 377
@@ -615,8 +611,8 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
615 asm( 611 asm(
616 " lg %0,%2\n" 612 " lg %0,%2\n"
617 "0: lgr %1,%0\n" 613 "0: lgr %1,%0\n"
618 " nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */ 614 " nihh %0,0xff7f\n" /* clear PCL bit in old */
619 " oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */ 615 " oihh %1,0x0080\n" /* set PCL bit in new */
620 " csg %0,%1,%2\n" 616 " csg %0,%1,%2\n"
621 " jl 0b\n" 617 " jl 0b\n"
622 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) 618 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
@@ -629,7 +625,7 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
629{ 625{
630#ifdef CONFIG_PGSTE 626#ifdef CONFIG_PGSTE
631 asm( 627 asm(
632 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ 628 " nihh %1,0xff7f\n" /* clear PCL bit */
633 " stg %1,%0\n" 629 " stg %1,%0\n"
634 : "=Q" (ptep[PTRS_PER_PTE]) 630 : "=Q" (ptep[PTRS_PER_PTE])
635 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) 631 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
@@ -662,14 +658,14 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	else if (bits)
 		page_reset_referenced(address);
 	/* Transfer page changed & referenced bit to guest bits in pgste */
-	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
+	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 	/* Get host changed & referenced bits from pgste */
-	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
+	bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
 	/* Transfer page changed & referenced bit to kvm user bits */
-	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
+	pgste_val(pgste) |= bits << 45;		/* PGSTE_UR_BIT & PGSTE_UC_BIT */
 	/* Clear relevant host bits in pgste. */
-	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
-	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
+	pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 	/* Copy page access key and fetch protection bit to pgste */
 	pgste_val(pgste) |=
 		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
@@ -690,15 +686,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 	/* Get referenced bit from storage key */
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
 	if (young)
-		pgste_val(pgste) |= RCP_GR_BIT;
+		pgste_val(pgste) |= PGSTE_GR_BIT;
 	/* Get host referenced bit from pgste */
-	if (pgste_val(pgste) & RCP_HR_BIT) {
-		pgste_val(pgste) &= ~RCP_HR_BIT;
+	if (pgste_val(pgste) & PGSTE_HR_BIT) {
+		pgste_val(pgste) &= ~PGSTE_HR_BIT;
 		young = 1;
 	}
 	/* Transfer referenced bit to kvm user bits and pte */
 	if (young) {
-		pgste_val(pgste) |= KVM_UR_BIT;
+		pgste_val(pgste) |= PGSTE_UR_BIT;
 		pte_val(*ptep) |= _PAGE_SWR;
 	}
 #endif
@@ -720,7 +716,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 	 * The guest C/R information is still in the PGSTE, set real
 	 * key C/R to 0.
 	 */
-	nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 	page_set_storage_key(address, nkey, 0);
 #endif
 }
@@ -750,6 +746,7 @@ struct gmap {
 	struct mm_struct *mm;
 	unsigned long *table;
 	unsigned long asce;
+	void *private;
 	struct list_head crst_list;
 };
 
@@ -808,8 +805,8 @@ static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	if (pgste_val(pgste) & RCP_IN_BIT) {
-		pgste_val(pgste) &= ~RCP_IN_BIT;
+	if (pgste_val(pgste) & PGSTE_IN_BIT) {
+		pgste_val(pgste) &= ~PGSTE_IN_BIT;
 		gmap_do_ipte_notify(mm, addr, ptep);
 	}
 #endif
@@ -977,8 +974,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_all(ptep, pgste);
-		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
-		pgste_val(pgste) &= ~KVM_UC_BIT;
+		dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+		pgste_val(pgste) &= ~PGSTE_UC_BIT;
 		pgste_set_unlock(ptep, pgste);
 		return dirty;
 	}
@@ -997,8 +994,8 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_young(ptep, pgste);
-		young = !!(pgste_val(pgste) & KVM_UR_BIT);
-		pgste_val(pgste) &= ~KVM_UR_BIT;
+		young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
+		pgste_val(pgste) &= ~PGSTE_UR_BIT;
 		pgste_set_unlock(ptep, pgste);
 	}
 	return young;
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 559512a455da..52b56533c57c 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -24,6 +24,7 @@ struct pt_regs
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
 	unsigned int int_code;
+	unsigned int int_parm;
 	unsigned long int_parm_long;
 };
 
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 9ccd1905bdad..6a9a9eb645f5 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -35,6 +35,7 @@ header-y += siginfo.h
 header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
+header-y += sclp_ctl.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/chsc.h b/arch/s390/include/uapi/asm/chsc.h
index 1c6a7f85a581..65dc694725a8 100644
--- a/arch/s390/include/uapi/asm/chsc.h
+++ b/arch/s390/include/uapi/asm/chsc.h
@@ -29,6 +29,16 @@ struct chsc_async_area {
 	__u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
 } __attribute__ ((packed));
 
+struct chsc_header {
+	__u16 length;
+	__u16 code;
+} __attribute__ ((packed));
+
+struct chsc_sync_area {
+	struct chsc_header header;
+	__u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
+} __attribute__ ((packed));
+
 struct chsc_response_struct {
 	__u16 length;
 	__u16 code;
@@ -126,5 +136,8 @@ struct chsc_cpd_info {
 #define CHSC_INFO_CCL _IOWR(CHSC_IOCTL_MAGIC, 0x86, struct chsc_comp_list)
 #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
 #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
+#define CHSC_START_SYNC _IOWR(CHSC_IOCTL_MAGIC, 0x89, struct chsc_sync_area)
+#define CHSC_ON_CLOSE_SET _IOWR(CHSC_IOCTL_MAGIC, 0x8a, struct chsc_async_area)
+#define CHSC_ON_CLOSE_REMOVE _IO(CHSC_IOCTL_MAGIC, 0x8b)
 
 #endif
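(The new CHSC_START_SYNC ioctl submits a synchronous CHSC through the CIO misc device. A minimal userspace sketch, with caveats: the /dev/chsc node name is an assumption, the request body must be filled with a real CHSC command, and error handling is elided:

    /* Sketch: issue a synchronous CHSC via the new ioctl. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/chsc.h>

    int chsc_sync_sketch(void)
    {
            struct chsc_sync_area area;
            int fd, rc;

            memset(&area, 0, sizeof(area));
            /* caller must fill area.header/area.data with a valid command */
            fd = open("/dev/chsc", O_RDWR);  /* node name is an assumption */
            if (fd < 0)
                    return -1;
            rc = ioctl(fd, CHSC_START_SYNC, &area);
            close(fd);
            return rc;
    }
)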
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 38eca3ba40e2..5812a3b2df9e 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -261,6 +261,10 @@ struct dasd_snid_ioctl_data {
 #define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6)
 /* Resume IO on device */
 #define BIODASDRESUME  _IO(DASD_IOCTL_LETTER,7)
+/* Abort all I/O on a device */
+#define BIODASDABORTIO _IO(DASD_IOCTL_LETTER, 240)
+/* Allow I/O on a device */
+#define BIODASDALLOWIO _IO(DASD_IOCTL_LETTER, 241)
 
 
 /* retrieve API version number */
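(Both new DASD ioctls are argument-less _IO commands. A hedged userspace sketch that fences and later re-allows I/O on a device; the device path is an example:

    /* Sketch: abort, then re-allow, all I/O on a DASD. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/dasd.h>

    int dasd_fence_sketch(const char *dev)  /* e.g. "/dev/dasda" */
    {
            int fd = open(dev, O_RDONLY);
            int rc;

            if (fd < 0)
                    return -1;
            rc = ioctl(fd, BIODASDABORTIO);          /* kill outstanding I/O */
            if (!rc)
                    rc = ioctl(fd, BIODASDALLOWIO);  /* device usable again */
            close(fd);
            return rc;
    }
)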
diff --git a/arch/s390/include/uapi/asm/sclp_ctl.h b/arch/s390/include/uapi/asm/sclp_ctl.h
new file mode 100644
index 000000000000..f2818613ee41
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sclp_ctl.h
@@ -0,0 +1,24 @@
+/*
+ * IOCTL interface for SCLP
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_SCLP_CTL_H
+#define _ASM_SCLP_CTL_H
+
+#include <linux/types.h>
+
+struct sclp_ctl_sccb {
+	__u32 cmdw;
+	__u64 sccb;
+} __attribute__((packed));
+
+#define SCLP_CTL_IOCTL_MAGIC 0x10
+
+#define SCLP_CTL_SCCB \
+	_IOWR(SCLP_CTL_IOCTL_MAGIC, 0x10, struct sclp_ctl_sccb)
+
+#endif
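(The new header defines one ioctl that passes an SCCB, by address, to the SCLP. A userspace sketch under stated assumptions: the /dev/sclp node name and the 4K page-aligned SCCB buffer are guesses, and cmdw must be a valid SCLP command word:

    /* Sketch: hand an SCCB to the SCLP control device. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/sclp_ctl.h>

    int sclp_cmd_sketch(uint32_t cmdw)
    {
            static uint8_t sccb[4096] __attribute__((aligned(4096)));
            struct sclp_ctl_sccb req;
            int fd, rc;

            memset(sccb, 0, sizeof(sccb));
            sccb[0] = 0x10;  /* SCCB length field: high byte of 4096 */
            req.cmdw = cmdw;
            req.sccb = (uint64_t)(unsigned long) sccb;
            fd = open("/dev/sclp", O_RDWR);  /* node name is an assumption */
            if (fd < 0)
                    return -1;
            rc = ioctl(fd, SCLP_CTL_SCCB, &req);
            close(fd);
            return rc;
    }
)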
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 7a82f9f70100..2416138ebd3e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C
 
 #include <linux/kbuild.h>
+#include <linux/kvm_host.h>
 #include <linux/sched.h>
 #include <asm/cputime.h>
 #include <asm/vdso.h>
@@ -47,6 +48,7 @@ int main(void)
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
 	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
+	DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
 	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
@@ -161,6 +163,8 @@ int main(void)
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
 	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
+	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
+	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
 #endif /* CONFIG_32BIT */
 	return 0;
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4d5e6f8a7978..be7a408be7a1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -429,11 +429,19 @@ io_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+io_loop:
 	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_IRQ
+	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
+	jz	io_return
+	tpi	0
+	jz	io_return
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	j	io_loop
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
@@ -573,10 +581,10 @@ ext_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
+	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	TRACE_IRQS_OFF
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
-	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
 	basr	%r14,%r1		# call do_extint
 	j	io_return
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index aa0ab02e9595..3ddbc26d246e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -54,7 +54,7 @@ void handle_signal32(unsigned long sig, struct k_sigaction *ka,
 void do_notify_resume(struct pt_regs *regs);
 
 struct ext_code;
-void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long);
+void do_extint(struct pt_regs *regs);
 void do_restart(void);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4c17eece707e..1c039d0c24c7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -47,7 +47,6 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		 _TIF_SYSCALL_TRACEPOINT)
-_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -81,23 +80,27 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
 #endif
 	.endm
 
-	.macro	HANDLE_SIE_INTERCEPT scratch,pgmcheck
+	.macro	HANDLE_SIE_INTERCEPT scratch,reason
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 	tmhh	%r8,0x0001		# interrupting from user ?
-	jnz	.+42
+	jnz	.+62
 	lgr	\scratch,%r9
-	slg	\scratch,BASED(.Lsie_loop)
-	clg	\scratch,BASED(.Lsie_length)
-	.if	\pgmcheck
+	slg	\scratch,BASED(.Lsie_critical)
+	clg	\scratch,BASED(.Lsie_critical_length)
+	.if	\reason==1
 	# Some program interrupts are suppressing (e.g. protection).
 	# We must also check the instruction after SIE in that case.
 	# do_protection_exception will rewind to rewind_pad
-	jh	.+22
+	jh	.+42
 	.else
-	jhe	.+22
+	jhe	.+42
 	.endif
-	lg	%r9,BASED(.Lsie_loop)
-	LPP	BASED(.Lhost_id)	# set host id
+	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	LPP	__SF_EMPTY+16(%r15)		# set host id
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	larl	%r9,sie_exit			# skip forward to sie_exit
+	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
 #endif
 	.endm
 
@@ -450,7 +453,7 @@ ENTRY(io_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,2
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	tmhh	%r8,0x0001		# interrupting from user?
 	jz	io_skip
@@ -460,10 +463,18 @@ io_skip:
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+io_loop:
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_IRQ
+	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
+	jz	io_return
+	tpi	0
+	jz	io_return
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	j	io_loop
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
@@ -595,7 +606,7 @@ ENTRY(ext_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,3
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	tmhh	%r8,0x0001		# interrupting from user ?
 	jz	ext_skip
@@ -605,13 +616,13 @@ ext_skip:
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
+	lghi	%r1,__LC_EXT_PARAMS2
+	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
+	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
+	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-	lghi	%r1,4096
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	llgf	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
-	llgf	%r4,__LC_EXT_PARAMS	# get external parameter
-	lg	%r5,__LC_EXT_PARAMS2-4096(%r1)	# get 64 bit external parameter
 	brasl	%r14,do_extint
 	j	io_return
 
@@ -643,7 +654,7 @@ ENTRY(mcck_int_handler)
 	lg	%r12,__LC_THREAD_INFO
 	larl	%r13,system_call
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,0
+	HANDLE_SIE_INTERCEPT %r14,4
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	mcck_panic		# yes -> rest of mcck code invalid
 	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
@@ -937,56 +948,50 @@ ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
-	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
+	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions in the sie_loop should not cause program interrupts. So
-# lets use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-rewind_pad:
-	nop	0
-sie_loop:
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
-	jnz	sie_exit
 	lg	%r14,__LC_GMAP			# get gmap pointer
 	ltgr	%r14,%r14
 	jz	sie_gmap
 	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
+	tm	__SIE_PROG20+3(%r14),1		# last exit...
+	jnz	sie_done
 	LPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
 sie_done:
 	LPP	__SF_EMPTY+16(%r15)		# set host id
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-sie_exit:
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions beween sie64a and sie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+rewind_pad:
+	nop	0
+	.globl sie_exit
+sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lghi	%r2,0
+	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
 	br	%r14
 sie_fault:
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lghi	%r2,-EFAULT
-	br	%r14
+	lghi	%r14,-EFAULT
+	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
+	j	sie_exit
 
 	.align	8
-.Lsie_loop:
-	.quad	sie_loop
-.Lsie_length:
-	.quad	sie_done - sie_loop
-.Lhost_id:
-	.quad	0
+.Lsie_critical:
+	.quad	sie_gmap
+.Lsie_critical_length:
+	.quad	sie_done - sie_gmap
 
 	EX_TABLE(rewind_pad,sie_fault)
-	EX_TABLE(sie_loop,sie_fault)
+	EX_TABLE(sie_exit,sie_fault)
 #endif
 
 	.section .rodata, "a"
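(The net effect of the sie64a rework above: the function no longer hand-rolls two exit paths. __SF_EMPTY+24..31 of the stack frame now carries an exit reason, written either by HANDLE_SIE_INTERCEPT (a small positive per-interrupt-class code via mvi) or by sie_fault (-EFAULT via stg), and the single sie_exit path returns it in %r2. A C-level sketch of what a caller sees, mirroring the __vcpu_run() change further down in this series; the stand-alone helper name is made up:

    /* Sketch: consuming sie64a's new return convention. */
    static int run_guest_once(struct kvm_vcpu *vcpu)
    {
            int rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);

            if (rc > 0)  /* positive reason: kicked out by a host interrupt */
                    rc = 0;
            return rc;   /* 0, or -EFAULT from sie_fault */
    }
)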
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index dd3c1994b8bd..54b0995514e8 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -234,9 +234,9 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 }
 EXPORT_SYMBOL(unregister_external_interrupt);
 
-void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
-			   unsigned int param32, unsigned long param64)
+void __irq_entry do_extint(struct pt_regs *regs)
 {
+	struct ext_code ext_code;
 	struct pt_regs *old_regs;
 	struct ext_int_info *p;
 	int index;
@@ -248,6 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
 		clock_comparator_work();
 	}
 	kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
+	ext_code = *(struct ext_code *) &regs->int_code;
 	if (ext_code.code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
 
@@ -255,7 +256,8 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
 	rcu_read_lock();
 	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
 		if (likely(p->code == ext_code.code))
-			p->handler(ext_code, param32, param64);
+			p->handler(ext_code, regs->int_parm,
+				   regs->int_parm_long);
 	rcu_read_unlock();
 	irq_exit();
 	set_irq_regs(old_regs);
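(Note that the handler signature itself is untouched; only the parameter plumbing changed. The low-level entry code now stows the cpu address plus the 32-bit and 64-bit external interrupt parameters into pt_regs, and do_extint unpacks them before fanning out. A hedged client sketch using the register_external_interrupt() API visible above; the interruption code 0x1234 and the handler body are made up:

    /* Sketch: an external-interrupt client is unaffected by this change. */
    #include <asm/irq.h>

    static void my_ext_handler(struct ext_code ext_code,
                               unsigned int param32, unsigned long param64)
    {
            /* param32 was regs->int_parm, param64 regs->int_parm_long */
    }

    static int __init my_init(void)
    {
            return register_external_interrupt(0x1234, my_ext_handler);
    }
)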
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index f58f37f66824..a6fc037671b1 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
+#include <linux/kvm_host.h>
 #include <linux/percpu.h>
 #include <linux/export.h>
 #include <asm/irq.h>
@@ -39,6 +40,57 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL(perf_num_counters);
 
+static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
+{
+	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
+
+	if (!stack)
+		return NULL;
+
+	return (struct kvm_s390_sie_block *) stack->empty1[0];
+}
+
+static bool is_in_guest(struct pt_regs *regs)
+{
+	unsigned long ip = instruction_pointer(regs);
+
+	if (user_mode(regs))
+		return false;
+
+	return ip == (unsigned long) &sie_exit;
+}
+
+static unsigned long guest_is_user_mode(struct pt_regs *regs)
+{
+	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
+}
+
+static unsigned long instruction_pointer_guest(struct pt_regs *regs)
+{
+	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	return is_in_guest(regs) ? instruction_pointer_guest(regs)
+				 : instruction_pointer(regs);
+}
+
+static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
+{
+	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
+					: PERF_RECORD_MISC_GUEST_KERNEL;
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	if (is_in_guest(regs))
+		return perf_misc_guest_flags(regs);
+
+	return user_mode(regs) ? PERF_RECORD_MISC_USER
+			       : PERF_RECORD_MISC_KERNEL;
+}
+
 void perf_event_print_debug(void)
 {
 	struct cpumf_ctr_info cf_info;
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 9bdbcef1da9e..3bac589844a7 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -7,6 +7,7 @@ EXPORT_SYMBOL(_mcount);
 #endif
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 EXPORT_SYMBOL(sie64a);
+EXPORT_SYMBOL(sie_exit);
 #endif
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 4f977d0d25c2..15a016c10563 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -49,7 +49,6 @@
 
 enum {
 	ec_schedule = 0,
-	ec_call_function,
 	ec_call_function_single,
 	ec_stop_cpu,
 };
@@ -438,8 +437,6 @@ static void smp_handle_ext_call(void)
 		smp_stop_cpu();
 	if (test_bit(ec_schedule, &bits))
 		scheduler_ipi();
-	if (test_bit(ec_call_function, &bits))
-		generic_smp_call_function_interrupt();
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
 }
@@ -456,7 +453,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
+		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 8fe9d65a4585..40b4c6470f88 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -6,7 +6,8 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o)
+KVM := ../../../virt/kvm
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 1c01a9912989..3074475c8ae0 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -132,6 +132,9 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
 	int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	trace_kvm_s390_handle_diag(vcpu, code);
 	switch (code) {
 	case 0x10:
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index b7d1b2edeeb3..5ee56e5acc23 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -22,87 +22,6 @@
 #include "trace.h"
 #include "trace-s390.h"
 
-static int handle_lctlg(struct kvm_vcpu *vcpu)
-{
-	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
-	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	u64 useraddr;
-	int reg, rc;
-
-	vcpu->stat.instruction_lctlg++;
-
-	useraddr = kvm_s390_get_base_disp_rsy(vcpu);
-
-	if (useraddr & 7)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	reg = reg1;
-
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
-		   useraddr);
-	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
-
-	do {
-		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
-			       (u64 __user *) useraddr);
-		if (rc)
-			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		useraddr += 8;
-		if (reg == reg3)
-			break;
-		reg = (reg + 1) % 16;
-	} while (1);
-	return 0;
-}
-
-static int handle_lctl(struct kvm_vcpu *vcpu)
-{
-	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
-	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	u64 useraddr;
-	u32 val = 0;
-	int reg, rc;
-
-	vcpu->stat.instruction_lctl++;
-
-	useraddr = kvm_s390_get_base_disp_rs(vcpu);
-
-	if (useraddr & 3)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
-		   useraddr);
-	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
-
-	reg = reg1;
-	do {
-		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
-		if (rc)
-			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
-		vcpu->arch.sie_block->gcr[reg] |= val;
-		useraddr += 4;
-		if (reg == reg3)
-			break;
-		reg = (reg + 1) % 16;
-	} while (1);
-	return 0;
-}
-
-static const intercept_handler_t eb_handlers[256] = {
-	[0x2f] = handle_lctlg,
-	[0x8a] = kvm_s390_handle_priv_eb,
-};
-
-static int handle_eb(struct kvm_vcpu *vcpu)
-{
-	intercept_handler_t handler;
-
-	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
-	if (handler)
-		return handler(vcpu);
-	return -EOPNOTSUPP;
-}
 
 static const intercept_handler_t instruction_handlers[256] = {
 	[0x01] = kvm_s390_handle_01,
@@ -110,10 +29,10 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0x83] = kvm_s390_handle_diag,
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
-	[0xb7] = handle_lctl,
+	[0xb7] = kvm_s390_handle_lctl,
 	[0xb9] = kvm_s390_handle_b9,
 	[0xe5] = kvm_s390_handle_e5,
-	[0xeb] = handle_eb,
+	[0xeb] = kvm_s390_handle_eb,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -174,47 +93,12 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
-	unsigned long vmaddr;
 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
-	int rc;
 
 	vcpu->stat.exit_validity++;
 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
-	if (viwhy == 0x37) {
-		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
-				    vcpu->arch.gmap);
-		if (IS_ERR_VALUE(vmaddr)) {
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		rc = fault_in_pages_writeable((char __user *) vmaddr,
-					      PAGE_SIZE);
-		if (rc) {
-			/* user will receive sigsegv, exit to user */
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
-				    vcpu->arch.gmap);
-		if (IS_ERR_VALUE(vmaddr)) {
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-		rc = fault_in_pages_writeable((char __user *) vmaddr,
-					      PAGE_SIZE);
-		if (rc) {
-			/* user will receive sigsegv, exit to user */
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-	} else
-		rc = -EOPNOTSUPP;
-
-out:
-	if (rc)
-		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
-			   viwhy);
-	return rc;
+	WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
+	return -EOPNOTSUPP;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5c948177529e..7f35cb33e510 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -438,7 +438,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	add_wait_queue(&vcpu->wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
 		(!vcpu->arch.local_int.timer_due) &&
@@ -452,7 +452,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
@@ -465,8 +465,8 @@ void kvm_s390_tasklet(unsigned long parm)
 
 	spin_lock(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->arch.local_int.wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock(&vcpu->arch.local_int.lock);
 }
 
@@ -613,7 +613,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	spin_lock_bh(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	BUG_ON(waitqueue_active(&li->wq));
+	BUG_ON(waitqueue_active(li->wq));
 	spin_unlock_bh(&li->lock);
 	return 0;
 }
@@ -746,8 +746,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	li = fi->local_int[sigcpu];
 	spin_lock_bh(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -832,8 +832,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock_bh(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c1c7c683fa26..ba694d2ba51e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -59,6 +59,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
+	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
@@ -84,6 +85,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 };
 
 static unsigned long long *facilities;
+static struct gmap_notifier gmap_notifier;
 
 /* Section: not file related */
 int kvm_arch_hardware_enable(void *garbage)
@@ -96,13 +98,18 @@ void kvm_arch_hardware_disable(void *garbage)
 {
 }
 
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
+
 int kvm_arch_hardware_setup(void)
 {
+	gmap_notifier.notifier_call = kvm_gmap_notifier;
+	gmap_register_ipte_notifier(&gmap_notifier);
 	return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
+	gmap_unregister_ipte_notifier(&gmap_notifier);
 }
 
 void kvm_arch_check_processor_compat(void *rtn)
@@ -239,6 +246,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.gmap = gmap_alloc(current->mm);
 		if (!kvm->arch.gmap)
 			goto out_nogmap;
+		kvm->arch.gmap->private = kvm;
 	}
 
 	kvm->arch.css_support = 0;
@@ -270,7 +278,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
-	kfree(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -309,6 +317,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		vcpu->arch.gmap = gmap_alloc(current->mm);
 		if (!vcpu->arch.gmap)
 			return -ENOMEM;
+		vcpu->arch.gmap->private = vcpu->kvm;
 		return 0;
 	}
 
@@ -373,8 +382,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED);
+						    CPUSTAT_STOPPED |
+						    CPUSTAT_GED);
 	vcpu->arch.sie_block->ecb   = 6;
+	vcpu->arch.sie_block->ecb2  = 8;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
 	vcpu->arch.sie_block->fac   = (int) (long) facilities;
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
@@ -397,7 +408,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 	rc = -ENOMEM;
 
-	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu)
 		goto out;
 
@@ -427,7 +438,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
-	init_waitqueue_head(&vcpu->arch.local_int.wq);
+	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 	spin_unlock(&kvm->arch.float_int.lock);
 
@@ -442,7 +453,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
-	kfree(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
 	return ERR_PTR(rc);
 }
@@ -454,6 +465,50 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void s390_vcpu_block(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+}
+
+void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+}
+
+/*
+ * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * If the CPU is not running (e.g. waiting as idle) the function will
+ * return immediately. */
+void exit_sie(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
+		cpu_relax();
+}
+
+/* Kick a guest cpu out of SIE and prevent SIE-reentry */
+void exit_sie_sync(struct kvm_vcpu *vcpu)
+{
+	s390_vcpu_block(vcpu);
+	exit_sie(vcpu);
+}
+
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
+{
+	int i;
+	struct kvm *kvm = gmap->private;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		/* match against both prefix pages */
+		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
+			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
+			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+			exit_sie_sync(vcpu);
+		}
+	}
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	/* kvm common code refers to this, but never calls it */
@@ -606,6 +661,27 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
+	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
+	 * This ensures that the ipte instruction for this request has
+	 * already finished. We might race against a second unmapper that
+	 * wants to set the blocking bit. Lets just retry the request loop.
+	 */
+	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+		int rc;
+		rc = gmap_ipte_notify(vcpu->arch.gmap,
+				      vcpu->arch.sie_block->prefix,
+				      PAGE_SIZE * 2);
+		if (rc)
+			return rc;
+		s390_vcpu_unblock(vcpu);
+	}
+	return 0;
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int rc;
@@ -621,6 +697,10 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	if (!kvm_is_ucontrol(vcpu->kvm))
 		kvm_s390_deliver_pending_interrupts(vcpu);
 
+	rc = kvm_s390_handle_requests(vcpu);
+	if (rc)
+		return rc;
+
 	vcpu->arch.sie_block->icptcode = 0;
 	preempt_disable();
 	kvm_guest_enter();
@@ -630,7 +710,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_sie_enter(vcpu,
 				 atomic_read(&vcpu->arch.sie_block->cpuflags));
 	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	if (rc) {
+	if (rc > 0)
+		rc = 0;
+	if (rc < 0) {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
@@ -1046,7 +1128,7 @@ static int __init kvm_s390_init(void)
 		return -ENOMEM;
 	}
 	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
-	facilities[0] &= 0xff00fff3f47c0000ULL;
+	facilities[0] &= 0xff82fff3f47c0000ULL;
 	facilities[1] &= 0x001c000000000000ULL;
 	return 0;
 }
@@ -1059,3 +1141,12 @@ static void __exit kvm_s390_exit(void)
 
 module_init(kvm_s390_init);
 module_exit(kvm_s390_exit);
+
+/*
+ * Enable autoloading of the kvm module.
+ * Note that we add the module alias here instead of virt/kvm/kvm_main.c
+ * since x86 takes a different approach.
+ */
+#include <linux/miscdevice.h>
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index efc14f687265..028ca9fd2158 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -63,6 +63,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
 	vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u;
 	vcpu->arch.sie_block->ihcpu  = 0xffff;
+	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
 static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
@@ -85,6 +86,12 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
+{
+	*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+}
+
 static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
@@ -125,7 +132,8 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
-int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
@@ -133,6 +141,10 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
 				unsigned long addr);
+void s390_vcpu_block(struct kvm_vcpu *vcpu);
+void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void exit_sie(struct kvm_vcpu *vcpu);
+void exit_sie_sync(struct kvm_vcpu *vcpu);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 6bbd7b5a0bbe..0da3e6eb6be6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,7 +1,7 @@
 /*
  * handling privileged instructions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2013
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -20,6 +20,9 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
 #include <asm/ptrace.h>
 #include <asm/compat.h>
 #include "gaccess.h"
@@ -34,6 +37,9 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_spx++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
@@ -65,6 +71,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stpx++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
@@ -89,6 +98,9 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stap++;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	useraddr = kvm_s390_get_base_disp_s(vcpu);
 
 	if (useraddr & 1)
@@ -105,7 +117,12 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 static int handle_skey(struct kvm_vcpu *vcpu)
 {
 	vcpu->stat.instruction_storage_key++;
-	vcpu->arch.sie_block->gpsw.addr -= 4;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	vcpu->arch.sie_block->gpsw.addr =
+		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 	return 0;
 }
@@ -129,9 +146,10 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 		 * Store the two-word I/O interruption code into the
 		 * provided area.
 		 */
-		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
-		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
-		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
+		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
+		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	} else {
 		/*
 		 * Store the three-word I/O interruption code into
@@ -182,6 +200,9 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	if (vcpu->kvm->arch.css_support) {
 		/*
 		 * Most I/O instructions will be handled by userspace.
@@ -210,8 +231,12 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	int rc;
 
 	vcpu->stat.instruction_stfl++;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	/* only pass the facility bits, which we can handle */
-	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;
+	facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
@@ -255,8 +280,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	u64 addr;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu,
-						   PGM_PRIVILEGED_OPERATION);
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -278,6 +303,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -296,6 +324,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
296 324
297 vcpu->stat.instruction_stidp++; 325 vcpu->stat.instruction_stidp++;
298 326
327 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
328 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
329
299 operand2 = kvm_s390_get_base_disp_s(vcpu); 330 operand2 = kvm_s390_get_base_disp_s(vcpu);
300 331
301 if (operand2 & 7) 332 if (operand2 & 7)
@@ -351,16 +382,30 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
351 vcpu->stat.instruction_stsi++; 382 vcpu->stat.instruction_stsi++;
352 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 383 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
353 384
354 operand2 = kvm_s390_get_base_disp_s(vcpu); 385 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
386 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
387
388 if (fc > 3) {
389 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; /* cc 3 */
390 return 0;
391 }
355 392
356 if (operand2 & 0xfff && fc > 0) 393 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
394 || vcpu->run->s.regs.gprs[1] & 0xffff0000)
357 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 395 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
358 396
359 switch (fc) { 397 if (fc == 0) {
360 case 0:
361 vcpu->run->s.regs.gprs[0] = 3 << 28; 398 vcpu->run->s.regs.gprs[0] = 3 << 28;
362 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 399 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); /* cc 0 */
363 return 0; 400 return 0;
401 }
402
403 operand2 = kvm_s390_get_base_disp_s(vcpu);
404
405 if (operand2 & 0xfff)
406 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
407
408 switch (fc) {
364 case 1: /* same handling for 1 and 2 */ 409 case 1: /* same handling for 1 and 2 */
365 case 2: 410 case 2:
366 mem = get_zeroed_page(GFP_KERNEL); 411 mem = get_zeroed_page(GFP_KERNEL);
@@ -377,8 +422,6 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
377 goto out_no_data; 422 goto out_no_data;
378 handle_stsi_3_2_2(vcpu, (void *) mem); 423 handle_stsi_3_2_2(vcpu, (void *) mem);
379 break; 424 break;
380 default:
381 goto out_no_data;
382 } 425 }
383 426
384 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { 427 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
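For the condition-code arithmetic in the stsi hunks above: the cc occupies PSW bits 18-19, which are bits 44-45 counted from the least significant end of the 64-bit mask, so "|= 3 << 44" forces cc 3 and "&= ~(3 << 44)" clears it to cc 0. A compilable model of just that arithmetic, not the kernel code:

        #include <stdio.h>

        /* cc lives in bits 44-45 of the 64-bit PSW mask (PSW bits 18-19). */
        static unsigned long long set_cc(unsigned long long mask, unsigned int cc)
        {
                mask &= ~(3ull << 44);                  /* clear old cc */
                mask |= (unsigned long long)(cc & 3) << 44;
                return mask;
        }

        int main(void)
        {
                unsigned long long psw_mask = 0;

                psw_mask = set_cc(psw_mask, 3);         /* fc > 3: report cc 3 */
                printf("cc=%llu\n", (psw_mask >> 44) & 3);
                psw_mask = set_cc(psw_mask, 0);         /* fc == 0: cc 0 */
                printf("cc=%llu\n", (psw_mask >> 44) & 3);
                return 0;
        }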
@@ -432,20 +475,14 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
432 intercept_handler_t handler; 475 intercept_handler_t handler;
433 476
434 /* 477 /*
435 * a lot of B2 instructions are privileged. We first check for 478 * A lot of B2 instructions are privileged. Here we check for
436 * the privileged ones that we can handle in the kernel. If the 479 * the privileged ones that we can handle in the kernel.
437 * kernel can handle this instruction, we check for the problem 480 * Anything else goes to userspace.
438 * state bit and (a) handle the instruction or (b) send a code 2 481 */
439 * program check.
440 * Anything else goes to userspace.*/
441 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 482 handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
442 if (handler) { 483 if (handler)
443 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 484 return handler(vcpu);
444 return kvm_s390_inject_program_int(vcpu, 485
445 PGM_PRIVILEGED_OPERATION);
446 else
447 return handler(vcpu);
448 }
449 return -EOPNOTSUPP; 486 return -EOPNOTSUPP;
450} 487}
451 488
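The kvm_s390_handle_b2 hunk above drops the centralized problem-state test; each handler now checks PSW_MASK_PSTATE itself, as added earlier in this patch. A self-contained model of the resulting dispatch, with a hypothetical opcode slot and a stub handler (PSW_MASK_PSTATE is the real s390 value):

        #include <stdio.h>

        #define PSW_MASK_PSTATE 0x0001000000000000ULL   /* real s390 value */
        #define EOPNOTSUPP      95

        struct vcpu { unsigned long long psw_mask; };

        /* Each handler validates problem state itself now. */
        static int handle_priv_op(struct vcpu *v)
        {
                if (v->psw_mask & PSW_MASK_PSTATE)
                        return -1;      /* would inject PGM_PRIVILEGED_OP */
                return 0;               /* emulate the instruction */
        }

        static int (*const b2_handlers[256])(struct vcpu *) = {
                [0x02] = handle_priv_op,        /* hypothetical opcode slot */
        };

        static int dispatch_b2(struct vcpu *v, unsigned char ipa_lo)
        {
                return b2_handlers[ipa_lo] ? b2_handlers[ipa_lo](v) : -EOPNOTSUPP;
        }

        int main(void)
        {
                struct vcpu v = { .psw_mask = PSW_MASK_PSTATE };

                printf("%d\n", dispatch_b2(&v, 0x02));  /* -1: privileged op */
                return 0;
        }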
@@ -453,8 +490,7 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
453{ 490{
454 int reg1, reg2; 491 int reg1, reg2;
455 492
456 reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24; 493 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
457 reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
458 494
459 /* This basically extracts the mask half of the psw. */ 495 /* This basically extracts the mask half of the psw. */
460 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; 496 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
@@ -467,9 +503,88 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
467 return 0; 503 return 0;
468} 504}
469 505
506#define PFMF_RESERVED 0xfffc0101UL
507#define PFMF_SK 0x00020000UL
508#define PFMF_CF 0x00010000UL
509#define PFMF_UI 0x00008000UL
510#define PFMF_FSC 0x00007000UL
511#define PFMF_NQ 0x00000800UL
512#define PFMF_MR 0x00000400UL
513#define PFMF_MC 0x00000200UL
514#define PFMF_KEY 0x000000feUL
515
516static int handle_pfmf(struct kvm_vcpu *vcpu)
517{
518 int reg1, reg2;
519 unsigned long start, end;
520
521 vcpu->stat.instruction_pfmf++;
522
523 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
524
525 if (!MACHINE_HAS_PFMF)
526 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
527
528 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
529 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
530
531 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
532 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
533
534 /* Only provide non-quiescing support if the host supports it */
535 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
536 S390_lowcore.stfl_fac_list & 0x00020000)
537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538
539 /* No support for conditional-SSKE */
540 if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
541 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
542
543 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
544 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
545 case 0x00000000:
546 end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
547 break;
548 case 0x00001000:
549 end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
550 break;
551 /* We don't support EDAT2
552 case 0x00002000:
553 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
554 break;*/
555 default:
556 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
557 }
558 while (start < end) {
559 unsigned long useraddr;
560
561 useraddr = gmap_translate(start, vcpu->arch.gmap);
562 if (IS_ERR((void *)useraddr))
563 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
564
565 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
566 if (clear_user((void __user *)useraddr, PAGE_SIZE))
567 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
568 }
569
570 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
571 if (set_guest_storage_key(current->mm, useraddr,
572 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
573 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
574 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
575 }
576
577 start += PAGE_SIZE;
578 }
579 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
580 vcpu->run->s.regs.gprs[reg2] = end;
581 return 0;
582}
583
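A userspace sketch of the PFMF range walk added above: the frame-size code selects a 4K or 1M frame, the start address is truncated to a page, the end is rounded to the frame boundary, and the loop then clears or rekeys one 4K page per iteration. The address value is illustrative:

        #include <stdio.h>

        #define PAGE_SIZE       4096UL
        #define PAGE_MASK       (~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long addr = 0x123456;          /* guest address in r2 */
                unsigned long start = addr & PAGE_MASK;
                /* fsc 0x00000000: 4K frame; fsc 0x00001000 would use 1UL << 20 */
                unsigned long end = (start + (1UL << 12)) & ~((1UL << 12) - 1);

                for (; start < end; start += PAGE_SIZE)
                        printf("clear frame / set key at %#lx\n", start);
                return 0;
        }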
470static const intercept_handler_t b9_handlers[256] = { 584static const intercept_handler_t b9_handlers[256] = {
471 [0x8d] = handle_epsw, 585 [0x8d] = handle_epsw,
472 [0x9c] = handle_io_inst, 586 [0x9c] = handle_io_inst,
587 [0xaf] = handle_pfmf,
473}; 588};
474 589
475int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) 590int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
@@ -478,29 +593,96 @@ int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
478 593
479 /* This is handled just as for the B2 instructions. */ 594 /* This is handled just as for the B2 instructions. */
480 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; 595 handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
481 if (handler) { 596 if (handler)
482 if ((handler != handle_epsw) && 597 return handler(vcpu);
483 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)) 598
484 return kvm_s390_inject_program_int(vcpu,
485 PGM_PRIVILEGED_OPERATION);
486 else
487 return handler(vcpu);
488 }
489 return -EOPNOTSUPP; 599 return -EOPNOTSUPP;
490} 600}
491 601
602int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
603{
604 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
605 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
606 u64 useraddr;
607 u32 val = 0;
608 int reg, rc;
609
610 vcpu->stat.instruction_lctl++;
611
612 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
613 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
614
615 useraddr = kvm_s390_get_base_disp_rs(vcpu);
616
617 if (useraddr & 3)
618 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
619
620 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
621 useraddr);
622 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
623
624 reg = reg1;
625 do {
626 rc = get_guest(vcpu, val, (u32 __user *) useraddr);
627 if (rc)
628 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
629 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
630 vcpu->arch.sie_block->gcr[reg] |= val;
631 useraddr += 4;
632 if (reg == reg3)
633 break;
634 reg = (reg + 1) % 16;
635 } while (1);
636
637 return 0;
638}
639
640static int handle_lctlg(struct kvm_vcpu *vcpu)
641{
642 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
643 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
644 u64 useraddr;
645 int reg, rc;
646
647 vcpu->stat.instruction_lctlg++;
648
649 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
650 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
651
652 useraddr = kvm_s390_get_base_disp_rsy(vcpu);
653
654 if (useraddr & 7)
655 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
656
657 reg = reg1;
658
659 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
660 useraddr);
661 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
662
663 do {
664 rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
665 (u64 __user *) useraddr);
666 if (rc)
667 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
668 useraddr += 8;
669 if (reg == reg3)
670 break;
671 reg = (reg + 1) % 16;
672 } while (1);
673
674 return 0;
675}
676
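The r1..r3 walk in the two load-control handlers above wraps modulo 16, matching the hardware's control-register numbering. A tiny model (r1=14, r3=1 loads cr14, cr15, cr0, cr1):

        #include <stdio.h>

        int main(void)
        {
                int reg1 = 14, reg3 = 1;
                int reg = reg1;

                do {
                        printf("load cr%d\n", reg);     /* get_guest + assign */
                        if (reg == reg3)
                                break;
                        reg = (reg + 1) % 16;           /* wrap past cr15 to cr0 */
                } while (1);
                return 0;
        }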
492static const intercept_handler_t eb_handlers[256] = { 677static const intercept_handler_t eb_handlers[256] = {
678 [0x2f] = handle_lctlg,
493 [0x8a] = handle_io_inst, 679 [0x8a] = handle_io_inst,
494}; 680};
495 681
496int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu) 682int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
497{ 683{
498 intercept_handler_t handler; 684 intercept_handler_t handler;
499 685
500 /* All eb instructions that end up here are privileged. */
501 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
502 return kvm_s390_inject_program_int(vcpu,
503 PGM_PRIVILEGED_OPERATION);
504 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; 686 handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
505 if (handler) 687 if (handler)
506 return handler(vcpu); 688 return handler(vcpu);
@@ -515,6 +697,9 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
515 697
516 vcpu->stat.instruction_tprot++; 698 vcpu->stat.instruction_tprot++;
517 699
700 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
701 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
702
518 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 703 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
519 704
520 /* we only handle the Linux memory detection case: 705 /* we only handle the Linux memory detection case:
@@ -560,8 +745,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
560 u32 value; 745 u32 value;
561 746
562 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 747 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
563 return kvm_s390_inject_program_int(vcpu, 748 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
564 PGM_PRIVILEGED_OPERATION);
565 749
566 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) 750 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
567 return kvm_s390_inject_program_int(vcpu, 751 return kvm_s390_inject_program_int(vcpu,
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 1c48ab2845e0..bec398c57acf 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
79 list_add_tail(&inti->list, &li->list); 79 list_add_tail(&inti->list, &li->list);
80 atomic_set(&li->active, 1); 80 atomic_set(&li->active, 1);
81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
82 if (waitqueue_active(&li->wq)) 82 if (waitqueue_active(li->wq))
83 wake_up_interruptible(&li->wq); 83 wake_up_interruptible(li->wq);
84 spin_unlock_bh(&li->lock); 84 spin_unlock_bh(&li->lock);
85 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 85 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
117 list_add_tail(&inti->list, &li->list); 117 list_add_tail(&inti->list, &li->list);
118 atomic_set(&li->active, 1); 118 atomic_set(&li->active, 1);
119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
120 if (waitqueue_active(&li->wq)) 120 if (waitqueue_active(li->wq))
121 wake_up_interruptible(&li->wq); 121 wake_up_interruptible(li->wq);
122 spin_unlock_bh(&li->lock); 122 spin_unlock_bh(&li->lock);
123 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 123 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); 124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
145 atomic_set(&li->active, 1); 145 atomic_set(&li->active, 1);
146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
147 li->action_bits |= action; 147 li->action_bits |= action;
148 if (waitqueue_active(&li->wq)) 148 if (waitqueue_active(li->wq))
149 wake_up_interruptible(&li->wq); 149 wake_up_interruptible(li->wq);
150out: 150out:
151 spin_unlock_bh(&li->lock); 151 spin_unlock_bh(&li->lock);
152 152
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
250 250
251 list_add_tail(&inti->list, &li->list); 251 list_add_tail(&inti->list, &li->list);
252 atomic_set(&li->active, 1); 252 atomic_set(&li->active, 1);
253 if (waitqueue_active(&li->wq)) 253 if (waitqueue_active(li->wq))
254 wake_up_interruptible(&li->wq); 254 wake_up_interruptible(li->wq);
255 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 255 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
256 256
257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
@@ -333,8 +333,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
333 333
334 /* sigp in userspace can exit */ 334 /* sigp in userspace can exit */
335 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 335 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
336 return kvm_s390_inject_program_int(vcpu, 336 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
337 PGM_PRIVILEGED_OPERATION);
338 337
339 order_code = kvm_s390_get_base_disp_rs(vcpu); 338 order_code = kvm_s390_get_base_disp_rs(vcpu);
340 339
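The sigp.c hunks above change li->wq from an embedded wait queue to a pointer to the vcpu's wait queue, so wake-ups pass li->wq directly instead of &li->wq. A minimal model of the layout change, with all types reduced to stand-ins:

        #include <stdio.h>

        struct wait_queue { int waiters; };

        struct local_int {
                struct wait_queue *wq;  /* was: struct wait_queue wq; */
        };

        static void wake_up_interruptible(struct wait_queue *wq)
        {
                if (wq->waiters)
                        printf("waking %d waiter(s)\n", wq->waiters);
        }

        int main(void)
        {
                struct wait_queue vcpu_wq = { .waiters = 1 };
                struct local_int li = { .wq = &vcpu_wq };

                wake_up_interruptible(li.wq);   /* was: &li.wq */
                return 0;
        }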
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a938b548f07e..17bf4d3d303a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -689,7 +689,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
689 entry = *ptep; 689 entry = *ptep;
690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { 690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
691 pgste = pgste_get_lock(ptep); 691 pgste = pgste_get_lock(ptep);
692 pgste_val(pgste) |= RCP_IN_BIT; 692 pgste_val(pgste) |= PGSTE_IN_BIT;
693 pgste_set_unlock(ptep, pgste); 693 pgste_set_unlock(ptep, pgste);
694 start += PAGE_SIZE; 694 start += PAGE_SIZE;
695 len -= PAGE_SIZE; 695 len -= PAGE_SIZE;
@@ -771,6 +771,54 @@ static inline void page_table_free_pgste(unsigned long *table)
771 __free_page(page); 771 __free_page(page);
772} 772}
773 773
774int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
775 unsigned long key, bool nq)
776{
777 spinlock_t *ptl;
778 pgste_t old, new;
779 pte_t *ptep;
780
781 down_read(&mm->mmap_sem);
782 ptep = get_locked_pte(mm, addr, &ptl);
783 if (unlikely(!ptep)) {
784 up_read(&mm->mmap_sem);
785 return -EFAULT;
786 }
787
788 new = old = pgste_get_lock(ptep);
789 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
790 PGSTE_ACC_BITS | PGSTE_FP_BIT);
791 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
792 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
793 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
794 unsigned long address, bits;
795 unsigned char skey;
796
797 address = pte_val(*ptep) & PAGE_MASK;
798 skey = page_get_storage_key(address);
799 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
800 /* Set storage key ACC and FP */
801 page_set_storage_key(address,
802 (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
803 !nq);
804
805 /* Merge host changed & referenced into pgste */
806 pgste_val(new) |= bits << 52;
807 /* Transfer skey changed & referenced bit to kvm user bits */
808 pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
809 }
810 /* changing the guest storage key is considered a change of the page */
811 if ((pgste_val(new) ^ pgste_val(old)) &
812 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
813 pgste_val(new) |= PGSTE_UC_BIT;
814
815 pgste_set_unlock(ptep, new);
816 pte_unmap_unlock(*ptep, ptl);
817 up_read(&mm->mmap_sem);
818 return 0;
819}
820EXPORT_SYMBOL(set_guest_storage_key);
821
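A sketch of the bit relocation set_guest_storage_key() performs above, reusing the same shift counts; the key-byte layout constants here are assumed for illustration, not the kernel's definitions. The architected key byte (ACC, FP, R, C) is split across the two pgste halves:

        #include <stdio.h>

        #define _PAGE_ACC_BITS          0xf0ULL /* illustrative layout */
        #define _PAGE_FP_BIT            0x08ULL
        #define _PAGE_REFERENCED        0x04ULL
        #define _PAGE_CHANGED           0x02ULL

        int main(void)
        {
                unsigned long long key = 0xfe;  /* ACC=0xf, FP, R, C */
                unsigned long long pgste = 0;

                pgste |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
                pgste |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
                printf("pgste=%#llx\n", pgste);
                return 0;
        }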
774#else /* CONFIG_PGSTE */ 822#else /* CONFIG_PGSTE */
775 823
776static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, 824static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 1912f3bb190c..0022e1ebfbde 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -81,8 +81,8 @@ struct hws_data_entry {
81 unsigned int:16; 81 unsigned int:16;
82 unsigned int prim_asn:16; /* primary ASN */ 82 unsigned int prim_asn:16; /* primary ASN */
83 unsigned long long ia; /* Instruction Address */ 83 unsigned long long ia; /* Instruction Address */
84 unsigned long long lpp; /* Logical-Partition Program Param. */ 84 unsigned long long gpp; /* Guest Program Parameter */
85 unsigned long long vpp; /* Virtual-Machine Program Param. */ 85 unsigned long long hpp; /* Host Program Parameter */
86}; 86};
87 87
88struct hws_trailer_entry { 88struct hws_trailer_entry {
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f1e5be85d592..e2956ad39a4f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -82,10 +82,13 @@ struct intr_bucket {
82 82
83static struct intr_bucket *bucket; 83static struct intr_bucket *bucket;
84 84
85/* Adapter local summary indicator */ 85/* Adapter interrupt definitions */
86static u8 *zpci_irq_si; 86static void zpci_irq_handler(struct airq_struct *airq);
87 87
88static atomic_t irq_retries = ATOMIC_INIT(0); 88static struct airq_struct zpci_airq = {
89 .handler = zpci_irq_handler,
90 .isc = PCI_ISC,
91};
89 92
90/* I/O Map */ 93/* I/O Map */
91static DEFINE_SPINLOCK(zpci_iomap_lock); 94static DEFINE_SPINLOCK(zpci_iomap_lock);
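The hunk above converts zPCI to the new adapter-interrupt API: handler and ISC now travel together in one registration struct instead of separate isc_register/s390_register_adapter_interrupt calls. A compilable model of that shape; the struct and function names come from the patch, but the bodies are stand-ins:

        #include <stdio.h>

        struct airq_struct {
                void (*handler)(struct airq_struct *airq);
                unsigned char isc;
                unsigned char *lsi_ptr;         /* filled in by registration */
        };

        static unsigned char summary;

        static int register_adapter_interrupt(struct airq_struct *airq)
        {
                airq->lsi_ptr = &summary;       /* stand-in for the real indicator */
                return 0;
        }

        static void my_handler(struct airq_struct *airq)
        {
                printf("airq on isc %d\n", airq->isc);
        }

        static struct airq_struct my_airq = {
                .handler = my_handler,
                .isc = 3,
        };

        int main(void)
        {
                if (!register_adapter_interrupt(&my_airq))
                        *my_airq.lsi_ptr = 1;   /* summon handler on every ISC hit */
                my_airq.handler(&my_airq);
                return 0;
        }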
@@ -404,7 +407,7 @@ static struct pci_ops pci_root_ops = {
404/* store the last handled bit to implement fair scheduling of devices */ 407/* store the last handled bit to implement fair scheduling of devices */
405static DEFINE_PER_CPU(unsigned long, next_sbit); 408static DEFINE_PER_CPU(unsigned long, next_sbit);
406 409
407static void zpci_irq_handler(void *dont, void *need) 410static void zpci_irq_handler(struct airq_struct *airq)
408{ 411{
409 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit); 412 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
410 int rescan = 0, max = aisb_max; 413 int rescan = 0, max = aisb_max;
@@ -452,7 +455,6 @@ scan:
452 max = aisb_max; 455 max = aisb_max;
453 sbit = find_first_bit_left(bucket->aisb, max); 456 sbit = find_first_bit_left(bucket->aisb, max);
454 if (sbit != max) { 457 if (sbit != max) {
455 atomic_inc(&irq_retries);
456 rescan++; 458 rescan++;
457 goto scan; 459 goto scan;
458 } 460 }
@@ -565,7 +567,21 @@ static void zpci_map_resources(struct zpci_dev *zdev)
565 pr_debug("BAR%i: -> start: %Lx end: %Lx\n", 567 pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
566 i, pdev->resource[i].start, pdev->resource[i].end); 568 i, pdev->resource[i].start, pdev->resource[i].end);
567 } 569 }
568}; 570}
571
572static void zpci_unmap_resources(struct zpci_dev *zdev)
573{
574 struct pci_dev *pdev = zdev->pdev;
575 resource_size_t len;
576 int i;
577
578 for (i = 0; i < PCI_BAR_COUNT; i++) {
579 len = pci_resource_len(pdev, i);
580 if (!len)
581 continue;
582 pci_iounmap(pdev, (void *) pdev->resource[i].start);
583 }
584}
569 585
570struct zpci_dev *zpci_alloc_device(void) 586struct zpci_dev *zpci_alloc_device(void)
571{ 587{
@@ -701,25 +717,20 @@ static int __init zpci_irq_init(void)
701 goto out_alloc; 717 goto out_alloc;
702 } 718 }
703 719
704 isc_register(PCI_ISC); 720 rc = register_adapter_interrupt(&zpci_airq);
705 zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC); 721 if (rc)
706 if (IS_ERR(zpci_irq_si)) {
707 rc = PTR_ERR(zpci_irq_si);
708 zpci_irq_si = NULL;
709 goto out_ai; 722 goto out_ai;
710 } 723 /* Set summary to 1 to be called every time for the ISC. */
724 *zpci_airq.lsi_ptr = 1;
711 725
712 for_each_online_cpu(cpu) 726 for_each_online_cpu(cpu)
713 per_cpu(next_sbit, cpu) = 0; 727 per_cpu(next_sbit, cpu) = 0;
714 728
715 spin_lock_init(&bucket->lock); 729 spin_lock_init(&bucket->lock);
716 /* set summary to 1 to be called every time for the ISC */
717 *zpci_irq_si = 1;
718 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 730 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
719 return 0; 731 return 0;
720 732
721out_ai: 733out_ai:
722 isc_unregister(PCI_ISC);
723 free_page((unsigned long) bucket->alloc); 734 free_page((unsigned long) bucket->alloc);
724out_alloc: 735out_alloc:
725 free_page((unsigned long) bucket->aisb); 736 free_page((unsigned long) bucket->aisb);
@@ -732,21 +743,10 @@ static void zpci_irq_exit(void)
732{ 743{
733 free_page((unsigned long) bucket->alloc); 744 free_page((unsigned long) bucket->alloc);
734 free_page((unsigned long) bucket->aisb); 745 free_page((unsigned long) bucket->aisb);
735 s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC); 746 unregister_adapter_interrupt(&zpci_airq);
736 isc_unregister(PCI_ISC);
737 kfree(bucket); 747 kfree(bucket);
738} 748}
739 749
740void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
741{
742 if (!zdev)
743 return;
744
745 seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
746 seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
747 get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
748}
749
750static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, 750static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
751 unsigned long flags, int domain) 751 unsigned long flags, int domain)
752{ 752{
@@ -810,6 +810,16 @@ int pcibios_add_device(struct pci_dev *pdev)
810 return 0; 810 return 0;
811} 811}
812 812
813void pcibios_release_device(struct pci_dev *pdev)
814{
815 struct zpci_dev *zdev = get_zdev(pdev);
816
817 zpci_unmap_resources(zdev);
818 zpci_fmb_disable_device(zdev);
819 zpci_debug_exit_device(zdev);
820 zdev->pdev = NULL;
821}
822
813static int zpci_scan_bus(struct zpci_dev *zdev) 823static int zpci_scan_bus(struct zpci_dev *zdev)
814{ 824{
815 struct resource *res; 825 struct resource *res;
@@ -950,25 +960,6 @@ void zpci_stop_device(struct zpci_dev *zdev)
950} 960}
951EXPORT_SYMBOL_GPL(zpci_stop_device); 961EXPORT_SYMBOL_GPL(zpci_stop_device);
952 962
953int zpci_scan_device(struct zpci_dev *zdev)
954{
955 zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
956 if (!zdev->pdev) {
957 pr_err("pci_scan_single_device failed for fid: 0x%x\n",
958 zdev->fid);
959 goto out;
960 }
961
962 pci_bus_add_devices(zdev->bus);
963
964 return 0;
965out:
966 zpci_dma_exit_device(zdev);
967 clp_disable_fh(zdev);
968 return -EIO;
969}
970EXPORT_SYMBOL_GPL(zpci_scan_device);
971
972static inline int barsize(u8 size) 963static inline int barsize(u8 size)
973{ 964{
974 return (size) ? (1 << size) >> 10 : 0; 965 return (size) ? (1 << size) >> 10 : 0;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index bd34359d1546..2e9539625d93 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -236,7 +236,6 @@ int clp_disable_fh(struct zpci_dev *zdev)
236 if (!zdev_enabled(zdev)) 236 if (!zdev_enabled(zdev))
237 return 0; 237 return 0;
238 238
239 dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
240 rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN); 239 rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
241 if (!rc) 240 if (!rc)
242 /* Success -> store disabled handle in zdev */ 241 /* Success -> store disabled handle in zdev */
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 771b82359af4..75c69b402e05 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -115,27 +115,6 @@ static const struct file_operations debugfs_pci_perf_fops = {
115 .release = single_release, 115 .release = single_release,
116}; 116};
117 117
118static int pci_debug_show(struct seq_file *m, void *v)
119{
120 struct zpci_dev *zdev = m->private;
121
122 zpci_debug_info(zdev, m);
123 return 0;
124}
125
126static int pci_debug_seq_open(struct inode *inode, struct file *filp)
127{
128 return single_open(filp, pci_debug_show,
129 file_inode(filp)->i_private);
130}
131
132static const struct file_operations debugfs_pci_debug_fops = {
133 .open = pci_debug_seq_open,
134 .read = seq_read,
135 .llseek = seq_lseek,
136 .release = single_release,
137};
138
139void zpci_debug_init_device(struct zpci_dev *zdev) 118void zpci_debug_init_device(struct zpci_dev *zdev)
140{ 119{
141 zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev), 120 zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev),
@@ -149,19 +128,11 @@ void zpci_debug_init_device(struct zpci_dev *zdev)
149 &debugfs_pci_perf_fops); 128 &debugfs_pci_perf_fops);
150 if (IS_ERR(zdev->debugfs_perf)) 129 if (IS_ERR(zdev->debugfs_perf))
151 zdev->debugfs_perf = NULL; 130 zdev->debugfs_perf = NULL;
152
153 zdev->debugfs_debug = debugfs_create_file("debug",
154 S_IFREG | S_IRUGO | S_IWUSR,
155 zdev->debugfs_dev, zdev,
156 &debugfs_pci_debug_fops);
157 if (IS_ERR(zdev->debugfs_debug))
158 zdev->debugfs_debug = NULL;
159} 131}
160 132
161void zpci_debug_exit_device(struct zpci_dev *zdev) 133void zpci_debug_exit_device(struct zpci_dev *zdev)
162{ 134{
163 debugfs_remove(zdev->debugfs_perf); 135 debugfs_remove(zdev->debugfs_perf);
164 debugfs_remove(zdev->debugfs_debug);
165 debugfs_remove(zdev->debugfs_dev); 136 debugfs_remove(zdev->debugfs_dev);
166} 137}
167 138
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f8e69d5bc0a9..a2343c1f6e04 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -263,7 +263,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
263 enum dma_data_direction direction, 263 enum dma_data_direction direction,
264 struct dma_attrs *attrs) 264 struct dma_attrs *attrs)
265{ 265{
266 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 266 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
267 unsigned long nr_pages, iommu_page_index; 267 unsigned long nr_pages, iommu_page_index;
268 unsigned long pa = page_to_phys(page) + offset; 268 unsigned long pa = page_to_phys(page) + offset;
269 int flags = ZPCI_PTE_VALID; 269 int flags = ZPCI_PTE_VALID;
@@ -304,7 +304,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
304 size_t size, enum dma_data_direction direction, 304 size_t size, enum dma_data_direction direction,
305 struct dma_attrs *attrs) 305 struct dma_attrs *attrs)
306{ 306{
307 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 307 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
308 unsigned long iommu_page_index; 308 unsigned long iommu_page_index;
309 int npages; 309 int npages;
310 310
@@ -323,7 +323,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
323 dma_addr_t *dma_handle, gfp_t flag, 323 dma_addr_t *dma_handle, gfp_t flag,
324 struct dma_attrs *attrs) 324 struct dma_attrs *attrs)
325{ 325{
326 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 326 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
327 struct page *page; 327 struct page *page;
328 unsigned long pa; 328 unsigned long pa;
329 dma_addr_t map; 329 dma_addr_t map;
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index a42cce69d0a0..e99a2557f186 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -15,40 +15,36 @@
15static ssize_t show_fid(struct device *dev, struct device_attribute *attr, 15static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
16 char *buf) 16 char *buf)
17{ 17{
18 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 18 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
19 19
20 sprintf(buf, "0x%08x\n", zdev->fid); 20 return sprintf(buf, "0x%08x\n", zdev->fid);
21 return strlen(buf);
22} 21}
23static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL); 22static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
24 23
25static ssize_t show_fh(struct device *dev, struct device_attribute *attr, 24static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
26 char *buf) 25 char *buf)
27{ 26{
28 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 27 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
29 28
30 sprintf(buf, "0x%08x\n", zdev->fh); 29 return sprintf(buf, "0x%08x\n", zdev->fh);
31 return strlen(buf);
32} 30}
33static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL); 31static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
34 32
35static ssize_t show_pchid(struct device *dev, struct device_attribute *attr, 33static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
36 char *buf) 34 char *buf)
37{ 35{
38 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 36 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
39 37
40 sprintf(buf, "0x%04x\n", zdev->pchid); 38 return sprintf(buf, "0x%04x\n", zdev->pchid);
41 return strlen(buf);
42} 39}
43static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL); 40static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
44 41
45static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr, 42static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
46 char *buf) 43 char *buf)
47{ 44{
48 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev)); 45 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
49 46
50 sprintf(buf, "0x%02x\n", zdev->pfgid); 47 return sprintf(buf, "0x%02x\n", zdev->pfgid);
51 return strlen(buf);
52} 48}
53static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); 49static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
54 50
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index cebaff8069a1..e1c7bb999b06 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -3,3 +3,4 @@ header-y +=
3 3
4generic-y += clkdev.h 4generic-y += clkdev.h
5generic-y += trace_clock.h 5generic-y += trace_clock.h
6generic-y += xor.h
diff --git a/arch/score/include/asm/dma-mapping.h b/arch/score/include/asm/dma-mapping.h
deleted file mode 100644
index f9c0193c7a53..000000000000
--- a/arch/score/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_SCORE_DMA_MAPPING_H
2#define _ASM_SCORE_DMA_MAPPING_H
3
4#include <asm-generic/dma-mapping-broken.h>
5
6#endif /* _ASM_SCORE_DMA_MAPPING_H */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 2031c65fd4ea..bc4d3f5d2e5d 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -254,7 +254,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
254 const char *type; 254 const char *type;
255 u32 class; 255 u32 class;
256 256
257 dev = alloc_pci_dev(); 257 dev = pci_alloc_dev(bus);
258 if (!dev) 258 if (!dev)
259 return NULL; 259 return NULL;
260 260
@@ -281,7 +281,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
281 printk(" create device, devfn: %x, type: %s\n", 281 printk(" create device, devfn: %x, type: %s\n",
282 devfn, type); 282 devfn, type);
283 283
284 dev->bus = bus;
285 dev->sysdata = node; 284 dev->sysdata = node;
286 dev->dev.parent = bus->bridge; 285 dev->dev.parent = bus->bridge;
287 dev->dev.bus = &pci_bus_type; 286 dev->dev.bus = &pci_bus_type;
@@ -327,7 +326,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
327 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) 326 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
328 pci_set_master(dev); 327 pci_set_master(dev);
329 328
330 dev->current_state = 4; /* unknown power state */ 329 dev->current_state = PCI_UNKNOWN; /* unknown power state */
331 dev->error_state = pci_channel_io_normal; 330 dev->error_state = pci_channel_io_normal;
332 dev->dma_mask = 0xffffffff; 331 dev->dma_mask = 0xffffffff;
333 332
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 950a9afa38f8..96494fb646f7 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -17,7 +17,7 @@ OBJS := misc.o
17 17
18# font.c and font.o 18# font.c and font.o
19CFLAGS_font.o := -Dstatic= 19CFLAGS_font.o := -Dstatic=
20$(obj)/font.c: $(srctree)/drivers/video/console/font_8x8.c 20$(obj)/font.c: $(srctree)/lib/fonts/font_8x8.c
21 $(call cmd,shipped) 21 $(call cmd,shipped)
22 22
23# piggy.S and piggy.o 23# piggy.S and piggy.o
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index ef69c0c82825..374a055a8e6b 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -277,11 +277,6 @@ static int __init pci_common_init(void)
277 pci_bus_assign_resources(puv3_bus); 277 pci_bus_assign_resources(puv3_bus);
278 } 278 }
279 279
280 /*
281 * Tell drivers about devices found.
282 */
283 pci_bus_add_devices(puv3_bus);
284
285 return 0; 280 return 0;
286} 281}
287subsys_initcall(pci_common_init); 282subsys_initcall(pci_common_init);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index b31bf97775fc..2dfac58f3b11 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -111,7 +111,7 @@ static inline void acpi_disable_pci(void)
111} 111}
112 112
113/* Low-level suspend routine. */ 113/* Low-level suspend routine. */
114extern int acpi_suspend_lowlevel(void); 114extern int (*acpi_suspend_lowlevel)(void);
115 115
116/* Physical address to resume after wakeup */ 116/* Physical address to resume after wakeup */
117#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start)) 117#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index af9c5525434d..f87f7fcefa0a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -222,14 +222,22 @@ struct kvm_mmu_page {
222 int root_count; /* Currently serving as active root */ 222 int root_count; /* Currently serving as active root */
223 unsigned int unsync_children; 223 unsigned int unsync_children;
224 unsigned long parent_ptes; /* Reverse mapping for parent_pte */ 224 unsigned long parent_ptes; /* Reverse mapping for parent_pte */
225
226 /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
227 unsigned long mmu_valid_gen;
228
225 DECLARE_BITMAP(unsync_child_bitmap, 512); 229 DECLARE_BITMAP(unsync_child_bitmap, 512);
226 230
227#ifdef CONFIG_X86_32 231#ifdef CONFIG_X86_32
232 /*
233 * Used out of the mmu-lock to avoid reading spte values while an
234 * update is in progress; see the comments in __get_spte_lockless().
235 */
228 int clear_spte_count; 236 int clear_spte_count;
229#endif 237#endif
230 238
239 /* Number of writes since the last time traversal visited this page. */
231 int write_flooding_count; 240 int write_flooding_count;
232 bool mmio_cached;
233}; 241};
234 242
235struct kvm_pio_request { 243struct kvm_pio_request {
@@ -529,11 +537,14 @@ struct kvm_arch {
529 unsigned int n_requested_mmu_pages; 537 unsigned int n_requested_mmu_pages;
530 unsigned int n_max_mmu_pages; 538 unsigned int n_max_mmu_pages;
531 unsigned int indirect_shadow_pages; 539 unsigned int indirect_shadow_pages;
540 unsigned long mmu_valid_gen;
532 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 541 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
533 /* 542 /*
534 * Hash table of struct kvm_mmu_page. 543 * Hash table of struct kvm_mmu_page.
535 */ 544 */
536 struct list_head active_mmu_pages; 545 struct list_head active_mmu_pages;
546 struct list_head zapped_obsolete_pages;
547
537 struct list_head assigned_dev_head; 548 struct list_head assigned_dev_head;
538 struct iommu_domain *iommu_domain; 549 struct iommu_domain *iommu_domain;
539 int iommu_flags; 550 int iommu_flags;
@@ -769,7 +780,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
769 struct kvm_memory_slot *slot, 780 struct kvm_memory_slot *slot,
770 gfn_t gfn_offset, unsigned long mask); 781 gfn_t gfn_offset, unsigned long mask);
771void kvm_mmu_zap_all(struct kvm *kvm); 782void kvm_mmu_zap_all(struct kvm *kvm);
772void kvm_mmu_zap_mmio_sptes(struct kvm *kvm); 783void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
773unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 784unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
774void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 785void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
775 786
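The kvm_host.h changes above introduce generation-based invalidation: bumping kvm->arch.mmu_valid_gen marks every existing shadow page obsolete in O(1), and stale pages are zapped lazily when encountered (via zapped_obsolete_pages). A minimal model of the scheme, with the structs reduced to the two fields involved:

        #include <stdbool.h>
        #include <stdio.h>

        struct kvm_arch { unsigned long mmu_valid_gen; };
        struct kvm_mmu_page { unsigned long mmu_valid_gen; };

        static bool is_obsolete(struct kvm_arch *a, struct kvm_mmu_page *sp)
        {
                return sp->mmu_valid_gen != a->mmu_valid_gen;
        }

        int main(void)
        {
                struct kvm_arch arch = { .mmu_valid_gen = 0 };
                struct kvm_mmu_page sp = { .mmu_valid_gen = arch.mmu_valid_gen };

                arch.mmu_valid_gen++;   /* "zap all" without touching any page */
                printf("obsolete=%d\n", is_obsolete(&arch, &sp));
                return 0;
        }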
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 230c8ea878e5..d81a972dd506 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -44,6 +44,7 @@
44#include <asm/mpspec.h> 44#include <asm/mpspec.h>
45#include <asm/smp.h> 45#include <asm/smp.h>
46 46
47#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
47static int __initdata acpi_force = 0; 48static int __initdata acpi_force = 0;
48u32 acpi_rsdt_forced; 49u32 acpi_rsdt_forced;
49int acpi_disabled; 50int acpi_disabled;
@@ -559,6 +560,12 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
559int (*__acpi_register_gsi)(struct device *dev, u32 gsi, 560int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
560 int trigger, int polarity) = acpi_register_gsi_pic; 561 int trigger, int polarity) = acpi_register_gsi_pic;
561 562
563#ifdef CONFIG_ACPI_SLEEP
564int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
565#else
566int (*acpi_suspend_lowlevel)(void);
567#endif
568
562/* 569/*
563 * success: return IRQ number (>=0) 570 * success: return IRQ number (>=0)
564 * failure: return < 0 571 * failure: return < 0
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index b44577bc9744..2a34aaf3c8f1 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,12 +26,12 @@ static char temp_stack[4096];
26#endif 26#endif
27 27
28/** 28/**
29 * acpi_suspend_lowlevel - save kernel state 29 * x86_acpi_suspend_lowlevel - save kernel state
30 * 30 *
31 * Create an identity mapped page table and copy the wakeup routine to 31 * Create an identity mapped page table and copy the wakeup routine to
32 * low memory. 32 * low memory.
33 */ 33 */
34int acpi_suspend_lowlevel(void) 34int x86_acpi_suspend_lowlevel(void)
35{ 35{
36 struct wakeup_header *header = 36 struct wakeup_header *header =
37 (struct wakeup_header *) __va(real_mode_header->wakeup_header); 37 (struct wakeup_header *) __va(real_mode_header->wakeup_header);
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 67f59f8c6956..c9c2c982d5e4 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -15,3 +15,5 @@ extern unsigned long acpi_copy_wakeup_routine(unsigned long);
15extern void wakeup_long64(void); 15extern void wakeup_long64(void);
16 16
17extern void do_suspend_lowlevel(void); 17extern void do_suspend_lowlevel(void);
18
19extern int x86_acpi_suspend_lowlevel(void);
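The three acpi hunks above turn acpi_suspend_lowlevel from a function into a function pointer with a compile-time default, so other code can install its own suspend routine before it runs. A sketch of that indirection pattern under the same names; the default body is a stub:

        #include <stdio.h>

        static int x86_acpi_suspend_lowlevel(void)
        {
                puts("default suspend path");
                return 0;
        }

        /* Overridable hook; platforms may repoint this before suspend. */
        int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;

        int main(void)
        {
                if (acpi_suspend_lowlevel)
                        return acpi_suspend_lowlevel();
                return 1;       /* no handler installed */
        }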
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 2f3a7995e56a..98f2083832eb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -182,11 +182,6 @@ static int therm_throt_process(bool new_event, int event, int level)
182 this_cpu, 182 this_cpu,
183 level == CORE_LEVEL ? "Core" : "Package", 183 level == CORE_LEVEL ? "Core" : "Package",
184 state->count); 184 state->count);
185 else
186 printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
187 this_cpu,
188 level == CORE_LEVEL ? "Core" : "Package",
189 state->count);
190 return 1; 185 return 1;
191 } 186 }
192 if (old_event) { 187 if (old_event) {
@@ -194,10 +189,6 @@ static int therm_throt_process(bool new_event, int event, int level)
194 printk(KERN_INFO "CPU%d: %s temperature/speed normal\n", 189 printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
195 this_cpu, 190 this_cpu,
196 level == CORE_LEVEL ? "Core" : "Package"); 191 level == CORE_LEVEL ? "Core" : "Package");
197 else
198 printk(KERN_INFO "CPU%d: %s power limit normal\n",
199 this_cpu,
200 level == CORE_LEVEL ? "Core" : "Package");
201 return 1; 192 return 1;
202 } 193 }
203 194
@@ -220,6 +211,15 @@ static int thresh_event_valid(int event)
220 return 1; 211 return 1;
221} 212}
222 213
214static bool int_pln_enable;
215static int __init int_pln_enable_setup(char *s)
216{
217 int_pln_enable = true;
218
219 return 1;
220}
221__setup("int_pln_enable", int_pln_enable_setup);
222
223#ifdef CONFIG_SYSFS 223#ifdef CONFIG_SYSFS
224/* Add/Remove thermal_throttle interface for CPU device: */ 224/* Add/Remove thermal_throttle interface for CPU device: */
225static __cpuinit int thermal_throttle_add_dev(struct device *dev, 225static __cpuinit int thermal_throttle_add_dev(struct device *dev,
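The therm_throt hunk above gates all power-limit-notification handling behind a new "int_pln_enable" boot parameter that defaults to off. A userspace model of that default-off flag; the command-line parsing stands in for the kernel's __setup hook:

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool int_pln_enable;     /* defaults to off */

        static void parse_cmdline(const char *cmdline)
        {
                if (strstr(cmdline, "int_pln_enable"))
                        int_pln_enable = true;
        }

        int main(void)
        {
                parse_cmdline("quiet int_pln_enable");
                printf("power-limit notifications %s\n",
                       int_pln_enable ? "enabled" : "disabled (default)");
                return 0;
        }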
@@ -232,7 +232,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
232 if (err) 232 if (err)
233 return err; 233 return err;
234 234
235 if (cpu_has(c, X86_FEATURE_PLN)) 235 if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
236 err = sysfs_add_file_to_group(&dev->kobj, 236 err = sysfs_add_file_to_group(&dev->kobj,
237 &dev_attr_core_power_limit_count.attr, 237 &dev_attr_core_power_limit_count.attr,
238 thermal_attr_group.name); 238 thermal_attr_group.name);
@@ -240,7 +240,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
240 err = sysfs_add_file_to_group(&dev->kobj, 240 err = sysfs_add_file_to_group(&dev->kobj,
241 &dev_attr_package_throttle_count.attr, 241 &dev_attr_package_throttle_count.attr,
242 thermal_attr_group.name); 242 thermal_attr_group.name);
243 if (cpu_has(c, X86_FEATURE_PLN)) 243 if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
244 err = sysfs_add_file_to_group(&dev->kobj, 244 err = sysfs_add_file_to_group(&dev->kobj,
245 &dev_attr_package_power_limit_count.attr, 245 &dev_attr_package_power_limit_count.attr,
246 thermal_attr_group.name); 246 thermal_attr_group.name);
@@ -353,7 +353,7 @@ static void intel_thermal_interrupt(void)
353 CORE_LEVEL) != 0) 353 CORE_LEVEL) != 0)
354 mce_log_therm_throt_event(msr_val); 354 mce_log_therm_throt_event(msr_val);
355 355
356 if (this_cpu_has(X86_FEATURE_PLN)) 356 if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
357 therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, 357 therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
358 POWER_LIMIT_EVENT, 358 POWER_LIMIT_EVENT,
359 CORE_LEVEL); 359 CORE_LEVEL);
@@ -363,7 +363,7 @@ static void intel_thermal_interrupt(void)
363 therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, 363 therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
364 THERMAL_THROTTLING_EVENT, 364 THERMAL_THROTTLING_EVENT,
365 PACKAGE_LEVEL); 365 PACKAGE_LEVEL);
366 if (this_cpu_has(X86_FEATURE_PLN)) 366 if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
367 therm_throt_process(msr_val & 367 therm_throt_process(msr_val &
368 PACKAGE_THERM_STATUS_POWER_LIMIT, 368 PACKAGE_THERM_STATUS_POWER_LIMIT,
369 POWER_LIMIT_EVENT, 369 POWER_LIMIT_EVENT,
@@ -482,9 +482,13 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
482 apic_write(APIC_LVTTHMR, h); 482 apic_write(APIC_LVTTHMR, h);
483 483
484 rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); 484 rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
485 if (cpu_has(c, X86_FEATURE_PLN)) 485 if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
486 wrmsr(MSR_IA32_THERM_INTERRUPT, 486 wrmsr(MSR_IA32_THERM_INTERRUPT,
487 l | (THERM_INT_LOW_ENABLE 487 (l | (THERM_INT_LOW_ENABLE
488 | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
489 else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
490 wrmsr(MSR_IA32_THERM_INTERRUPT,
491 l | (THERM_INT_LOW_ENABLE
488 | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h); 492 | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
489 else 493 else
490 wrmsr(MSR_IA32_THERM_INTERRUPT, 494 wrmsr(MSR_IA32_THERM_INTERRUPT,
@@ -492,9 +496,14 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
492 496
493 if (cpu_has(c, X86_FEATURE_PTS)) { 497 if (cpu_has(c, X86_FEATURE_PTS)) {
494 rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); 498 rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
495 if (cpu_has(c, X86_FEATURE_PLN)) 499 if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
496 wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, 500 wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
497 l | (PACKAGE_THERM_INT_LOW_ENABLE 501 (l | (PACKAGE_THERM_INT_LOW_ENABLE
502 | PACKAGE_THERM_INT_HIGH_ENABLE))
503 & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
504 else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
505 wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
506 l | (PACKAGE_THERM_INT_LOW_ENABLE
498 | PACKAGE_THERM_INT_HIGH_ENABLE 507 | PACKAGE_THERM_INT_HIGH_ENABLE
499 | PACKAGE_THERM_INT_PLN_ENABLE), h); 508 | PACKAGE_THERM_INT_PLN_ENABLE), h);
500 else 509 else
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index d609e1d84048..bf4fb04d0112 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -5,12 +5,13 @@ CFLAGS_x86.o := -I.
5CFLAGS_svm.o := -I. 5CFLAGS_svm.o := -I.
6CFLAGS_vmx.o := -I. 6CFLAGS_vmx.o := -I.
7 7
8kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ 8KVM := ../../../virt/kvm
9 coalesced_mmio.o irq_comm.o eventfd.o \ 9
10 irqchip.o) 10kvm-y += $(KVM)/kvm_main.o $(KVM)/ioapic.o \
11kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(addprefix ../../../virt/kvm/, \ 11 $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \
12 assigned-dev.o iommu.o) 12 $(KVM)/eventfd.o $(KVM)/irqchip.o
13kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o) 13kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(KVM)/assigned-dev.o $(KVM)/iommu.o
14kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
14 15
15kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ 16kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
16 i8254.o cpuid.o pmu.o 17 i8254.o cpuid.o pmu.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5953dcea752d..2bc1e81045b0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -61,6 +61,8 @@
61#define OpMem8 26ull /* 8-bit zero extended memory operand */ 61#define OpMem8 26ull /* 8-bit zero extended memory operand */
62#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ 62#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ 63#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
64 66
65#define OpBits 5 /* Width of operand field */ 67#define OpBits 5 /* Width of operand field */
66#define OpMask ((1ull << OpBits) - 1) 68#define OpMask ((1ull << OpBits) - 1)
@@ -86,6 +88,7 @@
86#define DstMem64 (OpMem64 << DstShift) 88#define DstMem64 (OpMem64 << DstShift)
87#define DstImmUByte (OpImmUByte << DstShift) 89#define DstImmUByte (OpImmUByte << DstShift)
88#define DstDX (OpDX << DstShift) 90#define DstDX (OpDX << DstShift)
91#define DstAccLo (OpAccLo << DstShift)
89#define DstMask (OpMask << DstShift) 92#define DstMask (OpMask << DstShift)
90/* Source operand type. */ 93/* Source operand type. */
91#define SrcShift 6 94#define SrcShift 6
@@ -108,6 +111,7 @@
108#define SrcImm64 (OpImm64 << SrcShift) 111#define SrcImm64 (OpImm64 << SrcShift)
109#define SrcDX (OpDX << SrcShift) 112#define SrcDX (OpDX << SrcShift)
110#define SrcMem8 (OpMem8 << SrcShift) 113#define SrcMem8 (OpMem8 << SrcShift)
114#define SrcAccHi (OpAccHi << SrcShift)
111#define SrcMask (OpMask << SrcShift) 115#define SrcMask (OpMask << SrcShift)
112#define BitOp (1<<11) 116#define BitOp (1<<11)
113#define MemAbs (1<<12) /* Memory operand is absolute displacement */ 117#define MemAbs (1<<12) /* Memory operand is absolute displacement */
@@ -138,6 +142,7 @@
138/* Source 2 operand type */ 142/* Source 2 operand type */
139#define Src2Shift (31) 143#define Src2Shift (31)
140#define Src2None (OpNone << Src2Shift) 144#define Src2None (OpNone << Src2Shift)
145#define Src2Mem (OpMem << Src2Shift)
141#define Src2CL (OpCL << Src2Shift) 146#define Src2CL (OpCL << Src2Shift)
142#define Src2ImmByte (OpImmByte << Src2Shift) 147#define Src2ImmByte (OpImmByte << Src2Shift)
143#define Src2One (OpOne << Src2Shift) 148#define Src2One (OpOne << Src2Shift)
@@ -155,6 +160,9 @@
155#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ 160#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
156#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ 161#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
157#define NoWrite ((u64)1 << 45) /* No writeback */ 162#define NoWrite ((u64)1 << 45) /* No writeback */
163#define SrcWrite ((u64)1 << 46) /* Write back src operand */
164
165#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
158 166
159#define X2(x...) x, x 167#define X2(x...) x, x
160#define X3(x...) X2(x), x 168#define X3(x...) X2(x), x
@@ -171,10 +179,11 @@
171/* 179/*
172 * fastop functions have a special calling convention: 180 * fastop functions have a special calling convention:
173 * 181 *
174 * dst: [rdx]:rax (in/out) 182 * dst: rax (in/out)
175 * src: rbx (in/out) 183 * src: rdx (in/out)
176 * src2: rcx (in) 184 * src2: rcx (in)
177 * flags: rflags (in/out) 185 * flags: rflags (in/out)
186 * ex: rsi (in:fastop pointer, out:zero if exception)
178 * 187 *
179 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for 188 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
180 * different operand sizes can be reached by calculation, rather than a jump 189 * different operand sizes can be reached by calculation, rather than a jump
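Per the comment above, fastop stubs are all exactly FASTOP_SIZE bytes, so the variant for a given operand size is found by address arithmetic rather than a jump table. A userspace approximation where an array indexed by log2(bytes) stands in for that fixed-stride code layout (the kernel effectively computes fop += idx * FASTOP_SIZE):

        #include <stdio.h>

        static void add_b(void) { puts("addb"); }
        static void add_w(void) { puts("addw"); }
        static void add_l(void) { puts("addl"); }
        static void add_q(void) { puts("addq"); }

        /* One entry per operand size, in the same order as the stubs. */
        static void (*const fastop_add[])(void) = { add_b, add_w, add_l, add_q };

        int main(void)
        {
                int bytes = 4;
                int idx = (bytes == 1) ? 0 : (bytes == 2) ? 1 :
                          (bytes == 4) ? 2 : 3;

                fastop_add[idx]();      /* kernel: fixed-size stub at computed offset */
                return 0;
        }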
@@ -276,174 +285,17 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
276} 285}
277 286
278/* 287/*
279 * Instruction emulation:
280 * Most instructions are emulated directly via a fragment of inline assembly
281 * code. This allows us to save/restore EFLAGS and thus very easily pick up
282 * any modified flags.
283 */
284
285#if defined(CONFIG_X86_64)
286#define _LO32 "k" /* force 32-bit operand */
287#define _STK "%%rsp" /* stack pointer */
288#elif defined(__i386__)
289#define _LO32 "" /* force 32-bit operand */
290#define _STK "%%esp" /* stack pointer */
291#endif
292
293/*
294 * These EFLAGS bits are restored from saved value during emulation, and 288 * These EFLAGS bits are restored from saved value during emulation, and
295 * any changes are written back to the saved value after emulation. 289 * any changes are written back to the saved value after emulation.
296 */ 290 */
297#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) 291#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
298 292
299/* Before executing instruction: restore necessary bits in EFLAGS. */
300#define _PRE_EFLAGS(_sav, _msk, _tmp) \
301 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
302 "movl %"_sav",%"_LO32 _tmp"; " \
303 "push %"_tmp"; " \
304 "push %"_tmp"; " \
305 "movl %"_msk",%"_LO32 _tmp"; " \
306 "andl %"_LO32 _tmp",("_STK"); " \
307 "pushf; " \
308 "notl %"_LO32 _tmp"; " \
309 "andl %"_LO32 _tmp",("_STK"); " \
310 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
311 "pop %"_tmp"; " \
312 "orl %"_LO32 _tmp",("_STK"); " \
313 "popf; " \
314 "pop %"_sav"; "
315
316/* After executing instruction: write-back necessary bits in EFLAGS. */
317#define _POST_EFLAGS(_sav, _msk, _tmp) \
318 /* _sav |= EFLAGS & _msk; */ \
319 "pushf; " \
320 "pop %"_tmp"; " \
321 "andl %"_msk",%"_LO32 _tmp"; " \
322 "orl %"_LO32 _tmp",%"_sav"; "
323
324#ifdef CONFIG_X86_64 293#ifdef CONFIG_X86_64
325#define ON64(x) x 294#define ON64(x) x
326#else 295#else
327#define ON64(x) 296#define ON64(x)
328#endif 297#endif
329 298
330#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
331 do { \
332 __asm__ __volatile__ ( \
333 _PRE_EFLAGS("0", "4", "2") \
334 _op _suffix " %"_x"3,%1; " \
335 _POST_EFLAGS("0", "4", "2") \
336 : "=m" ((ctxt)->eflags), \
337 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
338 "=&r" (_tmp) \
339 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
340 } while (0)
341
342
343/* Raw emulation: instruction has two explicit operands. */
344#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
345 do { \
346 unsigned long _tmp; \
347 \
348 switch ((ctxt)->dst.bytes) { \
349 case 2: \
350 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
351 break; \
352 case 4: \
353 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
354 break; \
355 case 8: \
356 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
357 break; \
358 } \
359 } while (0)
360
361#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
362 do { \
363 unsigned long _tmp; \
364 switch ((ctxt)->dst.bytes) { \
365 case 1: \
366 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
367 break; \
368 default: \
369 __emulate_2op_nobyte(ctxt, _op, \
370 _wx, _wy, _lx, _ly, _qx, _qy); \
371 break; \
372 } \
373 } while (0)
374
375/* Source operand is byte-sized and may be restricted to just %cl. */
376#define emulate_2op_SrcB(ctxt, _op) \
377 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
378
379/* Source operand is byte, word, long or quad sized. */
380#define emulate_2op_SrcV(ctxt, _op) \
381 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
382
383/* Source operand is word, long or quad sized. */
384#define emulate_2op_SrcV_nobyte(ctxt, _op) \
385 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
386
387/* Instruction has three operands and one operand is stored in ECX register */
388#define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
389 do { \
390 unsigned long _tmp; \
391 _type _clv = (ctxt)->src2.val; \
392 _type _srcv = (ctxt)->src.val; \
393 _type _dstv = (ctxt)->dst.val; \
394 \
395 __asm__ __volatile__ ( \
396 _PRE_EFLAGS("0", "5", "2") \
397 _op _suffix " %4,%1 \n" \
398 _POST_EFLAGS("0", "5", "2") \
399 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
400 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
401 ); \
402 \
403 (ctxt)->src2.val = (unsigned long) _clv; \
404 (ctxt)->src2.val = (unsigned long) _srcv; \
405 (ctxt)->dst.val = (unsigned long) _dstv; \
406 } while (0)
407
408#define emulate_2op_cl(ctxt, _op) \
409 do { \
410 switch ((ctxt)->dst.bytes) { \
411 case 2: \
412 __emulate_2op_cl(ctxt, _op, "w", u16); \
413 break; \
414 case 4: \
415 __emulate_2op_cl(ctxt, _op, "l", u32); \
416 break; \
417 case 8: \
418 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
419 break; \
420 } \
421 } while (0)
422
423#define __emulate_1op(ctxt, _op, _suffix) \
424 do { \
425 unsigned long _tmp; \
426 \
427 __asm__ __volatile__ ( \
428 _PRE_EFLAGS("0", "3", "2") \
429 _op _suffix " %1; " \
430 _POST_EFLAGS("0", "3", "2") \
431 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
432 "=&r" (_tmp) \
433 : "i" (EFLAGS_MASK)); \
434 } while (0)
435
436/* Instruction has only one explicit operand (no source operand). */
437#define emulate_1op(ctxt, _op) \
438 do { \
439 switch ((ctxt)->dst.bytes) { \
440 case 1: __emulate_1op(ctxt, _op, "b"); break; \
441 case 2: __emulate_1op(ctxt, _op, "w"); break; \
442 case 4: __emulate_1op(ctxt, _op, "l"); break; \
443 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
444 } \
445 } while (0)
446
447static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); 299static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
448 300
449#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" 301#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
@@ -462,7 +314,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
462#define FOPNOP() FOP_ALIGN FOP_RET 314#define FOPNOP() FOP_ALIGN FOP_RET
463 315
464#define FOP1E(op, dst) \ 316#define FOP1E(op, dst) \
465 FOP_ALIGN #op " %" #dst " \n\t" FOP_RET 317 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
318
319#define FOP1EEX(op, dst) \
320 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
466 321
467#define FASTOP1(op) \ 322#define FASTOP1(op) \
468 FOP_START(op) \ 323 FOP_START(op) \
@@ -472,24 +327,42 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
472 ON64(FOP1E(op##q, rax)) \ 327 ON64(FOP1E(op##q, rax)) \
473 FOP_END 328 FOP_END
474 329
330/* 1-operand, using src2 (for MUL/DIV r/m) */
331#define FASTOP1SRC2(op, name) \
332 FOP_START(name) \
333 FOP1E(op, cl) \
334 FOP1E(op, cx) \
335 FOP1E(op, ecx) \
336 ON64(FOP1E(op, rcx)) \
337 FOP_END
338
339/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
340#define FASTOP1SRC2EX(op, name) \
341 FOP_START(name) \
342 FOP1EEX(op, cl) \
343 FOP1EEX(op, cx) \
344 FOP1EEX(op, ecx) \
345 ON64(FOP1EEX(op, rcx)) \
346 FOP_END
347
475#define FOP2E(op, dst, src) \ 348#define FOP2E(op, dst, src) \
476 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET 349 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
477 350
478#define FASTOP2(op) \ 351#define FASTOP2(op) \
479 FOP_START(op) \ 352 FOP_START(op) \
480 FOP2E(op##b, al, bl) \ 353 FOP2E(op##b, al, dl) \
481 FOP2E(op##w, ax, bx) \ 354 FOP2E(op##w, ax, dx) \
482 FOP2E(op##l, eax, ebx) \ 355 FOP2E(op##l, eax, edx) \
483 ON64(FOP2E(op##q, rax, rbx)) \ 356 ON64(FOP2E(op##q, rax, rdx)) \
484 FOP_END 357 FOP_END
485 358
486/* 2 operand, word only */ 359/* 2 operand, word only */
487#define FASTOP2W(op) \ 360#define FASTOP2W(op) \
488 FOP_START(op) \ 361 FOP_START(op) \
489 FOPNOP() \ 362 FOPNOP() \
490 FOP2E(op##w, ax, bx) \ 363 FOP2E(op##w, ax, dx) \
491 FOP2E(op##l, eax, ebx) \ 364 FOP2E(op##l, eax, edx) \
492 ON64(FOP2E(op##q, rax, rbx)) \ 365 ON64(FOP2E(op##q, rax, rdx)) \
493 FOP_END 366 FOP_END
494 367
495/* 2 operand, src is CL */ 368/* 2 operand, src is CL */
@@ -508,14 +381,17 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
508#define FASTOP3WCL(op) \ 381#define FASTOP3WCL(op) \
509 FOP_START(op) \ 382 FOP_START(op) \
510 FOPNOP() \ 383 FOPNOP() \
511 FOP3E(op##w, ax, bx, cl) \ 384 FOP3E(op##w, ax, dx, cl) \
512 FOP3E(op##l, eax, ebx, cl) \ 385 FOP3E(op##l, eax, edx, cl) \
513 ON64(FOP3E(op##q, rax, rbx, cl)) \ 386 ON64(FOP3E(op##q, rax, rdx, cl)) \
514 FOP_END 387 FOP_END
515 388
516/* Special case for SETcc - 1 instruction per cc */ 389/* Special case for SETcc - 1 instruction per cc */
517#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" 390#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
518 391
392asm(".global kvm_fastop_exception \n"
393 "kvm_fastop_exception: xor %esi, %esi; ret");
394
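/*
 * Editor's note (added commentary, not in the patch): this stub is the
 * landing pad for the _ASM_EXTABLE entry that FOP1EEX attaches to each
 * faulting instruction (local label 10 above). A #DE raised by a guest
 * DIV/IDIV therefore does not kill the host: the fixup zeroes %esi, the
 * register fastop() passes the stub pointer in (its "+S"(fop) operand),
 * and fastop() maps the now-NULL fop to emulate_de(), reflecting the
 * exception back into the guest.
 */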
519FOP_START(setcc) 395FOP_START(setcc)
520FOP_SETCC(seto) 396FOP_SETCC(seto)
521FOP_SETCC(setno) 397FOP_SETCC(setno)
@@ -538,47 +414,6 @@ FOP_END;
538FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET 414FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
539FOP_END; 415FOP_END;
540 416
541#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
542 do { \
543 unsigned long _tmp; \
544 ulong *rax = reg_rmw((ctxt), VCPU_REGS_RAX); \
545 ulong *rdx = reg_rmw((ctxt), VCPU_REGS_RDX); \
546 \
547 __asm__ __volatile__ ( \
548 _PRE_EFLAGS("0", "5", "1") \
549 "1: \n\t" \
550 _op _suffix " %6; " \
551 "2: \n\t" \
552 _POST_EFLAGS("0", "5", "1") \
553 ".pushsection .fixup,\"ax\" \n\t" \
554 "3: movb $1, %4 \n\t" \
555 "jmp 2b \n\t" \
556 ".popsection \n\t" \
557 _ASM_EXTABLE(1b, 3b) \
558 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
559 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
560 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \
561 } while (0)
562
563/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
564#define emulate_1op_rax_rdx(ctxt, _op, _ex) \
565 do { \
566 switch((ctxt)->src.bytes) { \
567 case 1: \
568 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
569 break; \
570 case 2: \
571 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
572 break; \
573 case 4: \
574 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
575 break; \
576 case 8: ON64( \
577 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
578 break; \
579 } \
580 } while (0)
581
582static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, 417static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
583 enum x86_intercept intercept, 418 enum x86_intercept intercept,
584 enum x86_intercept_stage stage) 419 enum x86_intercept_stage stage)
@@ -988,6 +823,11 @@ FASTOP2(xor);
988FASTOP2(cmp); 823FASTOP2(cmp);
989FASTOP2(test); 824FASTOP2(test);
990 825
826FASTOP1SRC2(mul, mul_ex);
827FASTOP1SRC2(imul, imul_ex);
828FASTOP1SRC2EX(div, div_ex);
829FASTOP1SRC2EX(idiv, idiv_ex);
830
991FASTOP3WCL(shld); 831FASTOP3WCL(shld);
992FASTOP3WCL(shrd); 832FASTOP3WCL(shrd);
993 833
@@ -1013,6 +853,8 @@ FASTOP2W(bts);
1013FASTOP2W(btr); 853FASTOP2W(btr);
1014FASTOP2W(btc); 854FASTOP2W(btc);
1015 855
856FASTOP2(xadd);
857
1016static u8 test_cc(unsigned int condition, unsigned long flags) 858static u8 test_cc(unsigned int condition, unsigned long flags)
1017{ 859{
1018 u8 rc; 860 u8 rc;
@@ -1726,45 +1568,42 @@ static void write_register_operand(struct operand *op)
1726 } 1568 }
1727} 1569}
1728 1570
1729static int writeback(struct x86_emulate_ctxt *ctxt) 1571static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1730{ 1572{
1731 int rc; 1573 int rc;
1732 1574
1733 if (ctxt->d & NoWrite) 1575 switch (op->type) {
1734 return X86EMUL_CONTINUE;
1735
1736 switch (ctxt->dst.type) {
1737 case OP_REG: 1576 case OP_REG:
1738 write_register_operand(&ctxt->dst); 1577 write_register_operand(op);
1739 break; 1578 break;
1740 case OP_MEM: 1579 case OP_MEM:
1741 if (ctxt->lock_prefix) 1580 if (ctxt->lock_prefix)
1742 rc = segmented_cmpxchg(ctxt, 1581 rc = segmented_cmpxchg(ctxt,
1743 ctxt->dst.addr.mem, 1582 op->addr.mem,
1744 &ctxt->dst.orig_val, 1583 &op->orig_val,
1745 &ctxt->dst.val, 1584 &op->val,
1746 ctxt->dst.bytes); 1585 op->bytes);
1747 else 1586 else
1748 rc = segmented_write(ctxt, 1587 rc = segmented_write(ctxt,
1749 ctxt->dst.addr.mem, 1588 op->addr.mem,
1750 &ctxt->dst.val, 1589 &op->val,
1751 ctxt->dst.bytes); 1590 op->bytes);
1752 if (rc != X86EMUL_CONTINUE) 1591 if (rc != X86EMUL_CONTINUE)
1753 return rc; 1592 return rc;
1754 break; 1593 break;
1755 case OP_MEM_STR: 1594 case OP_MEM_STR:
1756 rc = segmented_write(ctxt, 1595 rc = segmented_write(ctxt,
1757 ctxt->dst.addr.mem, 1596 op->addr.mem,
1758 ctxt->dst.data, 1597 op->data,
1759 ctxt->dst.bytes * ctxt->dst.count); 1598 op->bytes * op->count);
1760 if (rc != X86EMUL_CONTINUE) 1599 if (rc != X86EMUL_CONTINUE)
1761 return rc; 1600 return rc;
1762 break; 1601 break;
1763 case OP_XMM: 1602 case OP_XMM:
1764 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm); 1603 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1765 break; 1604 break;
1766 case OP_MM: 1605 case OP_MM:
1767 write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm); 1606 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1768 break; 1607 break;
1769 case OP_NONE: 1608 case OP_NONE:
1770 /* no writeback */ 1609 /* no writeback */
@@ -2117,42 +1956,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2117 return X86EMUL_CONTINUE; 1956 return X86EMUL_CONTINUE;
2118} 1957}
2119 1958
2120static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
2121{
2122 u8 ex = 0;
2123
2124 emulate_1op_rax_rdx(ctxt, "mul", ex);
2125 return X86EMUL_CONTINUE;
2126}
2127
2128static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
2129{
2130 u8 ex = 0;
2131
2132 emulate_1op_rax_rdx(ctxt, "imul", ex);
2133 return X86EMUL_CONTINUE;
2134}
2135
2136static int em_div_ex(struct x86_emulate_ctxt *ctxt)
2137{
2138 u8 de = 0;
2139
2140 emulate_1op_rax_rdx(ctxt, "div", de);
2141 if (de)
2142 return emulate_de(ctxt);
2143 return X86EMUL_CONTINUE;
2144}
2145
2146static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
2147{
2148 u8 de = 0;
2149
2150 emulate_1op_rax_rdx(ctxt, "idiv", de);
2151 if (de)
2152 return emulate_de(ctxt);
2153 return X86EMUL_CONTINUE;
2154}
2155
2156static int em_grp45(struct x86_emulate_ctxt *ctxt) 1959static int em_grp45(struct x86_emulate_ctxt *ctxt)
2157{ 1960{
2158 int rc = X86EMUL_CONTINUE; 1961 int rc = X86EMUL_CONTINUE;
@@ -3734,10 +3537,10 @@ static const struct opcode group3[] = {
3734 F(DstMem | SrcImm | NoWrite, em_test), 3537 F(DstMem | SrcImm | NoWrite, em_test),
3735 F(DstMem | SrcNone | Lock, em_not), 3538 F(DstMem | SrcNone | Lock, em_not),
3736 F(DstMem | SrcNone | Lock, em_neg), 3539 F(DstMem | SrcNone | Lock, em_neg),
3737 I(SrcMem, em_mul_ex), 3540 F(DstXacc | Src2Mem, em_mul_ex),
3738 I(SrcMem, em_imul_ex), 3541 F(DstXacc | Src2Mem, em_imul_ex),
3739 I(SrcMem, em_div_ex), 3542 F(DstXacc | Src2Mem, em_div_ex),
3740 I(SrcMem, em_idiv_ex), 3543 F(DstXacc | Src2Mem, em_idiv_ex),
3741}; 3544};
3742 3545
3743static const struct opcode group4[] = { 3546static const struct opcode group4[] = {
@@ -4064,7 +3867,7 @@ static const struct opcode twobyte_table[256] = {
4064 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), 3867 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4065 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), 3868 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4066 /* 0xC0 - 0xC7 */ 3869 /* 0xC0 - 0xC7 */
4067 D2bv(DstMem | SrcReg | ModRM | Lock), 3870 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4068 N, D(DstMem | SrcReg | ModRM | Mov), 3871 N, D(DstMem | SrcReg | ModRM | Mov),
4069 N, N, N, GD(0, &group9), 3872 N, N, N, GD(0, &group9),
4070 /* 0xC8 - 0xCF */ 3873 /* 0xC8 - 0xCF */
@@ -4172,6 +3975,24 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4172 fetch_register_operand(op); 3975 fetch_register_operand(op);
4173 op->orig_val = op->val; 3976 op->orig_val = op->val;
4174 break; 3977 break;
3978 case OpAccLo:
3979 op->type = OP_REG;
3980 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
3981 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
3982 fetch_register_operand(op);
3983 op->orig_val = op->val;
3984 break;
3985 case OpAccHi:
3986 if (ctxt->d & ByteOp) {
3987 op->type = OP_NONE;
3988 break;
3989 }
3990 op->type = OP_REG;
3991 op->bytes = ctxt->op_bytes;
3992 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3993 fetch_register_operand(op);
3994 op->orig_val = op->val;
3995 break;
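	/*
	 * Editor's note: OpAccLo/OpAccHi describe the RAX:RDX accumulator
	 * pair that MUL/DIV implicitly read and write. Presumably the new
	 * DstXacc flag used for group3 above decodes to this pair; for byte
	 * ops the RDX half is dropped (OP_NONE) because AX alone holds the
	 * 16-bit result, which is also why OpAccLo uses op->bytes = 2 there.
	 */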
4175 case OpDI: 3996 case OpDI:
4176 op->type = OP_MEM; 3997 op->type = OP_MEM;
4177 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 3998 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
@@ -4553,11 +4374,15 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4553static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) 4374static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4554{ 4375{
4555 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; 4376 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4556 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; 4377 if (!(ctxt->d & ByteOp))
4378 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4557 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" 4379 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4558 : "+a"(ctxt->dst.val), "+b"(ctxt->src.val), [flags]"+D"(flags) 4380 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4559 : "c"(ctxt->src2.val), [fastop]"S"(fop)); 4381 [fastop]"+S"(fop)
4382 : "c"(ctxt->src2.val));
4560 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); 4383 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4384 if (!fop) /* exception is returned in fop variable */
4385 return emulate_de(ctxt);
4561 return X86EMUL_CONTINUE; 4386 return X86EMUL_CONTINUE;
4562} 4387}
4563 4388
@@ -4773,9 +4598,17 @@ special_insn:
4773 goto done; 4598 goto done;
4774 4599
4775writeback: 4600writeback:
4776 rc = writeback(ctxt); 4601 if (!(ctxt->d & NoWrite)) {
4777 if (rc != X86EMUL_CONTINUE) 4602 rc = writeback(ctxt, &ctxt->dst);
4778 goto done; 4603 if (rc != X86EMUL_CONTINUE)
4604 goto done;
4605 }
4606 if (ctxt->d & SrcWrite) {
4607 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4608 rc = writeback(ctxt, &ctxt->src);
4609 if (rc != X86EMUL_CONTINUE)
4610 goto done;
4611 }
4779 4612
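/*
 * Editor's note: SrcWrite lets an instruction write back its source operand
 * as well. XADD (0x0f 0xc0/0xc1), now implemented as FASTOP2(xadd) with
 * DstMem | SrcReg | ModRM | SrcWrite | Lock, is the user: it exchanges dst
 * and src before adding, so the register source must be written back too --
 * hence the open-coded 0xc0/0xc1 case further down is deleted. The BUG_ON
 * documents the assumption that a writable source is never a memory operand.
 */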
4780 /* 4613 /*
4781 * restore dst type in case the decoding will be reused 4614 * restore dst type in case the decoding will be reused
@@ -4872,12 +4705,6 @@ twobyte_insn:
4872 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : 4705 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
4873 (s16) ctxt->src.val; 4706 (s16) ctxt->src.val;
4874 break; 4707 break;
4875 case 0xc0 ... 0xc1: /* xadd */
4876 fastop(ctxt, em_add);
4877 /* Write back the register source. */
4878 ctxt->src.val = ctxt->dst.orig_val;
4879 write_register_operand(&ctxt->src);
4880 break;
4881 case 0xc3: /* movnti */ 4708 case 0xc3: /* movnti */
4882 ctxt->dst.bytes = ctxt->op_bytes; 4709 ctxt->dst.bytes = ctxt->op_bytes;
4883 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val : 4710 ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0eee2c8b64d1..afc11245827c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1608,8 +1608,8 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1608 return; 1608 return;
1609 1609
1610 if (atomic_read(&apic->lapic_timer.pending) > 0) { 1610 if (atomic_read(&apic->lapic_timer.pending) > 0) {
1611 if (kvm_apic_local_deliver(apic, APIC_LVTT)) 1611 kvm_apic_local_deliver(apic, APIC_LVTT);
1612 atomic_dec(&apic->lapic_timer.pending); 1612 atomic_set(&apic->lapic_timer.pending, 0);
1613 } 1613 }
1614} 1614}
1615 1615
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 004cc87b781c..0d094da49541 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -197,15 +197,63 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
197} 197}
198EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); 198EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
199 199
200static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access) 200/*
201 * spte bits 3 ~ 11 are used as the low 9 bits of the generation
202 * number, and bits 52 ~ 61 as the high 10 bits of the generation
203 * number.
204 */
205#define MMIO_SPTE_GEN_LOW_SHIFT 3
206#define MMIO_SPTE_GEN_HIGH_SHIFT 52
207
208#define MMIO_GEN_SHIFT 19
209#define MMIO_GEN_LOW_SHIFT 9
210#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 1)
211#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
212#define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
213
214static u64 generation_mmio_spte_mask(unsigned int gen)
201{ 215{
202 struct kvm_mmu_page *sp = page_header(__pa(sptep)); 216 u64 mask;
217
218 WARN_ON(gen > MMIO_MAX_GEN);
219
220 mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
221 mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
222 return mask;
223}
224
225static unsigned int get_mmio_spte_generation(u64 spte)
226{
227 unsigned int gen;
228
229 spte &= ~shadow_mmio_mask;
230
231 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
232 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
233 return gen;
234}
235
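/*
 * Editor's worked example (not in the patch): generation_mmio_spte_mask()
 * and get_mmio_spte_generation() must round-trip for any gen up to
 * MMIO_MAX_GEN. Take gen = 0x513 (binary 10 100010011): the low 9 bits,
 * 0x113, land in spte bits 3..11 and the high bits, 0x2, in bits 52..61,
 * so the mask is (0x113 << 3) | (0x2ull << 52); unpacking shifts both
 * fields back and ORs them into 0x513 again. Neither field overlaps
 * shadow_mmio_mask, so the "spte &= ~shadow_mmio_mask" above is harmless.
 */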
236static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
237{
238 /*
239 * Init kvm generation close to MMIO_MAX_GEN to easily test the
240 * code of handling generation number wrap-around.
241 */
242 return (kvm_memslots(kvm)->generation +
243 MMIO_MAX_GEN - 150) & MMIO_GEN_MASK;
244}
245
246static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
247 unsigned access)
248{
249 unsigned int gen = kvm_current_mmio_generation(kvm);
250 u64 mask = generation_mmio_spte_mask(gen);
203 251
204 access &= ACC_WRITE_MASK | ACC_USER_MASK; 252 access &= ACC_WRITE_MASK | ACC_USER_MASK;
253 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
205 254
206 sp->mmio_cached = true; 255 trace_mark_mmio_spte(sptep, gfn, access, gen);
207 trace_mark_mmio_spte(sptep, gfn, access); 256 mmu_spte_set(sptep, mask);
208 mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
209} 257}
210 258
211static bool is_mmio_spte(u64 spte) 259static bool is_mmio_spte(u64 spte)
@@ -215,24 +263,38 @@ static bool is_mmio_spte(u64 spte)
215 263
216static gfn_t get_mmio_spte_gfn(u64 spte) 264static gfn_t get_mmio_spte_gfn(u64 spte)
217{ 265{
218 return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT; 266 u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
267 return (spte & ~mask) >> PAGE_SHIFT;
219} 268}
220 269
221static unsigned get_mmio_spte_access(u64 spte) 270static unsigned get_mmio_spte_access(u64 spte)
222{ 271{
223 return (spte & ~shadow_mmio_mask) & ~PAGE_MASK; 272 u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
273 return (spte & ~mask) & ~PAGE_MASK;
224} 274}
225 275
226static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access) 276static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
277 pfn_t pfn, unsigned access)
227{ 278{
228 if (unlikely(is_noslot_pfn(pfn))) { 279 if (unlikely(is_noslot_pfn(pfn))) {
229 mark_mmio_spte(sptep, gfn, access); 280 mark_mmio_spte(kvm, sptep, gfn, access);
230 return true; 281 return true;
231 } 282 }
232 283
233 return false; 284 return false;
234} 285}
235 286
287static bool check_mmio_spte(struct kvm *kvm, u64 spte)
288{
289 unsigned int kvm_gen, spte_gen;
290
291 kvm_gen = kvm_current_mmio_generation(kvm);
292 spte_gen = get_mmio_spte_generation(spte);
293
294 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
295 return likely(kvm_gen == spte_gen);
296}
297
236static inline u64 rsvd_bits(int s, int e) 298static inline u64 rsvd_bits(int s, int e)
237{ 299{
238 return ((1ULL << (e - s + 1)) - 1) << s; 300 return ((1ULL << (e - s + 1)) - 1) << s;
@@ -404,9 +466,20 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
404/* 466/*
405 * The idea using the light way get the spte on x86_32 guest is from 467 * The idea using the light way get the spte on x86_32 guest is from
406 * gup_get_pte(arch/x86/mm/gup.c). 468 * gup_get_pte(arch/x86/mm/gup.c).
407 * The difference is we can not catch the spte tlb flush if we leave 469 *
408 * guest mode, so we emulate it by increase clear_spte_count when spte 470 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
409 * is cleared. 471 * coalesces them and we are running outside of the MMU lock. Therefore
472 * we need to protect against in-progress updates of the spte.
473 *
474 * Reading the spte while an update is in progress may get the old value
475 * for the high part of the spte. The race is fine for a present->non-present
476 * change (because the high part of the spte is ignored for non-present spte),
477 * but for a present->present change we must reread the spte.
478 *
479 * All such changes are done in two steps (present->non-present and
480 * non-present->present), hence it is enough to count the number of
481 * present->non-present updates: if it changed while reading the spte,
482 * we might have hit the race. This is done using clear_spte_count.
410 */ 483 */
411static u64 __get_spte_lockless(u64 *sptep) 484static u64 __get_spte_lockless(u64 *sptep)
412{ 485{
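/*
 * Editor's sketch of the retry loop described by the comment above (the
 * body of __get_spte_lockless is not shown in this hunk; union split_spte
 * and clear_spte_count follow the 32-bit PAE code in this file, but treat
 * the shape below as illustrative rather than the exact implementation):
 *
 *	union split_spte spte, *orig = (union split_spte *)sptep;
 *	int count;
 *
 * retry:
 *	count = sp->clear_spte_count;
 *	smp_rmb();
 *
 *	spte.spte_low = orig->spte_low;
 *	smp_rmb();
 *
 *	spte.spte_high = orig->spte_high;
 *	smp_rmb();
 *
 *	if (unlikely(spte.spte_low != orig->spte_low ||
 *		     count != sp->clear_spte_count))
 *		goto retry;
 *
 *	return spte.spte;
 */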
@@ -1511,6 +1584,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1511 if (!direct) 1584 if (!direct)
1512 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); 1585 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1513 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); 1586 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1587
1588 /*
1589 * The active_mmu_pages list is a FIFO list; do not move the
1590 * page until it is zapped. kvm_zap_obsolete_pages depends on
1591 * this feature. See the comments in kvm_zap_obsolete_pages().
1592 */
1514 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); 1593 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1515 sp->parent_ptes = 0; 1594 sp->parent_ptes = 0;
1516 mmu_page_add_parent_pte(vcpu, sp, parent_pte); 1595 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
@@ -1648,6 +1727,16 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1648static void kvm_mmu_commit_zap_page(struct kvm *kvm, 1727static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1649 struct list_head *invalid_list); 1728 struct list_head *invalid_list);
1650 1729
1730/*
1731 * NOTE: be careful with zapped-obsolete pages
1732 * (is_obsolete_sp(sp) && sp->role.invalid) when walking the hash list,
1733 * since such a page has been deleted from active_mmu_pages but can
1734 * still be found in the hash list.
1735 *
1736 * for_each_gfn_indirect_valid_sp skips that kind of page, and
1737 * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), skips
1738 * all obsolete pages.
1739 */
1651#define for_each_gfn_sp(_kvm, _sp, _gfn) \ 1740#define for_each_gfn_sp(_kvm, _sp, _gfn) \
1652 hlist_for_each_entry(_sp, \ 1741 hlist_for_each_entry(_sp, \
1653 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ 1742 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -1838,6 +1927,11 @@ static void clear_sp_write_flooding_count(u64 *spte)
1838 __clear_sp_write_flooding_count(sp); 1927 __clear_sp_write_flooding_count(sp);
1839} 1928}
1840 1929
1930static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
1931{
1932 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
1933}
1934
1841static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, 1935static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1842 gfn_t gfn, 1936 gfn_t gfn,
1843 gva_t gaddr, 1937 gva_t gaddr,
@@ -1864,6 +1958,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1864 role.quadrant = quadrant; 1958 role.quadrant = quadrant;
1865 } 1959 }
1866 for_each_gfn_sp(vcpu->kvm, sp, gfn) { 1960 for_each_gfn_sp(vcpu->kvm, sp, gfn) {
1961 if (is_obsolete_sp(vcpu->kvm, sp))
1962 continue;
1963
1867 if (!need_sync && sp->unsync) 1964 if (!need_sync && sp->unsync)
1868 need_sync = true; 1965 need_sync = true;
1869 1966
@@ -1900,6 +1997,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1900 1997
1901 account_shadowed(vcpu->kvm, gfn); 1998 account_shadowed(vcpu->kvm, gfn);
1902 } 1999 }
2000 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1903 init_shadow_page_table(sp); 2001 init_shadow_page_table(sp);
1904 trace_kvm_mmu_get_page(sp, true); 2002 trace_kvm_mmu_get_page(sp, true);
1905 return sp; 2003 return sp;
@@ -2070,8 +2168,10 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2070 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); 2168 ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2071 kvm_mmu_page_unlink_children(kvm, sp); 2169 kvm_mmu_page_unlink_children(kvm, sp);
2072 kvm_mmu_unlink_parents(kvm, sp); 2170 kvm_mmu_unlink_parents(kvm, sp);
2171
2073 if (!sp->role.invalid && !sp->role.direct) 2172 if (!sp->role.invalid && !sp->role.direct)
2074 unaccount_shadowed(kvm, sp->gfn); 2173 unaccount_shadowed(kvm, sp->gfn);
2174
2075 if (sp->unsync) 2175 if (sp->unsync)
2076 kvm_unlink_unsync_page(kvm, sp); 2176 kvm_unlink_unsync_page(kvm, sp);
2077 if (!sp->root_count) { 2177 if (!sp->root_count) {
@@ -2081,7 +2181,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2081 kvm_mod_used_mmu_pages(kvm, -1); 2181 kvm_mod_used_mmu_pages(kvm, -1);
2082 } else { 2182 } else {
2083 list_move(&sp->link, &kvm->arch.active_mmu_pages); 2183 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2084 kvm_reload_remote_mmus(kvm); 2184
2185 /*
2186 * The obsolete pages can not be used on any vcpus.
2187 * See the comments in kvm_mmu_invalidate_zap_all_pages().
2188 */
2189 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
2190 kvm_reload_remote_mmus(kvm);
2085 } 2191 }
2086 2192
2087 sp->role.invalid = 1; 2193 sp->role.invalid = 1;
@@ -2331,7 +2437,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2331 u64 spte; 2437 u64 spte;
2332 int ret = 0; 2438 int ret = 0;
2333 2439
2334 if (set_mmio_spte(sptep, gfn, pfn, pte_access)) 2440 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
2335 return 0; 2441 return 0;
2336 2442
2337 spte = PT_PRESENT_MASK; 2443 spte = PT_PRESENT_MASK;
@@ -2869,22 +2975,25 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
2869 2975
2870 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2976 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2871 return; 2977 return;
2872 spin_lock(&vcpu->kvm->mmu_lock); 2978
2873 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && 2979 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2874 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || 2980 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2875 vcpu->arch.mmu.direct_map)) { 2981 vcpu->arch.mmu.direct_map)) {
2876 hpa_t root = vcpu->arch.mmu.root_hpa; 2982 hpa_t root = vcpu->arch.mmu.root_hpa;
2877 2983
2984 spin_lock(&vcpu->kvm->mmu_lock);
2878 sp = page_header(root); 2985 sp = page_header(root);
2879 --sp->root_count; 2986 --sp->root_count;
2880 if (!sp->root_count && sp->role.invalid) { 2987 if (!sp->root_count && sp->role.invalid) {
2881 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); 2988 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2882 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 2989 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2883 } 2990 }
2884 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2885 spin_unlock(&vcpu->kvm->mmu_lock); 2991 spin_unlock(&vcpu->kvm->mmu_lock);
2992 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2886 return; 2993 return;
2887 } 2994 }
2995
2996 spin_lock(&vcpu->kvm->mmu_lock);
2888 for (i = 0; i < 4; ++i) { 2997 for (i = 0; i < 4; ++i) {
2889 hpa_t root = vcpu->arch.mmu.pae_root[i]; 2998 hpa_t root = vcpu->arch.mmu.pae_root[i];
2890 2999
@@ -3148,17 +3257,12 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
3148 return spte; 3257 return spte;
3149} 3258}
3150 3259
3151/*
3152 * If it is a real mmio page fault, return 1 and emulat the instruction
3153 * directly, return 0 to let CPU fault again on the address, -1 is
3154 * returned if bug is detected.
3155 */
3156int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) 3260int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3157{ 3261{
3158 u64 spte; 3262 u64 spte;
3159 3263
3160 if (quickly_check_mmio_pf(vcpu, addr, direct)) 3264 if (quickly_check_mmio_pf(vcpu, addr, direct))
3161 return 1; 3265 return RET_MMIO_PF_EMULATE;
3162 3266
3163 spte = walk_shadow_page_get_mmio_spte(vcpu, addr); 3267 spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
3164 3268
@@ -3166,12 +3270,15 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3166 gfn_t gfn = get_mmio_spte_gfn(spte); 3270 gfn_t gfn = get_mmio_spte_gfn(spte);
3167 unsigned access = get_mmio_spte_access(spte); 3271 unsigned access = get_mmio_spte_access(spte);
3168 3272
3273 if (!check_mmio_spte(vcpu->kvm, spte))
3274 return RET_MMIO_PF_INVALID;
3275
3169 if (direct) 3276 if (direct)
3170 addr = 0; 3277 addr = 0;
3171 3278
3172 trace_handle_mmio_page_fault(addr, gfn, access); 3279 trace_handle_mmio_page_fault(addr, gfn, access);
3173 vcpu_cache_mmio_info(vcpu, addr, gfn, access); 3280 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3174 return 1; 3281 return RET_MMIO_PF_EMULATE;
3175 } 3282 }
3176 3283
3177 /* 3284 /*
@@ -3179,13 +3286,13 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3179 * it's a BUG if the gfn is not a mmio page. 3286 * it's a BUG if the gfn is not a mmio page.
3180 */ 3287 */
3181 if (direct && !check_direct_spte_mmio_pf(spte)) 3288 if (direct && !check_direct_spte_mmio_pf(spte))
3182 return -1; 3289 return RET_MMIO_PF_BUG;
3183 3290
3184 /* 3291 /*
3185 * If the page table is zapped by other cpus, let CPU fault again on 3292 * If the page table is zapped by other cpus, let CPU fault again on
3186 * the address. 3293 * the address.
3187 */ 3294 */
3188 return 0; 3295 return RET_MMIO_PF_RETRY;
3189} 3296}
3190EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common); 3297EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
3191 3298
@@ -3195,7 +3302,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
3195 int ret; 3302 int ret;
3196 3303
3197 ret = handle_mmio_page_fault_common(vcpu, addr, direct); 3304 ret = handle_mmio_page_fault_common(vcpu, addr, direct);
3198 WARN_ON(ret < 0); 3305 WARN_ON(ret == RET_MMIO_PF_BUG);
3199 return ret; 3306 return ret;
3200} 3307}
3201 3308
@@ -3207,8 +3314,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3207 3314
3208 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); 3315 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3209 3316
3210 if (unlikely(error_code & PFERR_RSVD_MASK)) 3317 if (unlikely(error_code & PFERR_RSVD_MASK)) {
3211 return handle_mmio_page_fault(vcpu, gva, error_code, true); 3318 r = handle_mmio_page_fault(vcpu, gva, error_code, true);
3319
3320 if (likely(r != RET_MMIO_PF_INVALID))
3321 return r;
3322 }
3212 3323
3213 r = mmu_topup_memory_caches(vcpu); 3324 r = mmu_topup_memory_caches(vcpu);
3214 if (r) 3325 if (r)
@@ -3284,8 +3395,12 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
3284 ASSERT(vcpu); 3395 ASSERT(vcpu);
3285 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3396 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
3286 3397
3287 if (unlikely(error_code & PFERR_RSVD_MASK)) 3398 if (unlikely(error_code & PFERR_RSVD_MASK)) {
3288 return handle_mmio_page_fault(vcpu, gpa, error_code, true); 3399 r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
3400
3401 if (likely(r != RET_MMIO_PF_INVALID))
3402 return r;
3403 }
3289 3404
3290 r = mmu_topup_memory_caches(vcpu); 3405 r = mmu_topup_memory_caches(vcpu);
3291 if (r) 3406 if (r)
@@ -3391,8 +3506,8 @@ static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
3391 *access &= mask; 3506 *access &= mask;
3392} 3507}
3393 3508
3394static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, 3509static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
3395 int *nr_present) 3510 unsigned access, int *nr_present)
3396{ 3511{
3397 if (unlikely(is_mmio_spte(*sptep))) { 3512 if (unlikely(is_mmio_spte(*sptep))) {
3398 if (gfn != get_mmio_spte_gfn(*sptep)) { 3513 if (gfn != get_mmio_spte_gfn(*sptep)) {
@@ -3401,7 +3516,7 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
3401 } 3516 }
3402 3517
3403 (*nr_present)++; 3518 (*nr_present)++;
3404 mark_mmio_spte(sptep, gfn, access); 3519 mark_mmio_spte(kvm, sptep, gfn, access);
3405 return true; 3520 return true;
3406 } 3521 }
3407 3522
@@ -3764,9 +3879,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
3764 if (r) 3879 if (r)
3765 goto out; 3880 goto out;
3766 r = mmu_alloc_roots(vcpu); 3881 r = mmu_alloc_roots(vcpu);
3767 spin_lock(&vcpu->kvm->mmu_lock); 3882 kvm_mmu_sync_roots(vcpu);
3768 mmu_sync_roots(vcpu);
3769 spin_unlock(&vcpu->kvm->mmu_lock);
3770 if (r) 3883 if (r)
3771 goto out; 3884 goto out;
3772 /* set_cr3() should ensure TLB has been flushed */ 3885 /* set_cr3() should ensure TLB has been flushed */
@@ -4179,39 +4292,107 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
4179 spin_unlock(&kvm->mmu_lock); 4292 spin_unlock(&kvm->mmu_lock);
4180} 4293}
4181 4294
4182void kvm_mmu_zap_all(struct kvm *kvm) 4295#define BATCH_ZAP_PAGES 10
4296static void kvm_zap_obsolete_pages(struct kvm *kvm)
4183{ 4297{
4184 struct kvm_mmu_page *sp, *node; 4298 struct kvm_mmu_page *sp, *node;
4185 LIST_HEAD(invalid_list); 4299 int batch = 0;
4186 4300
4187 spin_lock(&kvm->mmu_lock);
4188restart: 4301restart:
4189 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) 4302 list_for_each_entry_safe_reverse(sp, node,
4190 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) 4303 &kvm->arch.active_mmu_pages, link) {
4304 int ret;
4305
4306 /*
4307 * No obsolete page exists before a newly created page, since
4308 * active_mmu_pages is a FIFO list.
4309 */
4310 if (!is_obsolete_sp(kvm, sp))
4311 break;
4312
4313 /*
4314 * Since we walk the list in reverse and invalid pages are
4315 * moved to its head, skipping invalid pages keeps the
4316 * walk from looping forever.
4317 */
4318 if (sp->role.invalid)
4319 continue;
4320
4321 /*
4322 * No need to flush the tlb since we only zap sps with an
4323 * invalid generation number.
4324 */
4325 if (batch >= BATCH_ZAP_PAGES &&
4326 cond_resched_lock(&kvm->mmu_lock)) {
4327 batch = 0;
4328 goto restart;
4329 }
4330
4331 ret = kvm_mmu_prepare_zap_page(kvm, sp,
4332 &kvm->arch.zapped_obsolete_pages);
4333 batch += ret;
4334
4335 if (ret)
4191 goto restart; 4336 goto restart;
4337 }
4192 4338
4193 kvm_mmu_commit_zap_page(kvm, &invalid_list); 4339 /*
4194 spin_unlock(&kvm->mmu_lock); 4340 * The tlb should be flushed before the page tables are freed, since
4341 * lockless walkers may still be using them.
4342 */
4343 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
4195} 4344}
4196 4345
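/*
 * Editor's note on the lock-break above: cond_resched_lock() drops
 * mmu_lock, reschedules if needed, retakes it, and returns nonzero when
 * the lock was actually released. Other writers may have reshuffled
 * active_mmu_pages in that window, so the walk restarts from the list
 * tail instead of trusting a possibly stale iterator; the batch counter
 * merely bounds how often the lock may be dropped.
 */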
4197void kvm_mmu_zap_mmio_sptes(struct kvm *kvm) 4346/*
4347 * Fast invalidate all shadow pages and use lock-break technique
4348 * to zap obsolete pages.
4349 *
4350 * It's required when a memslot is being deleted or the VM is being
4351 * destroyed; in these cases, we must ensure that the KVM MMU does
4352 * not use any resource of the slot being deleted, or of any slot,
4353 * after the function returns.
4354 */
4355void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
4198{ 4356{
4199 struct kvm_mmu_page *sp, *node;
4200 LIST_HEAD(invalid_list);
4201
4202 spin_lock(&kvm->mmu_lock); 4357 spin_lock(&kvm->mmu_lock);
4203restart: 4358 trace_kvm_mmu_invalidate_zap_all_pages(kvm);
4204 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { 4359 kvm->arch.mmu_valid_gen++;
4205 if (!sp->mmio_cached)
4206 continue;
4207 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
4208 goto restart;
4209 }
4210 4360
4211 kvm_mmu_commit_zap_page(kvm, &invalid_list); 4361 /*
4362 * Notify all vcpus to reload their shadow page tables
4363 * and flush their TLBs. All vcpus will then switch to the
4364 * new shadow page tables carrying the new mmu_valid_gen.
4365 *
4366 * Note: this must be done under the protection of
4367 * mmu_lock; otherwise a vcpu could purge a shadow page
4368 * but miss the tlb flush.
4369 */
4370 kvm_reload_remote_mmus(kvm);
4371
4372 kvm_zap_obsolete_pages(kvm);
4212 spin_unlock(&kvm->mmu_lock); 4373 spin_unlock(&kvm->mmu_lock);
4213} 4374}
4214 4375
4376static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
4377{
4378 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
4379}
4380
4381void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
4382{
4383 /*
4384 * The very rare case: if the generation number wraps around,
4385 * zap all shadow pages.
4386 *
4387 * The max value is MMIO_MAX_GEN - 1, since this is not called
4388 * while a memslot is marked invalid.
4389 */
4390 if (unlikely(kvm_current_mmio_generation(kvm) >= (MMIO_MAX_GEN - 1))) {
4391 printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
4392 kvm_mmu_invalidate_zap_all_pages(kvm);
4393 }
4394}
4395
4215static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) 4396static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
4216{ 4397{
4217 struct kvm *kvm; 4398 struct kvm *kvm;
@@ -4240,15 +4421,23 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
4240 * want to shrink a VM that only started to populate its MMU 4421 * want to shrink a VM that only started to populate its MMU
4241 * anyway. 4422 * anyway.
4242 */ 4423 */
4243 if (!kvm->arch.n_used_mmu_pages) 4424 if (!kvm->arch.n_used_mmu_pages &&
4425 !kvm_has_zapped_obsolete_pages(kvm))
4244 continue; 4426 continue;
4245 4427
4246 idx = srcu_read_lock(&kvm->srcu); 4428 idx = srcu_read_lock(&kvm->srcu);
4247 spin_lock(&kvm->mmu_lock); 4429 spin_lock(&kvm->mmu_lock);
4248 4430
4431 if (kvm_has_zapped_obsolete_pages(kvm)) {
4432 kvm_mmu_commit_zap_page(kvm,
4433 &kvm->arch.zapped_obsolete_pages);
4434 goto unlock;
4435 }
4436
4249 prepare_zap_oldest_mmu_page(kvm, &invalid_list); 4437 prepare_zap_oldest_mmu_page(kvm, &invalid_list);
4250 kvm_mmu_commit_zap_page(kvm, &invalid_list); 4438 kvm_mmu_commit_zap_page(kvm, &invalid_list);
4251 4439
4440unlock:
4252 spin_unlock(&kvm->mmu_lock); 4441 spin_unlock(&kvm->mmu_lock);
4253 srcu_read_unlock(&kvm->srcu, idx); 4442 srcu_read_unlock(&kvm->srcu, idx);
4254 4443
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2adcbc2cac6d..5b59c573aba7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,6 +52,23 @@
52 52
53int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); 53int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
54void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); 54void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
55
56/*
57 * Return values of handle_mmio_page_fault_common:
58 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
59 * directly.
60 * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
61 * fault path update the mmio spte.
62 * RET_MMIO_PF_RETRY: let CPU fault again on the address.
63 * RET_MMIO_PF_BUG: bug is detected.
64 */
65enum {
66 RET_MMIO_PF_EMULATE = 1,
67 RET_MMIO_PF_INVALID = 2,
68 RET_MMIO_PF_RETRY = 0,
69 RET_MMIO_PF_BUG = -1
70};
71
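/*
 * Editor's sketch of the expected caller pattern (it mirrors the
 * handle_ept_misconfig() changes in vmx.c below, and is not itself part
 * of the patch):
 *
 *	switch (handle_mmio_page_fault_common(vcpu, addr, direct)) {
 *	case RET_MMIO_PF_EMULATE: emulate the MMIO access
 *	case RET_MMIO_PF_INVALID: take the normal #PF path so the spte is
 *	                          rebuilt with the current generation
 *	case RET_MMIO_PF_RETRY:   re-enter the guest and let it refault
 *	default:                  WARN -- RET_MMIO_PF_BUG
 *	}
 */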
55int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); 72int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
56int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); 73int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
57 74
@@ -97,4 +114,5 @@ static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
97 return (mmu->permissions[pfec >> 1] >> pte_access) & 1; 114 return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
98} 115}
99 116
117void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
100#endif 118#endif
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index b8f6172f4174..9d2e0ffcb190 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -7,16 +7,18 @@
7#undef TRACE_SYSTEM 7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvmmmu 8#define TRACE_SYSTEM kvmmmu
9 9
10#define KVM_MMU_PAGE_FIELDS \ 10#define KVM_MMU_PAGE_FIELDS \
11 __field(__u64, gfn) \ 11 __field(unsigned long, mmu_valid_gen) \
12 __field(__u32, role) \ 12 __field(__u64, gfn) \
13 __field(__u32, root_count) \ 13 __field(__u32, role) \
14 __field(__u32, root_count) \
14 __field(bool, unsync) 15 __field(bool, unsync)
15 16
16#define KVM_MMU_PAGE_ASSIGN(sp) \ 17#define KVM_MMU_PAGE_ASSIGN(sp) \
17 __entry->gfn = sp->gfn; \ 18 __entry->mmu_valid_gen = sp->mmu_valid_gen; \
18 __entry->role = sp->role.word; \ 19 __entry->gfn = sp->gfn; \
19 __entry->root_count = sp->root_count; \ 20 __entry->role = sp->role.word; \
21 __entry->root_count = sp->root_count; \
20 __entry->unsync = sp->unsync; 22 __entry->unsync = sp->unsync;
21 23
22#define KVM_MMU_PAGE_PRINTK() ({ \ 24#define KVM_MMU_PAGE_PRINTK() ({ \
@@ -28,8 +30,8 @@
28 \ 30 \
29 role.word = __entry->role; \ 31 role.word = __entry->role; \
30 \ 32 \
31 trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \ 33 trace_seq_printf(p, "sp gen %lx gfn %llx %u%s q%u%s %s%s" \
32 " %snxe root %u %s%c", \ 34 " %snxe root %u %s%c", __entry->mmu_valid_gen, \
33 __entry->gfn, role.level, \ 35 __entry->gfn, role.level, \
34 role.cr4_pae ? " pae" : "", \ 36 role.cr4_pae ? " pae" : "", \
35 role.quadrant, \ 37 role.quadrant, \
@@ -197,23 +199,25 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
197 199
198TRACE_EVENT( 200TRACE_EVENT(
199 mark_mmio_spte, 201 mark_mmio_spte,
200 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access), 202 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
201 TP_ARGS(sptep, gfn, access), 203 TP_ARGS(sptep, gfn, access, gen),
202 204
203 TP_STRUCT__entry( 205 TP_STRUCT__entry(
204 __field(void *, sptep) 206 __field(void *, sptep)
205 __field(gfn_t, gfn) 207 __field(gfn_t, gfn)
206 __field(unsigned, access) 208 __field(unsigned, access)
209 __field(unsigned int, gen)
207 ), 210 ),
208 211
209 TP_fast_assign( 212 TP_fast_assign(
210 __entry->sptep = sptep; 213 __entry->sptep = sptep;
211 __entry->gfn = gfn; 214 __entry->gfn = gfn;
212 __entry->access = access; 215 __entry->access = access;
216 __entry->gen = gen;
213 ), 217 ),
214 218
215 TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn, 219 TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
216 __entry->access) 220 __entry->gfn, __entry->access, __entry->gen)
217); 221);
218 222
219TRACE_EVENT( 223TRACE_EVENT(
@@ -274,6 +278,50 @@ TRACE_EVENT(
274 __spte_satisfied(old_spte), __spte_satisfied(new_spte) 278 __spte_satisfied(old_spte), __spte_satisfied(new_spte)
275 ) 279 )
276); 280);
281
282TRACE_EVENT(
283 kvm_mmu_invalidate_zap_all_pages,
284 TP_PROTO(struct kvm *kvm),
285 TP_ARGS(kvm),
286
287 TP_STRUCT__entry(
288 __field(unsigned long, mmu_valid_gen)
289 __field(unsigned int, mmu_used_pages)
290 ),
291
292 TP_fast_assign(
293 __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
294 __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
295 ),
296
297 TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
298 __entry->mmu_valid_gen, __entry->mmu_used_pages
299 )
300);
301
302
303TRACE_EVENT(
304 check_mmio_spte,
305 TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
306 TP_ARGS(spte, kvm_gen, spte_gen),
307
308 TP_STRUCT__entry(
309 __field(unsigned int, kvm_gen)
310 __field(unsigned int, spte_gen)
311 __field(u64, spte)
312 ),
313
314 TP_fast_assign(
315 __entry->kvm_gen = kvm_gen;
316 __entry->spte_gen = spte_gen;
317 __entry->spte = spte;
318 ),
319
320 TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
321 __entry->kvm_gen, __entry->spte_gen,
322 __entry->kvm_gen == __entry->spte_gen
323 )
324);
277#endif /* _TRACE_KVMMMU_H */ 325#endif /* _TRACE_KVMMMU_H */
278 326
279#undef TRACE_INCLUDE_PATH 327#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860b457a..7769699d48a8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -552,9 +552,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
552 552
553 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); 553 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
554 554
555 if (unlikely(error_code & PFERR_RSVD_MASK)) 555 if (unlikely(error_code & PFERR_RSVD_MASK)) {
556 return handle_mmio_page_fault(vcpu, addr, error_code, 556 r = handle_mmio_page_fault(vcpu, addr, error_code,
557 mmu_is_nested(vcpu)); 557 mmu_is_nested(vcpu));
558 if (likely(r != RET_MMIO_PF_INVALID))
559 return r;
560 }
558 561
559 r = mmu_topup_memory_caches(vcpu); 562 r = mmu_topup_memory_caches(vcpu);
560 if (r) 563 if (r)
@@ -792,7 +795,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
792 pte_access &= gpte_access(vcpu, gpte); 795 pte_access &= gpte_access(vcpu, gpte);
793 protect_clean_gpte(&pte_access, gpte); 796 protect_clean_gpte(&pte_access, gpte);
794 797
795 if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present)) 798 if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
799 &nr_present))
796 continue; 800 continue;
797 801
798 if (gfn != sp->gfns[i]) { 802 if (gfn != sp->gfns[i]) {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a14a6eaf871d..c0bc80391e40 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1026,7 +1026,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1026 g_tsc_offset = svm->vmcb->control.tsc_offset - 1026 g_tsc_offset = svm->vmcb->control.tsc_offset -
1027 svm->nested.hsave->control.tsc_offset; 1027 svm->nested.hsave->control.tsc_offset;
1028 svm->nested.hsave->control.tsc_offset = offset; 1028 svm->nested.hsave->control.tsc_offset = offset;
1029 } 1029 } else
1030 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1031 svm->vmcb->control.tsc_offset,
1032 offset);
1030 1033
1031 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; 1034 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1032 1035
@@ -1044,6 +1047,11 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
1044 svm->vmcb->control.tsc_offset += adjustment; 1047 svm->vmcb->control.tsc_offset += adjustment;
1045 if (is_guest_mode(vcpu)) 1048 if (is_guest_mode(vcpu))
1046 svm->nested.hsave->control.tsc_offset += adjustment; 1049 svm->nested.hsave->control.tsc_offset += adjustment;
1050 else
1051 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1052 svm->vmcb->control.tsc_offset - adjustment,
1053 svm->vmcb->control.tsc_offset);
1054
1047 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); 1055 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1048} 1056}
1049 1057
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index fe5e00ed7036..545245d7cc63 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -756,6 +756,27 @@ TRACE_EVENT(
756 __entry->gpa_match ? "GPA" : "GVA") 756 __entry->gpa_match ? "GPA" : "GVA")
757); 757);
758 758
759TRACE_EVENT(kvm_write_tsc_offset,
760 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
761 __u64 next_tsc_offset),
762 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),
763
764 TP_STRUCT__entry(
765 __field( unsigned int, vcpu_id )
766 __field( __u64, previous_tsc_offset )
767 __field( __u64, next_tsc_offset )
768 ),
769
770 TP_fast_assign(
771 __entry->vcpu_id = vcpu_id;
772 __entry->previous_tsc_offset = previous_tsc_offset;
773 __entry->next_tsc_offset = next_tsc_offset;
774 ),
775
776 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
777 __entry->previous_tsc_offset, __entry->next_tsc_offset)
778);
779
759#ifdef CONFIG_X86_64 780#ifdef CONFIG_X86_64
760 781
761#define host_clocks \ 782#define host_clocks \
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b30f5a54a2ab..a7e18551c968 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2096,6 +2096,8 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2096 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? 2096 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2097 vmcs12->tsc_offset : 0)); 2097 vmcs12->tsc_offset : 0));
2098 } else { 2098 } else {
2099 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2100 vmcs_read64(TSC_OFFSET), offset);
2099 vmcs_write64(TSC_OFFSET, offset); 2101 vmcs_write64(TSC_OFFSET, offset);
2100 } 2102 }
2101} 2103}
@@ -2103,11 +2105,14 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2103static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) 2105static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
2104{ 2106{
2105 u64 offset = vmcs_read64(TSC_OFFSET); 2107 u64 offset = vmcs_read64(TSC_OFFSET);
2108
2106 vmcs_write64(TSC_OFFSET, offset + adjustment); 2109 vmcs_write64(TSC_OFFSET, offset + adjustment);
2107 if (is_guest_mode(vcpu)) { 2110 if (is_guest_mode(vcpu)) {
2108 /* Even when running L2, the adjustment needs to apply to L1 */ 2111 /* Even when running L2, the adjustment needs to apply to L1 */
2109 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; 2112 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2110 } 2113 } else
2114 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2115 offset + adjustment);
2111} 2116}
2112 2117
2113static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2118static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
@@ -4176,10 +4181,10 @@ static void ept_set_mmio_spte_mask(void)
4176 /* 4181 /*
4177 * EPT Misconfigurations can be generated if the value of bits 2:0 4182 * EPT Misconfigurations can be generated if the value of bits 2:0
4178 * of an EPT paging-structure entry is 110b (write/execute). 4183 * of an EPT paging-structure entry is 110b (write/execute).
4179 * Also, magic bits (0xffull << 49) is set to quickly identify mmio 4184 * Also, magic bits (0x3ull << 62) is set to quickly identify mmio
4180 * spte. 4185 * spte.
4181 */ 4186 */
4182 kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull); 4187 kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
4183} 4188}
4184 4189
4185/* 4190/*
@@ -5366,10 +5371,14 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5366 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5371 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5367 5372
5368 ret = handle_mmio_page_fault_common(vcpu, gpa, true); 5373 ret = handle_mmio_page_fault_common(vcpu, gpa, true);
5369 if (likely(ret == 1)) 5374 if (likely(ret == RET_MMIO_PF_EMULATE))
5370 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == 5375 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
5371 EMULATE_DONE; 5376 EMULATE_DONE;
5372 if (unlikely(!ret)) 5377
5378 if (unlikely(ret == RET_MMIO_PF_INVALID))
5379 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
5380
5381 if (unlikely(ret == RET_MMIO_PF_RETRY))
5373 return 1; 5382 return 1;
5374 5383
5375 /* It is the real ept misconfig */ 5384 /* It is the real ept misconfig */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 292e6ca89f42..d21bce505315 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1193,20 +1193,37 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1193 elapsed = ns - kvm->arch.last_tsc_nsec; 1193 elapsed = ns - kvm->arch.last_tsc_nsec;
1194 1194
1195 if (vcpu->arch.virtual_tsc_khz) { 1195 if (vcpu->arch.virtual_tsc_khz) {
1196 int faulted = 0;
1197
1196 /* n.b - signed multiplication and division required */ 1198 /* n.b - signed multiplication and division required */
1197 usdiff = data - kvm->arch.last_tsc_write; 1199 usdiff = data - kvm->arch.last_tsc_write;
1198#ifdef CONFIG_X86_64 1200#ifdef CONFIG_X86_64
1199 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; 1201 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1200#else 1202#else
1201 /* do_div() only does unsigned */ 1203 /* do_div() only does unsigned */
1202 asm("idivl %2; xor %%edx, %%edx" 1204 asm("1: idivl %[divisor]\n"
1203 : "=A"(usdiff) 1205 "2: xor %%edx, %%edx\n"
1204 : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz)); 1206 " movl $0, %[faulted]\n"
1207 "3:\n"
1208 ".section .fixup,\"ax\"\n"
1209 "4: movl $1, %[faulted]\n"
1210 " jmp 3b\n"
1211 ".previous\n"
1212
1213 _ASM_EXTABLE(1b, 4b)
1214
1215 : "=A"(usdiff), [faulted] "=r" (faulted)
1216 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
1217
1205#endif 1218#endif
1206 do_div(elapsed, 1000); 1219 do_div(elapsed, 1000);
1207 usdiff -= elapsed; 1220 usdiff -= elapsed;
1208 if (usdiff < 0) 1221 if (usdiff < 0)
1209 usdiff = -usdiff; 1222 usdiff = -usdiff;
1223
1224 /* idivl overflow => difference is larger than USEC_PER_SEC */
1225 if (faulted)
1226 usdiff = USEC_PER_SEC;
1210 } else 1227 } else
1211 usdiff = USEC_PER_SEC; /* disable TSC match window below */ 1228 usdiff = USEC_PER_SEC; /* disable TSC match window below */
1212 1229
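/*
 * Editor's note on the fixup above: on 32-bit hosts the 64-bit product
 * usdiff * 1000 lives in edx:eax, and idivl raises #DE not only for a
 * zero divisor but whenever the quotient does not fit in 32 bits. For
 * example (illustrative values), usdiff * 1000 = 5e12 divided by
 * virtual_tsc_khz = 1000 yields 5e9 > INT_MAX, so the CPU faults; the
 * _ASM_EXTABLE entry redirects to the fixup, faulted is set, and usdiff
 * is clamped to USEC_PER_SEC -- large enough to disable the TSC match
 * window, exactly like the !virtual_tsc_khz branch below.
 */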
@@ -1587,6 +1604,30 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1587 return 0; 1604 return 0;
1588} 1605}
1589 1606
1607/*
1608 * kvmclock updates which are isolated to a given vcpu, such as
1609 * vcpu->cpu migration, should not allow system_timestamp from
1610 * the rest of the vcpus to remain static. Otherwise ntp frequency
1611 * correction applies to one vcpu's system_timestamp but not
1612 * the others.
1613 *
1614 * So in those cases, request a kvmclock update for all vcpus.
1615 * The worst case for a remote vcpu to update its kvmclock
1616 * is then bounded by maximum nohz sleep latency.
1617 */
1618
1619static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1620{
1621 int i;
1622 struct kvm *kvm = v->kvm;
1623 struct kvm_vcpu *vcpu;
1624
1625 kvm_for_each_vcpu(i, vcpu, kvm) {
1626 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
1627 kvm_vcpu_kick(vcpu);
1628 }
1629}
1630
1590static bool msr_mtrr_valid(unsigned msr) 1631static bool msr_mtrr_valid(unsigned msr)
1591{ 1632{
1592 switch (msr) { 1633 switch (msr) {
@@ -1984,7 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1984 kvmclock_reset(vcpu); 2025 kvmclock_reset(vcpu);
1985 2026
1986 vcpu->arch.time = data; 2027 vcpu->arch.time = data;
1987 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2028 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
1988 2029
1989 /* we verify if the enable bit is set... */ 2030 /* we verify if the enable bit is set... */
1990 if (!(data & 1)) 2031 if (!(data & 1))
@@ -2701,7 +2742,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2701 * kvmclock on vcpu->cpu migration 2742 * kvmclock on vcpu->cpu migration
2702 */ 2743 */
2703 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 2744 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2704 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2745 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2705 if (vcpu->cpu != cpu) 2746 if (vcpu->cpu != cpu)
2706 kvm_migrate_timers(vcpu); 2747 kvm_migrate_timers(vcpu);
2707 vcpu->cpu = cpu; 2748 vcpu->cpu = cpu;
@@ -5238,7 +5279,13 @@ static void kvm_set_mmio_spte_mask(void)
5238 * Set the reserved bits and the present bit of an paging-structure 5279 * Set the reserved bits and the present bit of an paging-structure
5239 * entry to generate page fault with PFER.RSV = 1. 5280 * entry to generate page fault with PFER.RSV = 1.
5240 */ 5281 */
5241 mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr; 5282 /* Mask the reserved physical address bits. */
5283 mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
5284
5285 /* Bit 62 is always reserved for 32bit host. */
5286 mask |= 0x3ull << 62;
5287
5288 /* Set the present bit. */
5242 mask |= 1ull; 5289 mask |= 1ull;
5243 5290
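	/*
	 * Editor's worked example: with maxphyaddr = 36 the first term is
	 * ((1ull << (51 - 36 + 1)) - 1) << 36, i.e. bits 36..51 -- the
	 * reserved physical-address bits -- after which bits 62..63 and
	 * present bit 0 are ORed in. A guest access hitting such an spte
	 * faults with PFER.RSV = 1 and is steered into the MMIO path.
	 */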
5244#ifdef CONFIG_X86_64 5291#ifdef CONFIG_X86_64
@@ -5498,13 +5545,6 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5498 char instruction[3]; 5545 char instruction[3];
5499 unsigned long rip = kvm_rip_read(vcpu); 5546 unsigned long rip = kvm_rip_read(vcpu);
5500 5547
5501 /*
5502 * Blow out the MMU to ensure that no other VCPU has an active mapping
5503 * to ensure that the updated hypercall appears atomically across all
5504 * VCPUs.
5505 */
5506 kvm_mmu_zap_all(vcpu->kvm);
5507
5508 kvm_x86_ops->patch_hypercall(vcpu, instruction); 5548 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5509 5549
5510 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 5550 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
@@ -5702,6 +5742,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5702 __kvm_migrate_timers(vcpu); 5742 __kvm_migrate_timers(vcpu);
5703 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 5743 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
5704 kvm_gen_update_masterclock(vcpu->kvm); 5744 kvm_gen_update_masterclock(vcpu->kvm);
5745 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
5746 kvm_gen_kvmclock_update(vcpu);
5705 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 5747 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5706 r = kvm_guest_time_update(vcpu); 5748 r = kvm_guest_time_update(vcpu);
5707 if (unlikely(r)) 5749 if (unlikely(r))
@@ -6812,6 +6854,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
6812 return -EINVAL; 6854 return -EINVAL;
6813 6855
6814 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 6856 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6857 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6815 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 6858 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6816 6859
6817 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 6860 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
@@ -7040,22 +7083,18 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * If memory slot is created, or moved, we need to clear all
 	 * mmio sptes.
 	 */
-	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
-		kvm_mmu_zap_mmio_sptes(kvm);
-		kvm_reload_remote_mmus(kvm);
-	}
+	kvm_mmu_invalidate_mmio_sptes(kvm);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-	kvm_mmu_zap_all(kvm);
-	kvm_reload_remote_mmus(kvm);
+	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
@@ -7263,3 +7302,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
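Several hunks above replace the eager kvm_mmu_zap_all() + kvm_reload_remote_mmus() combination with kvm_mmu_invalidate_zap_all_pages(), and kvm_arch_init_vm() gains a zapped_obsolete_pages list. The underlying idea is generation-based invalidation: rather than walking every shadow page up front, a valid-generation counter is bumped and pages carrying a stale generation are treated as obsolete and reclaimed lazily. A minimal sketch of that idea (illustrative field names only, not the kernel's actual structures):

#include <stdbool.h>
#include <stdio.h>

struct shadow_page {
	unsigned long valid_gen;	/* generation the page was created in */
};

static unsigned long mmu_valid_gen;	/* current valid generation */

/* O(1) "zap all": every existing page becomes obsolete at once. */
static void invalidate_all_pages(void)
{
	mmu_valid_gen++;
}

static bool page_is_obsolete(const struct shadow_page *sp)
{
	return sp->valid_gen != mmu_valid_gen;
}

int main(void)
{
	struct shadow_page sp = { .valid_gen = mmu_valid_gen };

	invalidate_all_pages();
	printf("obsolete: %d\n", page_is_obsolete(&sp));	/* prints 1 */
	return 0;
}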
diff --git a/arch/x86/lguest/Makefile b/arch/x86/lguest/Makefile
index 94e0e54056a9..8f38d577a2fa 100644
--- a/arch/x86/lguest/Makefile
+++ b/arch/x86/lguest/Makefile
@@ -1,2 +1,2 @@
-obj-y := i386_head.o boot.o
+obj-y := head_32.o boot.o
 CFLAGS_boot.o := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/head_32.S
index 6ddfe4fc23c3..6ddfe4fc23c3 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/head_32.S
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3e724256dbee..d641897a1f4e 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -324,14 +324,11 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	res->start = start;
 	res->end = end;
 	info->res_offset[info->res_num] = addr.translation_offset;
+	info->res_num++;
 
-	if (!pci_use_crs) {
+	if (!pci_use_crs)
 		dev_printk(KERN_DEBUG, &info->bridge->dev,
 			   "host bridge window %pR (ignored)\n", res);
-		return AE_OK;
-	}
-
-	info->res_num++;
 
 	return AE_OK;
 }
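The effect of the reordering is that info->res_num now advances even when a window is merely logged as ignored, so the next _CRS descriptor no longer overwrites the previous slot. A standalone sketch contrasting the two control flows (simplified types; the field names follow the hunk):

#include <stdio.h>

struct info { int res_num; const char *res[8]; };

static void add_window_old(struct info *info, const char *win, int use_crs)
{
	info->res[info->res_num] = win;
	if (!use_crs)
		return;			/* slot index never advanced: next window overwrites */
	info->res_num++;
}

static void add_window_new(struct info *info, const char *win, int use_crs)
{
	info->res[info->res_num] = win;
	info->res_num++;		/* window kept even when ignored */
	if (!use_crs)
		printf("window %s (ignored)\n", win);
}

int main(void)
{
	struct info a = { 0 }, b = { 0 };

	add_window_old(&a, "winA", 0);
	add_window_old(&a, "winB", 0);
	add_window_new(&b, "winA", 0);
	add_window_new(&b, "winB", 0);
	printf("old keeps %d windows, new keeps %d\n", a.res_num, b.res_num);
	return 0;
}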
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d99cae8147d1..c1367b29c3b1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -40,11 +40,15 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, xen_resched_irq);
-static DEFINE_PER_CPU(int, xen_callfunc_irq);
-static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(int, xen_irq_work);
-static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
+struct xen_common_irq {
+	int irq;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -99,10 +103,47 @@ static void __cpuinit cpu_bringup_and_idle(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+static void xen_smp_intr_free(unsigned int cpu)
+{
+	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+		per_cpu(xen_resched_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_resched_irq, cpu).name);
+		per_cpu(xen_resched_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+		per_cpu(xen_callfunc_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfunc_irq, cpu).name);
+		per_cpu(xen_callfunc_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+		per_cpu(xen_debug_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_debug_irq, cpu).name);
+		per_cpu(xen_debug_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+				       NULL);
+		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+	}
+	if (xen_hvm_domain())
+		return;
+
+	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+		per_cpu(xen_irq_work, cpu).irq = -1;
+		kfree(per_cpu(xen_irq_work, cpu).name);
+		per_cpu(xen_irq_work, cpu).name = NULL;
+	}
+};
 static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
-	const char *resched_name, *callfunc_name, *debug_name;
+	char *resched_name, *callfunc_name, *debug_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -113,7 +154,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_resched_irq, cpu) = rc;
+	per_cpu(xen_resched_irq, cpu).irq = rc;
+	per_cpu(xen_resched_irq, cpu).name = resched_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -124,7 +166,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfunc_irq, cpu) = rc;
+	per_cpu(xen_callfunc_irq, cpu).irq = rc;
+	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -132,7 +175,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_debug_irq, cpu) = rc;
+	per_cpu(xen_debug_irq, cpu).irq = rc;
+	per_cpu(xen_debug_irq, cpu).name = debug_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -143,7 +187,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 
 	/*
 	 * The IRQ worker on PVHVM goes through the native path and uses the
@@ -161,26 +206,13 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_irq_work, cpu) = rc;
+	per_cpu(xen_irq_work, cpu).irq = rc;
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
 	return 0;
 
  fail:
-	if (per_cpu(xen_resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	if (per_cpu(xen_debug_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
-				       NULL);
-	if (xen_hvm_domain())
-		return rc;
-
-	if (per_cpu(xen_irq_work, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
-
+	xen_smp_intr_free(cpu);
 	return rc;
 }
 
@@ -433,12 +465,7 @@ static void xen_cpu_die(unsigned int cpu)
 		current->state = TASK_UNINTERRUPTIBLE;
 		schedule_timeout(HZ/10);
 	}
-	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-	if (!xen_hvm_domain())
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+	xen_smp_intr_free(cpu);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 }
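The new per-CPU struct pairs each bound IRQ with the kasprintf()'d name that was passed to bind_*_to_irqhandler() and previously leaked. Keeping them together lets one helper, xen_smp_intr_free(), unbind and kfree() in lockstep, and the .irq = -1 sentinel makes the helper safe on a partially initialized CPU, which is exactly what the simplified fail: path and xen_cpu_die() now rely on. A userspace analogue of the pattern (simplified; strdup() stands in for kasprintf()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct common_irq {
	int irq;
	char *name;
};

static void intr_free(struct common_irq *ci)
{
	if (ci->irq < 0)
		return;		/* never bound, or already freed */
	printf("unbind irq %d (%s)\n", ci->irq, ci->name);
	ci->irq = -1;
	free(ci->name);
	ci->name = NULL;
}

int main(void)
{
	struct common_irq resched = { .irq = -1 };

	intr_free(&resched);	/* no-op: safe on the error path */

	resched.name = strdup("resched0");
	resched.irq = 42;
	intr_free(&resched);	/* unbinds and frees exactly once */
	return 0;
}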
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3002ec1bb71a..a40f8508e760 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/paravirt.h>
 
@@ -165,6 +166,7 @@ static int xen_spin_trylock(struct arch_spinlock *lock)
 	return old == 0;
 }
 
+static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
@@ -362,7 +364,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 void __cpuinit xen_init_lock_cpu(int cpu)
 {
 	int irq;
-	const char *name;
+	char *name;
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
@@ -385,6 +387,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
+		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -401,6 +404,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 }
 
 void __init xen_init_spinlocks(void)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfdf9e1c..a690868be837 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -14,6 +14,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
@@ -36,9 +37,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 /* snapshots of runstate info */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
 static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -115,7 +115,7 @@ static void do_stolen_accounting(void)
 {
 	struct vcpu_runstate_info state;
 	struct vcpu_runstate_info *snap;
-	s64 blocked, runnable, offline, stolen;
+	s64 runnable, offline, stolen;
 	cputime_t ticks;
 
 	get_runstate_snapshot(&state);
@@ -125,7 +125,6 @@ static void do_stolen_accounting(void)
 	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing* */
-	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
 	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
 	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
 
@@ -141,17 +140,6 @@ static void do_stolen_accounting(void)
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
-
-	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time. */
-	blocked += __this_cpu_read(xen_residual_blocked);
-
-	if (blocked < 0)
-		blocked = 0;
-
-	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__this_cpu_write(xen_residual_blocked, blocked);
-	account_idle_ticks(ticks);
 }
 
 /* Get the TSC speed from Xen */
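The retained stolen-time path shows the residual-nanosecond idiom: accumulated ns are folded into whole scheduler ticks and the sub-tick remainder is written back for the next round, so rounding never loses time. A standalone analogue (plain / and % in place of iter_div_u64_rem(); the NS_PER_TICK value is an assumption for HZ=100):

#include <stdio.h>
#include <stdint.h>

#define NS_PER_TICK 10000000ull	/* 10 ms tick, assuming HZ=100 */

static uint64_t residual_stolen;	/* per-CPU in the real code */

static uint64_t stolen_to_ticks(uint64_t stolen_ns)
{
	uint64_t total = stolen_ns + residual_stolen;
	uint64_t ticks = total / NS_PER_TICK;

	residual_stolen = total % NS_PER_TICK;	/* carry the remainder */
	return ticks;
}

int main(void)
{
	/* 25 ms -> 2 ticks, 5 ms carried over. */
	printf("%llu ticks\n", (unsigned long long)stolen_to_ticks(25000000));
	/* 5 ms + 5 ms residual -> 1 tick, nothing lost. */
	printf("%llu ticks\n", (unsigned long long)stolen_to_ticks(5000000));
	return 0;
}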
@@ -377,11 +365,16 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
 
 static const struct clock_event_device *xen_clockevent =
 	&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
+
+struct xen_clock_event_device {
+	struct clock_event_device evt;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
 static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
 	irqreturn_t ret;
 
 	ret = IRQ_NONE;
@@ -395,14 +388,30 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 	return ret;
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
+
+	if (evt->irq >= 0) {
+		unbind_from_irqhandler(evt->irq, NULL);
+		evt->irq = -1;
+		kfree(per_cpu(xen_clock_events, cpu).name);
+		per_cpu(xen_clock_events, cpu).name = NULL;
+	}
+}
+
 void xen_setup_timer(int cpu)
 {
-	const char *name;
+	char *name;
 	struct clock_event_device *evt;
 	int irq;
 
-	evt = &per_cpu(xen_clock_events, cpu);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
 	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+	if (evt->irq >= 0)
+		xen_teardown_timer(cpu);
 
 	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
@@ -420,22 +429,15 @@ void xen_setup_timer(int cpu)
 
 	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
+	per_cpu(xen_clock_events, cpu).name = name;
 }
 
-void xen_teardown_timer(int cpu)
-{
-	struct clock_event_device *evt;
-	BUG_ON(cpu == 0);
-	evt = &per_cpu(xen_clock_events, cpu);
-	unbind_from_irqhandler(evt->irq, NULL);
-	evt->irq = -1;
-}
-
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
 
-	clockevents_register_device(&__get_cpu_var(xen_clock_events));
+	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
 }
 
 void xen_timer_resume(void)
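As with the SMP IRQs, the clockevent now lives in a per-CPU wrapper that carries the name it was registered under, so xen_teardown_timer() can kfree() it when unbinding, and xen_setup_timer() tears down any previous binding before installing a new one, making repeated setup (e.g. across CPU unplug/replug) idempotent. A userspace analogue of the wrapper-and-teardown pattern (simplified types; strdup() stands in for kasprintf()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct clock_event_device { int irq; };

struct clock_event_wrapper {
	struct clock_event_device evt;	/* embedded, as in the hunk */
	char *name;
};

static void teardown(struct clock_event_wrapper *w)
{
	if (w->evt.irq < 0)
		return;			/* nothing bound: safe no-op */
	printf("unbind timer irq %d (%s)\n", w->evt.irq, w->name);
	w->evt.irq = -1;
	free(w->name);
	w->name = NULL;
}

static void setup(struct clock_event_wrapper *w, int irq, const char *name)
{
	teardown(w);			/* drop any stale binding first */
	w->name = strdup(name);
	w->evt.irq = irq;
}

int main(void)
{
	struct clock_event_wrapper w = { .evt.irq = -1 };

	setup(&w, 7, "timer0");
	setup(&w, 9, "timer0");		/* re-setup unbinds irq 7 first */
	teardown(&w);
	return 0;
}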