-rw-r--r--  CREDITS | 3
-rw-r--r--  Documentation/ia64/paravirt_ops.txt | 137
-rw-r--r--  arch/ia64/Kconfig | 4
-rw-r--r--  arch/ia64/Makefile | 6
-rw-r--r--  arch/ia64/kernel/Makefile | 44
-rw-r--r--  arch/ia64/kernel/acpi.c | 5
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  arch/ia64/kernel/entry.S | 115
-rw-r--r--  arch/ia64/kernel/head.S | 41
-rw-r--r--  arch/ia64/kernel/iosapic.c | 45
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 19
-rw-r--r--  arch/ia64/kernel/ivt.S | 462
-rw-r--r--  arch/ia64/kernel/minstate.h | 13
-rw-r--r--  arch/ia64/kernel/module.c | 3
-rw-r--r--  arch/ia64/kernel/nr-irqs.c | 24
-rw-r--r--  arch/ia64/kernel/paravirt.c | 369
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h | 29
-rw-r--r--  arch/ia64/kernel/paravirtentry.S | 60
-rw-r--r--  arch/ia64/kernel/setup.c | 10
-rw-r--r--  arch/ia64/kernel/smpboot.c | 2
-rw-r--r--  arch/ia64/kernel/time.c | 23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/x86/Kconfig.debug | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 4
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 52
-rw-r--r--  arch/x86/mm/numa_64.c | 4
-rw-r--r--  arch/x86/pci/early.c | 16
-rw-r--r--  drivers/char/mmtimer.c | 29
-rw-r--r--  drivers/cpufreq/cpufreq.c | 45
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 24
-rw-r--r--  drivers/cpufreq/freq_table.c | 12
-rw-r--r--  include/asm-ia64/Kbuild | 2
-rw-r--r--  include/asm-ia64/gcc_intrin.h | 24
-rw-r--r--  include/asm-ia64/hw_irq.h | 23
-rw-r--r--  include/asm-ia64/intel_intrin.h | 41
-rw-r--r--  include/asm-ia64/intrinsics.h | 55
-rw-r--r--  include/asm-ia64/iosapic.h | 18
-rw-r--r--  include/asm-ia64/irq.h | 9
-rw-r--r--  include/asm-ia64/mmu_context.h | 6
-rw-r--r--  include/asm-ia64/native/inst.h | 175
-rw-r--r--  include/asm-ia64/native/irq.h | 35
-rw-r--r--  include/asm-ia64/paravirt.h | 255
-rw-r--r--  include/asm-ia64/paravirt_privop.h | 114
-rw-r--r--  include/asm-ia64/smp.h | 2
-rw-r--r--  include/asm-ia64/system.h | 11
-rw-r--r--  include/asm-ia64/uv/uv_mmrs.h | 423
-rw-r--r--  include/asm-x86/apic.h | 2
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 6
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  mm/slub.c | 10
53 files changed, 2340 insertions(+), 494 deletions(-)
diff --git a/CREDITS b/CREDITS
index e97bea06b59f..077b147388bd 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3344,8 +3344,7 @@ S: Spain
 N: Linus Torvalds
 E: torvalds@linux-foundation.org
 D: Original kernel hacker
-S: 12725 SW Millikan Way, Suite 400
-S: Beaverton, Oregon 97005
+S: Portland, Oregon 97005
 S: USA
 
 N: Marcelo Tosatti
diff --git a/Documentation/ia64/paravirt_ops.txt b/Documentation/ia64/paravirt_ops.txt
new file mode 100644
index 000000000000..39ded02ec33f
--- /dev/null
+++ b/Documentation/ia64/paravirt_ops.txt
@@ -0,0 +1,137 @@
+Paravirt_ops on IA64
+====================
+                21 May 2008, Isaku Yamahata <yamahata@valinux.co.jp>
+
+
+Introduction
+------------
+The aim of this documentation is to help with maintainability and to
+encourage people to use paravirt_ops/IA64.
+
+paravirt_ops (pv_ops for short) is the way the Linux kernel supports
+virtualization on x86. Several approaches to virtualization support
+were proposed, and paravirt_ops emerged as the winner.
+Meanwhile, there are now several IA64 virtualization technologies,
+such as kvm/IA64, xen/IA64 and many other academic IA64 hypervisors,
+so it makes sense to add a generic virtualization infrastructure to
+Linux/IA64.
+
+
+What is paravirt_ops?
+---------------------
+It was developed on x86 to support virtualization via an API, not an ABI.
+It allows each hypervisor to override, at the API level, the operations
+that are important to hypervisors, and it allows a single kernel binary
+to run in all supported execution environments, including native machines.
+Essentially, paravirt_ops is a set of function pointers that represent
+operations corresponding to low-level sensitive instructions and
+high-level functionality in various areas. One significant difference
+from a usual function pointer table is that it allows optimization via
+binary patching, because some of these operations are very performance
+sensitive and the indirect-call overhead is not negligible. With binary
+patching, an indirect C function call can be transformed into a direct
+call or into in-place execution, eliminating the overhead.
+
+The operations of paravirt_ops are therefore classified into three categories:
+- simple indirect calls
+  These operations correspond to high-level functionality, so the
+  overhead of an indirect call is not very important.
+
+- indirect calls that allow optimization via binary patching
+  These operations usually correspond to low-level instructions. They
+  are called frequently and are performance critical, so their overhead
+  matters a great deal.
+
+- a set of macros for hand-written assembly code
+  Hand-written assembly code (.S files) also needs paravirtualization,
+  because it contains sensitive instructions, or because some of its
+  code paths are very performance critical.
+
+
+The relation to the IA64 machine vector
+---------------------------------------
+Linux/IA64 has the IA64 machine vector functionality, which allows the
+kernel to switch implementations (e.g. initialization, ipi, dma api...)
+depending on the platform it is executing on.
+Some implementations can be replaced very easily by defining a new
+machine vector, so another approach to virtualization support would be
+to enhance the machine vector functionality.
+The paravirt_ops approach was taken instead, because:
+- virtualization support needs a wider scope than the machine vector
+  provides, e.g. low-level instruction paravirtualization, which must
+  be initialized very early, before platform detection.
+
+- virtualization support needs more functionality, such as binary
+  patching. The calling overhead may not be large compared with the
+  emulation overhead of virtualization, but in the native case the
+  overhead should be eliminated completely.
+  A single kernel binary should run in every environment, including
+  native, and the overhead of paravirt_ops in the native environment
+  should be as small as possible.
+
+- for full virtualization technologies, e.g. KVM/IA64 or a
+  Xen/IA64 HVM domain, the result would be
+  (the emulated platform's machine vector, probably dig) + (pv_ops).
+  This means that the virtualization support layer should sit below
+  the machine vector layer.
+
+It might be better to move some function pointers from paravirt_ops
+to the machine vector. In fact, the Xen domU case utilizes both
+pv_ops and the machine vector.
+
+
+IA64 paravirt_ops
+-----------------
+This section discusses the concrete paravirt_ops.
+Because of the architectural differences between ia64 and x86, the
+resulting set of functions is very different from the x86 pv_ops.
+
+- C function pointer tables
+These are not very performance critical, so simple C indirect function
+calls are acceptable. The following structures are defined at the
+moment; for details see linux/include/asm-ia64/paravirt.h.
+  - struct pv_info
+    This structure describes the execution environment.
+  - struct pv_init_ops
+    This structure describes the various initialization hooks.
+  - struct pv_iosapic_ops
+    This structure describes hooks into iosapic operations.
+  - struct pv_irq_ops
+    This structure describes hooks into irq-related operations.
+  - struct pv_time_ops
+    This structure describes hooks for stolen-time accounting.
+
+- a set of indirect calls that need optimization
+Currently this class of functions corresponds to a subset of the IA64
+intrinsics. At this moment the optimization with binary patching is
+not implemented yet.
+struct pv_cpu_ops is defined; for details see
+linux/include/asm-ia64/paravirt_privop.h.
+Mostly these correspond 1-to-1 to the ia64 intrinsics.
+Caveat: they are currently defined as C indirect function pointers,
+but in order to support binary patch optimization they will be changed
+to use GCC extended inline assembly code.
+
+- a set of macros for hand-written assembly code (.S files)
+For maintainability, the approach taken for .S files is to keep a
+single source and compile it multiple times with different macro
+definitions. Each pv_ops instance must define those macros in order
+to compile. The important thing here is that sensitive but
+non-privileged instructions must be paravirtualized, and that some
+privileged instructions also need paravirtualization for reasonable
+performance. Developers who modify .S files must be aware of this.
+At the moment a simple checker is implemented to detect
+paravirtualization breakage, but it does not cover all cases.
+
+Sometimes this set of macros is called pv_cpu_asm_op, but there is no
+corresponding structure in the source code.
+These macros mostly correspond 1:1 to a subset of the privileged
+instructions; see linux/include/asm-ia64/native/inst.h.
+Some functions written in assembly also need to be overridden, so
+each pv_ops instance has to define further macros. Again, see
+linux/include/asm-ia64/native/inst.h.
+
+
+These structures must be initialized very early, before start_kernel,
+probably in head.S using multiple entry points or some other trick.
+For the native implementation see linux/arch/ia64/kernel/paravirt.c.
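
For readers new to the pattern, a minimal sketch of such a C function
pointer table may help. This is illustrative only, not the kernel's
actual definitions: the struct and field names below are modelled on
the pv_iosapic_ops hooks described above and on the ia64_native_*
functions introduced later in this patch.

	/* sketch: an ops table with native defaults and a thin wrapper */
	struct pv_iosapic_ops_sketch {
		void (*pcat_compat_init)(void);
		struct irq_chip *(*get_irq_chip)(unsigned long trigger);
	};

	/* the native instance simply points at the native implementations */
	static struct pv_iosapic_ops_sketch iosapic_ops = {
		.pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
		.get_irq_chip     = ia64_native_iosapic_get_irq_chip,
	};

	/* call sites go through a wrapper, so a hypervisor can swap the
	 * table very early in boot without touching any caller */
	static inline struct irq_chip *
	iosapic_get_irq_chip(unsigned long trigger)
	{
		return iosapic_ops.get_irq_chip(trigger);
	}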
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18bcc10903b4..451f2ffb137b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -540,8 +540,8 @@ config KEXEC
 	  strongly in flux, so no good recommendation can be made.
 
 config CRASH_DUMP
-	bool "kernel crash dumps (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	bool "kernel crash dumps"
+	depends on IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
 	help
 	  Generate crash dump after being started by kexec.
 
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e67ee3f27698..905d25b13d5a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -100,3 +100,9 @@ define archhelp
 	echo '  boot		- Build vmlinux and bootloader for Ski simulator'
 	echo '* unwcheck	- Check vmlinux for invalid unwind info'
 endef
+
+archprepare: make_nr_irqs_h FORCE
+PHONY += make_nr_irqs_h FORCE
+
+make_nr_irqs_h: FORCE
+	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 13fd10e8699e..87fea11aecb7 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -36,6 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
+
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -70,3 +72,45 @@ $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
+
+# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
+define sed-y
+	"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+quiet_cmd_nr_irqs = GEN     $@
+define cmd_nr_irqs
+	(set -e; \
+	 echo "#ifndef __ASM_NR_IRQS_H__"; \
+	 echo "#define __ASM_NR_IRQS_H__"; \
+	 echo "/*"; \
+	 echo " * DO NOT MODIFY."; \
+	 echo " *"; \
+	 echo " * This file was generated by Kbuild"; \
+	 echo " *"; \
+	 echo " */"; \
+	 echo ""; \
+	 sed -ne $(sed-y) $<; \
+	 echo ""; \
+	 echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
+				  $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+	$(Q)mkdir -p $(dir $@)
+	$(call if_changed_dep,cc_s_c)
+
+include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+	$(Q)mkdir -p $(dir $@)
+	$(call cmd,nr_irqs)
+
+clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
+
+#
+# native ivt.S and entry.S
+#
+ASM_PARAVIRT_OBJS = ivt.o entry.o
+define paravirtualized_native
+AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+endef
+$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
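
The generation rule above uses the same trick as asm-offsets.h: the C
file emits "->NAME value" markers into the compiler's assembly output,
and sed-y rewrites them into #defines. The compile-time max() over the
per-hypervisor IRQ counts can be expressed with a union, since the size
of a union is the size of its largest member. A hedged sketch of what
arch/ia64/kernel/nr-irqs.c presumably looks like (details may differ):

	/* sketch: sizeof(union) yields max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS) */
	#include <linux/kbuild.h>

	void foo(void)
	{
		union paravirt_nr_irqs_max {
			char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
	#ifdef CONFIG_XEN
			char xen_nr_irqs[XEN_NR_IRQS];
	#endif
		};

		DEFINE(NR_IRQS, sizeof(union paravirt_nr_irqs_max));
	}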
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 43687cc60dfb..5d1eb7ee2bf6 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -774,7 +774,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int pxm_id;
@@ -854,8 +854,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	union acpi_object *obj;
 	struct acpi_madt_local_sapic *lsapic;
 	cpumask_t tmp_map;
-	long physid;
-	int cpu;
+	int cpu, physid;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index b8498ea62068..7b435451b3dc 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -51,7 +51,7 @@ processor_set_pstate (
 	retval = ia64_pal_set_pstate((u64)value);
 
 	if (retval) {
-		dprintk("Failed to set freq to 0x%x, with error 0x%x\n",
+		dprintk("Failed to set freq to 0x%x, with error 0x%lx\n",
 			value, retval);
 		return -ENODEV;
 	}
@@ -74,7 +74,7 @@ processor_get_pstate (
 
 	if (retval)
 		dprintk("Failed to get current freq with "
-			"error 0x%x, idx 0x%x\n", retval, *value);
+			"error 0x%lx, idx 0x%x\n", retval, *value);
 
 	return (int)retval;
 }
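
Both format fixes match ia64's LP64 types: ia64_pal_set_pstate() returns
s64, which is a long on ia64, so printing it with %x draws a printf-format
warning and reads the wrong width. A one-line illustration (hedged, not
taken from the patch itself):

	/* sketch: 64-bit values need the 'l' length modifier on LP64 */
	s64 retval = -1;			/* e.g. a PAL call status */
	printk("error 0x%lx\n", retval);	/* not 0x%x */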
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ca2bb95726de..56ab156c48ae 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -23,6 +23,11 @@
  *	11/07/2000
  */
 /*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ */
+/*
  * Global (preserved) predicate usage on syscall entry/exit path:
  *
  *	pKStk:		See entry.h.
@@ -45,6 +50,7 @@
 
 #include "minstate.h"
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 	/*
 	 * execve() is special because in case of success, we need to
 	 * setup a null register window frame.
@@ -173,6 +179,7 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -180,7 +187,7 @@ END(sys_clone)
  *	called.  The code starting at .map relies on this.  The rest of the code
  *	doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 	;;
 .done:
 	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=in0	// update "current" application register
+	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
 	mov r8=r13			// return pointer to previously running task
 	mov r13=in0			// set "current" pointer
 	;;
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
 	br.ret.sptk.many rp		// boogie on out in new context
 
 .map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
+	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
 	or r23=r25,r20			// construct PA | page properties
 	mov r25=IA64_GRANULE_SHIFT<<2
 	;;
-	mov cr.itir=r25
-	mov cr.ifa=in0			// VA of next task...
+	MOV_TO_ITIR(p0, r25, r8)
+	MOV_TO_IFA(in0, r8)		// VA of next task...
 	;;
 	mov r25=IA64_TR_CURRENT_STACK
-	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
+	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
-	ssm psr.ic			// reenable the psr.ic bit
-	;;
-	srlz.d
+	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
  *	- b7 holds address to return to
  *	- must not touch r8-r11
  */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
 	.prologue
 	.altrp b7
 
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
 (pUStk)	rsm psr.i				// disable interrupts
-	br.cond.sptk .work_pending_syscall_end
+	br.cond.sptk ia64_work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+	;;
+	br.cond.sptk.few ia64_leave_syscall
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  *	need to switch to bank 0 and doesn't restore the scratch registers.
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
 	 * extra work.  We always check for extra work when returning to user-level.
 	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 	 * is 0.  After extra work processing has been completed, execution
-	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r2, r18)			// disable interrupts
 	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r2, r18)
 	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
-.work_processed_syscall:
+.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	adds r2=PT(LOADRS)+16,r12
 (pUStk)	mov.m r22=ar.itc			// fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
 (pNonSys) break 0		// bug check: we shouldn't be here if pNonSys is TRUE!
 	;;
 	invala			// M0|1 invalidate ALAT
-	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
+	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
 	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
 	;;
 #endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
 	nop 0
 	;;
 	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)
 
 	srlz.d				// M0   ensure interruption collection is off (for cover)
 	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
-	cover				// B    add current frame into dirty partition & set cr.ifs
+	COVER				// B    add current frame into dirty partition & set cr.ifs
 	;;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mov r19=ar.bsp			// M2   get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
 	mov.m ar.ssd=r0			// M2   clear ar.ssd
 	mov f11=f0			// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 #ifdef CONFIG_IA32_SUPPORT
 GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
 #endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r17, r31)			// disable interrupts
 	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r17, r31)
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.csd=r30
 	mov ar.ssd=r31
 	;;
-	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
+	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
 	invala			// invalidate ALAT
 	;;
 	ld8.fill r22=[r2],24
@@ -942,7 +969,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.ccv=r15
 	;;
 	ldf.fill f11=[r2]
-	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
+	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
 	;;
 (pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
 	adds r16=PT(CR_IPSR)+16,r12
@@ -950,12 +977,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.pred.rel.mutex pUStk,pKStk
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 (pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
 	nop.i 0
 	;;
 #else
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * NOTE: alloc, loadrs, and cover can't be predicated.
 	 */
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover			// add current frame into dirty partition and set cr.ifs
+	COVER			// add current frame into dirty partition and set cr.ifs
 	;;
 	mov r19=ar.bsp		// get new backing store pointer
 rbs_switch:
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
 (pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
 (pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
 	;;
-	mov cr.ipsr=r29		// M2
+	MOV_TO_IPSR(p0, r29, r25)	// M2
 	mov ar.pfs=r26		// I0
 (pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
 
-(p9)	mov cr.ifs=r30		// M2
+	MOV_TO_IFS(p9, r30, r25)// M2
 	mov b0=r21		// I0
 (pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
 
 	mov ar.fpsr=r20		// M2
-	mov cr.iip=r28		// M2
+	MOV_TO_IIP(r28, r25)	// M2
 	nop 0
 	;;
@@ -1148,7 +1175,7 @@ skip_rbs_switch:
 
 	mov ar.rsc=r27		// M2
 	mov pr=r31,-1		// I0
-	rfi			// B
+	RFI			// B
 
 	/*
 	 * On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
 	;;
 (pKStk)	st4 [r20]=r21
 #endif
-	ssm psr.i		// enable interrupts
+	SSM_PSR_I(p0, p6, r2)	// enable interrupts
 	br.call.spnt.many rp=schedule
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r2, r20)	// disable interrupts
 	;;
 #ifdef CONFIG_PREEMPT
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
 (pKStk)	st4 [r20]=r0		// preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
-.work_pending_syscall_end:
+.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
 	;;
 	ld8 r8=[r2]
 	ld8 r10=[r3]
-	br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+	br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 ENTRY(handle_syscall_error)
 	/*
 	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
  * We declare 8 input registers so the system call args get preserved,
  * in case we need to restart a system call.
  */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
 	mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_leave_kernel
+	mov.sptk b7=r8,ia64_native_leave_kernel
 	;;
 	mov ar.unat=r9
 	br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
 	data8 sys_timerfd_gettime
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
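
Throughout this file the upper-case names (RSM_PSR_I, MOV_FROM_PSR,
COVER, RFI, ...) are the paravirtualization macros described in
Documentation/ia64/paravirt_ops.txt: each pv_ops instance supplies its
own definitions and the file is assembled once per instance. As a
hedged sketch of the native side (the exact spellings live in
include/asm-ia64/native/inst.h), the macros expand straight back into
the privileged instructions they replaced, leaving the clobber
arguments unused:

	/* sketch of native expansions; see include/asm-ia64/native/inst.h */
	#define RFI			rfi
	#define COVER			cover
	#define MOV_FROM_IFA(reg)	mov reg = cr.ifa
	#define RSM_PSR_I(pred, clob0, clob1)	\
		(pred)	rsm psr.i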
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index ddeab4e36fd5..db540e58c783 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -26,11 +26,14 @@
 #include <asm/mmu_context.h>
 #include <asm/asm-offsets.h>
 #include <asm/pal.h>
+#include <asm/paravirt.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/mca_asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define SAL_PSR_BITS_TO_SET \
@@ -367,6 +370,44 @@ start_ap:
 	;;
 (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
 
+#ifdef CONFIG_PARAVIRT
+
+	movl r14=hypervisor_setup_hooks
+	movl r15=hypervisor_type
+	mov r16=num_hypervisor_hooks
+	;;
+	ld8 r2=[r15]
+	;;
+	cmp.ltu p7,p0=r2,r16	// array size check
+	shladd r8=r2,3,r14
+	;;
+(p7)	ld8 r9=[r8]
+	;;
+(p7)	mov b1=r9
+(p7)	cmp.ne.unc p7,p0=r9,r0	// no actual branch to NULL
+	;;
+(p7)	br.call.sptk.many rp=b1
+
+	__INITDATA
+
+default_setup_hook = 0		// Currently nothing needs to be done.
+
+	.weak xen_setup_hook
+
+	.global hypervisor_type
+hypervisor_type:
+	data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
+
+	// must be in the same order as PARAVIRT_HYPERVISOR_TYPE_xxx
+
+hypervisor_setup_hooks:
+	data8 default_setup_hook
+	data8 xen_setup_hook
+num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
+	.previous
+
+#endif
+
 #ifdef CONFIG_SMP
 (isAP)	br.call.sptk.many rp=start_secondary
 .ret0:
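
The dispatch sequence added above is easier to read as C. A hedged
equivalent of what the assembly computes (illustrative only; the real
code must stay in head.S because it runs before the C environment is
up, which is also why the hook table lives in __INITDATA):

	/* sketch: C equivalent of the head.S hypervisor hook dispatch */
	extern unsigned long hypervisor_type;
	extern void (*hypervisor_setup_hooks[])(void);
	extern unsigned long num_hypervisor_hooks;

	static void run_hypervisor_setup_hook(void)
	{
		unsigned long type = hypervisor_type;

		if (type < num_hypervisor_hooks &&	/* array size check */
		    hypervisor_setup_hooks[type])	/* no branch to NULL */
			hypervisor_setup_hooks[type]();
	}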
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 39752cdef6ff..3bc2fa64f87f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -585,6 +585,15 @@ static inline int irq_is_shared (int irq)
 	return (iosapic_intr_info[irq].count > 1);
 }
 
+struct irq_chip*
+ia64_native_iosapic_get_irq_chip(unsigned long trigger)
+{
+	if (trigger == IOSAPIC_EDGE)
+		return &irq_type_iosapic_edge;
+	else
+		return &irq_type_iosapic_level;
+}
+
 static int
 register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
@@ -635,13 +644,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	iosapic_intr_info[irq].dmode	= delivery;
 	iosapic_intr_info[irq].trigger	= trigger;
 
-	if (trigger == IOSAPIC_EDGE)
-		irq_type = &irq_type_iosapic_edge;
-	else
-		irq_type = &irq_type_iosapic_level;
+	irq_type = iosapic_get_irq_chip(trigger);
 
 	idesc = irq_desc + irq;
-	if (idesc->chip != irq_type) {
+	if (irq_type != NULL && idesc->chip != irq_type) {
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
@@ -974,6 +980,22 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 }
 
 void __init
+ia64_native_iosapic_pcat_compat_init(void)
+{
+	if (pcat_compat) {
+		/*
+		 * Disable the compatibility mode interrupts (8259 style),
+		 * needs IN/OUT support enabled.
+		 */
+		printk(KERN_INFO
+		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
+		       __func__);
+		outb(0xff, 0xA1);
+		outb(0xff, 0x21);
+	}
+}
+
+void __init
 iosapic_system_init (int system_pcat_compat)
 {
 	int irq;
@@ -987,17 +1009,8 @@ iosapic_system_init (int system_pcat_compat)
 	}
 
 	pcat_compat = system_pcat_compat;
-	if (pcat_compat) {
-		/*
-		 * Disable the compatibility mode interrupts (8259 style),
-		 * needs IN/OUT support enabled.
-		 */
-		printk(KERN_INFO
-		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __func__);
-		outb(0xff, 0xA1);
-		outb(0xff, 0x21);
-	}
+	if (pcat_compat)
+		iosapic_pcat_compat_init();
 }
 
 static inline int
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5538471e8d68..28d3d483db92 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -196,7 +196,7 @@ static void clear_irq_vector(int irq)
 }
 
 int
-assign_irq_vector (int irq)
+ia64_native_assign_irq_vector (int irq)
 {
 	unsigned long flags;
 	int vector, cpu;
@@ -222,7 +222,7 @@ assign_irq_vector (int irq)
 }
 
 void
-free_irq_vector (int vector)
+ia64_native_free_irq_vector (int vector)
 {
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
@@ -600,7 +600,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
-extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -623,7 +622,7 @@ static struct irqaction tlb_irqaction = {
 #endif
 
 void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
 	irq_desc_t *desc;
 	unsigned int irq;
@@ -638,13 +637,21 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 }
 
 void __init
-init_IRQ (void)
+ia64_native_register_ipi(void)
 {
-	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#endif
+}
+
+void __init
+init_IRQ (void)
+{
+	ia64_register_ipi();
+	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
 	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
 		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
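
With init_IRQ() now calling ia64_register_ipi(), the renamed
ia64_native_* functions become the default targets of the pv_irq_ops
hooks. A hedged sketch of how the native instance is presumably wired
up in arch/ia64/kernel/paravirt.c (field names inferred from the
renames in this hunk, not quoted from the file):

	/* sketch: native pv_irq_ops instance */
	struct pv_irq_ops pv_irq_ops = {
		.register_ipi        = ia64_native_register_ipi,
		.assign_irq_vector   = ia64_native_assign_irq_vector,
		.free_irq_vector     = ia64_native_free_irq_vector,
		.register_percpu_irq = ia64_native_register_percpu_irq,
	};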
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 80b44ea052d7..c39627df3cde 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -12,6 +12,14 @@
  *
  * 00/08/23	Asit Mallick <asit.k.mallick@intel.com>	TLB handling for SMP
  * 00/12/20	David Mosberger-Tang <davidm@hpl.hp.com>	DTLB/ITLB handler now uses virtual PT.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Dan Magenheimer <dan.magenheimer@hp.com>
+ *	Xen paravirtualization
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ *	Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
 /*
  * This file defines the interruption vector table used by the CPU.
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss)
 	 *	- the faulting virtual address uses unimplemented address bits
 	 *	- the faulting virtual address has no valid page table mapping
 	 */
-	mov r16=cr.ifa				// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
 	movl r18=PAGE_SHIFT
-	mov r25=cr.itir
+	MOV_FROM_ITIR(r25)
 #endif
 	;;
-	rsm psr.dt				// use physical addressing for data
+	RSM_PSR_DT				// use physical addressing for data
 	mov r31=pr				// save the predicate registers
 	mov r19=IA64_KR(PT_BASE)		// get page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss)
 	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
 	;;
 (p7)	ld8 r18=[r21]				// read *pte
-	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
+	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
 	;;
 (p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
-	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
+	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
 	;;					// avoid RAW on p7
 (p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
 	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
 	;;
-(p10)	itc.i r18				// insert the instruction TLB entry
-(p11)	itc.d r18				// insert the data TLB entry
+	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
+						// insert the data TLB entry
 (p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
-	mov cr.ifa=r22
+	MOV_TO_IFA(r22, r24)
 
 #ifdef CONFIG_HUGETLB_PAGE
-(p8)	mov cr.itir=r25				// change to default page-size for VHPT
+	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
 #endif
 
 	/*
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss)
 	 */
 	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
 	;;
-(p7)	itc.d r24
+	ITC_D(p7, r24, r25)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss)
 #endif
 
 	mov pr=r31,-1				// restore predicate registers
-	rfi
+	RFI
 END(vhpt_miss)
 
 	.org ia64_ivt+0x400
@@ -248,11 +256,11 @@ ENTRY(itlb_miss)
 	 * mode, walk the page table, and then re-execute the PTE read and
 	 * go on normally after that.
 	 */
-	mov r16=cr.ifa				// get virtual address
+	MOV_FROM_IFA(r16)			// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 .itlb_fault:
-	mov r17=cr.iha				// get virtual address of PTE
+	MOV_FROM_IHA(r17)			// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
 1:	ld8 r18=[r17]				// read *pte
@@ -261,7 +269,7 @@ ENTRY(itlb_miss)
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
 (p6)	br.cond.spnt page_fault
 	;;
-	itc.i r18
+	ITC_I(p0, r18, r19)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -278,7 +286,7 @@ ENTRY(itlb_miss)
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
-	rfi
+	RFI
 END(itlb_miss)
 
 	.org ia64_ivt+0x0800
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss)
 	 * mode, walk the page table, and then re-execute the PTE read and
 	 * go on normally after that.
 	 */
-	mov r16=cr.ifa				// get virtual address
+	MOV_FROM_IFA(r16)			// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 dtlb_fault:
-	mov r17=cr.iha				// get virtual address of PTE
+	MOV_FROM_IHA(r17)			// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
 1:	ld8 r18=[r17]				// read *pte
@@ -305,7 +313,7 @@ dtlb_fault:
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
 (p6)	br.cond.spnt page_fault
 	;;
-	itc.d r18
+	ITC_D(p0, r18, r19)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -322,7 +330,7 @@ dtlb_fault:
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
-	rfi
+	RFI
 END(dtlb_miss)
 
 	.org ia64_ivt+0x0c00
@@ -330,9 +338,9 @@ END(dtlb_miss)
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(alt_itlb_miss)
 	DBG_FAULT(3)
-	mov r16=cr.ifa		// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
 	movl r17=PAGE_KERNEL
-	mov r21=cr.ipsr
+	MOV_FROM_IPSR(p0, r21)
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	mov r31=pr
 	;;
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss)
 	;;
 	cmp.gt p8,p0=6,r22			// user mode
 	;;
-(p8)	thash r17=r16
+	THASH(p8, r17, r16, r23)
 	;;
-(p8)	mov cr.iha=r17
+	MOV_TO_IHA(p8, r17, r23)
 (p8)	mov r29=b0				// save b0
 (p8)	br.cond.dptk .itlb_fault
 #endif
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss)
 	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
 (p8)	br.cond.spnt page_fault
 	;;
-	itc.i r19		// insert the TLB entry
+	ITC_I(p0, r19, r18)	// insert the TLB entry
 	mov pr=r31,-1
-	rfi
+	RFI
 END(alt_itlb_miss)
 
 	.org ia64_ivt+0x1000
@@ -368,11 +376,11 @@ END(alt_itlb_miss)
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(alt_dtlb_miss)
 	DBG_FAULT(4)
-	mov r16=cr.ifa		// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
 	movl r17=PAGE_KERNEL
-	mov r20=cr.isr
+	MOV_FROM_ISR(r20)
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-	mov r21=cr.ipsr
+	MOV_FROM_IPSR(p0, r21)
 	mov r31=pr
 	mov r24=PERCPU_ADDR
 	;;
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss)
 	;;
 	cmp.gt p8,p0=6,r22			// access to region 0-5
 	;;
-(p8)	thash r17=r16
+	THASH(p8, r17, r16, r25)
 	;;
-(p8)	mov cr.iha=r17
+	MOV_TO_IHA(p8, r17, r25)
 (p8)	mov r29=b0				// save b0
 (p8)	br.cond.dptk dtlb_fault
 #endif
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss)
 	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
 	;;
 (p10)	sub r19=r19,r26
-(p10)	mov cr.itir=r25
+	MOV_TO_ITIR(p10, r25, r24)
 	cmp.ne p8,p0=r0,r23
 (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
 (p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss)
 	dep r21=-1,r21,IA64_PSR_ED_BIT,1
 	;;
 	or r19=r19,r17		// insert PTE control bits into r19
-(p6)	mov cr.ipsr=r21
+	MOV_TO_IPSR(p6, r21, r24)
 	;;
-(p7)	itc.d r19		// insert the TLB entry
+	ITC_D(p7, r19, r18)	// insert the TLB entry
 	mov pr=r31,-1
-	rfi
+	RFI
 END(alt_dtlb_miss)
 
 	.org ia64_ivt+0x1400
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss)
 	 *
 	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
 	 */
-	rsm psr.dt				// switch to using physical data addressing
+	RSM_PSR_DT				// switch to using physical data addressing
 	mov r19=IA64_KR(PT_BASE)		// get the page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
-	mov r18=cr.itir
+	MOV_FROM_ITIR(r18)
 	;;
 	shr.u r17=r16,61			// get the region number into r17
 	extr.u r18=r18,2,6			// get the faulting page size
@@ -507,33 +515,6 @@ ENTRY(ikey_miss)
 	FAULT(6)
 END(ikey_miss)
 
-	//-----------------------------------------------------------------------------------
-	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-ENTRY(page_fault)
-	ssm psr.dt
-	;;
-	srlz.i
-	;;
-	SAVE_MIN_WITH_COVER
-	alloc r15=ar.pfs,0,0,3,0
-	mov out0=cr.ifa
-	mov out1=cr.isr
-	adds r3=8,r2				// set up second base pointer
-	;;
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i					// guarantee that interruption collectin is on
-	;;
-(p15)	ssm psr.i				// restore psr.i
-	movl r14=ia64_leave_kernel
-	;;
-	SAVE_REST
-	mov rp=r14
-	;;
-	adds out2=16,r12			// out2 = pointer to pt_regs
-	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
-END(page_fault)
-
 	.org ia64_ivt+0x1c00
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
@@ -556,10 +537,10 @@ ENTRY(dirty_bit)
 	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
 	 * up the physical address of the L3 PTE and then continue at label 1 below.
 	 */
-	mov r16=cr.ifa				// get the address that caused the fault
+	MOV_FROM_IFA(r16)			// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
 	mov r29=b0				// save b0 in case of nested fault
 	mov r31=pr				// save pr
 #ifdef CONFIG_SMP
@@ -576,7 +557,7 @@ ENTRY(dirty_bit)
576 ;; 557 ;;
577(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present 558(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
578 ;; 559 ;;
579(p6) itc.d r25 // install updated PTE 560 ITC_D(p6, r25, r18) // install updated PTE
580 ;; 561 ;;
581 /* 562 /*
582 * Tell the assembler's dependency-violation checker that the above "itc" instructions 563
@@ -602,7 +583,7 @@ ENTRY(dirty_bit)
602 itc.d r18 // install updated PTE 583 itc.d r18 // install updated PTE
603#endif 584#endif
604 mov pr=r31,-1 // restore pr 585 mov pr=r31,-1 // restore pr
605 rfi 586 RFI
606END(dirty_bit) 587END(dirty_bit)
607 588
608 .org ia64_ivt+0x2400 589 .org ia64_ivt+0x2400
@@ -611,22 +592,22 @@ END(dirty_bit)
611ENTRY(iaccess_bit) 592ENTRY(iaccess_bit)
612 DBG_FAULT(9) 593 DBG_FAULT(9)
613 // Like Entry 8, except for instruction access 594 // Like Entry 8, except for instruction access
614 mov r16=cr.ifa // get the address that caused the fault 595 MOV_FROM_IFA(r16) // get the address that caused the fault
615 movl r30=1f // load continuation point in case of nested fault 596 movl r30=1f // load continuation point in case of nested fault
616 mov r31=pr // save predicates 597 mov r31=pr // save predicates
617#ifdef CONFIG_ITANIUM 598#ifdef CONFIG_ITANIUM
618 /* 599 /*
619 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. 600 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
620 */ 601 */
621 mov r17=cr.ipsr 602 MOV_FROM_IPSR(p0, r17)
622 ;; 603 ;;
623 mov r18=cr.iip 604 MOV_FROM_IIP(r18)
624 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? 605 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
625 ;; 606 ;;
626(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa 607(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
627#endif /* CONFIG_ITANIUM */ 608#endif /* CONFIG_ITANIUM */
628 ;; 609 ;;
629 thash r17=r16 // compute virtual address of L3 PTE 610 THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
630 mov r29=b0 // save b0 in case of nested fault 611
631#ifdef CONFIG_SMP 612#ifdef CONFIG_SMP
632 mov r28=ar.ccv // save ar.ccv 613 mov r28=ar.ccv // save ar.ccv
@@ -642,7 +623,7 @@ ENTRY(iaccess_bit)
642 ;; 623 ;;
643(p6) cmp.eq p6,p7=r26,r18 // Only if page present 624(p6) cmp.eq p6,p7=r26,r18 // Only if page present
644 ;; 625 ;;
645(p6) itc.i r25 // install updated PTE 626 ITC_I(p6, r25, r26) // install updated PTE
646 ;; 627 ;;
647 /* 628 /*
648 * Tell the assembler's dependency-violation checker that the above "itc" instructions 629
@@ -668,7 +649,7 @@ ENTRY(iaccess_bit)
668 itc.i r18 // install updated PTE 649 itc.i r18 // install updated PTE
669#endif /* !CONFIG_SMP */ 650#endif /* !CONFIG_SMP */
670 mov pr=r31,-1 651 mov pr=r31,-1
671 rfi 652 RFI
672END(iaccess_bit) 653END(iaccess_bit)
673 654
674 .org ia64_ivt+0x2800 655 .org ia64_ivt+0x2800
@@ -677,10 +658,10 @@ END(iaccess_bit)
677ENTRY(daccess_bit) 658ENTRY(daccess_bit)
678 DBG_FAULT(10) 659 DBG_FAULT(10)
679 // Like Entry 8, except for data access 660 // Like Entry 8, except for data access
680 mov r16=cr.ifa // get the address that caused the fault 661 MOV_FROM_IFA(r16) // get the address that caused the fault
681 movl r30=1f // load continuation point in case of nested fault 662 movl r30=1f // load continuation point in case of nested fault
682 ;; 663 ;;
683 thash r17=r16 // compute virtual address of L3 PTE 664 THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
684 mov r31=pr 665 mov r31=pr
685 mov r29=b0 // save b0 in case of nested fault 666
686#ifdef CONFIG_SMP 667#ifdef CONFIG_SMP
@@ -697,7 +678,7 @@ ENTRY(daccess_bit)
697 ;; 678 ;;
698(p6) cmp.eq p6,p7=r26,r18 // Only if page is present 679(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
699 ;; 680 ;;
700(p6) itc.d r25 // install updated PTE 681 ITC_D(p6, r25, r26) // install updated PTE
701 /* 682 /*
702 * Tell the assembler's dependency-violation checker that the above "itc" instructions 683
703 * cannot possibly affect the following loads: 684 * cannot possibly affect the following loads:
@@ -721,7 +702,7 @@ ENTRY(daccess_bit)
721#endif 702#endif
722 mov b0=r29 // restore b0 703 mov b0=r29 // restore b0
723 mov pr=r31,-1 704 mov pr=r31,-1
724 rfi 705 RFI
725END(daccess_bit) 706END(daccess_bit)
726 707
727 .org ia64_ivt+0x2c00 708 .org ia64_ivt+0x2c00
@@ -745,10 +726,10 @@ ENTRY(break_fault)
745 */ 726 */
746 DBG_FAULT(11) 727 DBG_FAULT(11)
747 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) 728 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
748 mov r29=cr.ipsr // M2 (12 cyc) 729 MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
749 mov r31=pr // I0 (2 cyc) 730 mov r31=pr // I0 (2 cyc)
750 731
751 mov r17=cr.iim // M2 (2 cyc) 732 MOV_FROM_IIM(r17) // M2 (2 cyc)
752 mov.m r27=ar.rsc // M2 (12 cyc) 733 mov.m r27=ar.rsc // M2 (12 cyc)
753 mov r18=__IA64_BREAK_SYSCALL // A 734 mov r18=__IA64_BREAK_SYSCALL // A
754 735
@@ -767,7 +748,7 @@ ENTRY(break_fault)
767 nop.m 0 748 nop.m 0
768 movl r30=sys_call_table // X 749 movl r30=sys_call_table // X
769 750
770 mov r28=cr.iip // M2 (2 cyc) 751 MOV_FROM_IIP(r28) // M2 (2 cyc)
771 cmp.eq p0,p7=r18,r17 // I0 is this a system call? 752 cmp.eq p0,p7=r18,r17 // I0 is this a system call?
772(p7) br.cond.spnt non_syscall // B no -> 753(p7) br.cond.spnt non_syscall // B no ->
773 // 754 //
@@ -864,18 +845,17 @@ ENTRY(break_fault)
864#endif 845#endif
865 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 846 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
866 nop 0 847 nop 0
867 bsw.1 // B (6 cyc) regs are saved, switch to bank 1 848 BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
868 ;; 849 ;;
869 850
870 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection 851 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
852 // M0 ensure interruption collection is on
871 movl r3=ia64_ret_from_syscall // X 853 movl r3=ia64_ret_from_syscall // X
872 ;; 854 ;;
873
874 srlz.i // M0 ensure interruption collection is on
875 mov rp=r3 // I0 set the real return addr 855 mov rp=r3 // I0 set the real return addr
876(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT 856(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
877 857
878(p15) ssm psr.i // M2 restore psr.i 858 SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
879(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr) 859
880 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic 860 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
881 // NOT REACHED 861 // NOT REACHED
@@ -895,27 +875,8 @@ END(break_fault)
895///////////////////////////////////////////////////////////////////////////////////////// 875/////////////////////////////////////////////////////////////////////////////////////////
896// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 876// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
897ENTRY(interrupt) 877ENTRY(interrupt)
898 DBG_FAULT(12) 878 /* interrupt handler has become too big to fit in this area. */
899 mov r31=pr // prepare to save predicates 879 br.sptk.many __interrupt
900 ;;
901 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
902 ssm psr.ic | PSR_DEFAULT_BITS
903 ;;
904 adds r3=8,r2 // set up second base pointer for SAVE_REST
905 srlz.i // ensure everybody knows psr.ic is back on
906 ;;
907 SAVE_REST
908 ;;
909 MCA_RECOVER_RANGE(interrupt)
910 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
911 mov out0=cr.ivr // pass cr.ivr as first arg
912 add out1=16,sp // pass pointer to pt_regs as second arg
913 ;;
914 srlz.d // make sure we see the effect of cr.ivr
915 movl r14=ia64_leave_kernel
916 ;;
917 mov rp=r14
918 br.call.sptk.many b6=ia64_handle_irq
919END(interrupt) 880END(interrupt)
920 881
921 .org ia64_ivt+0x3400 882 .org ia64_ivt+0x3400
@@ -978,6 +939,7 @@ END(interrupt)
978 * - ar.fpsr: set to kernel settings 939 * - ar.fpsr: set to kernel settings
979 * - b6: preserved (same as on entry) 940 * - b6: preserved (same as on entry)
980 */ 941 */
942#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
981GLOBAL_ENTRY(ia64_syscall_setup) 943GLOBAL_ENTRY(ia64_syscall_setup)
982#if PT(B6) != 0 944#if PT(B6) != 0
983# error This code assumes that b6 is the first field in pt_regs. 945# error This code assumes that b6 is the first field in pt_regs.
@@ -1069,6 +1031,7 @@ GLOBAL_ENTRY(ia64_syscall_setup)
1069(p10) mov r8=-EINVAL 1031(p10) mov r8=-EINVAL
1070 br.ret.sptk.many b7 1032 br.ret.sptk.many b7
1071END(ia64_syscall_setup) 1033END(ia64_syscall_setup)
1034#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
1072 1035
1073 .org ia64_ivt+0x3c00 1036 .org ia64_ivt+0x3c00
1074///////////////////////////////////////////////////////////////////////////////////////// 1037/////////////////////////////////////////////////////////////////////////////////////////
@@ -1082,7 +1045,7 @@ END(ia64_syscall_setup)
1082 DBG_FAULT(16) 1045 DBG_FAULT(16)
1083 FAULT(16) 1046 FAULT(16)
1084 1047
1085#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1048#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
1086 /* 1049 /*
1087 * There is no particular reason for this code to be here, other than 1050 * There is no particular reason for this code to be here, other than
1088 * that there happens to be space here that would go unused otherwise. 1051 * that there happens to be space here that would go unused otherwise.
@@ -1092,7 +1055,7 @@ END(ia64_syscall_setup)
1092 * account_sys_enter is called from SAVE_MIN* macros if accounting is 1055 * account_sys_enter is called from SAVE_MIN* macros if accounting is
1093 * enabled and if the macro is entered from user mode. 1056 * enabled and if the macro is entered from user mode.
1094 */ 1057 */
1095ENTRY(account_sys_enter) 1058GLOBAL_ENTRY(account_sys_enter)
1096 // mov.m r20=ar.itc is called in advance, and r13 is current 1059 // mov.m r20=ar.itc is called in advance, and r13 is current
1097 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 1060 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
1098 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 1061 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
@@ -1123,110 +1086,18 @@ END(account_sys_enter)
1123 DBG_FAULT(17) 1086 DBG_FAULT(17)
1124 FAULT(17) 1087 FAULT(17)
1125 1088
1126ENTRY(non_syscall)
1127 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1128 ;;
1129 SAVE_MIN_WITH_COVER
1130
1131 // There is no particular reason for this code to be here, other than that
1132 // there happens to be space here that would go unused otherwise. If this
1133 // fault ever gets "unreserved", simply move the following code to a more
1134 // suitable spot...
1135
1136 alloc r14=ar.pfs,0,0,2,0
1137 mov out0=cr.iim
1138 add out1=16,sp
1139 adds r3=8,r2 // set up second base pointer for SAVE_REST
1140
1141 ssm psr.ic | PSR_DEFAULT_BITS
1142 ;;
1143 srlz.i // guarantee that interruption collection is on
1144 ;;
1145(p15) ssm psr.i // restore psr.i
1146 movl r15=ia64_leave_kernel
1147 ;;
1148 SAVE_REST
1149 mov rp=r15
1150 ;;
1151 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1152END(non_syscall)
1153
1154 .org ia64_ivt+0x4800 1089 .org ia64_ivt+0x4800
1155///////////////////////////////////////////////////////////////////////////////////////// 1090/////////////////////////////////////////////////////////////////////////////////////////
1156// 0x4800 Entry 18 (size 64 bundles) Reserved 1091// 0x4800 Entry 18 (size 64 bundles) Reserved
1157 DBG_FAULT(18) 1092 DBG_FAULT(18)
1158 FAULT(18) 1093 FAULT(18)
1159 1094
1160 /*
1161 * There is no particular reason for this code to be here, other than that
1162 * there happens to be space here that would go unused otherwise. If this
1163 * fault ever gets "unreserved", simply move the following code to a more
1164 * suitable spot...
1165 */
1166
1167ENTRY(dispatch_unaligned_handler)
1168 SAVE_MIN_WITH_COVER
1169 ;;
1170 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1171 mov out0=cr.ifa
1172 adds out1=16,sp
1173
1174 ssm psr.ic | PSR_DEFAULT_BITS
1175 ;;
1176 srlz.i // guarantee that interruption collection is on
1177 ;;
1178(p15) ssm psr.i // restore psr.i
1179 adds r3=8,r2 // set up second base pointer
1180 ;;
1181 SAVE_REST
1182 movl r14=ia64_leave_kernel
1183 ;;
1184 mov rp=r14
1185 br.sptk.many ia64_prepare_handle_unaligned
1186END(dispatch_unaligned_handler)
1187
1188 .org ia64_ivt+0x4c00 1095 .org ia64_ivt+0x4c00
1189///////////////////////////////////////////////////////////////////////////////////////// 1096/////////////////////////////////////////////////////////////////////////////////////////
1190// 0x4c00 Entry 19 (size 64 bundles) Reserved 1097// 0x4c00 Entry 19 (size 64 bundles) Reserved
1191 DBG_FAULT(19) 1098 DBG_FAULT(19)
1192 FAULT(19) 1099 FAULT(19)
1193 1100
1194 /*
1195 * There is no particular reason for this code to be here, other than that
1196 * there happens to be space here that would go unused otherwise. If this
1197 * fault ever gets "unreserved", simply move the following code to a more
1198 * suitable spot...
1199 */
1200
1201ENTRY(dispatch_to_fault_handler)
1202 /*
1203 * Input:
1204 * psr.ic: off
1205 * r19: fault vector number (e.g., 24 for General Exception)
1206 * r31: contains saved predicates (pr)
1207 */
1208 SAVE_MIN_WITH_COVER_R19
1209 alloc r14=ar.pfs,0,0,5,0
1210 mov out0=r15
1211 mov out1=cr.isr
1212 mov out2=cr.ifa
1213 mov out3=cr.iim
1214 mov out4=cr.itir
1215 ;;
1216 ssm psr.ic | PSR_DEFAULT_BITS
1217 ;;
1218 srlz.i // guarantee that interruption collection is on
1219 ;;
1220(p15) ssm psr.i // restore psr.i
1221 adds r3=8,r2 // set up second base pointer for SAVE_REST
1222 ;;
1223 SAVE_REST
1224 movl r14=ia64_leave_kernel
1225 ;;
1226 mov rp=r14
1227 br.call.sptk.many b6=ia64_fault
1228END(dispatch_to_fault_handler)
1229
1230// 1101//
1231// --- End of long entries, Beginning of short entries 1102// --- End of long entries, Beginning of short entries
1232// 1103//
@@ -1236,8 +1107,8 @@ END(dispatch_to_fault_handler)
1236// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) 1107// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1237ENTRY(page_not_present) 1108ENTRY(page_not_present)
1238 DBG_FAULT(20) 1109 DBG_FAULT(20)
1239 mov r16=cr.ifa 1110 MOV_FROM_IFA(r16)
1240 rsm psr.dt 1111 RSM_PSR_DT
1241 /* 1112 /*
1242 * The Linux page fault handler doesn't expect non-present pages to be in 1113 * The Linux page fault handler doesn't expect non-present pages to be in
1243 * the TLB. Flush the existing entry now, so we meet that expectation. 1114 * the TLB. Flush the existing entry now, so we meet that expectation.
@@ -1256,8 +1127,8 @@ END(page_not_present)
1256// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) 1127// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1257ENTRY(key_permission) 1128ENTRY(key_permission)
1258 DBG_FAULT(21) 1129 DBG_FAULT(21)
1259 mov r16=cr.ifa 1130 MOV_FROM_IFA(r16)
1260 rsm psr.dt 1131 RSM_PSR_DT
1261 mov r31=pr 1132 mov r31=pr
1262 ;; 1133 ;;
1263 srlz.d 1134 srlz.d
@@ -1269,8 +1140,8 @@ END(key_permission)
1269// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 1140// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1270ENTRY(iaccess_rights) 1141ENTRY(iaccess_rights)
1271 DBG_FAULT(22) 1142 DBG_FAULT(22)
1272 mov r16=cr.ifa 1143 MOV_FROM_IFA(r16)
1273 rsm psr.dt 1144 RSM_PSR_DT
1274 mov r31=pr 1145 mov r31=pr
1275 ;; 1146 ;;
1276 srlz.d 1147 srlz.d
@@ -1282,8 +1153,8 @@ END(iaccess_rights)
1282// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 1153// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1283ENTRY(daccess_rights) 1154ENTRY(daccess_rights)
1284 DBG_FAULT(23) 1155 DBG_FAULT(23)
1285 mov r16=cr.ifa 1156 MOV_FROM_IFA(r16)
1286 rsm psr.dt 1157 RSM_PSR_DT
1287 mov r31=pr 1158 mov r31=pr
1288 ;; 1159 ;;
1289 srlz.d 1160 srlz.d
@@ -1295,7 +1166,7 @@ END(daccess_rights)
1295// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 1166// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1296ENTRY(general_exception) 1167ENTRY(general_exception)
1297 DBG_FAULT(24) 1168 DBG_FAULT(24)
1298 mov r16=cr.isr 1169 MOV_FROM_ISR(r16)
1299 mov r31=pr 1170 mov r31=pr
1300 ;; 1171 ;;
1301 cmp4.eq p6,p0=0,r16 1172 cmp4.eq p6,p0=0,r16
@@ -1324,8 +1195,8 @@ END(disabled_fp_reg)
1324ENTRY(nat_consumption) 1195ENTRY(nat_consumption)
1325 DBG_FAULT(26) 1196 DBG_FAULT(26)
1326 1197
1327 mov r16=cr.ipsr 1198 MOV_FROM_IPSR(p0, r16)
1328 mov r17=cr.isr 1199 MOV_FROM_ISR(r17)
1329 mov r31=pr // save PR 1200 mov r31=pr // save PR
1330 ;; 1201 ;;
1331 and r18=0xf,r17 // r18 = cr.isr.code{3:0} 1202
@@ -1335,10 +1206,10 @@ ENTRY(nat_consumption)
1335 dep r16=-1,r16,IA64_PSR_ED_BIT,1 1206 dep r16=-1,r16,IA64_PSR_ED_BIT,1
1336(p6) br.cond.spnt 1f // branch if (cr.ipsr.na == 0 || cr.isr.code{3:0} != LFETCH) 1207
1337 ;; 1208 ;;
1338 mov cr.ipsr=r16 // set cr.ipsr.na 1209 MOV_TO_IPSR(p0, r16, r18)
1339 mov pr=r31,-1 1210 mov pr=r31,-1
1340 ;; 1211 ;;
1341 rfi 1212 RFI
1342 1213
13431: mov pr=r31,-1 12141: mov pr=r31,-1
1344 ;; 1215 ;;
@@ -1360,26 +1231,26 @@ ENTRY(speculation_vector)
1360 * 1231 *
1361 * cr.imm contains zero_ext(imm21) 1232 * cr.imm contains zero_ext(imm21)
1362 */ 1233 */
1363 mov r18=cr.iim 1234 MOV_FROM_IIM(r18)
1364 ;; 1235 ;;
1365 mov r17=cr.iip 1236 MOV_FROM_IIP(r17)
1366 shl r18=r18,43 // put sign bit in position (43=64-21) 1237 shl r18=r18,43 // put sign bit in position (43=64-21)
1367 ;; 1238 ;;
1368 1239
1369 mov r16=cr.ipsr 1240 MOV_FROM_IPSR(p0, r16)
1370 shr r18=r18,39 // sign extend (39=43-4) 1241 shr r18=r18,39 // sign extend (39=43-4)
1371 ;; 1242 ;;
1372 1243
1373 add r17=r17,r18 // now add the offset 1244 add r17=r17,r18 // now add the offset
1374 ;; 1245 ;;
1375 mov cr.iip=r17 1246 MOV_TO_IIP(r17, r19)
1376 dep r16=0,r16,41,2 // clear EI 1247 dep r16=0,r16,41,2 // clear EI
1377 ;; 1248 ;;
1378 1249
1379 mov cr.ipsr=r16 1250 MOV_TO_IPSR(p0, r16, r19)
1380 ;; 1251 ;;
1381 1252
1382 rfi // and go back 1253 RFI
1383END(speculation_vector) 1254END(speculation_vector)
1384 1255
1385 .org ia64_ivt+0x5800 1256 .org ia64_ivt+0x5800
@@ -1517,11 +1388,11 @@ ENTRY(ia32_intercept)
1517 DBG_FAULT(46) 1388 DBG_FAULT(46)
1518#ifdef CONFIG_IA32_SUPPORT 1389#ifdef CONFIG_IA32_SUPPORT
1519 mov r31=pr 1390 mov r31=pr
1520 mov r16=cr.isr 1391 MOV_FROM_ISR(r16)
1521 ;; 1392 ;;
1522 extr.u r17=r16,16,8 // get ISR.code 1393 extr.u r17=r16,16,8 // get ISR.code
1523 mov r18=ar.eflag 1394 mov r18=ar.eflag
1524 mov r19=cr.iim // old eflag value 1395 MOV_FROM_IIM(r19) // old eflag value
1525 ;; 1396 ;;
1526 cmp.ne p6,p0=2,r17 1397 cmp.ne p6,p0=2,r17
1527(p6) br.cond.spnt 1f // not a system flag fault 1398(p6) br.cond.spnt 1f // not a system flag fault
@@ -1533,7 +1404,7 @@ ENTRY(ia32_intercept)
1533(p6) br.cond.spnt 1f // eflags.ac bit didn't change 1404(p6) br.cond.spnt 1f // eflags.ac bit didn't change
1534 ;; 1405 ;;
1535 mov pr=r31,-1 // restore predicate registers 1406 mov pr=r31,-1 // restore predicate registers
1536 rfi 1407 RFI
1537 1408
15381: 14091:
1539#endif // CONFIG_IA32_SUPPORT 1410#endif // CONFIG_IA32_SUPPORT
@@ -1673,6 +1544,137 @@ END(ia32_interrupt)
1673 DBG_FAULT(67) 1544 DBG_FAULT(67)
1674 FAULT(67) 1545 FAULT(67)
1675 1546
1547 //-----------------------------------------------------------------------------------
1548 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
1549ENTRY(page_fault)
1550 SSM_PSR_DT_AND_SRLZ_I
1551 ;;
1552 SAVE_MIN_WITH_COVER
1553 alloc r15=ar.pfs,0,0,3,0
1554 MOV_FROM_IFA(out0)
1555 MOV_FROM_ISR(out1)
1556 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
1557 adds r3=8,r2 // set up second base pointer
1558 SSM_PSR_I(p15, p15, r14) // restore psr.i
1559 movl r14=ia64_leave_kernel
1560 ;;
1561 SAVE_REST
1562 mov rp=r14
1563 ;;
1564 adds out2=16,r12 // out2 = pointer to pt_regs
1565 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
1566END(page_fault)
1567
1568ENTRY(non_syscall)
1569 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1570 ;;
1571 SAVE_MIN_WITH_COVER
1572
1573 // There is no particular reason for this code to be here, other than that
1574 // there happens to be space here that would go unused otherwise. If this
1575 // fault ever gets "unreserved", simply move the following code to a more
1576 // suitable spot...
1577
1578 alloc r14=ar.pfs,0,0,2,0
1579 MOV_FROM_IIM(out0)
1580 add out1=16,sp
1581 adds r3=8,r2 // set up second base pointer for SAVE_REST
1582
1583 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
1584 // guarantee that interruption collection is on
1585 SSM_PSR_I(p15, p15, r15) // restore psr.i
1586 movl r15=ia64_leave_kernel
1587 ;;
1588 SAVE_REST
1589 mov rp=r15
1590 ;;
1591 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1592END(non_syscall)
1593
1594ENTRY(__interrupt)
1595 DBG_FAULT(12)
1596 mov r31=pr // prepare to save predicates
1597 ;;
1598 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1599 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
1600 // ensure everybody knows psr.ic is back on
1601 adds r3=8,r2 // set up second base pointer for SAVE_REST
1602 ;;
1603 SAVE_REST
1604 ;;
1605 MCA_RECOVER_RANGE(interrupt)
1606 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1607 MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
1608 add out1=16,sp // pass pointer to pt_regs as second arg
1609 ;;
1610 srlz.d // make sure we see the effect of cr.ivr
1611 movl r14=ia64_leave_kernel
1612 ;;
1613 mov rp=r14
1614 br.call.sptk.many b6=ia64_handle_irq
1615END(__interrupt)
1616
1617 /*
1618 * There is no particular reason for this code to be here, other than that
1619 * there happens to be space here that would go unused otherwise. If this
1620 * fault ever gets "unreserved", simply move the following code to a more
1621 * suitable spot...
1622 */
1623
1624ENTRY(dispatch_unaligned_handler)
1625 SAVE_MIN_WITH_COVER
1626 ;;
1627 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1628 MOV_FROM_IFA(out0)
1629 adds out1=16,sp
1630
1631 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1632 // guarantee that interruption collection is on
1633 SSM_PSR_I(p15, p15, r3) // restore psr.i
1634 adds r3=8,r2 // set up second base pointer
1635 ;;
1636 SAVE_REST
1637 movl r14=ia64_leave_kernel
1638 ;;
1639 mov rp=r14
1640 br.sptk.many ia64_prepare_handle_unaligned
1641END(dispatch_unaligned_handler)
1642
1643 /*
1644 * There is no particular reason for this code to be here, other than that
1645 * there happens to be space here that would go unused otherwise. If this
1646 * fault ever gets "unreserved", simply move the following code to a more
1647 * suitable spot...
1648 */
1649
1650ENTRY(dispatch_to_fault_handler)
1651 /*
1652 * Input:
1653 * psr.ic: off
1654 * r19: fault vector number (e.g., 24 for General Exception)
1655 * r31: contains saved predicates (pr)
1656 */
1657 SAVE_MIN_WITH_COVER_R19
1658 alloc r14=ar.pfs,0,0,5,0
1659 MOV_FROM_ISR(out1)
1660 MOV_FROM_IFA(out2)
1661 MOV_FROM_IIM(out3)
1662 MOV_FROM_ITIR(out4)
1663 ;;
1664 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
1665 // guarantee that interruption collection is on
1666 mov out0=r15
1667 ;;
1668 SSM_PSR_I(p15, p15, r3) // restore psr.i
1669 adds r3=8,r2 // set up second base pointer for SAVE_REST
1670 ;;
1671 SAVE_REST
1672 movl r14=ia64_leave_kernel
1673 ;;
1674 mov rp=r14
1675 br.call.sptk.many b6=ia64_fault
1676END(dispatch_to_fault_handler)
1677
1676 /* 1678 /*
1677 * Squatting in this space ... 1679 * Squatting in this space ...
1678 * 1680 *
@@ -1686,11 +1688,10 @@ ENTRY(dispatch_illegal_op_fault)
1686 .prologue 1688 .prologue
1687 .body 1689 .body
1688 SAVE_MIN_WITH_COVER 1690 SAVE_MIN_WITH_COVER
1689 ssm psr.ic | PSR_DEFAULT_BITS 1691 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1690 ;; 1692 // guarantee that interruption collection is on
1691 srlz.i // guarantee that interruption collection is on
1692 ;; 1693 ;;
1693(p15) ssm psr.i // restore psr.i 1694 SSM_PSR_I(p15, p15, r3) // restore psr.i
1694 adds r3=8,r2 // set up second base pointer for SAVE_REST 1695 adds r3=8,r2 // set up second base pointer for SAVE_REST
1695 ;; 1696 ;;
1696 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group 1697 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
@@ -1729,12 +1730,11 @@ END(dispatch_illegal_op_fault)
1729ENTRY(dispatch_to_ia32_handler) 1730ENTRY(dispatch_to_ia32_handler)
1730 SAVE_MIN 1731 SAVE_MIN
1731 ;; 1732 ;;
1732 mov r14=cr.isr 1733 MOV_FROM_ISR(r14)
1733 ssm psr.ic | PSR_DEFAULT_BITS 1734 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1734 ;; 1735 // guarantee that interruption collection is on
1735 srlz.i // guarantee that interruption collection is on
1736 ;; 1736 ;;
1737(p15) ssm psr.i 1737 SSM_PSR_I(p15, p15, r3)
1738 adds r3=8,r2 // Base pointer for SAVE_REST 1738 adds r3=8,r2 // Base pointer for SAVE_REST
1739 ;; 1739 ;;
1740 SAVE_REST 1740 SAVE_REST
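
Each privileged-instruction site in the ivt.S hunks above is now spelled through a macro (MOV_FROM_IFA, MOV_TO_IPSR, RFI, ...). Assembled for bare metal, each macro collapses back to the single original instruction; a hypervisor build can instead expand it to a hypercall or a branch to a patched stub. The native forms look roughly like the sketch below (reconstructed for illustration; the authoritative definitions are in include/asm-ia64/native/inst.h from this series, and the extra predicate/clobber arguments exist so that paravirtualized expansions have a scratch register available):

/* Illustrative native expansions; see include/asm-ia64/native/inst.h
 * for the exact definitions. */
#define MOV_FROM_IFA(reg)		\
	mov reg = cr.ifa

#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	mov cr.ipsr = reg

#define RFI				\
	rfi
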
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 74b6d670aaef..292e214a3b84 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -2,6 +2,7 @@
2#include <asm/cache.h> 2#include <asm/cache.h>
3 3
4#include "entry.h" 4#include "entry.h"
5#include "paravirt_inst.h"
5 6
6#ifdef CONFIG_VIRT_CPU_ACCOUNTING 7#ifdef CONFIG_VIRT_CPU_ACCOUNTING
7/* read ar.itc in advance, and use it before leaving bank 0 */ 8/* read ar.itc in advance, and use it before leaving bank 0 */
@@ -43,16 +44,16 @@
43 * Note that psr.ic is NOT turned on by this macro. This is so that 44 * Note that psr.ic is NOT turned on by this macro. This is so that
44 * we can pass interruption state as arguments to a handler. 45 * we can pass interruption state as arguments to a handler.
45 */ 46 */
46#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \ 47#define IA64_NATIVE_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
47 mov r16=IA64_KR(CURRENT); /* M */ \ 48 mov r16=IA64_KR(CURRENT); /* M */ \
48 mov r27=ar.rsc; /* M */ \ 49 mov r27=ar.rsc; /* M */ \
49 mov r20=r1; /* A */ \ 50 mov r20=r1; /* A */ \
50 mov r25=ar.unat; /* M */ \ 51 mov r25=ar.unat; /* M */ \
51 mov r29=cr.ipsr; /* M */ \ 52 MOV_FROM_IPSR(p0,r29); /* M */ \
52 mov r26=ar.pfs; /* I */ \ 53 mov r26=ar.pfs; /* I */ \
53 mov r28=cr.iip; /* M */ \ 54 MOV_FROM_IIP(r28); /* M */ \
54 mov r21=ar.fpsr; /* M */ \ 55 mov r21=ar.fpsr; /* M */ \
55 COVER; /* B;; (or nothing) */ \ 56 __COVER; /* B;; (or nothing) */ \
56 ;; \ 57 ;; \
57 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ 58 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
58 ;; \ 59 ;; \
@@ -244,6 +245,6 @@
2441: \ 2451: \
245 .pred.rel "mutex", pKStk, pUStk 246 .pred.rel "mutex", pKStk, pUStk
246 247
247#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND) 248#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(COVER, mov r30=cr.ifs, , RSE_WORKAROUND)
248#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND) 249#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(COVER, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
249#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , ) 250#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e83e2ea3b3e0..29aad349e0c4 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -321,7 +321,8 @@ module_alloc (unsigned long size)
321void 321void
322module_free (struct module *mod, void *module_region) 322module_free (struct module *mod, void *module_region)
323{ 323{
324 if (mod->arch.init_unw_table && module_region == mod->module_init) { 324 if (mod && mod->arch.init_unw_table &&
325 module_region == mod->module_init) {
325 unw_remove_unwind_table(mod->arch.init_unw_table); 326 unw_remove_unwind_table(mod->arch.init_unw_table);
326 mod->arch.init_unw_table = NULL; 327 mod->arch.init_unw_table = NULL;
327 } 328 }
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
new file mode 100644
index 000000000000..1ae049181e83
--- /dev/null
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -0,0 +1,24 @@
1/*
2 * calculate
3 * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...)
4 * depending on config.
5 * This must be calculated before processing asm-offsets.c.
6 */
7
8#define ASM_OFFSETS_C 1
9
10#include <linux/kbuild.h>
11#include <linux/threads.h>
12#include <asm-ia64/native/irq.h>
13
14void foo(void)
15{
16 union paravirt_nr_irqs_max {
17 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
18#ifdef CONFIG_XEN
19 char xen_nr_irqs[XEN_NR_IRQS];
20#endif
21 };
22
23 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
24}
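
The union above is a compile-time max(): the size of a union is the size of its largest member, so sizeof(union paravirt_nr_irqs_max) equals the largest configured per-platform IRQ count, and kbuild's DEFINE() emits that value during asm-offsets processing. A minimal standalone sketch of the same idiom (NR_A and NR_B are made-up counts):

/* sizeof(union) as a compile-time max(); NR_A/NR_B are hypothetical. */
#include <stdio.h>

#define NR_A 256
#define NR_B 1024

union nr_irqs_max {
	char a[NR_A];
	char b[NR_B];
};

/* The union is as large as its largest member, so this is
 * max(NR_A, NR_B), evaluated entirely at compile time. */
enum { NR_IRQS_DEMO = sizeof(union nr_irqs_max) };

int main(void)
{
	printf("NR_IRQS_DEMO = %d\n", NR_IRQS_DEMO);	/* prints 1024 */
	return 0;
}
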
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
new file mode 100644
index 000000000000..afaf5b9a2cf0
--- /dev/null
+++ b/arch/ia64/kernel/paravirt.c
@@ -0,0 +1,369 @@
1/******************************************************************************
2 * arch/ia64/kernel/paravirt.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/init.h>
25
26#include <linux/compiler.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/types.h>
31
32#include <asm/iosapic.h>
33#include <asm/paravirt.h>
34
35/***************************************************************************
36 * general info
37 */
38struct pv_info pv_info = {
39 .kernel_rpl = 0,
40 .paravirt_enabled = 0,
41 .name = "bare hardware"
42};
43
44/***************************************************************************
45 * pv_init_ops
46 * initialization hooks.
47 */
48
49struct pv_init_ops pv_init_ops;
50
51/***************************************************************************
52 * pv_cpu_ops
53 * intrinsics hooks.
54 */
55
56/* ia64_native_xxx are macros, so we have to wrap them in real functions */
57
58#define DEFINE_VOID_FUNC1(name) \
59 static void \
60 ia64_native_ ## name ## _func(unsigned long arg) \
61 { \
62 ia64_native_ ## name(arg); \
63 } \
64
65#define DEFINE_VOID_FUNC2(name) \
66 static void \
67 ia64_native_ ## name ## _func(unsigned long arg0, \
68 unsigned long arg1) \
69 { \
70 ia64_native_ ## name(arg0, arg1); \
71 } \
72
73#define DEFINE_FUNC0(name) \
74 static unsigned long \
75 ia64_native_ ## name ## _func(void) \
76 { \
77 return ia64_native_ ## name(); \
78 }
79
80#define DEFINE_FUNC1(name, type) \
81 static unsigned long \
82 ia64_native_ ## name ## _func(type arg) \
83 { \
84 return ia64_native_ ## name(arg); \
85 } \
86
87DEFINE_VOID_FUNC1(fc);
88DEFINE_VOID_FUNC1(intrin_local_irq_restore);
89
90DEFINE_VOID_FUNC2(ptcga);
91DEFINE_VOID_FUNC2(set_rr);
92
93DEFINE_FUNC0(get_psr_i);
94
95DEFINE_FUNC1(thash, unsigned long);
96DEFINE_FUNC1(get_cpuid, int);
97DEFINE_FUNC1(get_pmd, int);
98DEFINE_FUNC1(get_rr, unsigned long);
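
/*
 * The ia64_native_* intrinsics are macros, so they have no address that
 * could be stored in a function-pointer table; the DEFINE_* helpers
 * above manufacture trivial named wrappers around them.  Written out by
 * hand, DEFINE_VOID_FUNC1(fc) expands to (illustration only, not part
 * of the patch):
 *
 *	static void
 *	ia64_native_fc_func(unsigned long arg)
 *	{
 *		ia64_native_fc(arg);
 *	}
 */
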
99
100static void
101ia64_native_ssm_i_func(void)
102{
103 ia64_native_ssm(IA64_PSR_I);
104}
105
106static void
107ia64_native_rsm_i_func(void)
108{
109 ia64_native_rsm(IA64_PSR_I);
110}
111
112static void
113ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
114 unsigned long val2, unsigned long val3,
115 unsigned long val4)
116{
117 ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
118}
119
120#define CASE_GET_REG(id) \
121 case _IA64_REG_ ## id: \
122 res = ia64_native_getreg(_IA64_REG_ ## id); \
123 break;
124#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
125#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
126
127unsigned long
128ia64_native_getreg_func(int regnum)
129{
130 unsigned long res = -1;
131 switch (regnum) {
132 CASE_GET_REG(GP);
133 CASE_GET_REG(IP);
134 CASE_GET_REG(PSR);
135 CASE_GET_REG(TP);
136 CASE_GET_REG(SP);
137
138 CASE_GET_AR(KR0);
139 CASE_GET_AR(KR1);
140 CASE_GET_AR(KR2);
141 CASE_GET_AR(KR3);
142 CASE_GET_AR(KR4);
143 CASE_GET_AR(KR5);
144 CASE_GET_AR(KR6);
145 CASE_GET_AR(KR7);
146 CASE_GET_AR(RSC);
147 CASE_GET_AR(BSP);
148 CASE_GET_AR(BSPSTORE);
149 CASE_GET_AR(RNAT);
150 CASE_GET_AR(FCR);
151 CASE_GET_AR(EFLAG);
152 CASE_GET_AR(CSD);
153 CASE_GET_AR(SSD);
154 CASE_GET_AR(CFLAG);
155 CASE_GET_AR(FSR);
156 CASE_GET_AR(FIR);
157 CASE_GET_AR(FDR);
158 CASE_GET_AR(CCV);
159 CASE_GET_AR(UNAT);
160 CASE_GET_AR(FPSR);
161 CASE_GET_AR(ITC);
162 CASE_GET_AR(PFS);
163 CASE_GET_AR(LC);
164 CASE_GET_AR(EC);
165
166 CASE_GET_CR(DCR);
167 CASE_GET_CR(ITM);
168 CASE_GET_CR(IVA);
169 CASE_GET_CR(PTA);
170 CASE_GET_CR(IPSR);
171 CASE_GET_CR(ISR);
172 CASE_GET_CR(IIP);
173 CASE_GET_CR(IFA);
174 CASE_GET_CR(ITIR);
175 CASE_GET_CR(IIPA);
176 CASE_GET_CR(IFS);
177 CASE_GET_CR(IIM);
178 CASE_GET_CR(IHA);
179 CASE_GET_CR(LID);
180 CASE_GET_CR(IVR);
181 CASE_GET_CR(TPR);
182 CASE_GET_CR(EOI);
183 CASE_GET_CR(IRR0);
184 CASE_GET_CR(IRR1);
185 CASE_GET_CR(IRR2);
186 CASE_GET_CR(IRR3);
187 CASE_GET_CR(ITV);
188 CASE_GET_CR(PMV);
189 CASE_GET_CR(CMCV);
190 CASE_GET_CR(LRR0);
191 CASE_GET_CR(LRR1);
192
193 default:
194 printk(KERN_CRIT "wrong_getreg %d\n", regnum);
195 break;
196 }
197 return res;
198}
199
200#define CASE_SET_REG(id) \
201 case _IA64_REG_ ## id: \
202 ia64_native_setreg(_IA64_REG_ ## id, val); \
203 break;
204#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
205#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
206
207void
208ia64_native_setreg_func(int regnum, unsigned long val)
209{
210 switch (regnum) {
211 case _IA64_REG_PSR_L:
212 ia64_native_setreg(_IA64_REG_PSR_L, val);
213 ia64_dv_serialize_data();
214 break;
215 CASE_SET_REG(SP);
216 CASE_SET_REG(GP);
217
218 CASE_SET_AR(KR0);
219 CASE_SET_AR(KR1);
220 CASE_SET_AR(KR2);
221 CASE_SET_AR(KR3);
222 CASE_SET_AR(KR4);
223 CASE_SET_AR(KR5);
224 CASE_SET_AR(KR6);
225 CASE_SET_AR(KR7);
226 CASE_SET_AR(RSC);
227 CASE_SET_AR(BSP);
228 CASE_SET_AR(BSPSTORE);
229 CASE_SET_AR(RNAT);
230 CASE_SET_AR(FCR);
231 CASE_SET_AR(EFLAG);
232 CASE_SET_AR(CSD);
233 CASE_SET_AR(SSD);
234 CASE_SET_AR(CFLAG);
235 CASE_SET_AR(FSR);
236 CASE_SET_AR(FIR);
237 CASE_SET_AR(FDR);
238 CASE_SET_AR(CCV);
239 CASE_SET_AR(UNAT);
240 CASE_SET_AR(FPSR);
241 CASE_SET_AR(ITC);
242 CASE_SET_AR(PFS);
243 CASE_SET_AR(LC);
244 CASE_SET_AR(EC);
245
246 CASE_SET_CR(DCR);
247 CASE_SET_CR(ITM);
248 CASE_SET_CR(IVA);
249 CASE_SET_CR(PTA);
250 CASE_SET_CR(IPSR);
251 CASE_SET_CR(ISR);
252 CASE_SET_CR(IIP);
253 CASE_SET_CR(IFA);
254 CASE_SET_CR(ITIR);
255 CASE_SET_CR(IIPA);
256 CASE_SET_CR(IFS);
257 CASE_SET_CR(IIM);
258 CASE_SET_CR(IHA);
259 CASE_SET_CR(LID);
260 CASE_SET_CR(IVR);
261 CASE_SET_CR(TPR);
262 CASE_SET_CR(EOI);
263 CASE_SET_CR(IRR0);
264 CASE_SET_CR(IRR1);
265 CASE_SET_CR(IRR2);
266 CASE_SET_CR(IRR3);
267 CASE_SET_CR(ITV);
268 CASE_SET_CR(PMV);
269 CASE_SET_CR(CMCV);
270 CASE_SET_CR(LRR0);
271 CASE_SET_CR(LRR1);
272 default:
273 printk(KERN_CRIT "wrong setreg %d\n", regnum);
274 break;
275 }
276}
277
278struct pv_cpu_ops pv_cpu_ops = {
279 .fc = ia64_native_fc_func,
280 .thash = ia64_native_thash_func,
281 .get_cpuid = ia64_native_get_cpuid_func,
282 .get_pmd = ia64_native_get_pmd_func,
283 .ptcga = ia64_native_ptcga_func,
284 .get_rr = ia64_native_get_rr_func,
285 .set_rr = ia64_native_set_rr_func,
286 .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
287 .ssm_i = ia64_native_ssm_i_func,
288 .getreg = ia64_native_getreg_func,
289 .setreg = ia64_native_setreg_func,
290 .rsm_i = ia64_native_rsm_i_func,
291 .get_psr_i = ia64_native_get_psr_i_func,
292 .intrin_local_irq_restore
293 = ia64_native_intrin_local_irq_restore_func,
294};
295EXPORT_SYMBOL(pv_cpu_ops);
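
/*
 * Callers are not meant to reach the ia64_native_*_func routines
 * directly; they go through pv_cpu_ops, whose pointers a hypervisor can
 * overwrite at boot.  A sketch of the call-site pattern (the real
 * inline wrappers belong to include/asm-ia64/paravirt_privop.h in this
 * series; the wrapper name here is an illustrative assumption, not
 * necessarily the identifier used there):
 *
 *	static inline unsigned long
 *	paravirt_thash(unsigned long addr)
 *	{
 *		return pv_cpu_ops.thash(addr);	// bare metal:
 *						// ia64_native_thash_func
 *	}
 */
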
296
297/******************************************************************************
298 * replacement of hand-written assembly code.
299 */
300
301void
302paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
303{
304 extern unsigned long paravirt_switch_to_targ;
305 extern unsigned long paravirt_leave_syscall_targ;
306 extern unsigned long paravirt_work_processed_syscall_targ;
307 extern unsigned long paravirt_leave_kernel_targ;
308
309 paravirt_switch_to_targ = cpu_asm_switch->switch_to;
310 paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
311 paravirt_work_processed_syscall_targ =
312 cpu_asm_switch->work_processed_syscall;
313 paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
314}
315
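/*
 * paravirt_cpu_asm_init() is the registration point for the branch
 * targets consumed by paravirtentry.S further down in this patch.  A
 * hypothetical hypervisor port would call it once during early boot,
 * roughly as follows (every xyz_* identifier is invented for
 * illustration):
 *
 *	static const struct pv_cpu_asm_switch xyz_cpu_asm_switch = {
 *		.switch_to		= (unsigned long)xyz_switch_to,
 *		.leave_syscall		= (unsigned long)xyz_leave_syscall,
 *		.work_processed_syscall	=
 *				(unsigned long)xyz_work_processed_syscall,
 *		.leave_kernel		= (unsigned long)xyz_leave_kernel,
 *	};
 *
 *	paravirt_cpu_asm_init(&xyz_cpu_asm_switch);
 */
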
316/***************************************************************************
317 * pv_iosapic_ops
318 * iosapic read/write hooks.
319 */
320
321static unsigned int
322ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
323{
324 return __ia64_native_iosapic_read(iosapic, reg);
325}
326
327static void
328ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
329{
330 __ia64_native_iosapic_write(iosapic, reg, val);
331}
332
333struct pv_iosapic_ops pv_iosapic_ops = {
334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
335 .get_irq_chip = ia64_native_iosapic_get_irq_chip,
336
337 .__read = ia64_native_iosapic_read,
338 .__write = ia64_native_iosapic_write,
339};
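
/*
 * As with pv_cpu_ops, iosapic.c does not call the native accessors
 * directly; register accesses funnel through pv_iosapic_ops.  The
 * call-site shape is roughly the following (a sketch; the real wrappers
 * live in include/asm-ia64/iosapic.h and may differ in detail):
 *
 *	static inline unsigned int
 *	iosapic_read(char __iomem *iosapic, unsigned int reg)
 *	{
 *		return pv_iosapic_ops.__read(iosapic, reg);
 *	}
 *
 *	static inline void
 *	iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 *	{
 *		pv_iosapic_ops.__write(iosapic, reg, val);
 *	}
 */
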
340
341/***************************************************************************
342 * pv_irq_ops
343 * irq operations
344 */
345
346struct pv_irq_ops pv_irq_ops = {
347 .register_ipi = ia64_native_register_ipi,
348
349 .assign_irq_vector = ia64_native_assign_irq_vector,
350 .free_irq_vector = ia64_native_free_irq_vector,
351 .register_percpu_irq = ia64_native_register_percpu_irq,
352
353 .resend_irq = ia64_native_resend_irq,
354};
355
356/***************************************************************************
357 * pv_time_ops
358 * time operations
359 */
360
361static int
362ia64_native_do_steal_accounting(unsigned long *new_itm)
363{
364 return 0;
365}
366
367struct pv_time_ops pv_time_ops = {
368 .do_steal_accounting = ia64_native_do_steal_accounting,
369};
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
new file mode 100644
index 000000000000..5cad6fb2ed19
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirt_inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
24#include <asm/xen/inst.h>
25#include <asm/xen/minstate.h>
26#else
27#include <asm/native/inst.h>
28#endif
29
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
new file mode 100644
index 000000000000..2f42fcb9776a
--- /dev/null
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -0,0 +1,60 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirtentry.S
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/asmmacro.h>
24#include <asm/asm-offsets.h>
25#include "entry.h"
26
27#define DATA8(sym, init_value) \
28 .pushsection .data.read_mostly ; \
29 .align 8 ; \
30 .global sym ; \
31 sym: ; \
32 data8 init_value ; \
33 .popsection
34
35#define BRANCH(targ, reg, breg) \
36 movl reg=targ ; \
37 ;; \
38 ld8 reg=[reg] ; \
39 ;; \
40 mov breg=reg ; \
41 br.cond.sptk.many breg
42
43#define BRANCH_PROC(sym, reg, breg) \
44 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
45 GLOBAL_ENTRY(paravirt_ ## sym) ; \
46 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
47 END(paravirt_ ## sym)
48
49#define BRANCH_PROC_UNWINFO(sym, reg, breg) \
50 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
51 GLOBAL_ENTRY(paravirt_ ## sym) ; \
52 PT_REGS_UNWIND_INFO(0) ; \
53 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
54 END(paravirt_ ## sym)
55
56
57BRANCH_PROC(switch_to, r22, b7)
58BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
59BRANCH_PROC(work_processed_syscall, r2, b7)
60BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 632cda8f2e76..e5c2de9b29a5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -51,6 +51,7 @@
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/meminit.h> 52#include <asm/meminit.h>
53#include <asm/page.h> 53#include <asm/page.h>
54#include <asm/paravirt.h>
54#include <asm/patch.h> 55#include <asm/patch.h>
55#include <asm/pgtable.h> 56#include <asm/pgtable.h>
56#include <asm/processor.h> 57#include <asm/processor.h>
@@ -341,6 +342,8 @@ reserve_memory (void)
341 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 342 rsvd_region[n].end = (unsigned long) ia64_imva(_end);
342 n++; 343 n++;
343 344
345 n += paravirt_reserve_memory(&rsvd_region[n]);
346
344#ifdef CONFIG_BLK_DEV_INITRD 347#ifdef CONFIG_BLK_DEV_INITRD
345 if (ia64_boot_param->initrd_start) { 348 if (ia64_boot_param->initrd_start) {
346 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 349 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -519,6 +522,8 @@ setup_arch (char **cmdline_p)
519{ 522{
520 unw_init(); 523 unw_init();
521 524
525 paravirt_arch_setup_early();
526
522 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 527 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
523 528
524 *cmdline_p = __va(ia64_boot_param->command_line); 529 *cmdline_p = __va(ia64_boot_param->command_line);
@@ -583,6 +588,9 @@ setup_arch (char **cmdline_p)
583 acpi_boot_init(); 588 acpi_boot_init();
584#endif 589#endif
585 590
591 paravirt_banner();
592 paravirt_arch_setup_console(cmdline_p);
593
586#ifdef CONFIG_VT 594#ifdef CONFIG_VT
587 if (!conswitchp) { 595 if (!conswitchp) {
588# if defined(CONFIG_DUMMY_CONSOLE) 596# if defined(CONFIG_DUMMY_CONSOLE)
@@ -602,6 +610,8 @@ setup_arch (char **cmdline_p)
602#endif 610#endif
603 611
604 /* enable IA-64 Machine Check Abort Handling unless disabled */ 612 /* enable IA-64 Machine Check Abort Handling unless disabled */
613 if (paravirt_arch_setup_nomca())
614 nomca = 1;
605 if (!nomca) 615 if (!nomca)
606 ia64_mca_init(); 616 ia64_mca_init();
607 617
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9d1d429c6c59..03f1a9908afc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,6 +50,7 @@
50#include <asm/machvec.h> 50#include <asm/machvec.h>
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/page.h> 52#include <asm/page.h>
53#include <asm/paravirt.h>
53#include <asm/pgalloc.h> 54#include <asm/pgalloc.h>
54#include <asm/pgtable.h> 55#include <asm/pgtable.h>
55#include <asm/processor.h> 56#include <asm/processor.h>
@@ -642,6 +643,7 @@ void __devinit smp_prepare_boot_cpu(void)
642 cpu_set(smp_processor_id(), cpu_online_map); 643 cpu_set(smp_processor_id(), cpu_online_map);
643 cpu_set(smp_processor_id(), cpu_callin_map); 644 cpu_set(smp_processor_id(), cpu_callin_map);
644 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 645 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
646 paravirt_post_smp_prepare_boot_cpu();
645} 647}
646 648
647#ifdef CONFIG_HOTPLUG_CPU 649#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aad1b7b1fff9..65c10a42c88f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -24,6 +24,7 @@
24#include <asm/machvec.h> 24#include <asm/machvec.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/hw_irq.h> 26#include <asm/hw_irq.h>
27#include <asm/paravirt.h>
27#include <asm/ptrace.h> 28#include <asm/ptrace.h>
28#include <asm/sal.h> 29#include <asm/sal.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
@@ -48,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
48 49
49#endif 50#endif
50 51
52#ifdef CONFIG_PARAVIRT
53static void
54paravirt_clocksource_resume(void)
55{
56 if (pv_time_ops.clocksource_resume)
57 pv_time_ops.clocksource_resume();
58}
59#endif
60
51static struct clocksource clocksource_itc = { 61static struct clocksource clocksource_itc = {
52 .name = "itc", 62 .name = "itc",
53 .rating = 350, 63 .rating = 350,
@@ -56,6 +66,9 @@ static struct clocksource clocksource_itc = {
56 .mult = 0, /*to be calculated*/ 66 .mult = 0, /*to be calculated*/
57 .shift = 16, 67 .shift = 16,
58 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 68 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
69#ifdef CONFIG_PARAVIRT
70 .resume = paravirt_clocksource_resume,
71#endif
59}; 72};
60static struct clocksource *itc_clocksource; 73static struct clocksource *itc_clocksource;
61 74
@@ -157,6 +170,9 @@ timer_interrupt (int irq, void *dev_id)
157 170
158 profile_tick(CPU_PROFILING); 171 profile_tick(CPU_PROFILING);
159 172
173 if (paravirt_do_steal_accounting(&new_itm))
174 goto skip_process_time_accounting;
175
160 while (1) { 176 while (1) {
161 update_process_times(user_mode(get_irq_regs())); 177 update_process_times(user_mode(get_irq_regs()));
162 178
@@ -186,6 +202,8 @@ timer_interrupt (int irq, void *dev_id)
186 local_irq_disable(); 202 local_irq_disable();
187 } 203 }
188 204
205skip_process_time_accounting:
206
189 do { 207 do {
190 /* 208 /*
191 * If we're too close to the next clock tick for 209 * If we're too close to the next clock tick for
@@ -335,6 +353,11 @@ ia64_init_itm (void)
335 */ 353 */
336 clocksource_itc.rating = 50; 354 clocksource_itc.rating = 50;
337 355
356 paravirt_init_missing_ticks_accounting(smp_processor_id());
357
358 /* avoid soft-lockup message when a cpu is unplugged and plugged again. */
359 touch_softlockup_watchdog();
360
338 /* Setup the CPU local timer tick */ 361 /* Setup the CPU local timer tick */
339 ia64_cpu_local_tick(); 362 ia64_cpu_local_tick();
340 363
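
The two time hooks wired up above are the whole contract: clocksource_resume runs when the itc clocksource resumes, and do_steal_accounting runs once per timer tick, returning nonzero to make timer_interrupt() jump to skip_process_time_accounting. A hypothetical guest-side provider might look like this (every xyz_* name is invented; how stolen time is actually read back from the hypervisor is left unspecified):

/* Hypothetical pv_time_ops provider; all xyz_* names are illustrative. */
static int
xyz_do_steal_accounting(unsigned long *new_itm)
{
	/* Account ticks stolen by the hypervisor, advance *new_itm past
	 * them, and return nonzero so per-tick process accounting is
	 * skipped. */
	return xyz_account_stolen_ticks(new_itm);
}

static void
xyz_clocksource_resume(void)
{
	/* Resynchronize the paravirtual time source after resume. */
}

static void __init
xyz_time_init(void)
{
	pv_time_ops.do_steal_accounting = xyz_do_steal_accounting;
	pv_time_ops.clocksource_resume  = xyz_clocksource_resume;
}
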
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5929ab10a289..5a77206c2492 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -4,7 +4,6 @@
4#include <asm/system.h> 4#include <asm/system.h>
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6 6
7#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
8#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
9 8
10#define IVT_TEXT \ 9#define IVT_TEXT \
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 85a87d2ac0c0..092f019e033a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -8,7 +8,7 @@ source "lib/Kconfig.debug"
8config STRICT_DEVMEM 8config STRICT_DEVMEM
9 bool "Filter access to /dev/mem" 9 bool "Filter access to /dev/mem"
10 help 10 help
11 If this option is left on, you allow userspace (root) access to all 11 If this option is disabled, you allow userspace (root) access to all
12 of memory, including kernel and userspace memory. Accidental 12 of memory, including kernel and userspace memory. Accidental
13 access to this is obviously disastrous, but specific access can 13 access to this is obviously disastrous, but specific access can
14 be used by people debugging the kernel. Note that with PAT support 14 be used by people debugging the kernel. Note that with PAT support
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f489d7a9be92..fa88a1d71290 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1021,7 +1021,7 @@ void __init mp_config_acpi_legacy_irqs(void)
1021 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; 1021 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
1022#endif 1022#endif
1023 set_bit(MP_ISA_BUS, mp_bus_not_pci); 1023 set_bit(MP_ISA_BUS, mp_bus_not_pci);
1024 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); 1024 pr_debug("Bus #%d is ISA\n", MP_ISA_BUS);
1025 1025
1026#ifdef CONFIG_X86_ES7000 1026#ifdef CONFIG_X86_ES7000
1027 /* 1027 /*
@@ -1127,8 +1127,8 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1127 return gsi; 1127 return gsi;
1128 } 1128 }
1129 if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) { 1129 if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
1130 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 1130 pr_debug("Pin %d-%d already programmed\n",
1131 mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 1131 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1132#ifdef CONFIG_X86_32 1132#ifdef CONFIG_X86_32
1133 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); 1133 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
1134#else 1134#else
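
The Dprintk-to-pr_debug conversions in the x86 hunks rely on pr_debug()'s standard behavior: it already supplies the KERN_DEBUG level and compiles away entirely unless DEBUG is defined for the translation unit, which is why an explicit KERN_DEBUG argument inside a pr_debug() call is redundant. Paraphrased from the <linux/kernel.h> of this era:

/* Paraphrase of the classic pr_debug() definition; see <linux/kernel.h>. */
#ifdef DEBUG
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	do { } while (0)
#endif
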
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/arch/x86/kernel/cpu/cpufreq/powernow-k7.h
index f8a63b3664e3..35fb4eaf6e1c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * $Id: powernow-k7.h,v 1.2 2003/02/10 18:26:01 davej Exp $
3 * (C) 2003 Dave Jones. 2 * (C) 2003 Dave Jones.
4 * 3 *
5 * Licensed under the terms of the GNU GPL License version 2. 4 * Licensed under the terms of the GNU GPL License version 2.
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 6d4bdc02388a..de7439f82b92 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -250,7 +250,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr,
250 250
251 do_div(count, nmi_hz); 251 do_div(count, nmi_hz);
252 if(descr) 252 if(descr)
253 Dprintk("setting %s to -0x%08Lx\n", descr, count); 253 pr_debug("setting %s to -0x%08Lx\n", descr, count);
254 wrmsrl(perfctr_msr, 0 - count); 254 wrmsrl(perfctr_msr, 0 - count);
255} 255}
256 256
@@ -261,7 +261,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr,
261 261
262 do_div(count, nmi_hz); 262 do_div(count, nmi_hz);
263 if(descr) 263 if(descr)
264 Dprintk("setting %s to -0x%08Lx\n", descr, count); 264 pr_debug("setting %s to -0x%08Lx\n", descr, count);
265 wrmsr(perfctr_msr, (u32)(-count), 0); 265 wrmsr(perfctr_msr, (u32)(-count), 0);
266} 266}
267 267
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index cac68430d31f..f7745f94c006 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -227,8 +227,8 @@ static void __init setup_node_to_cpumask_map(void)
227 /* allocate the map */ 227 /* allocate the map */
228 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); 228 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
229 229
230 Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n", 230 pr_debug("Node to cpumask map at %p for %d nodes\n",
231 map, nr_node_ids); 231 map, nr_node_ids);
232 232
233 /* node_to_cpumask() will now work */ 233 /* node_to_cpumask() will now work */
234 node_to_cpumask_map = map; 234 node_to_cpumask_map = map;
@@ -248,7 +248,7 @@ void __cpuinit numa_set_node(int cpu, int node)
248 per_cpu(x86_cpu_to_node_map, cpu) = node; 248 per_cpu(x86_cpu_to_node_map, cpu) = node;
249 249
250 else 250 else
251 Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu); 251 pr_debug("Setting node for non-present cpu %d\n", cpu);
252} 252}
253 253
254void __cpuinit numa_clear_node(int cpu) 254void __cpuinit numa_clear_node(int cpu)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 27640196eb7c..4b53a647bc0a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -216,7 +216,7 @@ static void __cpuinit smp_callin(void)
216 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, 216 panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
217 phys_id, cpuid); 217 phys_id, cpuid);
218 } 218 }
219 Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); 219 pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
220 220
221 /* 221 /*
222 * STARTUP IPIs are fragile beasts as they might sometimes 222 * STARTUP IPIs are fragile beasts as they might sometimes
@@ -251,7 +251,7 @@ static void __cpuinit smp_callin(void)
251 * boards) 251 * boards)
252 */ 252 */
253 253
254 Dprintk("CALLIN, before setup_local_APIC().\n"); 254 pr_debug("CALLIN, before setup_local_APIC().\n");
255 smp_callin_clear_local_apic(); 255 smp_callin_clear_local_apic();
256 setup_local_APIC(); 256 setup_local_APIC();
257 end_local_APIC_setup(); 257 end_local_APIC_setup();
@@ -266,7 +266,7 @@ static void __cpuinit smp_callin(void)
266 local_irq_enable(); 266 local_irq_enable();
267 calibrate_delay(); 267 calibrate_delay();
268 local_irq_disable(); 268 local_irq_disable();
269 Dprintk("Stack at about %p\n", &cpuid); 269 pr_debug("Stack at about %p\n", &cpuid);
270 270
271 /* 271 /*
272 * Save our processor parameters 272 * Save our processor parameters
@@ -513,7 +513,7 @@ static void impress_friends(void)
513 /* 513 /*
514 * Allow the user to impress friends. 514 * Allow the user to impress friends.
515 */ 515 */
516 Dprintk("Before bogomips.\n"); 516 pr_debug("Before bogomips.\n");
517 for_each_possible_cpu(cpu) 517 for_each_possible_cpu(cpu)
518 if (cpu_isset(cpu, cpu_callout_map)) 518 if (cpu_isset(cpu, cpu_callout_map))
519 bogosum += cpu_data(cpu).loops_per_jiffy; 519 bogosum += cpu_data(cpu).loops_per_jiffy;
@@ -523,7 +523,7 @@ static void impress_friends(void)
523 bogosum/(500000/HZ), 523 bogosum/(500000/HZ),
524 (bogosum/(5000/HZ))%100); 524 (bogosum/(5000/HZ))%100);
525 525
526 Dprintk("Before bogocount - setting activated=1.\n"); 526 pr_debug("Before bogocount - setting activated=1.\n");
527} 527}
528 528
529static inline void __inquire_remote_apic(int apicid) 529static inline void __inquire_remote_apic(int apicid)
@@ -585,7 +585,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
585 /* Kick the second */ 585 /* Kick the second */
586 apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); 586 apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
587 587
588 Dprintk("Waiting for send to finish...\n"); 588 pr_debug("Waiting for send to finish...\n");
589 send_status = safe_apic_wait_icr_idle(); 589 send_status = safe_apic_wait_icr_idle();
590 590
591 /* 591 /*
@@ -596,7 +596,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
596 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 596 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
597 apic_write(APIC_ESR, 0); 597 apic_write(APIC_ESR, 0);
598 accept_status = (apic_read(APIC_ESR) & 0xEF); 598 accept_status = (apic_read(APIC_ESR) & 0xEF);
599 Dprintk("NMI sent.\n"); 599 pr_debug("NMI sent.\n");
600 600
601 if (send_status) 601 if (send_status)
602 printk(KERN_ERR "APIC never delivered???\n"); 602 printk(KERN_ERR "APIC never delivered???\n");
@@ -631,7 +631,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
631 apic_read(APIC_ESR); 631 apic_read(APIC_ESR);
632 } 632 }
633 633
634 Dprintk("Asserting INIT.\n"); 634 pr_debug("Asserting INIT.\n");
635 635
636 /* 636 /*
637 * Turn INIT on target chip 637 * Turn INIT on target chip
@@ -644,12 +644,12 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
644 apic_write(APIC_ICR, 644 apic_write(APIC_ICR,
645 APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); 645 APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
646 646
647 Dprintk("Waiting for send to finish...\n"); 647 pr_debug("Waiting for send to finish...\n");
648 send_status = safe_apic_wait_icr_idle(); 648 send_status = safe_apic_wait_icr_idle();
649 649
650 mdelay(10); 650 mdelay(10);
651 651
652 Dprintk("Deasserting INIT.\n"); 652 pr_debug("Deasserting INIT.\n");
653 653
654 /* Target chip */ 654 /* Target chip */
655 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); 655 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
@@ -657,7 +657,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
657 /* Send IPI */ 657 /* Send IPI */
658 apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); 658 apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
659 659
660 Dprintk("Waiting for send to finish...\n"); 660 pr_debug("Waiting for send to finish...\n");
661 send_status = safe_apic_wait_icr_idle(); 661 send_status = safe_apic_wait_icr_idle();
662 662
663 mb(); 663 mb();
@@ -684,14 +684,14 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
684 /* 684 /*
685 * Run STARTUP IPI loop. 685 * Run STARTUP IPI loop.
686 */ 686 */
687 Dprintk("#startup loops: %d.\n", num_starts); 687 pr_debug("#startup loops: %d.\n", num_starts);
688 688
689 for (j = 1; j <= num_starts; j++) { 689 for (j = 1; j <= num_starts; j++) {
690 Dprintk("Sending STARTUP #%d.\n", j); 690 pr_debug("Sending STARTUP #%d.\n", j);
691 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 691 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
692 apic_write(APIC_ESR, 0); 692 apic_write(APIC_ESR, 0);
693 apic_read(APIC_ESR); 693 apic_read(APIC_ESR);
694 Dprintk("After apic_write.\n"); 694 pr_debug("After apic_write.\n");
695 695
696 /* 696 /*
697 * STARTUP IPI 697 * STARTUP IPI
@@ -709,9 +709,9 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
709 */ 709 */
710 udelay(300); 710 udelay(300);
711 711
712 Dprintk("Startup point 1.\n"); 712 pr_debug("Startup point 1.\n");
713 713
714 Dprintk("Waiting for send to finish...\n"); 714 pr_debug("Waiting for send to finish...\n");
715 send_status = safe_apic_wait_icr_idle(); 715 send_status = safe_apic_wait_icr_idle();
716 716
717 /* 717 /*
@@ -724,7 +724,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
724 if (send_status || accept_status) 724 if (send_status || accept_status)
725 break; 725 break;
726 } 726 }
727 Dprintk("After Startup.\n"); 727 pr_debug("After Startup.\n");
728 728
729 if (send_status) 729 if (send_status)
730 printk(KERN_ERR "APIC never delivered???\n"); 730 printk(KERN_ERR "APIC never delivered???\n");
@@ -875,7 +875,7 @@ do_rest:
875 875
876 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { 876 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
877 877
878 Dprintk("Setting warm reset code and vector.\n"); 878 pr_debug("Setting warm reset code and vector.\n");
879 879
880 store_NMI_vector(&nmi_high, &nmi_low); 880 store_NMI_vector(&nmi_high, &nmi_low);
881 881
@@ -896,9 +896,9 @@ do_rest:
896 /* 896 /*
897 * allow APs to start initializing. 897 * allow APs to start initializing.
898 */ 898 */
899 Dprintk("Before Callout %d.\n", cpu); 899 pr_debug("Before Callout %d.\n", cpu);
900 cpu_set(cpu, cpu_callout_map); 900 cpu_set(cpu, cpu_callout_map);
901 Dprintk("After Callout %d.\n", cpu); 901 pr_debug("After Callout %d.\n", cpu);
902 902
903 /* 903 /*
904 * Wait 5s total for a response 904 * Wait 5s total for a response
@@ -911,10 +911,10 @@ do_rest:
911 911
912 if (cpu_isset(cpu, cpu_callin_map)) { 912 if (cpu_isset(cpu, cpu_callin_map)) {
913 /* number CPUs logically, starting from 1 (BSP is 0) */ 913 /* number CPUs logically, starting from 1 (BSP is 0) */
914 Dprintk("OK.\n"); 914 pr_debug("OK.\n");
915 printk(KERN_INFO "CPU%d: ", cpu); 915 printk(KERN_INFO "CPU%d: ", cpu);
916 print_cpu_info(&cpu_data(cpu)); 916 print_cpu_info(&cpu_data(cpu));
917 Dprintk("CPU has booted.\n"); 917 pr_debug("CPU has booted.\n");
918 } else { 918 } else {
919 boot_error = 1; 919 boot_error = 1;
920 if (*((volatile unsigned char *)trampoline_base) 920 if (*((volatile unsigned char *)trampoline_base)
@@ -959,7 +959,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
959 959
960 WARN_ON(irqs_disabled()); 960 WARN_ON(irqs_disabled());
961 961
962 Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); 962 pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
963 963
964 if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || 964 if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
965 !physid_isset(apicid, phys_cpu_present_map)) { 965 !physid_isset(apicid, phys_cpu_present_map)) {
@@ -971,7 +971,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
971 * Already booted CPU? 971 * Already booted CPU?
972 */ 972 */
973 if (cpu_isset(cpu, cpu_callin_map)) { 973 if (cpu_isset(cpu, cpu_callin_map)) {
974 Dprintk("do_boot_cpu %d Already started\n", cpu); 974 pr_debug("do_boot_cpu %d Already started\n", cpu);
975 return -ENOSYS; 975 return -ENOSYS;
976 } 976 }
977 977
@@ -998,7 +998,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
998 err = do_boot_cpu(apicid, cpu); 998 err = do_boot_cpu(apicid, cpu);
999#endif 999#endif
1000 if (err) { 1000 if (err) {
1001 Dprintk("do_boot_cpu failed %d\n", err); 1001 pr_debug("do_boot_cpu failed %d\n", err);
1002 return -EIO; 1002 return -EIO;
1003 } 1003 }
1004 1004
@@ -1202,7 +1202,7 @@ void __init native_smp_prepare_boot_cpu(void)
1202 1202
1203void __init native_smp_cpus_done(unsigned int max_cpus) 1203void __init native_smp_cpus_done(unsigned int max_cpus)
1204{ 1204{
1205 Dprintk("Boot done.\n"); 1205 pr_debug("Boot done.\n");
1206 1206
1207 impress_friends(); 1207 impress_friends();
1208 smp_checks(); 1208 smp_checks();
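
[Note: the Dprintk() calls replaced above were an ad-hoc, per-file debugging macro; pr_debug() is the generic replacement and compiles away entirely unless DEBUG is defined. A rough sketch of the definition this relies on (approximate; the real fallback in <linux/kernel.h> also preserves printf-style format checking):

	#ifdef DEBUG
	#define pr_debug(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) do { } while (0)	/* no code emitted */
	#endif
]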
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index b432d5781773..9782f42dd319 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -20,10 +20,6 @@
20#include <asm/acpi.h> 20#include <asm/acpi.h>
21#include <asm/k8.h> 21#include <asm/k8.h>
22 22
23#ifndef Dprintk
24#define Dprintk(x...)
25#endif
26
27struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; 23struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
28EXPORT_SYMBOL(node_data); 24EXPORT_SYMBOL(node_data);
29 25
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index 858dbe3399f9..86631ccbc25a 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -7,15 +7,13 @@
7/* Direct PCI access. This is used for PCI accesses in early boot before 7/* Direct PCI access. This is used for PCI accesses in early boot before
8 the PCI subsystem works. */ 8 the PCI subsystem works. */
9 9
10#define PDprintk(x...)
11
12u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) 10u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
13{ 11{
14 u32 v; 12 u32 v;
15 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 13 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
16 v = inl(0xcfc); 14 v = inl(0xcfc);
17 if (v != 0xffffffff) 15 if (v != 0xffffffff)
18 PDprintk("%x reading 4 from %x: %x\n", slot, offset, v); 16 pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
19 return v; 17 return v;
20} 18}
21 19
@@ -24,7 +22,7 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
24 u8 v; 22 u8 v;
25 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 23 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
26 v = inb(0xcfc + (offset&3)); 24 v = inb(0xcfc + (offset&3));
27 PDprintk("%x reading 1 from %x: %x\n", slot, offset, v); 25 pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
28 return v; 26 return v;
29} 27}
30 28
@@ -33,28 +31,28 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
33 u16 v; 31 u16 v;
34 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 32 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
35 v = inw(0xcfc + (offset&2)); 33 v = inw(0xcfc + (offset&2));
36 PDprintk("%x reading 2 from %x: %x\n", slot, offset, v); 34 pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
37 return v; 35 return v;
38} 36}
39 37
40void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, 38void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
41 u32 val) 39 u32 val)
42{ 40{
43 PDprintk("%x writing to %x: %x\n", slot, offset, val); 41 pr_debug("%x writing to %x: %x\n", slot, offset, val);
44 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 42 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
45 outl(val, 0xcfc); 43 outl(val, 0xcfc);
46} 44}
47 45
48void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val) 46void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
49{ 47{
50 PDprintk("%x writing to %x: %x\n", slot, offset, val); 48 pr_debug("%x writing to %x: %x\n", slot, offset, val);
51 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 49 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
52 outb(val, 0xcfc + (offset&3)); 50 outb(val, 0xcfc + (offset&3));
53} 51}
54 52
55void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val) 53void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
56{ 54{
57 PDprintk("%x writing to %x: %x\n", slot, offset, val); 55 pr_debug("%x writing to %x: %x\n", slot, offset, val);
58 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 56 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
59 outw(val, 0xcfc + (offset&2)); 57 outw(val, 0xcfc + (offset&2));
60} 58}
@@ -71,7 +69,7 @@ void early_dump_pci_device(u8 bus, u8 slot, u8 func)
71 int j; 69 int j;
72 u32 val; 70 u32 val;
73 71
74 printk("PCI: %02x:%02x:%02x", bus, slot, func); 72 printk(KERN_INFO "PCI: %02x:%02x:%02x", bus, slot, func);
75 73
76 for (i = 0; i < 256; i += 4) { 74 for (i = 0; i < 256; i += 4) {
77 if (!(i & 0x0f)) 75 if (!(i & 0x0f))
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 192961fd7173..918711aa56f3 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -32,6 +32,7 @@
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/time.h> 33#include <linux/time.h>
34#include <linux/math64.h> 34#include <linux/math64.h>
35#include <linux/smp_lock.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/sn/addrs.h> 38#include <asm/sn/addrs.h>
@@ -57,8 +58,8 @@ extern unsigned long sn_rtc_cycles_per_second;
57 58
58#define rtc_time() (*RTC_COUNTER_ADDR) 59#define rtc_time() (*RTC_COUNTER_ADDR)
59 60
60static int mmtimer_ioctl(struct inode *inode, struct file *file, 61static long mmtimer_ioctl(struct file *file, unsigned int cmd,
61 unsigned int cmd, unsigned long arg); 62 unsigned long arg);
62static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma); 63static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
63 64
64/* 65/*
@@ -67,9 +68,9 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
67static unsigned long mmtimer_femtoperiod = 0; 68static unsigned long mmtimer_femtoperiod = 0;
68 69
69static const struct file_operations mmtimer_fops = { 70static const struct file_operations mmtimer_fops = {
70 .owner = THIS_MODULE, 71 .owner = THIS_MODULE,
71 .mmap = mmtimer_mmap, 72 .mmap = mmtimer_mmap,
72 .ioctl = mmtimer_ioctl, 73 .unlocked_ioctl = mmtimer_ioctl,
73}; 74};
74 75
75/* 76/*
@@ -339,7 +340,6 @@ restart:
339 340
340/** 341/**
341 * mmtimer_ioctl - ioctl interface for /dev/mmtimer 342 * mmtimer_ioctl - ioctl interface for /dev/mmtimer
342 * @inode: inode of the device
343 * @file: file structure for the device 343 * @file: file structure for the device
344 * @cmd: command to execute 344 * @cmd: command to execute
345 * @arg: optional argument to command 345 * @arg: optional argument to command
@@ -365,11 +365,13 @@ restart:
365 * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it 365 * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
366 * in the address specified by @arg. 366 * in the address specified by @arg.
367 */ 367 */
368static int mmtimer_ioctl(struct inode *inode, struct file *file, 368static long mmtimer_ioctl(struct file *file, unsigned int cmd,
369 unsigned int cmd, unsigned long arg) 369 unsigned long arg)
370{ 370{
371 int ret = 0; 371 int ret = 0;
372 372
373 lock_kernel();
374
373 switch (cmd) { 375 switch (cmd) {
374 case MMTIMER_GETOFFSET: /* offset of the counter */ 376 case MMTIMER_GETOFFSET: /* offset of the counter */
375 /* 377 /*
@@ -384,15 +386,14 @@ static int mmtimer_ioctl(struct inode *inode, struct file *file,
384 case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ 386 case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
385 if(copy_to_user((unsigned long __user *)arg, 387 if(copy_to_user((unsigned long __user *)arg,
386 &mmtimer_femtoperiod, sizeof(unsigned long))) 388 &mmtimer_femtoperiod, sizeof(unsigned long)))
387 return -EFAULT; 389 ret = -EFAULT;
388 break; 390 break;
389 391
390 case MMTIMER_GETFREQ: /* frequency in Hz */ 392 case MMTIMER_GETFREQ: /* frequency in Hz */
391 if(copy_to_user((unsigned long __user *)arg, 393 if(copy_to_user((unsigned long __user *)arg,
392 &sn_rtc_cycles_per_second, 394 &sn_rtc_cycles_per_second,
393 sizeof(unsigned long))) 395 sizeof(unsigned long)))
394 return -EFAULT; 396 ret = -EFAULT;
395 ret = 0;
396 break; 397 break;
397 398
398 case MMTIMER_GETBITS: /* number of bits in the clock */ 399 case MMTIMER_GETBITS: /* number of bits in the clock */
@@ -406,13 +407,13 @@ static int mmtimer_ioctl(struct inode *inode, struct file *file,
406 case MMTIMER_GETCOUNTER: 407 case MMTIMER_GETCOUNTER:
407 if(copy_to_user((unsigned long __user *)arg, 408 if(copy_to_user((unsigned long __user *)arg,
408 RTC_COUNTER_ADDR, sizeof(unsigned long))) 409 RTC_COUNTER_ADDR, sizeof(unsigned long)))
409 return -EFAULT; 410 ret = -EFAULT;
410 break; 411 break;
411 default: 412 default:
412 ret = -ENOSYS; 413 ret = -ENOTTY;
413 break; 414 break;
414 } 415 }
415 416 unlock_kernel();
416 return ret; 417 return ret;
417} 418}
418 419
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1d41496ed2f8..ee1df0d45e81 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,10 +38,10 @@
38 * also protects the cpufreq_cpu_data array. 38 * also protects the cpufreq_cpu_data array.
39 */ 39 */
40static struct cpufreq_driver *cpufreq_driver; 40static struct cpufreq_driver *cpufreq_driver;
41static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42#ifdef CONFIG_HOTPLUG_CPU 42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */ 43/* This one keeps track of the previously set governor of a removed CPU */
44static struct cpufreq_governor *cpufreq_cpu_governor[NR_CPUS]; 44static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
45#endif 45#endif
46static DEFINE_SPINLOCK(cpufreq_driver_lock); 46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47 47
@@ -135,7 +135,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
135 struct cpufreq_policy *data; 135 struct cpufreq_policy *data;
136 unsigned long flags; 136 unsigned long flags;
137 137
138 if (cpu >= NR_CPUS) 138 if (cpu >= nr_cpu_ids)
139 goto err_out; 139 goto err_out;
140 140
141 /* get the cpufreq driver */ 141 /* get the cpufreq driver */
@@ -149,7 +149,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
149 149
150 150
151 /* get the CPU */ 151 /* get the CPU */
152 data = cpufreq_cpu_data[cpu]; 152 data = per_cpu(cpufreq_cpu_data, cpu);
153 153
154 if (!data) 154 if (!data)
155 goto err_out_put_module; 155 goto err_out_put_module;
@@ -327,7 +327,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
327 dprintk("notification %u of frequency transition to %u kHz\n", 327 dprintk("notification %u of frequency transition to %u kHz\n",
328 state, freqs->new); 328 state, freqs->new);
329 329
330 policy = cpufreq_cpu_data[freqs->cpu]; 330 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
331 switch (state) { 331 switch (state) {
332 332
333 case CPUFREQ_PRECHANGE: 333 case CPUFREQ_PRECHANGE:
@@ -828,8 +828,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
828#ifdef CONFIG_SMP 828#ifdef CONFIG_SMP
829 829
830#ifdef CONFIG_HOTPLUG_CPU 830#ifdef CONFIG_HOTPLUG_CPU
831 if (cpufreq_cpu_governor[cpu]){ 831 if (per_cpu(cpufreq_cpu_governor, cpu)) {
832 policy->governor = cpufreq_cpu_governor[cpu]; 832 policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
833 dprintk("Restoring governor %s for cpu %d\n", 833 dprintk("Restoring governor %s for cpu %d\n",
834 policy->governor->name, cpu); 834 policy->governor->name, cpu);
835 } 835 }
@@ -854,7 +854,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
854 854
855 spin_lock_irqsave(&cpufreq_driver_lock, flags); 855 spin_lock_irqsave(&cpufreq_driver_lock, flags);
856 managed_policy->cpus = policy->cpus; 856 managed_policy->cpus = policy->cpus;
857 cpufreq_cpu_data[cpu] = managed_policy; 857 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
858 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 858 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
859 859
860 dprintk("CPU already managed, adding link\n"); 860 dprintk("CPU already managed, adding link\n");
@@ -899,7 +899,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
899 899
900 spin_lock_irqsave(&cpufreq_driver_lock, flags); 900 spin_lock_irqsave(&cpufreq_driver_lock, flags);
901 for_each_cpu_mask(j, policy->cpus) { 901 for_each_cpu_mask(j, policy->cpus) {
902 cpufreq_cpu_data[j] = policy; 902 per_cpu(cpufreq_cpu_data, j) = policy;
903 per_cpu(policy_cpu, j) = policy->cpu; 903 per_cpu(policy_cpu, j) = policy->cpu;
904 } 904 }
905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -946,7 +946,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
946err_out_unregister: 946err_out_unregister:
947 spin_lock_irqsave(&cpufreq_driver_lock, flags); 947 spin_lock_irqsave(&cpufreq_driver_lock, flags);
948 for_each_cpu_mask(j, policy->cpus) 948 for_each_cpu_mask(j, policy->cpus)
949 cpufreq_cpu_data[j] = NULL; 949 per_cpu(cpufreq_cpu_data, j) = NULL;
950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
951 951
952 kobject_put(&policy->kobj); 952 kobject_put(&policy->kobj);
@@ -989,7 +989,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
989 dprintk("unregistering CPU %u\n", cpu); 989 dprintk("unregistering CPU %u\n", cpu);
990 990
991 spin_lock_irqsave(&cpufreq_driver_lock, flags); 991 spin_lock_irqsave(&cpufreq_driver_lock, flags);
992 data = cpufreq_cpu_data[cpu]; 992 data = per_cpu(cpufreq_cpu_data, cpu);
993 993
994 if (!data) { 994 if (!data) {
995 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 995 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -997,7 +997,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
997 unlock_policy_rwsem_write(cpu); 997 unlock_policy_rwsem_write(cpu);
998 return -EINVAL; 998 return -EINVAL;
999 } 999 }
1000 cpufreq_cpu_data[cpu] = NULL; 1000 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1001 1001
1002 1002
1003#ifdef CONFIG_SMP 1003#ifdef CONFIG_SMP
@@ -1019,19 +1019,19 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1019#ifdef CONFIG_SMP 1019#ifdef CONFIG_SMP
1020 1020
1021#ifdef CONFIG_HOTPLUG_CPU 1021#ifdef CONFIG_HOTPLUG_CPU
1022 cpufreq_cpu_governor[cpu] = data->governor; 1022 per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
1023#endif 1023#endif
1024 1024
1025 /* if we have other CPUs still registered, we need to unlink them, 1025 /* if we have other CPUs still registered, we need to unlink them,
1026 * or else wait_for_completion below will lock up. Clean the 1026 * or else wait_for_completion below will lock up. Clean the
1027 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs 1027 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1028 * links afterwards. 1028 * the sysfs links afterwards.
1029 */ 1029 */
1030 if (unlikely(cpus_weight(data->cpus) > 1)) { 1030 if (unlikely(cpus_weight(data->cpus) > 1)) {
1031 for_each_cpu_mask(j, data->cpus) { 1031 for_each_cpu_mask(j, data->cpus) {
1032 if (j == cpu) 1032 if (j == cpu)
1033 continue; 1033 continue;
1034 cpufreq_cpu_data[j] = NULL; 1034 per_cpu(cpufreq_cpu_data, j) = NULL;
1035 } 1035 }
1036 } 1036 }
1037 1037
@@ -1043,7 +1043,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1043 continue; 1043 continue;
1044 dprintk("removing link for cpu %u\n", j); 1044 dprintk("removing link for cpu %u\n", j);
1045#ifdef CONFIG_HOTPLUG_CPU 1045#ifdef CONFIG_HOTPLUG_CPU
1046 cpufreq_cpu_governor[j] = data->governor; 1046 per_cpu(cpufreq_cpu_governor, j) = data->governor;
1047#endif 1047#endif
1048 cpu_sys_dev = get_cpu_sysdev(j); 1048 cpu_sys_dev = get_cpu_sysdev(j);
1049 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); 1049 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1153,7 +1153,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
1153 1153
1154static unsigned int __cpufreq_get(unsigned int cpu) 1154static unsigned int __cpufreq_get(unsigned int cpu)
1155{ 1155{
1156 struct cpufreq_policy *policy = cpufreq_cpu_data[cpu]; 1156 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1157 unsigned int ret_freq = 0; 1157 unsigned int ret_freq = 0;
1158 1158
1159 if (!cpufreq_driver->get) 1159 if (!cpufreq_driver->get)
@@ -1822,16 +1822,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1822 cpufreq_driver = driver_data; 1822 cpufreq_driver = driver_data;
1823 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1823 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1824 1824
1825 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver); 1825 ret = sysdev_driver_register(&cpu_sysdev_class,
1826 &cpufreq_sysdev_driver);
1826 1827
1827 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1828 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1828 int i; 1829 int i;
1829 ret = -ENODEV; 1830 ret = -ENODEV;
1830 1831
1831 /* check for at least one working CPU */ 1832 /* check for at least one working CPU */
1832 for (i=0; i<NR_CPUS; i++) 1833 for (i = 0; i < nr_cpu_ids; i++)
1833 if (cpufreq_cpu_data[i]) 1834 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1834 ret = 0; 1835 ret = 0;
1836 break;
1837 }
1835 1838
1836 /* if all ->init() calls failed, unregister */ 1839 /* if all ->init() calls failed, unregister */
1837 if (ret) { 1840 if (ret) {
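
[Note: the cpufreq hunks above swap NR_CPUS-sized static arrays for per-CPU variables and bound loops by the runtime nr_cpu_ids instead of the compile-time NR_CPUS, so memory scales with possible CPUs rather than the kernel's configured maximum. Minimal usage sketch of the pattern (get_policy() is a made-up helper):

	static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);

	static struct cpufreq_policy *get_policy(unsigned int cpu)
	{
		if (cpu >= nr_cpu_ids)	/* runtime bound, not NR_CPUS */
			return NULL;
		return per_cpu(cpufreq_cpu_data, cpu);
	}
]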
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index ae70d63a8b26..c0ff97d375d7 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -43,7 +43,7 @@ struct cpufreq_stats {
43#endif 43#endif
44}; 44};
45 45
46static struct cpufreq_stats *cpufreq_stats_table[NR_CPUS]; 46static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
47 47
48struct cpufreq_stats_attribute { 48struct cpufreq_stats_attribute {
49 struct attribute attr; 49 struct attribute attr;
@@ -58,7 +58,7 @@ cpufreq_stats_update (unsigned int cpu)
58 58
59 cur_time = get_jiffies_64(); 59 cur_time = get_jiffies_64();
60 spin_lock(&cpufreq_stats_lock); 60 spin_lock(&cpufreq_stats_lock);
61 stat = cpufreq_stats_table[cpu]; 61 stat = per_cpu(cpufreq_stats_table, cpu);
62 if (stat->time_in_state) 62 if (stat->time_in_state)
63 stat->time_in_state[stat->last_index] = 63 stat->time_in_state[stat->last_index] =
64 cputime64_add(stat->time_in_state[stat->last_index], 64 cputime64_add(stat->time_in_state[stat->last_index],
@@ -71,11 +71,11 @@ cpufreq_stats_update (unsigned int cpu)
71static ssize_t 71static ssize_t
72show_total_trans(struct cpufreq_policy *policy, char *buf) 72show_total_trans(struct cpufreq_policy *policy, char *buf)
73{ 73{
74 struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu]; 74 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
75 if (!stat) 75 if (!stat)
76 return 0; 76 return 0;
77 return sprintf(buf, "%d\n", 77 return sprintf(buf, "%d\n",
78 cpufreq_stats_table[stat->cpu]->total_trans); 78 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
79} 79}
80 80
81static ssize_t 81static ssize_t
@@ -83,7 +83,7 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
83{ 83{
84 ssize_t len = 0; 84 ssize_t len = 0;
85 int i; 85 int i;
86 struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu]; 86 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
87 if (!stat) 87 if (!stat)
88 return 0; 88 return 0;
89 cpufreq_stats_update(stat->cpu); 89 cpufreq_stats_update(stat->cpu);
@@ -101,7 +101,7 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
101 ssize_t len = 0; 101 ssize_t len = 0;
102 int i, j; 102 int i, j;
103 103
104 struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu]; 104 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
105 if (!stat) 105 if (!stat)
106 return 0; 106 return 0;
107 cpufreq_stats_update(stat->cpu); 107 cpufreq_stats_update(stat->cpu);
@@ -170,7 +170,7 @@ freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
170 170
171static void cpufreq_stats_free_table(unsigned int cpu) 171static void cpufreq_stats_free_table(unsigned int cpu)
172{ 172{
173 struct cpufreq_stats *stat = cpufreq_stats_table[cpu]; 173 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
174 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 174 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
175 if (policy && policy->cpu == cpu) 175 if (policy && policy->cpu == cpu)
176 sysfs_remove_group(&policy->kobj, &stats_attr_group); 176 sysfs_remove_group(&policy->kobj, &stats_attr_group);
@@ -178,7 +178,7 @@ static void cpufreq_stats_free_table(unsigned int cpu)
178 kfree(stat->time_in_state); 178 kfree(stat->time_in_state);
179 kfree(stat); 179 kfree(stat);
180 } 180 }
181 cpufreq_stats_table[cpu] = NULL; 181 per_cpu(cpufreq_stats_table, cpu) = NULL;
182 if (policy) 182 if (policy)
183 cpufreq_cpu_put(policy); 183 cpufreq_cpu_put(policy);
184} 184}
@@ -192,7 +192,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
192 struct cpufreq_policy *data; 192 struct cpufreq_policy *data;
193 unsigned int alloc_size; 193 unsigned int alloc_size;
194 unsigned int cpu = policy->cpu; 194 unsigned int cpu = policy->cpu;
195 if (cpufreq_stats_table[cpu]) 195 if (per_cpu(cpufreq_stats_table, cpu))
196 return -EBUSY; 196 return -EBUSY;
197 if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL) 197 if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
198 return -ENOMEM; 198 return -ENOMEM;
@@ -207,7 +207,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
207 goto error_out; 207 goto error_out;
208 208
209 stat->cpu = cpu; 209 stat->cpu = cpu;
210 cpufreq_stats_table[cpu] = stat; 210 per_cpu(cpufreq_stats_table, cpu) = stat;
211 211
212 for (i=0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 212 for (i=0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
213 unsigned int freq = table[i].frequency; 213 unsigned int freq = table[i].frequency;
@@ -251,7 +251,7 @@ error_out:
251 cpufreq_cpu_put(data); 251 cpufreq_cpu_put(data);
252error_get_fail: 252error_get_fail:
253 kfree(stat); 253 kfree(stat);
254 cpufreq_stats_table[cpu] = NULL; 254 per_cpu(cpufreq_stats_table, cpu) = NULL;
255 return ret; 255 return ret;
256} 256}
257 257
@@ -284,7 +284,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
284 if (val != CPUFREQ_POSTCHANGE) 284 if (val != CPUFREQ_POSTCHANGE)
285 return 0; 285 return 0;
286 286
287 stat = cpufreq_stats_table[freq->cpu]; 287 stat = per_cpu(cpufreq_stats_table, freq->cpu);
288 if (!stat) 288 if (!stat)
289 return 0; 289 return 0;
290 290
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index b64c6bc445e3..9071d80fbba2 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
174} 174}
175EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); 175EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
176 176
177static struct cpufreq_frequency_table *show_table[NR_CPUS]; 177static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
178/** 178/**
179 * show_available_freqs - show available frequencies for the specified CPU 179 * show_available_freqs - show available frequencies for the specified CPU
180 */ 180 */
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
185 ssize_t count = 0; 185 ssize_t count = 0;
186 struct cpufreq_frequency_table *table; 186 struct cpufreq_frequency_table *table;
187 187
188 if (!show_table[cpu]) 188 if (!per_cpu(show_table, cpu))
189 return -ENODEV; 189 return -ENODEV;
190 190
191 table = show_table[cpu]; 191 table = per_cpu(show_table, cpu);
192 192
193 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 193 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
217 unsigned int cpu) 217 unsigned int cpu)
218{ 218{
219 dprintk("setting show_table for cpu %u to %p\n", cpu, table); 219 dprintk("setting show_table for cpu %u to %p\n", cpu, table);
220 show_table[cpu] = table; 220 per_cpu(show_table, cpu) = table;
221} 221}
222EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); 222EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
223 223
224void cpufreq_frequency_table_put_attr(unsigned int cpu) 224void cpufreq_frequency_table_put_attr(unsigned int cpu)
225{ 225{
226 dprintk("clearing show_table for cpu %u\n", cpu); 226 dprintk("clearing show_table for cpu %u\n", cpu);
227 show_table[cpu] = NULL; 227 per_cpu(show_table, cpu) = NULL;
228} 228}
229EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); 229EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
230 230
231struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 231struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
232{ 232{
233 return show_table[cpu]; 233 return per_cpu(show_table, cpu);
234} 234}
235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
236 236
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index eb24a3f47caa..ccbe8ae47a61 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -5,12 +5,12 @@ header-y += fpu.h
5header-y += fpswa.h 5header-y += fpswa.h
6header-y += ia64regs.h 6header-y += ia64regs.h
7header-y += intel_intrin.h 7header-y += intel_intrin.h
8header-y += intrinsics.h
9header-y += perfmon_default_smpl.h 8header-y += perfmon_default_smpl.h
10header-y += ptrace_offsets.h 9header-y += ptrace_offsets.h
11header-y += rse.h 10header-y += rse.h
12header-y += ucontext.h 11header-y += ucontext.h
13 12
14unifdef-y += gcc_intrin.h 13unifdef-y += gcc_intrin.h
14unifdef-y += intrinsics.h
15unifdef-y += perfmon.h 15unifdef-y += perfmon.h
16unifdef-y += ustack.h 16unifdef-y += ustack.h
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 2fe292c275fe..0f5b55921758 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -32,7 +32,7 @@ extern void ia64_bad_param_for_getreg (void);
32register unsigned long ia64_r13 asm ("r13") __used; 32register unsigned long ia64_r13 asm ("r13") __used;
33#endif 33#endif
34 34
35#define ia64_setreg(regnum, val) \ 35#define ia64_native_setreg(regnum, val) \
36({ \ 36({ \
37 switch (regnum) { \ 37 switch (regnum) { \
38 case _IA64_REG_PSR_L: \ 38 case _IA64_REG_PSR_L: \
@@ -61,7 +61,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
61 } \ 61 } \
62}) 62})
63 63
64#define ia64_getreg(regnum) \ 64#define ia64_native_getreg(regnum) \
65({ \ 65({ \
66 __u64 ia64_intri_res; \ 66 __u64 ia64_intri_res; \
67 \ 67 \
@@ -385,7 +385,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
385 385
386#define ia64_invala() asm volatile ("invala" ::: "memory") 386#define ia64_invala() asm volatile ("invala" ::: "memory")
387 387
388#define ia64_thash(addr) \ 388#define ia64_native_thash(addr) \
389({ \ 389({ \
390 __u64 ia64_intri_res; \ 390 __u64 ia64_intri_res; \
391 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ 391 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
@@ -438,10 +438,10 @@ register unsigned long ia64_r13 asm ("r13") __used;
438#define ia64_set_pmd(index, val) \ 438#define ia64_set_pmd(index, val) \
439 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") 439 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
440 440
441#define ia64_set_rr(index, val) \ 441#define ia64_native_set_rr(index, val) \
442 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); 442 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
443 443
444#define ia64_get_cpuid(index) \ 444#define ia64_native_get_cpuid(index) \
445({ \ 445({ \
446 __u64 ia64_intri_res; \ 446 __u64 ia64_intri_res; \
447 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ 447 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
@@ -477,33 +477,33 @@ register unsigned long ia64_r13 asm ("r13") __used;
477}) 477})
478 478
479 479
480#define ia64_get_pmd(index) \ 480#define ia64_native_get_pmd(index) \
481({ \ 481({ \
482 __u64 ia64_intri_res; \ 482 __u64 ia64_intri_res; \
483 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ 483 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
484 ia64_intri_res; \ 484 ia64_intri_res; \
485}) 485})
486 486
487#define ia64_get_rr(index) \ 487#define ia64_native_get_rr(index) \
488({ \ 488({ \
489 __u64 ia64_intri_res; \ 489 __u64 ia64_intri_res; \
490 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ 490 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
491 ia64_intri_res; \ 491 ia64_intri_res; \
492}) 492})
493 493
494#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") 494#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
495 495
496 496
497#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") 497#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
498 498
499#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") 499#define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
500#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") 500#define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
501#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") 501#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
502#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") 502#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
503 503
504#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) 504#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
505 505
506#define ia64_ptcga(addr, size) \ 506#define ia64_native_ptcga(addr, size) \
507do { \ 507do { \
508 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ 508 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
509 ia64_dv_serialize_data(); \ 509 ia64_dv_serialize_data(); \
@@ -608,7 +608,7 @@ do { \
608 } \ 608 } \
609}) 609})
610 610
611#define ia64_intrin_local_irq_restore(x) \ 611#define ia64_native_intrin_local_irq_restore(x) \
612do { \ 612do { \
613 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ 613 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
614 "(p6) ssm psr.i;" \ 614 "(p6) ssm psr.i;" \
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 76366dc9c1a0..5c99cbcb8a0d 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -15,7 +15,11 @@
15#include <asm/ptrace.h> 15#include <asm/ptrace.h>
16#include <asm/smp.h> 16#include <asm/smp.h>
17 17
18#ifndef CONFIG_PARAVIRT
18typedef u8 ia64_vector; 19typedef u8 ia64_vector;
20#else
21typedef u16 ia64_vector;
22#endif
19 23
20/* 24/*
21 * 0 special 25 * 0 special
@@ -104,13 +108,24 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
104 108
105extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ 109extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
106 110
111#ifdef CONFIG_PARAVIRT_GUEST
112#include <asm/paravirt.h>
113#else
114#define ia64_register_ipi ia64_native_register_ipi
115#define assign_irq_vector ia64_native_assign_irq_vector
116#define free_irq_vector ia64_native_free_irq_vector
117#define register_percpu_irq ia64_native_register_percpu_irq
118#define ia64_resend_irq ia64_native_resend_irq
119#endif
120
121extern void ia64_native_register_ipi(void);
107extern int bind_irq_vector(int irq, int vector, cpumask_t domain); 122extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
108extern int assign_irq_vector (int irq); /* allocate a free vector */ 123extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */
109extern void free_irq_vector (int vector); 124extern void ia64_native_free_irq_vector (int vector);
110extern int reserve_irq_vector (int vector); 125extern int reserve_irq_vector (int vector);
111extern void __setup_vector_irq(int cpu); 126extern void __setup_vector_irq(int cpu);
112extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); 127extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
113extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); 128extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
114extern int check_irq_used (int irq); 129extern int check_irq_used (int irq);
115extern void destroy_and_reserve_irq (unsigned int irq); 130extern void destroy_and_reserve_irq (unsigned int irq);
116 131
@@ -122,7 +137,7 @@ static inline int irq_prepare_move(int irq, int cpu) { return 0; }
122static inline void irq_complete_move(unsigned int irq) {} 137static inline void irq_complete_move(unsigned int irq) {}
123#endif 138#endif
124 139
125static inline void ia64_resend_irq(unsigned int vector) 140static inline void ia64_native_resend_irq(unsigned int vector)
126{ 141{
127 platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); 142 platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
128} 143}
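
[Note: the hw_irq.h hunk shows the renaming convention used throughout this series: the native implementation keeps its code under an ia64_native_ prefix, and the generic name either maps straight back to it or, under CONFIG_PARAVIRT_GUEST, resolves through asm/paravirt.h to a pv_irq_ops hook. Schematically (the paravirt side is really a static inline wrapper like those in paravirt.h further down):

	#ifdef CONFIG_PARAVIRT_GUEST
	/* via asm/paravirt.h: */
	static inline int assign_irq_vector(int irq)
	{
		return pv_irq_ops.assign_irq_vector(irq);
	}
	#else
	#define assign_irq_vector ia64_native_assign_irq_vector
	#endif
]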
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a520d103d808..53cec577558a 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -16,8 +16,8 @@
16 * intrinsic 16 * intrinsic
17 */ 17 */
18 18
19#define ia64_getreg __getReg 19#define ia64_native_getreg __getReg
20#define ia64_setreg __setReg 20#define ia64_native_setreg __setReg
21 21
22#define ia64_hint __hint 22#define ia64_hint __hint
23#define ia64_hint_pause __hint_pause 23#define ia64_hint_pause __hint_pause
@@ -39,10 +39,10 @@
39#define ia64_invala_fr __invala_fr 39#define ia64_invala_fr __invala_fr
40#define ia64_nop __nop 40#define ia64_nop __nop
41#define ia64_sum __sum 41#define ia64_sum __sum
42#define ia64_ssm __ssm 42#define ia64_native_ssm __ssm
43#define ia64_rum __rum 43#define ia64_rum __rum
44#define ia64_rsm __rsm 44#define ia64_native_rsm __rsm
45#define ia64_fc __fc 45#define ia64_native_fc __fc
46 46
47#define ia64_ldfs __ldfs 47#define ia64_ldfs __ldfs
48#define ia64_ldfd __ldfd 48#define ia64_ldfd __ldfd
@@ -88,16 +88,17 @@
88 __setIndReg(_IA64_REG_INDR_PMC, index, val) 88 __setIndReg(_IA64_REG_INDR_PMC, index, val)
89#define ia64_set_pmd(index, val) \ 89#define ia64_set_pmd(index, val) \
90 __setIndReg(_IA64_REG_INDR_PMD, index, val) 90 __setIndReg(_IA64_REG_INDR_PMD, index, val)
91#define ia64_set_rr(index, val) \ 91#define ia64_native_set_rr(index, val) \
92 __setIndReg(_IA64_REG_INDR_RR, index, val) 92 __setIndReg(_IA64_REG_INDR_RR, index, val)
93 93
94#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) 94#define ia64_native_get_cpuid(index) \
95#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) 95 __getIndReg(_IA64_REG_INDR_CPUID, index)
96#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) 96#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
97#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) 97#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
98#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) 98#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
99#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) 99#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
100#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) 100#define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
101#define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
101 102
102#define ia64_srlz_d __dsrlz 103#define ia64_srlz_d __dsrlz
103#define ia64_srlz_i __isrlz 104#define ia64_srlz_i __isrlz
@@ -119,16 +120,16 @@
119#define ia64_ld8_acq __ld8_acq 120#define ia64_ld8_acq __ld8_acq
120 121
121#define ia64_sync_i __synci 122#define ia64_sync_i __synci
122#define ia64_thash __thash 123#define ia64_native_thash __thash
123#define ia64_ttag __ttag 124#define ia64_native_ttag __ttag
124#define ia64_itcd __itcd 125#define ia64_itcd __itcd
125#define ia64_itci __itci 126#define ia64_itci __itci
126#define ia64_itrd __itrd 127#define ia64_itrd __itrd
127#define ia64_itri __itri 128#define ia64_itri __itri
128#define ia64_ptce __ptce 129#define ia64_ptce __ptce
129#define ia64_ptcl __ptcl 130#define ia64_ptcl __ptcl
130#define ia64_ptcg __ptcg 131#define ia64_native_ptcg __ptcg
131#define ia64_ptcga __ptcga 132#define ia64_native_ptcga __ptcga
132#define ia64_ptri __ptri 133#define ia64_ptri __ptri
133#define ia64_ptrd __ptrd 134#define ia64_ptrd __ptrd
134#define ia64_dep_mi _m64_dep_mi 135#define ia64_dep_mi _m64_dep_mi
@@ -145,13 +146,13 @@
145#define ia64_lfetch_fault __lfetch_fault 146#define ia64_lfetch_fault __lfetch_fault
146#define ia64_lfetch_fault_excl __lfetch_fault_excl 147#define ia64_lfetch_fault_excl __lfetch_fault_excl
147 148
148#define ia64_intrin_local_irq_restore(x) \ 149#define ia64_native_intrin_local_irq_restore(x) \
149do { \ 150do { \
150 if ((x) != 0) { \ 151 if ((x) != 0) { \
151 ia64_ssm(IA64_PSR_I); \ 152 ia64_native_ssm(IA64_PSR_I); \
152 ia64_srlz_d(); \ 153 ia64_srlz_d(); \
153 } else { \ 154 } else { \
154 ia64_rsm(IA64_PSR_I); \ 155 ia64_native_rsm(IA64_PSR_I); \
155 } \ 156 } \
156} while (0) 157} while (0)
157 158
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index f1135b5b94c3..47d686dba1eb 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
@@ -18,6 +18,17 @@
18# include <asm/gcc_intrin.h> 18# include <asm/gcc_intrin.h>
19#endif 19#endif
20 20
21#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
22
23#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4) \
24do { \
25 ia64_native_set_rr(0x0000000000000000UL, (val0)); \
26 ia64_native_set_rr(0x2000000000000000UL, (val1)); \
27 ia64_native_set_rr(0x4000000000000000UL, (val2)); \
28 ia64_native_set_rr(0x6000000000000000UL, (val3)); \
29 ia64_native_set_rr(0x8000000000000000UL, (val4)); \
30} while (0)
31
21/* 32/*
22 * Force an unresolved reference if someone tries to use 33 * Force an unresolved reference if someone tries to use
23 * ia64_fetch_and_add() with a bad value. 34 * ia64_fetch_and_add() with a bad value.
@@ -183,4 +194,48 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
183#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ 194#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
184 195
185#endif 196#endif
197
198#ifdef __KERNEL__
199#include <asm/paravirt_privop.h>
200#endif
201
202#ifndef __ASSEMBLY__
203#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
204#define IA64_INTRINSIC_API(name) pv_cpu_ops.name
205#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name
206#else
207#define IA64_INTRINSIC_API(name) ia64_native_ ## name
208#define IA64_INTRINSIC_MACRO(name) ia64_native_ ## name
209#endif
210
211/************************************************/
212/* Instructions paravirtualized for correctness */
213/************************************************/
214/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
215/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
216 * is not currently used (though it may be in a long-format VHPT system!)
217 */
218#define ia64_fc IA64_INTRINSIC_API(fc)
219#define ia64_thash IA64_INTRINSIC_API(thash)
220#define ia64_get_cpuid IA64_INTRINSIC_API(get_cpuid)
221#define ia64_get_pmd IA64_INTRINSIC_API(get_pmd)
222
223
224/************************************************/
225/* Instructions paravirtualized for performance */
226/************************************************/
227#define ia64_ssm IA64_INTRINSIC_MACRO(ssm)
228#define ia64_rsm IA64_INTRINSIC_MACRO(rsm)
229#define ia64_getreg IA64_INTRINSIC_API(getreg)
230#define ia64_setreg IA64_INTRINSIC_API(setreg)
231#define ia64_set_rr IA64_INTRINSIC_API(set_rr)
232#define ia64_get_rr IA64_INTRINSIC_API(get_rr)
233#define ia64_ptcga IA64_INTRINSIC_API(ptcga)
234#define ia64_get_psr_i IA64_INTRINSIC_API(get_psr_i)
235#define ia64_intrin_local_irq_restore \
236 IA64_INTRINSIC_API(intrin_local_irq_restore)
237#define ia64_set_rr0_to_rr4 IA64_INTRINSIC_API(set_rr0_to_rr4)
238
239#endif /* !__ASSEMBLY__ */
240
186#endif /* _ASM_IA64_INTRINSICS_H */ 241#endif /* _ASM_IA64_INTRINSICS_H */
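
[Note: the IA64_INTRINSIC_API/IA64_INTRINSIC_MACRO pair above is the heart of the C-level paravirtualization: one call site, two bindings. Also note ia64_set_rr0_to_rr4(), which batches five region-register writes into one API call so a hypervisor can use a single hypercall; reload_context() in mmu_context.h below exploits this. A self-contained model of the dispatch, with invented names (in the kernel the table is pv_cpu_ops and the fallback is the ia64_native_* implementation):

	#include <stdio.h>

	static unsigned long native_getreg(int regnum)
	{
		return 42;	/* stands in for the real "mov reg = ..." */
	}

	#ifdef CONFIG_PARAVIRT
	struct pv_cpu_ops {
		unsigned long (*getreg)(int regnum);
	};
	static struct pv_cpu_ops pv_cpu_ops = { .getreg = native_getreg };
	#define my_getreg(r)	pv_cpu_ops.getreg(r)	/* IA64_INTRINSIC_API */
	#else
	#define my_getreg(r)	native_getreg(r)	/* ia64_native_ ## name */
	#endif

	int main(void)
	{
		printf("getreg -> %lu\n", my_getreg(0));
		return 0;
	}
]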
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index a3a4288daae8..b9c102e15f22 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -55,13 +55,27 @@
55 55
56#define NR_IOSAPICS 256 56#define NR_IOSAPICS 256
57 57
58static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg) 58#ifdef CONFIG_PARAVIRT_GUEST
59#include <asm/paravirt.h>
60#else
61#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init
62#define __iosapic_read __ia64_native_iosapic_read
63#define __iosapic_write __ia64_native_iosapic_write
64#define iosapic_get_irq_chip ia64_native_iosapic_get_irq_chip
65#endif
66
67extern void __init ia64_native_iosapic_pcat_compat_init(void);
68extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
69
70static inline unsigned int
71__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
59{ 72{
60 writel(reg, iosapic + IOSAPIC_REG_SELECT); 73 writel(reg, iosapic + IOSAPIC_REG_SELECT);
61 return readl(iosapic + IOSAPIC_WINDOW); 74 return readl(iosapic + IOSAPIC_WINDOW);
62} 75}
63 76
64static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) 77static inline void
78__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
65{ 79{
66 writel(reg, iosapic + IOSAPIC_REG_SELECT); 80 writel(reg, iosapic + IOSAPIC_REG_SELECT);
67 writel(val, iosapic + IOSAPIC_WINDOW); 81 writel(val, iosapic + IOSAPIC_WINDOW);
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index a66d26827cbb..3627116fb0e2 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -13,14 +13,7 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16 16#include <asm-ia64/nr-irqs.h>
17#define NR_VECTORS 256
18
19#if (NR_VECTORS + 32 * NR_CPUS) < 1024
20#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
21#else
22#define NR_IRQS 1024
23#endif
24 17
25static __inline__ int 18static __inline__ int
26irq_canonicalize (int irq) 19irq_canonicalize (int irq)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index cef2400983fa..040bc87db930 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -152,11 +152,7 @@ reload_context (nv_mm_context_t context)
152# endif 152# endif
153#endif 153#endif
154 154
155 ia64_set_rr(0x0000000000000000UL, rr0); 155 ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
156 ia64_set_rr(0x2000000000000000UL, rr1);
157 ia64_set_rr(0x4000000000000000UL, rr2);
158 ia64_set_rr(0x6000000000000000UL, rr3);
159 ia64_set_rr(0x8000000000000000UL, rr4);
160 ia64_srlz_i(); /* srlz.i implies srlz.d */ 156 ia64_srlz_i(); /* srlz.i implies srlz.d */
161} 157}
162 158
diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h
new file mode 100644
index 000000000000..c953a2ca4fce
--- /dev/null
+++ b/include/asm-ia64/native/inst.h
@@ -0,0 +1,175 @@
1/******************************************************************************
2 * include/asm-ia64/native/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN
24
25#define __paravirt_switch_to ia64_native_switch_to
26#define __paravirt_leave_syscall ia64_native_leave_syscall
27#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall
28#define __paravirt_leave_kernel ia64_native_leave_kernel
29#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end
30#define __paravirt_work_processed_syscall_target \
31 ia64_work_processed_syscall
32
33#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
34# define PARAVIRT_POISON 0xdeadbeefbaadf00d
35# define CLOBBER(clob) \
36 ;; \
37 movl clob = PARAVIRT_POISON; \
38 ;;
39#else
40# define CLOBBER(clob) /* nothing */
41#endif
42
43#define MOV_FROM_IFA(reg) \
44 mov reg = cr.ifa
45
46#define MOV_FROM_ITIR(reg) \
47 mov reg = cr.itir
48
49#define MOV_FROM_ISR(reg) \
50 mov reg = cr.isr
51
52#define MOV_FROM_IHA(reg) \
53 mov reg = cr.iha
54
55#define MOV_FROM_IPSR(pred, reg) \
56(pred) mov reg = cr.ipsr
57
58#define MOV_FROM_IIM(reg) \
59 mov reg = cr.iim
60
61#define MOV_FROM_IIP(reg) \
62 mov reg = cr.iip
63
64#define MOV_FROM_IVR(reg, clob) \
65 mov reg = cr.ivr \
66 CLOBBER(clob)
67
68#define MOV_FROM_PSR(pred, reg, clob) \
69(pred) mov reg = psr \
70 CLOBBER(clob)
71
72#define MOV_TO_IFA(reg, clob) \
73 mov cr.ifa = reg \
74 CLOBBER(clob)
75
76#define MOV_TO_ITIR(pred, reg, clob) \
77(pred) mov cr.itir = reg \
78 CLOBBER(clob)
79
80#define MOV_TO_IHA(pred, reg, clob) \
81(pred) mov cr.iha = reg \
82 CLOBBER(clob)
83
84#define MOV_TO_IPSR(pred, reg, clob) \
85(pred) mov cr.ipsr = reg \
86 CLOBBER(clob)
87
88#define MOV_TO_IFS(pred, reg, clob) \
89(pred) mov cr.ifs = reg \
90 CLOBBER(clob)
91
92#define MOV_TO_IIP(reg, clob) \
93 mov cr.iip = reg \
94 CLOBBER(clob)
95
96#define MOV_TO_KR(kr, reg, clob0, clob1) \
97 mov IA64_KR(kr) = reg \
98 CLOBBER(clob0) \
99 CLOBBER(clob1)
100
101#define ITC_I(pred, reg, clob) \
102(pred) itc.i reg \
103 CLOBBER(clob)
104
105#define ITC_D(pred, reg, clob) \
106(pred) itc.d reg \
107 CLOBBER(clob)
108
109#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
110(pred_i) itc.i reg; \
111(pred_d) itc.d reg \
112 CLOBBER(clob)
113
114#define THASH(pred, reg0, reg1, clob) \
115(pred) thash reg0 = reg1 \
116 CLOBBER(clob)
117
118#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
119 ssm psr.ic | PSR_DEFAULT_BITS \
120 CLOBBER(clob0) \
121 CLOBBER(clob1) \
122 ;; \
 123	srlz.i /* guarantee that interruption collection is on */ \
124 ;;
125
126#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
127 ssm psr.ic \
128 CLOBBER(clob0) \
129 CLOBBER(clob1) \
130 ;; \
131 srlz.d
132
133#define RSM_PSR_IC(clob) \
134 rsm psr.ic \
135 CLOBBER(clob)
136
137#define SSM_PSR_I(pred, pred_clob, clob) \
138(pred) ssm psr.i \
139 CLOBBER(clob)
140
141#define RSM_PSR_I(pred, clob0, clob1) \
142(pred) rsm psr.i \
143 CLOBBER(clob0) \
144 CLOBBER(clob1)
145
146#define RSM_PSR_I_IC(clob0, clob1, clob2) \
147 rsm psr.i | psr.ic \
148 CLOBBER(clob0) \
149 CLOBBER(clob1) \
150 CLOBBER(clob2)
151
152#define RSM_PSR_DT \
153 rsm psr.dt
154
155#define SSM_PSR_DT_AND_SRLZ_I \
156 ssm psr.dt \
157 ;; \
158 srlz.i
159
160#define BSW_0(clob0, clob1, clob2) \
161 bsw.0 \
162 CLOBBER(clob0) \
163 CLOBBER(clob1) \
164 CLOBBER(clob2)
165
166#define BSW_1(clob0, clob1) \
167 bsw.1 \
168 CLOBBER(clob0) \
169 CLOBBER(clob1)
170
171#define COVER \
172 cover
173
174#define RFI \
175 rfi
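
[Note: these one-to-one macros let ivt.S and entry.S be assembled unchanged for either a native or a paravirtualized kernel. The CLOBBER() hook is the interesting part: a pv_ops variant of a macro may need scratch registers the native instruction does not, so with CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK a native build poisons the declared clobbers. For example, MOV_FROM_IVR(r8, r9) then expands to:

	mov r8 = cr.ivr
	;;
	movl r9 = 0xdeadbeefbaadf00d	// PARAVIRT_POISON
	;;

so native code that wrongly assumes r9 survives the macro misbehaves immediately, instead of breaking only when run under a hypervisor.]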
diff --git a/include/asm-ia64/native/irq.h b/include/asm-ia64/native/irq.h
new file mode 100644
index 000000000000..efe9ff74a3c4
--- /dev/null
+++ b/include/asm-ia64/native/irq.h
@@ -0,0 +1,35 @@
1/******************************************************************************
2 * include/asm-ia64/native/irq.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * moved from linux/include/asm-ia64/irq.h.
22 */
23
24#ifndef _ASM_IA64_NATIVE_IRQ_H
25#define _ASM_IA64_NATIVE_IRQ_H
26
27#define NR_VECTORS 256
28
29#if (NR_VECTORS + 32 * NR_CPUS) < 1024
30#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
31#else
32#define IA64_NATIVE_NR_IRQS 1024
33#endif
34
35#endif /* _ASM_IA64_NATIVE_IRQ_H */
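
[Note: the sizing formula moved here is easiest to see with numbers: with NR_CPUS = 16, NR_VECTORS + 32 * NR_CPUS = 256 + 512 = 768 < 1024, so IA64_NATIVE_NR_IRQS is 768; with NR_CPUS = 64 the sum would be 2304, so the 1024 cap applies instead.]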
diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h
new file mode 100644
index 000000000000..1b4df129f579
--- /dev/null
+++ b/include/asm-ia64/paravirt.h
@@ -0,0 +1,255 @@
1/******************************************************************************
2 * include/asm-ia64/paravirt.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23
24#ifndef __ASM_PARAVIRT_H
25#define __ASM_PARAVIRT_H
26
27#ifdef CONFIG_PARAVIRT_GUEST
28
29#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
30#define PARAVIRT_HYPERVISOR_TYPE_XEN 1
31
32#ifndef __ASSEMBLY__
33
34#include <asm/hw_irq.h>
35#include <asm/meminit.h>
36
37/******************************************************************************
38 * general info
39 */
40struct pv_info {
41 unsigned int kernel_rpl;
42 int paravirt_enabled;
43 const char *name;
44};
45
46extern struct pv_info pv_info;
47
48static inline int paravirt_enabled(void)
49{
50 return pv_info.paravirt_enabled;
51}
52
53static inline unsigned int get_kernel_rpl(void)
54{
55 return pv_info.kernel_rpl;
56}
57
58/******************************************************************************
59 * initialization hooks.
60 */
61struct rsvd_region;
62
63struct pv_init_ops {
64 void (*banner)(void);
65
66 int (*reserve_memory)(struct rsvd_region *region);
67
68 void (*arch_setup_early)(void);
69 void (*arch_setup_console)(char **cmdline_p);
70 int (*arch_setup_nomca)(void);
71
72 void (*post_smp_prepare_boot_cpu)(void);
73};
74
75extern struct pv_init_ops pv_init_ops;
76
77static inline void paravirt_banner(void)
78{
79 if (pv_init_ops.banner)
80 pv_init_ops.banner();
81}
82
83static inline int paravirt_reserve_memory(struct rsvd_region *region)
84{
85 if (pv_init_ops.reserve_memory)
86 return pv_init_ops.reserve_memory(region);
87 return 0;
88}
89
90static inline void paravirt_arch_setup_early(void)
91{
92 if (pv_init_ops.arch_setup_early)
93 pv_init_ops.arch_setup_early();
94}
95
96static inline void paravirt_arch_setup_console(char **cmdline_p)
97{
98 if (pv_init_ops.arch_setup_console)
99 pv_init_ops.arch_setup_console(cmdline_p);
100}
101
102static inline int paravirt_arch_setup_nomca(void)
103{
104 if (pv_init_ops.arch_setup_nomca)
105 return pv_init_ops.arch_setup_nomca();
106 return 0;
107}
108
109static inline void paravirt_post_smp_prepare_boot_cpu(void)
110{
111 if (pv_init_ops.post_smp_prepare_boot_cpu)
112 pv_init_ops.post_smp_prepare_boot_cpu();
113}
114
115/******************************************************************************
116 * replacement of iosapic operations.
117 */
118
119struct pv_iosapic_ops {
120 void (*pcat_compat_init)(void);
121
122 struct irq_chip *(*get_irq_chip)(unsigned long trigger);
123
124 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
125 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
126};
127
128extern struct pv_iosapic_ops pv_iosapic_ops;
129
130static inline void
131iosapic_pcat_compat_init(void)
132{
133 if (pv_iosapic_ops.pcat_compat_init)
134 pv_iosapic_ops.pcat_compat_init();
135}
136
137static inline struct irq_chip*
138iosapic_get_irq_chip(unsigned long trigger)
139{
140 return pv_iosapic_ops.get_irq_chip(trigger);
141}
142
143static inline unsigned int
144__iosapic_read(char __iomem *iosapic, unsigned int reg)
145{
146 return pv_iosapic_ops.__read(iosapic, reg);
147}
148
149static inline void
150__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
151{
152	pv_iosapic_ops.__write(iosapic, reg, val);
153}
154
155/******************************************************************************
156 * replacement of irq operations.
157 */
158
159struct pv_irq_ops {
160 void (*register_ipi)(void);
161
162 int (*assign_irq_vector)(int irq);
163 void (*free_irq_vector)(int vector);
164
165 void (*register_percpu_irq)(ia64_vector vec,
166 struct irqaction *action);
167
168 void (*resend_irq)(unsigned int vector);
169};
170
171extern struct pv_irq_ops pv_irq_ops;
172
173static inline void
174ia64_register_ipi(void)
175{
176 pv_irq_ops.register_ipi();
177}
178
179static inline int
180assign_irq_vector(int irq)
181{
182 return pv_irq_ops.assign_irq_vector(irq);
183}
184
185static inline void
186free_irq_vector(int vector)
187{
188	pv_irq_ops.free_irq_vector(vector);
189}
190
191static inline void
192register_percpu_irq(ia64_vector vec, struct irqaction *action)
193{
194 pv_irq_ops.register_percpu_irq(vec, action);
195}
196
197static inline void
198ia64_resend_irq(unsigned int vector)
199{
200 pv_irq_ops.resend_irq(vector);
201}
202
203/******************************************************************************
204 * replacement of time operations.
205 */
206
207extern struct itc_jitter_data_t itc_jitter_data;
208extern volatile int time_keeper_id;
209
210struct pv_time_ops {
211 void (*init_missing_ticks_accounting)(int cpu);
212 int (*do_steal_accounting)(unsigned long *new_itm);
213
214 void (*clocksource_resume)(void);
215};
216
217extern struct pv_time_ops pv_time_ops;
218
219static inline void
220paravirt_init_missing_ticks_accounting(int cpu)
221{
222 if (pv_time_ops.init_missing_ticks_accounting)
223 pv_time_ops.init_missing_ticks_accounting(cpu);
224}
225
226static inline int
227paravirt_do_steal_accounting(unsigned long *new_itm)
228{
229 return pv_time_ops.do_steal_accounting(new_itm);
230}
231
232#endif /* !__ASSEMBLY__ */
233
234#else
235/* fallback for native case */
236
237#ifndef __ASSEMBLY__
238
239#define paravirt_banner() do { } while (0)
240#define paravirt_reserve_memory(region) 0
241
242#define paravirt_arch_setup_early() do { } while (0)
243#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
244#define paravirt_arch_setup_nomca() 0
245#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
246
247#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
248#define paravirt_do_steal_accounting(new_itm) 0
249
250#endif /* __ASSEMBLY__ */
251
252
253#endif /* CONFIG_PARAVIRT_GUEST */
254
255#endif /* __ASM_PARAVIRT_H */
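
Note the asymmetry in the inline wrappers above: the pv_init_ops hooks and init_missing_ticks_accounting are NULL-checked and quietly degrade to no-ops, while iosapic_get_irq_chip, the __iosapic accessors, every pv_irq_ops entry, and do_steal_accounting are called unconditionally, so those slots must always be populated (natively or by the guest). A minimal sketch of a guest installing itself early in boot; every hv_* name and the rpl value are hypothetical, invented for illustration:

	static void hv_banner(void)
	{
		printk(KERN_INFO "running on a hypothetical hypervisor\n");
	}

	static void __init hv_setup_pv_ops(void)
	{
		pv_info.paravirt_enabled = 1;
		pv_info.kernel_rpl = 2;	/* assumed privilege level of the demoted guest kernel */
		pv_info.name = "hypothetical-hv";

		pv_init_ops.banner = hv_banner;
		/* hooks left NULL keep the no-op fallback in the wrappers above */
	}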
diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h
new file mode 100644
index 000000000000..52482e6940ac
--- /dev/null
+++ b/include/asm-ia64/paravirt_privop.h
@@ -0,0 +1,114 @@
1/******************************************************************************
2 * include/asm-ia64/paravirt_privop.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
24#define _ASM_IA64_PARAVIRT_PRIVOP_H
25
26#ifdef CONFIG_PARAVIRT
27
28#ifndef __ASSEMBLY__
29
30#include <linux/types.h>
31#include <asm/kregs.h> /* for IA64_PSR_I */
32
33/******************************************************************************
34 * replacement of intrinsic operations.
35 */
36
37struct pv_cpu_ops {
38 void (*fc)(unsigned long addr);
39 unsigned long (*thash)(unsigned long addr);
40 unsigned long (*get_cpuid)(int index);
41 unsigned long (*get_pmd)(int index);
42 unsigned long (*getreg)(int reg);
43 void (*setreg)(int reg, unsigned long val);
44 void (*ptcga)(unsigned long addr, unsigned long size);
45 unsigned long (*get_rr)(unsigned long index);
46 void (*set_rr)(unsigned long index, unsigned long val);
47 void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
48 unsigned long val2, unsigned long val3,
49 unsigned long val4);
50 void (*ssm_i)(void);
51 void (*rsm_i)(void);
52 unsigned long (*get_psr_i)(void);
53 void (*intrin_local_irq_restore)(unsigned long flags);
54};
55
56extern struct pv_cpu_ops pv_cpu_ops;
57
58extern void ia64_native_setreg_func(int regnum, unsigned long val);
59extern unsigned long ia64_native_getreg_func(int regnum);
60
61/************************************************/
62/* Instructions paravirtualized for performance */
63/************************************************/
64
65/* The mask for ia64_native_ssm/rsm() must be a constant ("i" constraint);
66 * a static inline function can't satisfy that. */
67#define paravirt_ssm(mask) \
68 do { \
69 if ((mask) == IA64_PSR_I) \
70 pv_cpu_ops.ssm_i(); \
71 else \
72 ia64_native_ssm(mask); \
73 } while (0)
74
75#define paravirt_rsm(mask) \
76 do { \
77 if ((mask) == IA64_PSR_I) \
78 pv_cpu_ops.rsm_i(); \
79 else \
80 ia64_native_rsm(mask); \
81 } while (0)
82
83/******************************************************************************
84 * replacement of hand-written assembly code.
85 */
86struct pv_cpu_asm_switch {
87 unsigned long switch_to;
88 unsigned long leave_syscall;
89 unsigned long work_processed_syscall;
90 unsigned long leave_kernel;
91};
92void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
93
94#endif /* __ASSEMBLY__ */
95
96#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
97
98#else
99
100/* fallback for native case */
101#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
102
103#endif /* CONFIG_PARAVIRT */
104
105/* These routines use privilege-sensitive or performance-sensitive
106 * privileged instructions, so the code must be replaced with
107 * paravirtualized versions. */
108#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to)
109#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall)
110#define ia64_work_processed_syscall \
111 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
112#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
113
114#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
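
The IA64_PARAVIRT_ASM_FUNC() indirection lets the rest of the tree keep calling the historical symbols; only the binding moves:

	ia64_leave_kernel -> paravirt_leave_kernel      (CONFIG_PARAVIRT=y)
	ia64_leave_kernel -> ia64_native_leave_kernel   (CONFIG_PARAVIRT=n)

Under CONFIG_PARAVIRT the paravirt_* entry points are stubs (added elsewhere in this patch) that branch through the addresses a hypervisor records with paravirt_cpu_asm_init() and struct pv_cpu_asm_switch; a native build collapses straight back to the original code with no indirection cost.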
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 27731e032ee9..12d96e0cd513 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/irqreturn.h>
18 19
19#include <asm/io.h> 20#include <asm/io.h>
20#include <asm/param.h> 21#include <asm/param.h>
@@ -120,6 +121,7 @@ extern void __init smp_build_cpu_map(void);
120extern void __init init_smp_config (void); 121extern void __init init_smp_config (void);
121extern void smp_do_timer (struct pt_regs *regs); 122extern void smp_do_timer (struct pt_regs *regs);
122 123
124extern irqreturn_t handle_IPI(int irq, void *dev_id);
123extern void smp_send_reschedule (int cpu); 125extern void smp_send_reschedule (int cpu);
124extern void identify_siblings (struct cpuinfo_ia64 *); 126extern void identify_siblings (struct cpuinfo_ia64 *);
125extern int is_multithreading_enabled(void); 127extern int is_multithreading_enabled(void);
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 26e250bfb912..927a381c20ca 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -26,6 +26,7 @@
26 */ 26 */
27#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) 27#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
29#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
29 30
30#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
31 32
@@ -122,10 +123,16 @@ extern struct ia64_boot_param {
122 * write a floating-point register right before reading the PSR 123 * write a floating-point register right before reading the PSR
123 * and that writes to PSR.mfl 124 * and that writes to PSR.mfl
124 */ 125 */
126#ifdef CONFIG_PARAVIRT
127#define __local_save_flags() ia64_get_psr_i()
128#else
129#define __local_save_flags() ia64_getreg(_IA64_REG_PSR)
130#endif
131
125#define __local_irq_save(x) \ 132#define __local_irq_save(x) \
126do { \ 133do { \
127 ia64_stop(); \ 134 ia64_stop(); \
128 (x) = ia64_getreg(_IA64_REG_PSR); \ 135 (x) = __local_save_flags(); \
129 ia64_stop(); \ 136 ia64_stop(); \
130 ia64_rsm(IA64_PSR_I); \ 137 ia64_rsm(IA64_PSR_I); \
131} while (0) 138} while (0)
@@ -173,7 +180,7 @@ do { \
173#endif /* !CONFIG_IA64_DEBUG_IRQ */ 180#endif /* !CONFIG_IA64_DEBUG_IRQ */
174 181
175#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) 182#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
176#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) 183#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); })
177 184
178#define irqs_disabled() \ 185#define irqs_disabled() \
179({ \ 186({ \
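
This system.h hunk is the consumer side of pv_cpu_ops.get_psr_i: reading the whole PSR is a privileged operation a hypervisor would have to trap and emulate, but the saved flags word is only ever tested for PSR.i, so the paravirt build narrows the read to ia64_get_psr_i(). A usage sketch under that assumption:

	unsigned long flags;

	local_irq_save(flags);		/* snapshot PSR.i via the pv hook, then rsm psr.i */
	/* ... interrupts masked ... */
	local_irq_restore(flags);	/* unmask only if PSR.i was set in flags */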
diff --git a/include/asm-ia64/uv/uv_mmrs.h b/include/asm-ia64/uv/uv_mmrs.h
index 1cc1dbb0182f..c149ef085437 100644
--- a/include/asm-ia64/uv/uv_mmrs.h
+++ b/include/asm-ia64/uv/uv_mmrs.h
@@ -11,11 +11,284 @@
11#ifndef __ASM_IA64_UV_MMRS__ 11#ifndef __ASM_IA64_UV_MMRS__
12#define __ASM_IA64_UV_MMRS__ 12#define __ASM_IA64_UV_MMRS__
13 13
14/* 14#define UV_MMR_ENABLE (1UL << 63)
15 * AUTO GENERATED - Do not edit 15
16 */ 16/* ========================================================================= */
17/* UVH_BAU_DATA_CONFIG */
18/* ========================================================================= */
19#define UVH_BAU_DATA_CONFIG 0x61680UL
20#define UVH_BAU_DATA_CONFIG_32 0x0438
21
22#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
23#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
24#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
25#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
26#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
27#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
28#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
29#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
30#define UVH_BAU_DATA_CONFIG_P_SHFT 13
31#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
32#define UVH_BAU_DATA_CONFIG_T_SHFT 15
33#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
34#define UVH_BAU_DATA_CONFIG_M_SHFT 16
35#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
36#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
37#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
38
39union uvh_bau_data_config_u {
40 unsigned long v;
41 struct uvh_bau_data_config_s {
42 unsigned long vector_ : 8; /* RW */
43 unsigned long dm : 3; /* RW */
44 unsigned long destmode : 1; /* RW */
45 unsigned long status : 1; /* RO */
46 unsigned long p : 1; /* RO */
47 unsigned long rsvd_14 : 1; /* */
48 unsigned long t : 1; /* RO */
49 unsigned long m : 1; /* RW */
50 unsigned long rsvd_17_31: 15; /* */
51 unsigned long apic_id : 32; /* RW */
52 } s;
53};
54
55/* ========================================================================= */
56/* UVH_EVENT_OCCURRED0 */
57/* ========================================================================= */
58#define UVH_EVENT_OCCURRED0 0x70000UL
59#define UVH_EVENT_OCCURRED0_32 0x005e8
60
61#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
62#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
63#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
64#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
65#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
66#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
67#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
68#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
69#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
70#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
71#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
72#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
73#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
74#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
75#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
76#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
77#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
78#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
79#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
80#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
81#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
82#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
83#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
84#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
85#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
86#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
87#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
88#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
89#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
90#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
91#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
92#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
93#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
94#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
95#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
96#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
97#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
98#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
99#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
100#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
101#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
102#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
103#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
104#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
105#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
106#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
107#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
108#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
109#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
110#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
111#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
112#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
113#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
114#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
115#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
116#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
117#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
118#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
119#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
120#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
121#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
122#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
123#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
124#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
125#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
126#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
127#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
128#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
129#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
130#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
131#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
132#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
133#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
134#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
135#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
136#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
137#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
138#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
139#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
140#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
141#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
142#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
143#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
144#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
145#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
146#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
147#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
148#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
149#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
150#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
151#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
152#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
153#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
154#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
155#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
156#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
157#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
158#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
159#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
160#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
161#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
162#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
163#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
164#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
165#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
166#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
167#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
168#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
169#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
170#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
171#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
172#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
173#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
174#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
175union uvh_event_occurred0_u {
176 unsigned long v;
177 struct uvh_event_occurred0_s {
178 unsigned long lb_hcerr : 1; /* RW, W1C */
179 unsigned long gr0_hcerr : 1; /* RW, W1C */
180 unsigned long gr1_hcerr : 1; /* RW, W1C */
181 unsigned long lh_hcerr : 1; /* RW, W1C */
182 unsigned long rh_hcerr : 1; /* RW, W1C */
183 unsigned long xn_hcerr : 1; /* RW, W1C */
184 unsigned long si_hcerr : 1; /* RW, W1C */
185 unsigned long lb_aoerr0 : 1; /* RW, W1C */
186 unsigned long gr0_aoerr0 : 1; /* RW, W1C */
187 unsigned long gr1_aoerr0 : 1; /* RW, W1C */
188 unsigned long lh_aoerr0 : 1; /* RW, W1C */
189 unsigned long rh_aoerr0 : 1; /* RW, W1C */
190 unsigned long xn_aoerr0 : 1; /* RW, W1C */
191 unsigned long si_aoerr0 : 1; /* RW, W1C */
192 unsigned long lb_aoerr1 : 1; /* RW, W1C */
193 unsigned long gr0_aoerr1 : 1; /* RW, W1C */
194 unsigned long gr1_aoerr1 : 1; /* RW, W1C */
195 unsigned long lh_aoerr1 : 1; /* RW, W1C */
196 unsigned long rh_aoerr1 : 1; /* RW, W1C */
197 unsigned long xn_aoerr1 : 1; /* RW, W1C */
198 unsigned long si_aoerr1 : 1; /* RW, W1C */
199 unsigned long rh_vpi_int : 1; /* RW, W1C */
200 unsigned long system_shutdown_int : 1; /* RW, W1C */
201 unsigned long lb_irq_int_0 : 1; /* RW, W1C */
202 unsigned long lb_irq_int_1 : 1; /* RW, W1C */
203 unsigned long lb_irq_int_2 : 1; /* RW, W1C */
204 unsigned long lb_irq_int_3 : 1; /* RW, W1C */
205 unsigned long lb_irq_int_4 : 1; /* RW, W1C */
206 unsigned long lb_irq_int_5 : 1; /* RW, W1C */
207 unsigned long lb_irq_int_6 : 1; /* RW, W1C */
208 unsigned long lb_irq_int_7 : 1; /* RW, W1C */
209 unsigned long lb_irq_int_8 : 1; /* RW, W1C */
210 unsigned long lb_irq_int_9 : 1; /* RW, W1C */
211 unsigned long lb_irq_int_10 : 1; /* RW, W1C */
212 unsigned long lb_irq_int_11 : 1; /* RW, W1C */
213 unsigned long lb_irq_int_12 : 1; /* RW, W1C */
214 unsigned long lb_irq_int_13 : 1; /* RW, W1C */
215 unsigned long lb_irq_int_14 : 1; /* RW, W1C */
216 unsigned long lb_irq_int_15 : 1; /* RW, W1C */
217 unsigned long l1_nmi_int : 1; /* RW, W1C */
218 unsigned long stop_clock : 1; /* RW, W1C */
219 unsigned long asic_to_l1 : 1; /* RW, W1C */
220 unsigned long l1_to_asic : 1; /* RW, W1C */
221 unsigned long ltc_int : 1; /* RW, W1C */
222 unsigned long la_seq_trigger : 1; /* RW, W1C */
223 unsigned long ipi_int : 1; /* RW, W1C */
224 unsigned long extio_int0 : 1; /* RW, W1C */
225 unsigned long extio_int1 : 1; /* RW, W1C */
226 unsigned long extio_int2 : 1; /* RW, W1C */
227 unsigned long extio_int3 : 1; /* RW, W1C */
228 unsigned long profile_int : 1; /* RW, W1C */
229 unsigned long rtc0 : 1; /* RW, W1C */
230 unsigned long rtc1 : 1; /* RW, W1C */
231 unsigned long rtc2 : 1; /* RW, W1C */
232 unsigned long rtc3 : 1; /* RW, W1C */
233 unsigned long bau_data : 1; /* RW, W1C */
234 unsigned long power_management_req : 1; /* RW, W1C */
235 unsigned long rsvd_57_63 : 7; /* */
236 } s;
237};
238
239/* ========================================================================= */
240/* UVH_EVENT_OCCURRED0_ALIAS */
241/* ========================================================================= */
242#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
243#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
244
245/* ========================================================================= */
246/* UVH_INT_CMPB */
247/* ========================================================================= */
248#define UVH_INT_CMPB 0x22080UL
249
250#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
251#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
252
253union uvh_int_cmpb_u {
254 unsigned long v;
255 struct uvh_int_cmpb_s {
256 unsigned long real_time_cmpb : 56; /* RW */
257 unsigned long rsvd_56_63 : 8; /* */
258 } s;
259};
260
261/* ========================================================================= */
262/* UVH_INT_CMPC */
263/* ========================================================================= */
264#define UVH_INT_CMPC 0x22100UL
265
266#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
267#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
268
269union uvh_int_cmpc_u {
270 unsigned long v;
271 struct uvh_int_cmpc_s {
272 unsigned long real_time_cmpc : 56; /* RW */
273 unsigned long rsvd_56_63 : 8; /* */
274 } s;
275};
17 276
18 #define UV_MMR_ENABLE (1UL << 63) 277/* ========================================================================= */
278/* UVH_INT_CMPD */
279/* ========================================================================= */
280#define UVH_INT_CMPD 0x22180UL
281
282#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
283#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
284
285union uvh_int_cmpd_u {
286 unsigned long v;
287 struct uvh_int_cmpd_s {
288 unsigned long real_time_cmpd : 56; /* RW */
289 unsigned long rsvd_56_63 : 8; /* */
290 } s;
291};
19 292
20/* ========================================================================= */ 293/* ========================================================================= */
21/* UVH_NODE_ID */ 294/* UVH_NODE_ID */
@@ -111,8 +384,8 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
111 384
112#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 385#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
113#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 386#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
114#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 387#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
115#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL 388#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
116#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 389#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
117#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 390#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
118#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 391#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -123,8 +396,9 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
123 struct uvh_rh_gam_gru_overlay_config_mmr_s { 396 struct uvh_rh_gam_gru_overlay_config_mmr_s {
124 unsigned long rsvd_0_27: 28; /* */ 397 unsigned long rsvd_0_27: 28; /* */
125 unsigned long base : 18; /* RW */ 398 unsigned long base : 18; /* RW */
399 unsigned long rsvd_46_47: 2; /* */
126 unsigned long gr4 : 1; /* RW */ 400 unsigned long gr4 : 1; /* RW */
127 unsigned long rsvd_47_51: 5; /* */ 401 unsigned long rsvd_49_51: 3; /* */
128 unsigned long n_gru : 4; /* RW */ 402 unsigned long n_gru : 4; /* RW */
129 unsigned long rsvd_56_62: 7; /* */ 403 unsigned long rsvd_56_62: 7; /* */
130 unsigned long enable : 1; /* RW */ 404 unsigned long enable : 1; /* RW */
@@ -157,7 +431,7 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
157/* ========================================================================= */ 431/* ========================================================================= */
158/* UVH_RTC */ 432/* UVH_RTC */
159/* ========================================================================= */ 433/* ========================================================================= */
160#define UVH_RTC 0x28000UL 434#define UVH_RTC 0x340000UL
161 435
162#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 436#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
163#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL 437#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -171,6 +445,139 @@ union uvh_rtc_u {
171}; 445};
172 446
173/* ========================================================================= */ 447/* ========================================================================= */
448/* UVH_RTC1_INT_CONFIG */
449/* ========================================================================= */
450#define UVH_RTC1_INT_CONFIG 0x615c0UL
451
452#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
453#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
454#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
455#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
456#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
457#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
458#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
459#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
460#define UVH_RTC1_INT_CONFIG_P_SHFT 13
461#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
462#define UVH_RTC1_INT_CONFIG_T_SHFT 15
463#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
464#define UVH_RTC1_INT_CONFIG_M_SHFT 16
465#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
466#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
467#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
468
469union uvh_rtc1_int_config_u {
470 unsigned long v;
471 struct uvh_rtc1_int_config_s {
472 unsigned long vector_ : 8; /* RW */
473 unsigned long dm : 3; /* RW */
474 unsigned long destmode : 1; /* RW */
475 unsigned long status : 1; /* RO */
476 unsigned long p : 1; /* RO */
477 unsigned long rsvd_14 : 1; /* */
478 unsigned long t : 1; /* RO */
479 unsigned long m : 1; /* RW */
480 unsigned long rsvd_17_31: 15; /* */
481 unsigned long apic_id : 32; /* RW */
482 } s;
483};
484
485/* ========================================================================= */
486/* UVH_RTC2_INT_CONFIG */
487/* ========================================================================= */
488#define UVH_RTC2_INT_CONFIG 0x61600UL
489
490#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
491#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
492#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
493#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
494#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
495#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
496#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
497#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
498#define UVH_RTC2_INT_CONFIG_P_SHFT 13
499#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
500#define UVH_RTC2_INT_CONFIG_T_SHFT 15
501#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
502#define UVH_RTC2_INT_CONFIG_M_SHFT 16
503#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
504#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
505#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
506
507union uvh_rtc2_int_config_u {
508 unsigned long v;
509 struct uvh_rtc2_int_config_s {
510 unsigned long vector_ : 8; /* RW */
511 unsigned long dm : 3; /* RW */
512 unsigned long destmode : 1; /* RW */
513 unsigned long status : 1; /* RO */
514 unsigned long p : 1; /* RO */
515 unsigned long rsvd_14 : 1; /* */
516 unsigned long t : 1; /* RO */
517 unsigned long m : 1; /* RW */
518 unsigned long rsvd_17_31: 15; /* */
519 unsigned long apic_id : 32; /* RW */
520 } s;
521};
522
523/* ========================================================================= */
524/* UVH_RTC3_INT_CONFIG */
525/* ========================================================================= */
526#define UVH_RTC3_INT_CONFIG 0x61640UL
527
528#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
529#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
530#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
531#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
532#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
533#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
534#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
535#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
536#define UVH_RTC3_INT_CONFIG_P_SHFT 13
537#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
538#define UVH_RTC3_INT_CONFIG_T_SHFT 15
539#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
540#define UVH_RTC3_INT_CONFIG_M_SHFT 16
541#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
542#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
543#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
544
545union uvh_rtc3_int_config_u {
546 unsigned long v;
547 struct uvh_rtc3_int_config_s {
548 unsigned long vector_ : 8; /* RW */
549 unsigned long dm : 3; /* RW */
550 unsigned long destmode : 1; /* RW */
551 unsigned long status : 1; /* RO */
552 unsigned long p : 1; /* RO */
553 unsigned long rsvd_14 : 1; /* */
554 unsigned long t : 1; /* RO */
555 unsigned long m : 1; /* RW */
556 unsigned long rsvd_17_31: 15; /* */
557 unsigned long apic_id : 32; /* RW */
558 } s;
559};
560
561/* ========================================================================= */
562/* UVH_RTC_INC_RATIO */
563/* ========================================================================= */
564#define UVH_RTC_INC_RATIO 0x350000UL
565
566#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
567#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
568#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
569#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
570
571union uvh_rtc_inc_ratio_u {
572 unsigned long v;
573 struct uvh_rtc_inc_ratio_s {
574 unsigned long fraction : 20; /* RW */
575 unsigned long ratio : 3; /* RW */
576 unsigned long rsvd_23_63: 41; /* */
577 } s;
578};
579
580/* ========================================================================= */
174/* UVH_SI_ADDR_MAP_CONFIG */ 581/* UVH_SI_ADDR_MAP_CONFIG */
175/* ========================================================================= */ 582/* ========================================================================= */
176#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL 583#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
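
Each UV register above follows the same pattern: a UL byte offset, per-field _SHFT/_MASK pairs, and a bitfield union overlaying the raw 64-bit value, so a field can be decoded either way. A sketch, assuming the uv_read_local_mmr() accessor these headers pair with in the UV support code:

	union uvh_rtc1_int_config_u cfg;

	cfg.v = uv_read_local_mmr(UVH_RTC1_INT_CONFIG);

	/* two equivalent ways to extract the target APIC id */
	unsigned long by_field = cfg.s.apic_id;
	unsigned long by_mask  = (cfg.v & UVH_RTC1_INT_CONFIG_APIC_ID_MASK)
					>> UVH_RTC1_INT_CONFIG_APIC_ID_SHFT;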
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index b96460a7190d..133c998161ca 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -12,8 +12,6 @@
12 12
13#define ARCH_APICTIMER_STOPS_ON_C3 1 13#define ARCH_APICTIMER_STOPS_ON_C3 1
14 14
15#define Dprintk printk
16
17/* 15/*
18 * Debugging macros 16 * Debugging macros
19 */ 17 */
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d001b9dce4..dbab36d64d48 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -12,11 +12,11 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
12{ 12{
13 CMOS_WRITE(0xa, 0xf); 13 CMOS_WRITE(0xa, 0xf);
14 local_flush_tlb(); 14 local_flush_tlb();
15 Dprintk("1.\n"); 15 pr_debug("1.\n");
16 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; 16 *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
17 Dprintk("2.\n"); 17 pr_debug("2.\n");
18 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; 18 *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
19 Dprintk("3.\n"); 19 pr_debug("3.\n");
20} 20}
21 21
22static inline void smpboot_restore_warm_reset_vector(void) 22static inline void smpboot_restore_warm_reset_vector(void)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index e7e91dbfde0f..2270ca5ec631 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -4,9 +4,6 @@
4 * Copyright (C) 2001 Russell King 4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * 6 *
7 *
8 * $Id: cpufreq.h,v 1.36 2003/01/20 17:31:48 db Exp $
9 *
10 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
diff --git a/mm/slub.c b/mm/slub.c
index 35ab38a94b46..6d4a49c1ff2f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -492,7 +492,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
492 if (p > addr + 16) 492 if (p > addr + 16)
493 print_section("Bytes b4", p - 16, 16); 493 print_section("Bytes b4", p - 16, 16);
494 494
495 print_section("Object", p, min(s->objsize, 128)); 495 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
496 496
497 if (s->flags & SLAB_RED_ZONE) 497 if (s->flags & SLAB_RED_ZONE)
498 print_section("Redzone", p + s->objsize, 498 print_section("Redzone", p + s->objsize,
@@ -1495,15 +1495,7 @@ static void flush_cpu_slab(void *d)
1495 1495
1496static void flush_all(struct kmem_cache *s) 1496static void flush_all(struct kmem_cache *s)
1497{ 1497{
1498#ifdef CONFIG_SMP
1499 on_each_cpu(flush_cpu_slab, s, 1); 1498 on_each_cpu(flush_cpu_slab, s, 1);
1500#else
1501 unsigned long flags;
1502
1503 local_irq_save(flags);
1504 flush_cpu_slab(s);
1505 local_irq_restore(flags);
1506#endif
1507} 1499}
1508 1500
1509/* 1501/*