author     Tony Luck <tony.luck@intel.com>   2008-07-17 13:53:37 -0400
committer  Tony Luck <tony.luck@intel.com>   2008-07-17 13:53:37 -0400
commit     fca515fbfa5ecd9f7b54db311317e2c877d7831a (patch)
tree       66b44028b3ab5be068be78650932812520d78865
parent     2b04be7e8ab5756ea36e137dd03c8773d184e67e (diff)
parent     4d58bbcc89e267d52b4df572acbf209a60a8a497 (diff)

Pull pvops into release branch
-rw-r--r--  Documentation/ia64/paravirt_ops.txt  | 137
-rw-r--r--  arch/ia64/Makefile                   |   6
-rw-r--r--  arch/ia64/kernel/Makefile            |  44
-rw-r--r--  arch/ia64/kernel/entry.S             | 115
-rw-r--r--  arch/ia64/kernel/head.S              |  41
-rw-r--r--  arch/ia64/kernel/iosapic.c           |  45
-rw-r--r--  arch/ia64/kernel/irq_ia64.c          |  19
-rw-r--r--  arch/ia64/kernel/ivt.S               | 462
-rw-r--r--  arch/ia64/kernel/minstate.h          |  13
-rw-r--r--  arch/ia64/kernel/nr-irqs.c           |  24
-rw-r--r--  arch/ia64/kernel/paravirt.c          | 369
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h     |  29
-rw-r--r--  arch/ia64/kernel/paravirtentry.S     |  60
-rw-r--r--  arch/ia64/kernel/setup.c             |  10
-rw-r--r--  arch/ia64/kernel/smpboot.c           |   2
-rw-r--r--  arch/ia64/kernel/time.c              |  23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       |   1
-rw-r--r--  include/asm-ia64/Kbuild              |   2
-rw-r--r--  include/asm-ia64/gcc_intrin.h        |  24
-rw-r--r--  include/asm-ia64/hw_irq.h            |  23
-rw-r--r--  include/asm-ia64/intel_intrin.h      |  41
-rw-r--r--  include/asm-ia64/intrinsics.h        |  55
-rw-r--r--  include/asm-ia64/iosapic.h           |  18
-rw-r--r--  include/asm-ia64/irq.h               |   9
-rw-r--r--  include/asm-ia64/mmu_context.h       |   6
-rw-r--r--  include/asm-ia64/native/inst.h       | 175
-rw-r--r--  include/asm-ia64/native/irq.h        |  35
-rw-r--r--  include/asm-ia64/paravirt.h          | 255
-rw-r--r--  include/asm-ia64/paravirt_privop.h   | 114
-rw-r--r--  include/asm-ia64/smp.h               |   2
-rw-r--r--  include/asm-ia64/system.h            |  11
31 files changed, 1813 insertions(+), 357 deletions(-)
diff --git a/Documentation/ia64/paravirt_ops.txt b/Documentation/ia64/paravirt_ops.txt
new file mode 100644
index 000000000000..39ded02ec33f
--- /dev/null
+++ b/Documentation/ia64/paravirt_ops.txt
@@ -0,0 +1,137 @@
Paravirt_ops on IA64
====================
                       21 May 2008, Isaku Yamahata <yamahata@valinux.co.jp>


Introduction
------------
The aim of this document is to aid maintainability and to encourage
people to use paravirt_ops/IA64.

paravirt_ops (pv_ops for short) is the framework for virtualization
support in the Linux kernel on x86. Several approaches to virtualization
support were proposed, and paravirt_ops emerged as the winner.
Meanwhile, there are now several IA64 virtualization technologies, such
as kvm/IA64, xen/IA64 and many other academic IA64 hypervisors, so it
makes sense to add a generic virtualization infrastructure to
Linux/IA64.


What is paravirt_ops?
---------------------
paravirt_ops was developed on x86 as virtualization support via an API,
not an ABI. It allows each hypervisor to override, at the API level, the
operations that matter to hypervisors, and it allows a single kernel
binary to run on all supported execution environments, including the
native machine.
Essentially, paravirt_ops is a set of function pointers representing
operations that correspond to low-level sensitive instructions and to
high-level functionality in various areas. One significant difference
from the usual function pointer table is that it allows optimization by
binary patching, because some of these operations are very performance
sensitive and the indirect-call overhead is not negligible. With binary
patching, an indirect C function call can be transformed into a direct
C function call, or into in-place execution, to eliminate the overhead.
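
To illustrate the mechanism, here is a minimal sketch in plain C. The
names pv_demo_ops, native_get_psr and demo_get_psr are invented for this
example; they are not kernel symbols.

    /* One function pointer per sensitive operation. */
    struct pv_demo_ops {
            unsigned long (*get_psr)(void);
    };

    /* Native implementation; a hypervisor substitutes its own at boot. */
    static unsigned long native_get_psr(void)
    {
            return 0;   /* real code would read the PSR via an intrinsic */
    }

    static struct pv_demo_ops pv_demo_ops = { .get_psr = native_get_psr };

    static inline unsigned long demo_get_psr(void)
    {
            /*
             * An indirect call today; with binary patching the call site
             * can later be rewritten into a direct call, or the callee's
             * instructions can be patched in place, removing the overhead.
             */
            return pv_demo_ops.get_psr();
    }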
Thus, the operations of paravirt_ops fall into three categories:
- simple indirect calls
  These operations correspond to high-level functionality, so the
  overhead of an indirect call isn't very important.

- indirect calls which allow optimization by binary patching
  These operations usually correspond to low-level instructions. They
  are called frequently and are performance critical, so the overhead
  matters a great deal.

- a set of macros for hand-written assembly code
  Hand-written assembly code (.S files) also needs paravirtualization,
  because it includes sensitive instructions, and some of its code paths
  are very performance critical.

The relation to the IA64 machine vector
---------------------------------------
Linux/IA64 has the IA64 machine vector functionality, which allows the
kernel to switch implementations (e.g. initialization, ipi, dma api...)
depending on the platform it is executing on.
Some implementations can be replaced very easily by defining a new
machine vector, so another approach to virtualization support would be
to enhance the machine vector functionality.
The paravirt_ops approach was taken instead because
- virtualization support needs wider coverage than the machine vector
  provides, e.g. low-level instruction paravirtualization, which must be
  initialized very early, before platform detection.

- virtualization support needs more functionality, such as binary
  patching. The calling overhead is probably small compared to the
  emulation overhead of virtualization, but in the native case the
  overhead should be eliminated completely.
  A single kernel binary should run in every environment, including
  native, and the overhead of paravirt_ops in the native environment
  should be as small as possible.

- for full virtualization technologies, e.g. KVM/IA64 or a Xen/IA64 HVM
  domain, the result would be
  (the emulated platform machine vector, probably dig) + (pv_ops),
  which means the virtualization support layer should sit below the
  machine vector layer.

Possibly it would be better to move some function pointers from
paravirt_ops to the machine vector. In fact, the Xen domU case uses both
pv_ops and the machine vector.


IA64 paravirt_ops
-----------------
This section discusses the concrete paravirt_ops. Because of the
architectural differences between ia64 and x86, the resulting set of
functions is very different from the x86 pv_ops.

- C function pointer tables
These are not very performance critical, so simple indirect C function
calls are acceptable. The following structures are defined at this
moment; for details see linux/include/asm-ia64/paravirt.h. A sketch of
their shape follows the list.
  - struct pv_info
    This structure describes the execution environment.
  - struct pv_init_ops
    This structure describes the various initialization hooks.
  - struct pv_iosapic_ops
    This structure describes hooks into the iosapic operations.
  - struct pv_irq_ops
    This structure describes hooks into irq-related operations.
  - struct pv_time_ops
    This structure describes hooks into steal-time accounting.

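
As a rough illustration, the tables look like the following. The shapes
are abbreviated and the pv_init_ops fields shown are assumed examples;
consult paravirt.h for the authoritative definitions.

    struct pv_info {
            unsigned int kernel_rpl;        /* privilege level of the kernel */
            int paravirt_enabled;
            const char *name;               /* e.g. "bare hardware" */
    };

    struct pv_init_ops {
            void (*banner)(void);           /* identify the environment */
            void (*arch_setup_early)(void); /* assumed early-setup hook */
    };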
- a set of indirect calls which need optimization
Currently this class of functions corresponds to a subset of the IA64
intrinsics. At this moment the optimization by binary patching isn't
implemented yet.
struct pv_cpu_ops is defined; for details see
linux/include/asm-ia64/paravirt_privop.h.
Mostly its members correspond 1:1 to ia64 intrinsics.
Caveat: they are currently defined as C indirect function pointers, but
in order to support binary patch optimization they will be changed to
use GCC extended inline assembly code.

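
A condensed sketch of that table's shape, with a representative subset
of members as recalled here; check paravirt_privop.h for the real list:

    /* Each member mirrors one ia64 intrinsic (illustrative subset). */
    struct pv_cpu_ops {
            unsigned long (*thash)(unsigned long addr);  /* ia64_thash  */
            unsigned long (*get_psr_i)(void);            /* read psr.i  */
            void (*ssm_i)(void);                         /* set psr.i   */
            void (*rsm_i)(void);                         /* clear psr.i */
    };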
- a set of macros for hand-written assembly code (.S files)
For maintainability, the approach taken for .S files is a single source,
compiled multiple times with different macro definitions.
Each pv_ops instance must define those macros in order to compile.
The important point here is that sensitive, but non-privileged,
instructions must be paravirtualized, and that some privileged
instructions also need paravirtualization for reasonable performance.
Developers who modify .S files must be aware of this. At this moment a
simple checker is implemented to detect paravirtualization breakage, but
it doesn't cover all the cases.

Sometimes this set of macros is called pv_cpu_asm_op, but there is no
corresponding structure in the source code.
The macros mostly correspond 1:1 to a subset of the privileged
instructions; see linux/include/asm-ia64/native/inst.h.
Some functions written in assembly also need to be overridden, so each
pv_ops instance has to define a number of macros as well. Again, see
linux/include/asm-ia64/native/inst.h.

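
To make this concrete: the native flavor of such a macro expands to the
raw privileged instruction, while a hypervisor flavor expands the same
name into a hypercall or a shadow-register access. A simplified sketch
follows; the exact native bodies may differ slightly from inst.h.

    /* Native flavor: the macro is the privileged instruction itself. */
    #define MOV_FROM_IFA(reg)	\
    	mov reg = cr.ifa

    #define RFI	\
    	rfi

    /*
     * A hypervisor flavor would define MOV_FROM_IFA(reg) to read the
     * value from a memory-mapped shadow register area instead.
     */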

These structures must be initialized very early, before start_kernel.
They are probably best initialized in head.S, using multiple entry
points or some other trick.
For the native-case implementation see linux/arch/ia64/kernel/paravirt.c.
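
For flavor, the native initialization in paravirt.c largely amounts to
filling the tables with the native implementations. Roughly, as a sketch
from memory rather than the verbatim file:

    /* struct pv_info comes from <asm/paravirt.h>. */
    struct pv_info pv_info = {
            .kernel_rpl = 0,
            .paravirt_enabled = 0,
            .name = "bare hardware",
    };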
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e67ee3f27698..905d25b13d5a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -100,3 +100,9 @@ define archhelp
 	echo '  boot		- Build vmlinux and bootloader for Ski simulator'
 	echo '* unwcheck	- Check vmlinux for invalid unwind info'
 endef
+
+archprepare: make_nr_irqs_h FORCE
+PHONY += make_nr_irqs_h FORCE
+
+make_nr_irqs_h: FORCE
+	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 13fd10e8699e..87fea11aecb7 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -36,6 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
+
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -70,3 +72,45 @@ $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
+
+# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
+define sed-y
+	"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+quiet_cmd_nr_irqs = GEN     $@
+define cmd_nr_irqs
+	(set -e; \
+	 echo "#ifndef __ASM_NR_IRQS_H__"; \
+	 echo "#define __ASM_NR_IRQS_H__"; \
+	 echo "/*"; \
+	 echo " * DO NOT MODIFY."; \
+	 echo " *"; \
+	 echo " * This file was generated by Kbuild"; \
+	 echo " *"; \
+	 echo " */"; \
+	 echo ""; \
+	 sed -ne $(sed-y) $<; \
+	 echo ""; \
+	 echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
+				  $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+	$(Q)mkdir -p $(dir $@)
+	$(call if_changed_dep,cc_s_c)
+
+include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+	$(Q)mkdir -p $(dir $@)
+	$(call cmd,nr_irqs)
+
+clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
+
+#
+# native ivt.S and entry.S
+#
+ASM_PARAVIRT_OBJS = ivt.o entry.o
+define paravirtualized_native
+AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+endef
+$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
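
For reference, the include/asm-ia64/nr-irqs.h produced by the cmd_nr_irqs
recipe above ends up looking roughly like this; the value is configuration
dependent, and 1024 is purely illustrative:

    #ifndef __ASM_NR_IRQS_H__
    #define __ASM_NR_IRQS_H__
    /*
     * DO NOT MODIFY.
     *
     * This file was generated by Kbuild
     *
     */

    #define NR_IRQS 1024 /* max over all configured pv_ops instances */

    #endif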
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ca2bb95726de..56ab156c48ae 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -23,6 +23,11 @@
  * 11/07/2000
  */
 /*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ */
+/*
  * Global (preserved) predicate usage on syscall entry/exit path:
  *
  * pKStk: See entry.h.
@@ -45,6 +50,7 @@
 
 #include "minstate.h"
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 	/*
 	 * execve() is special because in case of success, we need to
 	 * setup a null register window frame.
@@ -173,6 +179,7 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -180,7 +187,7 @@ END(sys_clone)
  *	called.  The code starting at .map relies on this.  The rest of the code
  *	doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 	;;
 .done:
 	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=in0	// update "current" application register
+	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
 	mov r8=r13			// return pointer to previously running task
 	mov r13=in0			// set "current" pointer
 	;;
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
 	br.ret.sptk.many rp		// boogie on out in new context
 
 .map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
+	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
 	or r23=r25,r20			// construct PA | page properties
 	mov r25=IA64_GRANULE_SHIFT<<2
 	;;
-	mov cr.itir=r25
-	mov cr.ifa=in0			// VA of next task...
+	MOV_TO_ITIR(p0, r25, r8)
+	MOV_TO_IFA(in0, r8)		// VA of next task...
 	;;
 	mov r25=IA64_TR_CURRENT_STACK
-	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
+	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
-	ssm psr.ic			// reenable the psr.ic bit
-	;;
-	srlz.d
+	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
  *	- b7 holds address to return to
  *	- must not touch r8-r11
  */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
 	.prologue
 	.altrp b7
 
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
 (pUStk)	rsm psr.i				// disable interrupts
-	br.cond.sptk .work_pending_syscall_end
+	br.cond.sptk ia64_work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+	;;
+	br.cond.sptk.few ia64_leave_syscall
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  * need to switch to bank 0 and doesn't restore the scratch registers.
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
 	 * extra work.  We always check for extra work when returning to user-level.
 	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 	 * is 0.  After extra work processing has been completed, execution
-	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r2, r18)			// disable interrupts
 	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r2, r18)
 	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
-.work_processed_syscall:
+.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	adds r2=PT(LOADRS)+16,r12
 (pUStk)	mov.m r22=ar.itc			// fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
 (pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
 	;;
 	invala			// M0|1 invalidate ALAT
-	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
+	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
 	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
 	;;
 #endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
 	nop 0
 	;;
 	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)
 
 	srlz.d			// M0   ensure interruption collection is off (for cover)
 	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
-	cover			// B    add current frame into dirty partition & set cr.ifs
+	COVER			// B    add current frame into dirty partition & set cr.ifs
 	;;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mov r19=ar.bsp		// M2   get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
 	mov.m ar.ssd=r0		// M2   clear ar.ssd
 	mov f11=f0		// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 #ifdef CONFIG_IA32_SUPPORT
 GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
 #endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r17, r31)	// disable interrupts
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r17, r31)
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.csd=r30
 	mov ar.ssd=r31
 	;;
-	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
+	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
 	invala			// invalidate ALAT
 	;;
 	ld8.fill r22=[r2],24
@@ -942,7 +969,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.ccv=r15
 	;;
 	ldf.fill f11=[r2]
-	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
+	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
 	;;
 (pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
 	adds r16=PT(CR_IPSR)+16,r12
@@ -950,12 +977,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.pred.rel.mutex pUStk,pKStk
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 (pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
 	nop.i 0
 	;;
 #else
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * NOTE: alloc, loadrs, and cover can't be predicated.
 	 */
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover				// add current frame into dirty partition and set cr.ifs
+	COVER				// add current frame into dirty partition and set cr.ifs
 	;;
 	mov r19=ar.bsp			// get new backing store pointer
 rbs_switch:
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
 (pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
 (pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
 	;;
-	mov cr.ipsr=r29		// M2
+	MOV_TO_IPSR(p0, r29, r25)	// M2
 	mov ar.pfs=r26		// I0
 (pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
 
-(p9)	mov cr.ifs=r30		// M2
+	MOV_TO_IFS(p9, r30, r25)// M2
 	mov b0=r21		// I0
 (pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
 
 	mov ar.fpsr=r20		// M2
-	mov cr.iip=r28		// M2
+	MOV_TO_IIP(r28, r25)	// M2
 	nop 0
 	;;
 (pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
@@ -1148,7 +1175,7 @@ skip_rbs_switch:
 
 	mov ar.rsc=r27		// M2
 	mov pr=r31,-1		// I0
-	rfi			// B
+	RFI			// B
 
 	/*
 	 * On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
 	;;
 (pKStk)	st4 [r20]=r21
 #endif
-	ssm psr.i		// enable interrupts
+	SSM_PSR_I(p0, p6, r2)	// enable interrupts
 	br.call.spnt.many rp=schedule
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r2, r20)	// disable interrupts
 	;;
 #ifdef CONFIG_PREEMPT
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
 (pKStk)	st4 [r20]=r0		// preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
-.work_pending_syscall_end:
+.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
 	;;
 	ld8 r8=[r2]
 	ld8 r10=[r3]
-	br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+	br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 ENTRY(handle_syscall_error)
 	/*
 	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
 	 * We declare 8 input registers so the system call args get preserved,
 	 * in case we need to restart a system call.
 	 */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
 	mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_leave_kernel
+	mov.sptk b7=r8,ia64_native_leave_kernel
 	;;
 	mov ar.unat=r9
 	br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
 	data8 sys_timerfd_gettime
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index ddeab4e36fd5..db540e58c783 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -26,11 +26,14 @@
 #include <asm/mmu_context.h>
 #include <asm/asm-offsets.h>
 #include <asm/pal.h>
+#include <asm/paravirt.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/mca_asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define SAL_PSR_BITS_TO_SET				\
@@ -367,6 +370,44 @@ start_ap:
 	;;
 (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
 
+#ifdef CONFIG_PARAVIRT
+
+	movl r14=hypervisor_setup_hooks
+	movl r15=hypervisor_type
+	mov r16=num_hypervisor_hooks
+	;;
+	ld8 r2=[r15]
+	;;
+	cmp.ltu p7,p0=r2,r16	// array size check
+	shladd r8=r2,3,r14
+	;;
+(p7)	ld8 r9=[r8]
+	;;
+(p7)	mov b1=r9
+(p7)	cmp.ne.unc p7,p0=r9,r0	// no actual branch to NULL
+	;;
+(p7)	br.call.sptk.many rp=b1
+
+	__INITDATA
+
+default_setup_hook = 0		// Currently nothing needs to be done.
+
+	.weak xen_setup_hook
+
+	.global hypervisor_type
+hypervisor_type:
+	data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
+
+	// must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
+
+hypervisor_setup_hooks:
+	data8 default_setup_hook
+	data8 xen_setup_hook
+num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
+	.previous
+
+#endif
+
 #ifdef CONFIG_SMP
 (isAP)	br.call.sptk.many rp=start_secondary
 .ret0:
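
In C terms, the hook dispatch added above behaves like the following
paraphrase; this is illustrative only, the real code being the assembly
in the hunk:

    typedef void (*setup_hook_t)(void);

    /* Table order must match the PARAVIRT_HYPERVISOR_TYPE_xxx values. */
    static setup_hook_t hypervisor_setup_hooks[] = {
            0,   /* default_setup_hook: currently nothing to do     */
            0,   /* xen_setup_hook: weak symbol, NULL unless linked */
    };

    static unsigned long hypervisor_type;   /* set by the boot path */

    static void run_hypervisor_setup_hook(void)
    {
            unsigned long n = sizeof(hypervisor_setup_hooks) /
                              sizeof(hypervisor_setup_hooks[0]);

            /* array-bounds check, then skip a NULL hook */
            if (hypervisor_type < n && hypervisor_setup_hooks[hypervisor_type])
                    hypervisor_setup_hooks[hypervisor_type]();
    }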
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 39752cdef6ff..3bc2fa64f87f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -585,6 +585,15 @@ static inline int irq_is_shared (int irq)
 	return (iosapic_intr_info[irq].count > 1);
 }
 
+struct irq_chip*
+ia64_native_iosapic_get_irq_chip(unsigned long trigger)
+{
+	if (trigger == IOSAPIC_EDGE)
+		return &irq_type_iosapic_edge;
+	else
+		return &irq_type_iosapic_level;
+}
+
 static int
 register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
@@ -635,13 +644,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	iosapic_intr_info[irq].dmode	= delivery;
 	iosapic_intr_info[irq].trigger	= trigger;
 
-	if (trigger == IOSAPIC_EDGE)
-		irq_type = &irq_type_iosapic_edge;
-	else
-		irq_type = &irq_type_iosapic_level;
+	irq_type = iosapic_get_irq_chip(trigger);
 
 	idesc = irq_desc + irq;
-	if (idesc->chip != irq_type) {
+	if (irq_type != NULL && idesc->chip != irq_type) {
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
@@ -974,6 +980,22 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 }
 
 void __init
+ia64_native_iosapic_pcat_compat_init(void)
+{
+	if (pcat_compat) {
+		/*
+		 * Disable the compatibility mode interrupts (8259 style),
+		 * needs IN/OUT support enabled.
+		 */
+		printk(KERN_INFO
+		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
+		       __func__);
+		outb(0xff, 0xA1);
+		outb(0xff, 0x21);
+	}
+}
+
+void __init
 iosapic_system_init (int system_pcat_compat)
 {
 	int irq;
@@ -987,17 +1009,8 @@ iosapic_system_init (int system_pcat_compat)
 	}
 
 	pcat_compat = system_pcat_compat;
-	if (pcat_compat) {
-		/*
-		 * Disable the compatibility mode interrupts (8259 style),
-		 * needs IN/OUT support enabled.
-		 */
-		printk(KERN_INFO
-		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __func__);
-		outb(0xff, 0xA1);
-		outb(0xff, 0x21);
-	}
+	if (pcat_compat)
+		iosapic_pcat_compat_init();
 }
 
 static inline int
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5538471e8d68..28d3d483db92 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -196,7 +196,7 @@ static void clear_irq_vector(int irq)
 }
 
 int
-assign_irq_vector (int irq)
+ia64_native_assign_irq_vector (int irq)
 {
 	unsigned long flags;
 	int vector, cpu;
@@ -222,7 +222,7 @@ assign_irq_vector (int irq)
 }
 
 void
-free_irq_vector (int vector)
+ia64_native_free_irq_vector (int vector)
 {
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
@@ -600,7 +600,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
-extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -623,7 +622,7 @@ static struct irqaction tlb_irqaction = {
 #endif
 
 void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
 	irq_desc_t *desc;
 	unsigned int irq;
@@ -638,13 +637,21 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 }
 
 void __init
-init_IRQ (void)
+ia64_native_register_ipi(void)
 {
-	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#endif
+}
+
+void __init
+init_IRQ (void)
+{
+	ia64_register_ipi();
+	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
 	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
 		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 80b44ea052d7..c39627df3cde 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -12,6 +12,14 @@
  *
  * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>	TLB handling for SMP
  * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com>	DTLB/ITLB handler now uses virtual PT.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Dan Magenheimer <dan.magenheimer@hp.com>
+ *      Xen paravirtualization
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ *      Yaozu (Eddie) Dong <eddie.dong@intel.com>
  */
 /*
  * This file defines the interruption vector table used by the CPU.
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss)
  *	- the faulting virtual address uses unimplemented address bits
  *	- the faulting virtual address has no valid page table mapping
  */
-	mov r16=cr.ifa				// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
 	movl r18=PAGE_SHIFT
-	mov r25=cr.itir
+	MOV_FROM_ITIR(r25)
 #endif
 	;;
-	rsm psr.dt				// use physical addressing for data
+	RSM_PSR_DT				// use physical addressing for data
 	mov r31=pr				// save the predicate registers
 	mov r19=IA64_KR(PT_BASE)		// get page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss)
 	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
 	;;
 (p7)	ld8 r18=[r21]				// read *pte
-	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
+	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
 	;;
 (p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
-	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
+	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
 	;;					// avoid RAW on p7
 (p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
 	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
 	;;
-(p10)	itc.i r18				// insert the instruction TLB entry
-(p11)	itc.d r18				// insert the data TLB entry
+	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
+						// insert the data TLB entry
 (p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
-	mov cr.ifa=r22
+	MOV_TO_IFA(r22, r24)
 
 #ifdef CONFIG_HUGETLB_PAGE
-(p8)	mov cr.itir=r25				// change to default page-size for VHPT
+	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
 #endif
 
 	/*
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss)
 	 */
 	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
 	;;
-(p7)	itc.d r24
+	ITC_D(p7, r24, r25)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss)
 #endif
 
 	mov pr=r31,-1				// restore predicate registers
-	rfi
+	RFI
 END(vhpt_miss)
 
 	.org ia64_ivt+0x400
@@ -248,11 +256,11 @@ ENTRY(itlb_miss)
 	 * mode, walk the page table, and then re-execute the PTE read and
 	 * go on normally after that.
 	 */
-	mov r16=cr.ifa				// get virtual address
+	MOV_FROM_IFA(r16)			// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 .itlb_fault:
-	mov r17=cr.iha				// get virtual address of PTE
+	MOV_FROM_IHA(r17)			// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
 1:	ld8 r18=[r17]				// read *pte
@@ -261,7 +269,7 @@ ENTRY(itlb_miss)
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
 (p6)	br.cond.spnt page_fault
 	;;
-	itc.i r18
+	ITC_I(p0, r18, r19)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -278,7 +286,7 @@ ENTRY(itlb_miss)
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
-	rfi
+	RFI
 END(itlb_miss)
 
 	.org ia64_ivt+0x0800
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss)
 	 * mode, walk the page table, and then re-execute the PTE read and
 	 * go on normally after that.
 	 */
-	mov r16=cr.ifa				// get virtual address
+	MOV_FROM_IFA(r16)			// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 dtlb_fault:
-	mov r17=cr.iha				// get virtual address of PTE
+	MOV_FROM_IHA(r17)			// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
 1:	ld8 r18=[r17]				// read *pte
@@ -305,7 +313,7 @@ dtlb_fault:
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
 (p6)	br.cond.spnt page_fault
 	;;
-	itc.d r18
+	ITC_D(p0, r18, r19)
 	;;
 #ifdef CONFIG_SMP
 	/*
@@ -322,7 +330,7 @@ dtlb_fault:
 (p7)	ptc.l r16,r20
 #endif
 	mov pr=r31,-1
-	rfi
+	RFI
 END(dtlb_miss)
 
 	.org ia64_ivt+0x0c00
@@ -330,9 +338,9 @@ END(dtlb_miss)
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(alt_itlb_miss)
 	DBG_FAULT(3)
-	mov r16=cr.ifa		// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
 	movl r17=PAGE_KERNEL
-	mov r21=cr.ipsr
+	MOV_FROM_IPSR(p0, r21)
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	mov r31=pr
 	;;
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss)
 	;;
 	cmp.gt p8,p0=6,r22			// user mode
 	;;
-(p8)	thash r17=r16
+	THASH(p8, r17, r16, r23)
 	;;
-(p8)	mov cr.iha=r17
+	MOV_TO_IHA(p8, r17, r23)
 (p8)	mov r29=b0				// save b0
 (p8)	br.cond.dptk .itlb_fault
 #endif
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss)
 	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
 (p8)	br.cond.spnt page_fault
 	;;
-	itc.i r19		// insert the TLB entry
+	ITC_I(p0, r19, r18)	// insert the TLB entry
 	mov pr=r31,-1
-	rfi
+	RFI
 END(alt_itlb_miss)
 
 	.org ia64_ivt+0x1000
@@ -368,11 +376,11 @@ END(alt_itlb_miss)
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(alt_dtlb_miss)
 	DBG_FAULT(4)
-	mov r16=cr.ifa		// get address that caused the TLB miss
+	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
 	movl r17=PAGE_KERNEL
-	mov r20=cr.isr
+	MOV_FROM_ISR(r20)
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-	mov r21=cr.ipsr
+	MOV_FROM_IPSR(p0, r21)
 	mov r31=pr
 	mov r24=PERCPU_ADDR
 	;;
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss)
 	;;
 	cmp.gt p8,p0=6,r22			// access to region 0-5
 	;;
-(p8)	thash r17=r16
+	THASH(p8, r17, r16, r25)
 	;;
-(p8)	mov cr.iha=r17
+	MOV_TO_IHA(p8, r17, r25)
 (p8)	mov r29=b0				// save b0
 (p8)	br.cond.dptk dtlb_fault
 #endif
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss)
 	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
 	;;
 (p10)	sub r19=r19,r26
-(p10)	mov cr.itir=r25
+	MOV_TO_ITIR(p10, r25, r24)
 	cmp.ne p8,p0=r0,r23
 (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
 (p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss)
 	dep r21=-1,r21,IA64_PSR_ED_BIT,1
 	;;
 	or r19=r19,r17		// insert PTE control bits into r19
-(p6)	mov cr.ipsr=r21
+	MOV_TO_IPSR(p6, r21, r24)
 	;;
-(p7)	itc.d r19		// insert the TLB entry
+	ITC_D(p7, r19, r18)	// insert the TLB entry
 	mov pr=r31,-1
-	rfi
+	RFI
 END(alt_dtlb_miss)
 
 	.org ia64_ivt+0x1400
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss)
 	 *
 	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
 	 */
-	rsm psr.dt				// switch to using physical data addressing
+	RSM_PSR_DT				// switch to using physical data addressing
 	mov r19=IA64_KR(PT_BASE)		// get the page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
-	mov r18=cr.itir
+	MOV_FROM_ITIR(r18)
 	;;
 	shr.u r17=r16,61			// get the region number into r17
 	extr.u r18=r18,2,6			// get the faulting page size
@@ -507,33 +515,6 @@ ENTRY(ikey_miss)
 	FAULT(6)
 END(ikey_miss)
 
-	//-----------------------------------------------------------------------------------
-	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-ENTRY(page_fault)
-	ssm psr.dt
-	;;
-	srlz.i
-	;;
-	SAVE_MIN_WITH_COVER
-	alloc r15=ar.pfs,0,0,3,0
-	mov out0=cr.ifa
-	mov out1=cr.isr
-	adds r3=8,r2				// set up second base pointer
-	;;
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i					// guarantee that interruption collectin is on
-	;;
-(p15)	ssm psr.i				// restore psr.i
-	movl r14=ia64_leave_kernel
-	;;
-	SAVE_REST
-	mov rp=r14
-	;;
-	adds out2=16,r12			// out2 = pointer to pt_regs
-	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
-END(page_fault)
-
 	.org ia64_ivt+0x1c00
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
@@ -556,10 +537,10 @@ ENTRY(dirty_bit)
 	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
 	 * up the physical address of the L3 PTE and then continue at label 1 below.
 	 */
-	mov r16=cr.ifa				// get the address that caused the fault
+	MOV_FROM_IFA(r16)			// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
 	mov r29=b0				// save b0 in case of nested fault
 	mov r31=pr				// save pr
 #ifdef CONFIG_SMP
@@ -576,7 +557,7 @@ ENTRY(dirty_bit)
 	;;
 (p6)	cmp.eq p6,p7=r26,r18			// Only compare if page is present
 	;;
-(p6)	itc.d r25				// install updated PTE
+	ITC_D(p6, r25, r18)			// install updated PTE
 	;;
 	/*
 	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -602,7 +583,7 @@ ENTRY(dirty_bit)
 	itc.d r18				// install updated PTE
 #endif
 	mov pr=r31,-1				// restore pr
-	rfi
+	RFI
 END(dirty_bit)
 
 	.org ia64_ivt+0x2400
@@ -611,22 +592,22 @@ END(dirty_bit)
 ENTRY(iaccess_bit)
 	DBG_FAULT(9)
 	// Like Entry 8, except for instruction access
-	mov r16=cr.ifa				// get the address that caused the fault
+	MOV_FROM_IFA(r16)			// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	mov r31=pr				// save predicates
 #ifdef CONFIG_ITANIUM
 	/*
 	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
 	 */
-	mov r17=cr.ipsr
+	MOV_FROM_IPSR(p0, r17)
 	;;
-	mov r18=cr.iip
+	MOV_FROM_IIP(r18)
 	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
 	;;
 (p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
 #endif /* CONFIG_ITANIUM */
 	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
 	mov r29=b0				// save b0 in case of nested fault)
 #ifdef CONFIG_SMP
 	mov r28=ar.ccv				// save ar.ccv
@@ -642,7 +623,7 @@ ENTRY(iaccess_bit)
 	;;
 (p6)	cmp.eq p6,p7=r26,r18			// Only if page present
 	;;
-(p6)	itc.i r25				// install updated PTE
+	ITC_I(p6, r25, r26)			// install updated PTE
 	;;
 	/*
 	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -668,7 +649,7 @@ ENTRY(iaccess_bit)
 	itc.i r18				// install updated PTE
 #endif /* !CONFIG_SMP */
 	mov pr=r31,-1
-	rfi
+	RFI
 END(iaccess_bit)
 
 	.org ia64_ivt+0x2800
@@ -677,10 +658,10 @@ END(iaccess_bit)
 ENTRY(daccess_bit)
 	DBG_FAULT(10)
 	// Like Entry 8, except for data access
-	mov r16=cr.ifa				// get the address that caused the fault
+	MOV_FROM_IFA(r16)			// get the address that caused the fault
 	movl r30=1f				// load continuation point in case of nested fault
 	;;
-	thash r17=r16				// compute virtual address of L3 PTE
+	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
 	mov r31=pr
 	mov r29=b0				// save b0 in case of nested fault)
 #ifdef CONFIG_SMP
@@ -697,7 +678,7 @@ ENTRY(daccess_bit)
697 ;; 678 ;;
698(p6) cmp.eq p6,p7=r26,r18 // Only if page is present 679(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
699 ;; 680 ;;
700(p6) itc.d r25 // install updated PTE 681 ITC_D(p6, r25, r26) // install updated PTE
701 /* 682 /*
702 * Tell the assembler's dependency-violation checker that the above "itc" instructions 683 * Tell the assembler's dependency-violation checker that the above "itc" instructions
703 * cannot possibly affect the following loads: 684 * cannot possibly affect the following loads:
@@ -721,7 +702,7 @@ ENTRY(daccess_bit)
721#endif 702#endif
722 mov b0=r29 // restore b0 703 mov b0=r29 // restore b0
723 mov pr=r31,-1 704 mov pr=r31,-1
724 rfi 705 RFI
725END(daccess_bit) 706END(daccess_bit)
726 707
727 .org ia64_ivt+0x2c00 708 .org ia64_ivt+0x2c00
@@ -745,10 +726,10 @@ ENTRY(break_fault)
745 */ 726 */
746 DBG_FAULT(11) 727 DBG_FAULT(11)
747 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) 728 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
748 mov r29=cr.ipsr // M2 (12 cyc) 729 MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
749 mov r31=pr // I0 (2 cyc) 730 mov r31=pr // I0 (2 cyc)
750 731
751 mov r17=cr.iim // M2 (2 cyc) 732 MOV_FROM_IIM(r17) // M2 (2 cyc)
752 mov.m r27=ar.rsc // M2 (12 cyc) 733 mov.m r27=ar.rsc // M2 (12 cyc)
753 mov r18=__IA64_BREAK_SYSCALL // A 734 mov r18=__IA64_BREAK_SYSCALL // A
754 735
@@ -767,7 +748,7 @@ ENTRY(break_fault)
767 nop.m 0 748 nop.m 0
768 movl r30=sys_call_table // X 749 movl r30=sys_call_table // X
769 750
770 mov r28=cr.iip // M2 (2 cyc) 751 MOV_FROM_IIP(r28) // M2 (2 cyc)
771 cmp.eq p0,p7=r18,r17 // I0 is this a system call? 752 cmp.eq p0,p7=r18,r17 // I0 is this a system call?
772(p7) br.cond.spnt non_syscall // B no -> 753(p7) br.cond.spnt non_syscall // B no ->
773 // 754 //
@@ -864,18 +845,17 @@ ENTRY(break_fault)
864#endif 845#endif
865 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 846 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
866 nop 0 847 nop 0
867 bsw.1 // B (6 cyc) regs are saved, switch to bank 1 848 BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
868 ;; 849 ;;
869 850
870 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection 851 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
852 // M0 ensure interruption collection is on
871 movl r3=ia64_ret_from_syscall // X 853 movl r3=ia64_ret_from_syscall // X
872 ;; 854 ;;
873
874 srlz.i // M0 ensure interruption collection is on
875 mov rp=r3 // I0 set the real return addr 855 mov rp=r3 // I0 set the real return addr
876(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT 856(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
877 857
878(p15) ssm psr.i // M2 restore psr.i 858 SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
879(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr) 859(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
880 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic 860 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
881 // NOT REACHED 861 // NOT REACHED
@@ -895,27 +875,8 @@ END(break_fault)
895///////////////////////////////////////////////////////////////////////////////////////// 875/////////////////////////////////////////////////////////////////////////////////////////
896// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 876// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
897ENTRY(interrupt) 877ENTRY(interrupt)
898 DBG_FAULT(12) 878 /* interrupt handler has become too big to fit this area. */
899 mov r31=pr // prepare to save predicates 879 br.sptk.many __interrupt
900 ;;
901 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
902 ssm psr.ic | PSR_DEFAULT_BITS
903 ;;
904 adds r3=8,r2 // set up second base pointer for SAVE_REST
905 srlz.i // ensure everybody knows psr.ic is back on
906 ;;
907 SAVE_REST
908 ;;
909 MCA_RECOVER_RANGE(interrupt)
910 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
911 mov out0=cr.ivr // pass cr.ivr as first arg
912 add out1=16,sp // pass pointer to pt_regs as second arg
913 ;;
914 srlz.d // make sure we see the effect of cr.ivr
915 movl r14=ia64_leave_kernel
916 ;;
917 mov rp=r14
918 br.call.sptk.many b6=ia64_handle_irq
919END(interrupt) 880END(interrupt)
920 881
921 .org ia64_ivt+0x3400 882 .org ia64_ivt+0x3400
@@ -978,6 +939,7 @@ END(interrupt)
978 * - ar.fpsr: set to kernel settings 939 * - ar.fpsr: set to kernel settings
979 * - b6: preserved (same as on entry) 940 * - b6: preserved (same as on entry)
980 */ 941 */
942#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
981GLOBAL_ENTRY(ia64_syscall_setup) 943GLOBAL_ENTRY(ia64_syscall_setup)
982#if PT(B6) != 0 944#if PT(B6) != 0
983# error This code assumes that b6 is the first field in pt_regs. 945# error This code assumes that b6 is the first field in pt_regs.
@@ -1069,6 +1031,7 @@ GLOBAL_ENTRY(ia64_syscall_setup)
1069(p10) mov r8=-EINVAL 1031(p10) mov r8=-EINVAL
1070 br.ret.sptk.many b7 1032 br.ret.sptk.many b7
1071END(ia64_syscall_setup) 1033END(ia64_syscall_setup)
1034#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
1072 1035
1073 .org ia64_ivt+0x3c00 1036 .org ia64_ivt+0x3c00
1074///////////////////////////////////////////////////////////////////////////////////////// 1037/////////////////////////////////////////////////////////////////////////////////////////
@@ -1082,7 +1045,7 @@ END(ia64_syscall_setup)
1082 DBG_FAULT(16) 1045 DBG_FAULT(16)
1083 FAULT(16) 1046 FAULT(16)
1084 1047
1085#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1048#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
1086 /* 1049 /*
1087 * There is no particular reason for this code to be here, other than 1050 * There is no particular reason for this code to be here, other than
1088 * that there happens to be space here that would go unused otherwise. 1051 * that there happens to be space here that would go unused otherwise.
@@ -1092,7 +1055,7 @@ END(ia64_syscall_setup)
1092 * account_sys_enter is called from SAVE_MIN* macros if accounting is 1055 * account_sys_enter is called from SAVE_MIN* macros if accounting is
1093 * enabled and if the macro is entered from user mode. 1056 * enabled and if the macro is entered from user mode.
1094 */ 1057 */
1095ENTRY(account_sys_enter) 1058GLOBAL_ENTRY(account_sys_enter)
1096 // mov.m r20=ar.itc is called in advance, and r13 is current 1059 // mov.m r20=ar.itc is called in advance, and r13 is current
1097 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 1060 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
1098 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 1061 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
@@ -1123,110 +1086,18 @@ END(account_sys_enter)
1123 DBG_FAULT(17) 1086 DBG_FAULT(17)
1124 FAULT(17) 1087 FAULT(17)
1125 1088
1126ENTRY(non_syscall)
1127 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1128 ;;
1129 SAVE_MIN_WITH_COVER
1130
1131 // There is no particular reason for this code to be here, other than that
1132 // there happens to be space here that would go unused otherwise. If this
1133 // fault ever gets "unreserved", simply move the following code to a more
1134 // suitable spot...
1135
1136 alloc r14=ar.pfs,0,0,2,0
1137 mov out0=cr.iim
1138 add out1=16,sp
1139 adds r3=8,r2 // set up second base pointer for SAVE_REST
1140
1141 ssm psr.ic | PSR_DEFAULT_BITS
1142 ;;
1143 srlz.i // guarantee that interruption collection is on
1144 ;;
1145(p15) ssm psr.i // restore psr.i
1146 movl r15=ia64_leave_kernel
1147 ;;
1148 SAVE_REST
1149 mov rp=r15
1150 ;;
1151 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1152END(non_syscall)
1153
1154 .org ia64_ivt+0x4800 1089 .org ia64_ivt+0x4800
1155///////////////////////////////////////////////////////////////////////////////////////// 1090/////////////////////////////////////////////////////////////////////////////////////////
1156// 0x4800 Entry 18 (size 64 bundles) Reserved 1091// 0x4800 Entry 18 (size 64 bundles) Reserved
1157 DBG_FAULT(18) 1092 DBG_FAULT(18)
1158 FAULT(18) 1093 FAULT(18)
1159 1094
1160 /*
1161 * There is no particular reason for this code to be here, other than that
1162 * there happens to be space here that would go unused otherwise. If this
1163 * fault ever gets "unreserved", simply move the following code to a more
1164 * suitable spot...
1165 */
1166
1167ENTRY(dispatch_unaligned_handler)
1168 SAVE_MIN_WITH_COVER
1169 ;;
1170 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1171 mov out0=cr.ifa
1172 adds out1=16,sp
1173
1174 ssm psr.ic | PSR_DEFAULT_BITS
1175 ;;
1176 srlz.i // guarantee that interruption collection is on
1177 ;;
1178(p15) ssm psr.i // restore psr.i
1179 adds r3=8,r2 // set up second base pointer
1180 ;;
1181 SAVE_REST
1182 movl r14=ia64_leave_kernel
1183 ;;
1184 mov rp=r14
1185 br.sptk.many ia64_prepare_handle_unaligned
1186END(dispatch_unaligned_handler)
1187
1188 .org ia64_ivt+0x4c00 1095 .org ia64_ivt+0x4c00
1189///////////////////////////////////////////////////////////////////////////////////////// 1096/////////////////////////////////////////////////////////////////////////////////////////
1190// 0x4c00 Entry 19 (size 64 bundles) Reserved 1097// 0x4c00 Entry 19 (size 64 bundles) Reserved
1191 DBG_FAULT(19) 1098 DBG_FAULT(19)
1192 FAULT(19) 1099 FAULT(19)
1193 1100
1194 /*
1195 * There is no particular reason for this code to be here, other than that
1196 * there happens to be space here that would go unused otherwise. If this
1197 * fault ever gets "unreserved", simply move the following code to a more
1198 * suitable spot...
1199 */
1200
1201ENTRY(dispatch_to_fault_handler)
1202 /*
1203 * Input:
1204 * psr.ic: off
1205 * r19: fault vector number (e.g., 24 for General Exception)
1206 * r31: contains saved predicates (pr)
1207 */
1208 SAVE_MIN_WITH_COVER_R19
1209 alloc r14=ar.pfs,0,0,5,0
1210 mov out0=r15
1211 mov out1=cr.isr
1212 mov out2=cr.ifa
1213 mov out3=cr.iim
1214 mov out4=cr.itir
1215 ;;
1216 ssm psr.ic | PSR_DEFAULT_BITS
1217 ;;
1218 srlz.i // guarantee that interruption collection is on
1219 ;;
1220(p15) ssm psr.i // restore psr.i
1221 adds r3=8,r2 // set up second base pointer for SAVE_REST
1222 ;;
1223 SAVE_REST
1224 movl r14=ia64_leave_kernel
1225 ;;
1226 mov rp=r14
1227 br.call.sptk.many b6=ia64_fault
1228END(dispatch_to_fault_handler)
1229
1230// 1101//
1231// --- End of long entries, Beginning of short entries 1102// --- End of long entries, Beginning of short entries
1232// 1103//
@@ -1236,8 +1107,8 @@ END(dispatch_to_fault_handler)
1236// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) 1107// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1237ENTRY(page_not_present) 1108ENTRY(page_not_present)
1238 DBG_FAULT(20) 1109 DBG_FAULT(20)
1239 mov r16=cr.ifa 1110 MOV_FROM_IFA(r16)
1240 rsm psr.dt 1111 RSM_PSR_DT
1241 /* 1112 /*
1242 * The Linux page fault handler doesn't expect non-present pages to be in 1113 * The Linux page fault handler doesn't expect non-present pages to be in
1243 * the TLB. Flush the existing entry now, so we meet that expectation. 1114 * the TLB. Flush the existing entry now, so we meet that expectation.
@@ -1256,8 +1127,8 @@ END(page_not_present)
1256// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) 1127// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1257ENTRY(key_permission) 1128ENTRY(key_permission)
1258 DBG_FAULT(21) 1129 DBG_FAULT(21)
1259 mov r16=cr.ifa 1130 MOV_FROM_IFA(r16)
1260 rsm psr.dt 1131 RSM_PSR_DT
1261 mov r31=pr 1132 mov r31=pr
1262 ;; 1133 ;;
1263 srlz.d 1134 srlz.d
@@ -1269,8 +1140,8 @@ END(key_permission)
1269// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 1140// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1270ENTRY(iaccess_rights) 1141ENTRY(iaccess_rights)
1271 DBG_FAULT(22) 1142 DBG_FAULT(22)
1272 mov r16=cr.ifa 1143 MOV_FROM_IFA(r16)
1273 rsm psr.dt 1144 RSM_PSR_DT
1274 mov r31=pr 1145 mov r31=pr
1275 ;; 1146 ;;
1276 srlz.d 1147 srlz.d
@@ -1282,8 +1153,8 @@ END(iaccess_rights)
1282// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 1153// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1283ENTRY(daccess_rights) 1154ENTRY(daccess_rights)
1284 DBG_FAULT(23) 1155 DBG_FAULT(23)
1285 mov r16=cr.ifa 1156 MOV_FROM_IFA(r16)
1286 rsm psr.dt 1157 RSM_PSR_DT
1287 mov r31=pr 1158 mov r31=pr
1288 ;; 1159 ;;
1289 srlz.d 1160 srlz.d
@@ -1295,7 +1166,7 @@ END(daccess_rights)
1295// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 1166// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1296ENTRY(general_exception) 1167ENTRY(general_exception)
1297 DBG_FAULT(24) 1168 DBG_FAULT(24)
1298 mov r16=cr.isr 1169 MOV_FROM_ISR(r16)
1299 mov r31=pr 1170 mov r31=pr
1300 ;; 1171 ;;
1301 cmp4.eq p6,p0=0,r16 1172 cmp4.eq p6,p0=0,r16
@@ -1324,8 +1195,8 @@ END(disabled_fp_reg)
1324ENTRY(nat_consumption) 1195ENTRY(nat_consumption)
1325 DBG_FAULT(26) 1196 DBG_FAULT(26)
1326 1197
1327 mov r16=cr.ipsr 1198 MOV_FROM_IPSR(p0, r16)
1328 mov r17=cr.isr 1199 MOV_FROM_ISR(r17)
1329 mov r31=pr // save PR 1200 mov r31=pr // save PR
1330 ;; 1201 ;;
1331 and r18=0xf,r17 // r18 = cr.ipsr.code{3:0} 1202 and r18=0xf,r17 // r18 = cr.ipsr.code{3:0}
@@ -1335,10 +1206,10 @@ ENTRY(nat_consumption)
1335 dep r16=-1,r16,IA64_PSR_ED_BIT,1 1206 dep r16=-1,r16,IA64_PSR_ED_BIT,1
1336(p6) br.cond.spnt 1f // branch if (cr.ipsr.na == 0 || cr.ipsr.code{3:0} != LFETCH) 1207(p6) br.cond.spnt 1f // branch if (cr.ipsr.na == 0 || cr.ipsr.code{3:0} != LFETCH)
1337 ;; 1208 ;;
1338 mov cr.ipsr=r16 // set cr.ipsr.na 1209 MOV_TO_IPSR(p0, r16, r18)
1339 mov pr=r31,-1 1210 mov pr=r31,-1
1340 ;; 1211 ;;
1341 rfi 1212 RFI
1342 1213
13431: mov pr=r31,-1 12141: mov pr=r31,-1
1344 ;; 1215 ;;
@@ -1360,26 +1231,26 @@ ENTRY(speculation_vector)
1360 * 1231 *
1361 * cr.imm contains zero_ext(imm21) 1232 * cr.imm contains zero_ext(imm21)
1362 */ 1233 */
1363 mov r18=cr.iim 1234 MOV_FROM_IIM(r18)
1364 ;; 1235 ;;
1365 mov r17=cr.iip 1236 MOV_FROM_IIP(r17)
1366 shl r18=r18,43 // put sign bit in position (43=64-21) 1237 shl r18=r18,43 // put sign bit in position (43=64-21)
1367 ;; 1238 ;;
1368 1239
1369 mov r16=cr.ipsr 1240 MOV_FROM_IPSR(p0, r16)
1370 shr r18=r18,39 // sign extend (39=43-4) 1241 shr r18=r18,39 // sign extend (39=43-4)
1371 ;; 1242 ;;
1372 1243
1373 add r17=r17,r18 // now add the offset 1244 add r17=r17,r18 // now add the offset
1374 ;; 1245 ;;
1375 mov cr.iip=r17 1246 MOV_TO_IIP(r17, r19)
1376 dep r16=0,r16,41,2 // clear EI 1247 dep r16=0,r16,41,2 // clear EI
1377 ;; 1248 ;;
1378 1249
1379 mov cr.ipsr=r16 1250 MOV_TO_IPSR(p0, r16, r19)
1380 ;; 1251 ;;
1381 1252
1382 rfi // and go back 1253 RFI
1383END(speculation_vector) 1254END(speculation_vector)
1384 1255
1385 .org ia64_ivt+0x5800 1256 .org ia64_ivt+0x5800
@@ -1517,11 +1388,11 @@ ENTRY(ia32_intercept)
1517 DBG_FAULT(46) 1388 DBG_FAULT(46)
1518#ifdef CONFIG_IA32_SUPPORT 1389#ifdef CONFIG_IA32_SUPPORT
1519 mov r31=pr 1390 mov r31=pr
1520 mov r16=cr.isr 1391 MOV_FROM_ISR(r16)
1521 ;; 1392 ;;
1522 extr.u r17=r16,16,8 // get ISR.code 1393 extr.u r17=r16,16,8 // get ISR.code
1523 mov r18=ar.eflag 1394 mov r18=ar.eflag
1524 mov r19=cr.iim // old eflag value 1395 MOV_FROM_IIM(r19) // old eflag value
1525 ;; 1396 ;;
1526 cmp.ne p6,p0=2,r17 1397 cmp.ne p6,p0=2,r17
1527(p6) br.cond.spnt 1f // not a system flag fault 1398(p6) br.cond.spnt 1f // not a system flag fault
@@ -1533,7 +1404,7 @@ ENTRY(ia32_intercept)
1533(p6) br.cond.spnt 1f // eflags.ac bit didn't change 1404(p6) br.cond.spnt 1f // eflags.ac bit didn't change
1534 ;; 1405 ;;
1535 mov pr=r31,-1 // restore predicate registers 1406 mov pr=r31,-1 // restore predicate registers
1536 rfi 1407 RFI
1537 1408
15381: 14091:
1539#endif // CONFIG_IA32_SUPPORT 1410#endif // CONFIG_IA32_SUPPORT
@@ -1673,6 +1544,137 @@ END(ia32_interrupt)
1673 DBG_FAULT(67) 1544 DBG_FAULT(67)
1674 FAULT(67) 1545 FAULT(67)
1675 1546
1547 //-----------------------------------------------------------------------------------
1548 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
1549ENTRY(page_fault)
1550 SSM_PSR_DT_AND_SRLZ_I
1551 ;;
1552 SAVE_MIN_WITH_COVER
1553 alloc r15=ar.pfs,0,0,3,0
1554 MOV_FROM_IFA(out0)
1555 MOV_FROM_ISR(out1)
1556 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
1557 adds r3=8,r2 // set up second base pointer
1558 SSM_PSR_I(p15, p15, r14) // restore psr.i
1559 movl r14=ia64_leave_kernel
1560 ;;
1561 SAVE_REST
1562 mov rp=r14
1563 ;;
1564 adds out2=16,r12 // out2 = pointer to pt_regs
1565 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
1566END(page_fault)
1567
1568ENTRY(non_syscall)
1569 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1570 ;;
1571 SAVE_MIN_WITH_COVER
1572
1573 // There is no particular reason for this code to be here, other than that
1574 // there happens to be space here that would go unused otherwise. If this
1575 // fault ever gets "unreserved", simply move the following code to a more
1576 // suitable spot...
1577
1578 alloc r14=ar.pfs,0,0,2,0
1579 MOV_FROM_IIM(out0)
1580 add out1=16,sp
1581 adds r3=8,r2 // set up second base pointer for SAVE_REST
1582
1583 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
1584 // guarantee that interruption collection is on
1585 SSM_PSR_I(p15, p15, r15) // restore psr.i
1586 movl r15=ia64_leave_kernel
1587 ;;
1588 SAVE_REST
1589 mov rp=r15
1590 ;;
1591 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1592END(non_syscall)
1593
1594ENTRY(__interrupt)
1595 DBG_FAULT(12)
1596 mov r31=pr // prepare to save predicates
1597 ;;
1598 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1599 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
1600 // ensure everybody knows psr.ic is back on
1601 adds r3=8,r2 // set up second base pointer for SAVE_REST
1602 ;;
1603 SAVE_REST
1604 ;;
1605 MCA_RECOVER_RANGE(interrupt)
1606 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1607 MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
1608 add out1=16,sp // pass pointer to pt_regs as second arg
1609 ;;
1610 srlz.d // make sure we see the effect of cr.ivr
1611 movl r14=ia64_leave_kernel
1612 ;;
1613 mov rp=r14
1614 br.call.sptk.many b6=ia64_handle_irq
1615END(__interrupt)
1616
1617 /*
1618 * There is no particular reason for this code to be here, other than that
1619 * there happens to be space here that would go unused otherwise. If this
1620 * fault ever gets "unreserved", simply move the following code to a more
1621 * suitable spot...
1622 */
1623
1624ENTRY(dispatch_unaligned_handler)
1625 SAVE_MIN_WITH_COVER
1626 ;;
1627 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1628 MOV_FROM_IFA(out0)
1629 adds out1=16,sp
1630
1631 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1632 // guarantee that interruption collection is on
1633 SSM_PSR_I(p15, p15, r3) // restore psr.i
1634 adds r3=8,r2 // set up second base pointer
1635 ;;
1636 SAVE_REST
1637 movl r14=ia64_leave_kernel
1638 ;;
1639 mov rp=r14
1640 br.sptk.many ia64_prepare_handle_unaligned
1641END(dispatch_unaligned_handler)
1642
1643 /*
1644 * There is no particular reason for this code to be here, other than that
1645 * there happens to be space here that would go unused otherwise. If this
1646 * fault ever gets "unreserved", simply move the following code to a more
1647 * suitable spot...
1648 */
1649
1650ENTRY(dispatch_to_fault_handler)
1651 /*
1652 * Input:
1653 * psr.ic: off
1654 * r19: fault vector number (e.g., 24 for General Exception)
1655 * r31: contains saved predicates (pr)
1656 */
1657 SAVE_MIN_WITH_COVER_R19
1658 alloc r14=ar.pfs,0,0,5,0
1659 MOV_FROM_ISR(out1)
1660 MOV_FROM_IFA(out2)
1661 MOV_FROM_IIM(out3)
1662 MOV_FROM_ITIR(out4)
1663 ;;
1664 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
1665 // guarantee that interruption collection is on
1666 mov out0=r15
1667 ;;
1668 SSM_PSR_I(p15, p15, r3) // restore psr.i
1669 adds r3=8,r2 // set up second base pointer for SAVE_REST
1670 ;;
1671 SAVE_REST
1672 movl r14=ia64_leave_kernel
1673 ;;
1674 mov rp=r14
1675 br.call.sptk.many b6=ia64_fault
1676END(dispatch_to_fault_handler)
1677
1676 /* 1678 /*
1677 * Squatting in this space ... 1679 * Squatting in this space ...
1678 * 1680 *
@@ -1686,11 +1688,10 @@ ENTRY(dispatch_illegal_op_fault)
1686 .prologue 1688 .prologue
1687 .body 1689 .body
1688 SAVE_MIN_WITH_COVER 1690 SAVE_MIN_WITH_COVER
1689 ssm psr.ic | PSR_DEFAULT_BITS 1691 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1690 ;; 1692 // guarantee that interruption collection is on
1691 srlz.i // guarantee that interruption collection is on
1692 ;; 1693 ;;
1693(p15) ssm psr.i // restore psr.i 1694 SSM_PSR_I(p15, p15, r3) // restore psr.i
1694 adds r3=8,r2 // set up second base pointer for SAVE_REST 1695 adds r3=8,r2 // set up second base pointer for SAVE_REST
1695 ;; 1696 ;;
1696 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group 1697 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
@@ -1729,12 +1730,11 @@ END(dispatch_illegal_op_fault)
1729ENTRY(dispatch_to_ia32_handler) 1730ENTRY(dispatch_to_ia32_handler)
1730 SAVE_MIN 1731 SAVE_MIN
1731 ;; 1732 ;;
1732 mov r14=cr.isr 1733 MOV_FROM_ISR(r14)
1733 ssm psr.ic | PSR_DEFAULT_BITS 1734 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1734 ;; 1735 // guarantee that interruption collection is on
1735 srlz.i // guarantee that interruption collection is on
1736 ;; 1736 ;;
1737(p15) ssm psr.i 1737 SSM_PSR_I(p15, p15, r3)
1738 adds r3=8,r2 // Base pointer for SAVE_REST 1738 adds r3=8,r2 // Base pointer for SAVE_REST
1739 ;; 1739 ;;
1740 SAVE_REST 1740 SAVE_REST
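
The ivt.S hunks above all apply one transformation: each privilege-sensitive operation (reads and writes of cr.* registers, ssm/rsm, bsw, rfi) is replaced by an upper-case macro, and paravirt_inst.h (added below) selects the macro definitions per build flavor, so the same ivt.S source can be assembled once for bare metal and once per hypervisor. A sketch of the native side of such a macro pair, modeled on include/asm-ia64/native/inst.h which this patch adds further down (the definitions there are authoritative; this is only an illustration):

    /* native flavor: the macro is just the raw privileged instruction */
    #define MOV_FROM_IFA(reg)	\
    	mov reg = cr.ifa

    #define MOV_TO_IFA(reg, clob)	\
    	mov cr.ifa = reg

A hypervisor flavor (asm/xen/inst.h) supplies expansions that are legal at the guest's privilege level; the extra arguments such as clobber registers exist so those longer expansions have scratch space, even though the native flavor ignores them.
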
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 74b6d670aaef..292e214a3b84 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -2,6 +2,7 @@
2#include <asm/cache.h> 2#include <asm/cache.h>
3 3
4#include "entry.h" 4#include "entry.h"
5#include "paravirt_inst.h"
5 6
6#ifdef CONFIG_VIRT_CPU_ACCOUNTING 7#ifdef CONFIG_VIRT_CPU_ACCOUNTING
7/* read ar.itc in advance, and use it before leaving bank 0 */ 8/* read ar.itc in advance, and use it before leaving bank 0 */
@@ -43,16 +44,16 @@
43 * Note that psr.ic is NOT turned on by this macro. This is so that 44 * Note that psr.ic is NOT turned on by this macro. This is so that
44 * we can pass interruption state as arguments to a handler. 45 * we can pass interruption state as arguments to a handler.
45 */ 46 */
46#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \ 47#define IA64_NATIVE_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
47 mov r16=IA64_KR(CURRENT); /* M */ \ 48 mov r16=IA64_KR(CURRENT); /* M */ \
48 mov r27=ar.rsc; /* M */ \ 49 mov r27=ar.rsc; /* M */ \
49 mov r20=r1; /* A */ \ 50 mov r20=r1; /* A */ \
50 mov r25=ar.unat; /* M */ \ 51 mov r25=ar.unat; /* M */ \
51 mov r29=cr.ipsr; /* M */ \ 52 MOV_FROM_IPSR(p0,r29); /* M */ \
52 mov r26=ar.pfs; /* I */ \ 53 mov r26=ar.pfs; /* I */ \
53 mov r28=cr.iip; /* M */ \ 54 MOV_FROM_IIP(r28); /* M */ \
54 mov r21=ar.fpsr; /* M */ \ 55 mov r21=ar.fpsr; /* M */ \
55 COVER; /* B;; (or nothing) */ \ 56 __COVER; /* B;; (or nothing) */ \
56 ;; \ 57 ;; \
57 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ 58 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
58 ;; \ 59 ;; \
@@ -244,6 +245,6 @@
2441: \ 2451: \
245 .pred.rel "mutex", pKStk, pUStk 246 .pred.rel "mutex", pKStk, pUStk
246 247
247#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND) 248#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(COVER, mov r30=cr.ifs, , RSE_WORKAROUND)
248#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND) 249#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(COVER, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
249#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , ) 250#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
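
After this rename, minstate.h no longer owns the DO_SAVE_MIN name: SAVE_MIN_WITH_COVER now refers to DO_SAVE_MIN and COVER, which the flavor-specific inst.h pulled in through paravirt_inst.h must provide. A plausible native wiring (an assumption here; the real definitions live in include/asm-ia64/native/inst.h, added later in this patch):

    #define DO_SAVE_MIN	IA64_NATIVE_DO_SAVE_MIN
    #define COVER		cover

A hypervisor build can instead point DO_SAVE_MIN at its own register-save sequence and expand COVER into whatever replaces the privileged cover instruction.
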
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
new file mode 100644
index 000000000000..1ae049181e83
--- /dev/null
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -0,0 +1,24 @@
1/*
2 * calculate
3 * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...)
4 * depending on config.
5 * This must be calculated before processing asm-offset.c.
6 */
7
8#define ASM_OFFSETS_C 1
9
10#include <linux/kbuild.h>
11#include <linux/threads.h>
12#include <asm-ia64/native/irq.h>
13
14void foo(void)
15{
16 union paravirt_nr_irqs_max {
17 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
18#ifdef CONFIG_XEN
19 char xen_nr_irqs[XEN_NR_IRQS];
20#endif
21 };
22
23 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
24}
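
The union trick above is how NR_IRQS becomes the max() of the per-platform counts without any preprocessor arithmetic: sizeof a union is the size of its largest member, so a union of char[N] arrays evaluates the maximum at compile time, and kbuild's DEFINE() turns it into an asm-offsets constant. A standalone demonstration (demo names and counts invented):

    #include <stdio.h>

    #define NATIVE_NR_IRQS_DEMO 256
    #define XEN_NR_IRQS_DEMO    512

    union nr_irqs_max_demo {
    	char native_irqs[NATIVE_NR_IRQS_DEMO];
    	char xen_irqs[XEN_NR_IRQS_DEMO];
    };

    int main(void)
    {
    	/* char arrays have no padding, so this is exactly max(256, 512) */
    	printf("NR_IRQS = %zu\n", sizeof(union nr_irqs_max_demo));
    	return 0;
    }
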
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
new file mode 100644
index 000000000000..afaf5b9a2cf0
--- /dev/null
+++ b/arch/ia64/kernel/paravirt.c
@@ -0,0 +1,369 @@
1/******************************************************************************
2 * arch/ia64/kernel/paravirt.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/init.h>
25
26#include <linux/compiler.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/types.h>
31
32#include <asm/iosapic.h>
33#include <asm/paravirt.h>
34
35/***************************************************************************
36 * general info
37 */
38struct pv_info pv_info = {
39 .kernel_rpl = 0,
40 .paravirt_enabled = 0,
41 .name = "bare hardware"
42};
43
44/***************************************************************************
45 * pv_init_ops
46 * initialization hooks.
47 */
48
49struct pv_init_ops pv_init_ops;
50
51/***************************************************************************
52 * pv_cpu_ops
53 * intrinsics hooks.
54 */
55
56/* ia64_native_xxx are macros, so we have to wrap them in real functions here */
57
58#define DEFINE_VOID_FUNC1(name) \
59 static void \
60 ia64_native_ ## name ## _func(unsigned long arg) \
61 { \
62 ia64_native_ ## name(arg); \
63 } \
64
65#define DEFINE_VOID_FUNC2(name) \
66 static void \
67 ia64_native_ ## name ## _func(unsigned long arg0, \
68 unsigned long arg1) \
69 { \
70 ia64_native_ ## name(arg0, arg1); \
71 } \
72
73#define DEFINE_FUNC0(name) \
74 static unsigned long \
75 ia64_native_ ## name ## _func(void) \
76 { \
77 return ia64_native_ ## name(); \
78 }
79
80#define DEFINE_FUNC1(name, type) \
81 static unsigned long \
82 ia64_native_ ## name ## _func(type arg) \
83 { \
84 return ia64_native_ ## name(arg); \
85 } \
86
87DEFINE_VOID_FUNC1(fc);
88DEFINE_VOID_FUNC1(intrin_local_irq_restore);
89
90DEFINE_VOID_FUNC2(ptcga);
91DEFINE_VOID_FUNC2(set_rr);
92
93DEFINE_FUNC0(get_psr_i);
94
95DEFINE_FUNC1(thash, unsigned long);
96DEFINE_FUNC1(get_cpuid, int);
97DEFINE_FUNC1(get_pmd, int);
98DEFINE_FUNC1(get_rr, unsigned long);
99
100static void
101ia64_native_ssm_i_func(void)
102{
103 ia64_native_ssm(IA64_PSR_I);
104}
105
106static void
107ia64_native_rsm_i_func(void)
108{
109 ia64_native_rsm(IA64_PSR_I);
110}
111
112static void
113ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
114 unsigned long val2, unsigned long val3,
115 unsigned long val4)
116{
117 ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
118}
119
120#define CASE_GET_REG(id) \
121 case _IA64_REG_ ## id: \
122 res = ia64_native_getreg(_IA64_REG_ ## id); \
123 break;
124#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
125#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
126
127unsigned long
128ia64_native_getreg_func(int regnum)
129{
130 unsigned long res = -1;
131 switch (regnum) {
132 CASE_GET_REG(GP);
133 CASE_GET_REG(IP);
134 CASE_GET_REG(PSR);
135 CASE_GET_REG(TP);
136 CASE_GET_REG(SP);
137
138 CASE_GET_AR(KR0);
139 CASE_GET_AR(KR1);
140 CASE_GET_AR(KR2);
141 CASE_GET_AR(KR3);
142 CASE_GET_AR(KR4);
143 CASE_GET_AR(KR5);
144 CASE_GET_AR(KR6);
145 CASE_GET_AR(KR7);
146 CASE_GET_AR(RSC);
147 CASE_GET_AR(BSP);
148 CASE_GET_AR(BSPSTORE);
149 CASE_GET_AR(RNAT);
150 CASE_GET_AR(FCR);
151 CASE_GET_AR(EFLAG);
152 CASE_GET_AR(CSD);
153 CASE_GET_AR(SSD);
154 CASE_GET_AR(CFLAG);
155 CASE_GET_AR(FSR);
156 CASE_GET_AR(FIR);
157 CASE_GET_AR(FDR);
158 CASE_GET_AR(CCV);
159 CASE_GET_AR(UNAT);
160 CASE_GET_AR(FPSR);
161 CASE_GET_AR(ITC);
162 CASE_GET_AR(PFS);
163 CASE_GET_AR(LC);
164 CASE_GET_AR(EC);
165
166 CASE_GET_CR(DCR);
167 CASE_GET_CR(ITM);
168 CASE_GET_CR(IVA);
169 CASE_GET_CR(PTA);
170 CASE_GET_CR(IPSR);
171 CASE_GET_CR(ISR);
172 CASE_GET_CR(IIP);
173 CASE_GET_CR(IFA);
174 CASE_GET_CR(ITIR);
175 CASE_GET_CR(IIPA);
176 CASE_GET_CR(IFS);
177 CASE_GET_CR(IIM);
178 CASE_GET_CR(IHA);
179 CASE_GET_CR(LID);
180 CASE_GET_CR(IVR);
181 CASE_GET_CR(TPR);
182 CASE_GET_CR(EOI);
183 CASE_GET_CR(IRR0);
184 CASE_GET_CR(IRR1);
185 CASE_GET_CR(IRR2);
186 CASE_GET_CR(IRR3);
187 CASE_GET_CR(ITV);
188 CASE_GET_CR(PMV);
189 CASE_GET_CR(CMCV);
190 CASE_GET_CR(LRR0);
191 CASE_GET_CR(LRR1);
192
193 default:
194 printk(KERN_CRIT "wrong_getreg %d\n", regnum);
195 break;
196 }
197 return res;
198}
199
200#define CASE_SET_REG(id) \
201 case _IA64_REG_ ## id: \
202 ia64_native_setreg(_IA64_REG_ ## id, val); \
203 break;
204#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
205#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
206
207void
208ia64_native_setreg_func(int regnum, unsigned long val)
209{
210 switch (regnum) {
211 case _IA64_REG_PSR_L:
212 ia64_native_setreg(_IA64_REG_PSR_L, val);
213 ia64_dv_serialize_data();
214 break;
215 CASE_SET_REG(SP);
216 CASE_SET_REG(GP);
217
218 CASE_SET_AR(KR0);
219 CASE_SET_AR(KR1);
220 CASE_SET_AR(KR2);
221 CASE_SET_AR(KR3);
222 CASE_SET_AR(KR4);
223 CASE_SET_AR(KR5);
224 CASE_SET_AR(KR6);
225 CASE_SET_AR(KR7);
226 CASE_SET_AR(RSC);
227 CASE_SET_AR(BSP);
228 CASE_SET_AR(BSPSTORE);
229 CASE_SET_AR(RNAT);
230 CASE_SET_AR(FCR);
231 CASE_SET_AR(EFLAG);
232 CASE_SET_AR(CSD);
233 CASE_SET_AR(SSD);
234 CASE_SET_AR(CFLAG);
235 CASE_SET_AR(FSR);
236 CASE_SET_AR(FIR);
237 CASE_SET_AR(FDR);
238 CASE_SET_AR(CCV);
239 CASE_SET_AR(UNAT);
240 CASE_SET_AR(FPSR);
241 CASE_SET_AR(ITC);
242 CASE_SET_AR(PFS);
243 CASE_SET_AR(LC);
244 CASE_SET_AR(EC);
245
246 CASE_SET_CR(DCR);
247 CASE_SET_CR(ITM);
248 CASE_SET_CR(IVA);
249 CASE_SET_CR(PTA);
250 CASE_SET_CR(IPSR);
251 CASE_SET_CR(ISR);
252 CASE_SET_CR(IIP);
253 CASE_SET_CR(IFA);
254 CASE_SET_CR(ITIR);
255 CASE_SET_CR(IIPA);
256 CASE_SET_CR(IFS);
257 CASE_SET_CR(IIM);
258 CASE_SET_CR(IHA);
259 CASE_SET_CR(LID);
260 CASE_SET_CR(IVR);
261 CASE_SET_CR(TPR);
262 CASE_SET_CR(EOI);
263 CASE_SET_CR(IRR0);
264 CASE_SET_CR(IRR1);
265 CASE_SET_CR(IRR2);
266 CASE_SET_CR(IRR3);
267 CASE_SET_CR(ITV);
268 CASE_SET_CR(PMV);
269 CASE_SET_CR(CMCV);
270 CASE_SET_CR(LRR0);
271 CASE_SET_CR(LRR1);
272 default:
273 printk(KERN_CRIT "wrong setreg %d\n", regnum);
274 break;
275 }
276}
277
278struct pv_cpu_ops pv_cpu_ops = {
279 .fc = ia64_native_fc_func,
280 .thash = ia64_native_thash_func,
281 .get_cpuid = ia64_native_get_cpuid_func,
282 .get_pmd = ia64_native_get_pmd_func,
283 .ptcga = ia64_native_ptcga_func,
284 .get_rr = ia64_native_get_rr_func,
285 .set_rr = ia64_native_set_rr_func,
286 .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
287 .ssm_i = ia64_native_ssm_i_func,
288 .getreg = ia64_native_getreg_func,
289 .setreg = ia64_native_setreg_func,
290 .rsm_i = ia64_native_rsm_i_func,
291 .get_psr_i = ia64_native_get_psr_i_func,
292 .intrin_local_irq_restore
293 = ia64_native_intrin_local_irq_restore_func,
294};
295EXPORT_SYMBOL(pv_cpu_ops);
296
297/******************************************************************************
298 * replacement of hand-written assembly code.
299 */
300
301void
302paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
303{
304 extern unsigned long paravirt_switch_to_targ;
305 extern unsigned long paravirt_leave_syscall_targ;
306 extern unsigned long paravirt_work_processed_syscall_targ;
307 extern unsigned long paravirt_leave_kernel_targ;
308
309 paravirt_switch_to_targ = cpu_asm_switch->switch_to;
310 paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
311 paravirt_work_processed_syscall_targ =
312 cpu_asm_switch->work_processed_syscall;
313 paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
314}
315
316/***************************************************************************
317 * pv_iosapic_ops
318 * iosapic read/write hooks.
319 */
320
321static unsigned int
322ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
323{
324 return __ia64_native_iosapic_read(iosapic, reg);
325}
326
327static void
328ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
329{
330 __ia64_native_iosapic_write(iosapic, reg, val);
331}
332
333struct pv_iosapic_ops pv_iosapic_ops = {
334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
335 .get_irq_chip = ia64_native_iosapic_get_irq_chip,
336
337 .__read = ia64_native_iosapic_read,
338 .__write = ia64_native_iosapic_write,
339};
340
341/***************************************************************************
342 * pv_irq_ops
343 * irq operations
344 */
345
346struct pv_irq_ops pv_irq_ops = {
347 .register_ipi = ia64_native_register_ipi,
348
349 .assign_irq_vector = ia64_native_assign_irq_vector,
350 .free_irq_vector = ia64_native_free_irq_vector,
351 .register_percpu_irq = ia64_native_register_percpu_irq,
352
353 .resend_irq = ia64_native_resend_irq,
354};
355
356/***************************************************************************
357 * pv_time_ops
358 * time operations
359 */
360
361static int
362ia64_native_do_steal_accounting(unsigned long *new_itm)
363{
364 return 0;
365}
366
367struct pv_time_ops pv_time_ops = {
368 .do_steal_accounting = ia64_native_do_steal_accounting,
369};
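
pv_cpu_ops is the runtime half of the design: a table of function pointers preloaded with the native implementations, which a hypervisor port overwrites early in boot so that every ia64_xxx intrinsic call site (see the intrinsics.h hunk below) is transparently redirected. A self-contained analogue of the pattern (names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct demo_cpu_ops {
    	unsigned long (*get_psr_i)(void);
    };

    static unsigned long native_get_psr_i(void)
    {
    	return 1;	/* stand-in for reading psr.i on bare metal */
    }

    /* defaults to the native implementation, exactly like pv_cpu_ops */
    static struct demo_cpu_ops demo_cpu_ops = {
    	.get_psr_i = native_get_psr_i,
    };

    static unsigned long hv_get_psr_i(void)
    {
    	return 0;	/* stand-in for a hypervisor-provided version */
    }

    int main(void)
    {
    	printf("native: %lu\n", demo_cpu_ops.get_psr_i());
    	demo_cpu_ops.get_psr_i = hv_get_psr_i; /* what a pv platform does at boot */
    	printf("pv:     %lu\n", demo_cpu_ops.get_psr_i());
    	return 0;
    }
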
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
new file mode 100644
index 000000000000..5cad6fb2ed19
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirt_inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
24#include <asm/xen/inst.h>
25#include <asm/xen/minstate.h>
26#else
27#include <asm/native/inst.h>
28#endif
29
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
new file mode 100644
index 000000000000..2f42fcb9776a
--- /dev/null
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -0,0 +1,60 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirtentry.S
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/asmmacro.h>
24#include <asm/asm-offsets.h>
25#include "entry.h"
26
27#define DATA8(sym, init_value) \
28 .pushsection .data.read_mostly ; \
29 .align 8 ; \
30 .global sym ; \
31 sym: ; \
32 data8 init_value ; \
33 .popsection
34
35#define BRANCH(targ, reg, breg) \
36 movl reg=targ ; \
37 ;; \
38 ld8 reg=[reg] ; \
39 ;; \
40 mov breg=reg ; \
41 br.cond.sptk.many breg
42
43#define BRANCH_PROC(sym, reg, breg) \
44 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
45 GLOBAL_ENTRY(paravirt_ ## sym) ; \
46 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
47 END(paravirt_ ## sym)
48
49#define BRANCH_PROC_UNWINFO(sym, reg, breg) \
50 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
51 GLOBAL_ENTRY(paravirt_ ## sym) ; \
52 PT_REGS_UNWIND_INFO(0) ; \
53 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
54 END(paravirt_ ## sym)
55
56
57BRANCH_PROC(switch_to, r22, b7)
58BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
59BRANCH_PROC(work_processed_syscall, r2, b7)
60BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 632cda8f2e76..e5c2de9b29a5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -51,6 +51,7 @@
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/meminit.h> 52#include <asm/meminit.h>
53#include <asm/page.h> 53#include <asm/page.h>
54#include <asm/paravirt.h>
54#include <asm/patch.h> 55#include <asm/patch.h>
55#include <asm/pgtable.h> 56#include <asm/pgtable.h>
56#include <asm/processor.h> 57#include <asm/processor.h>
@@ -341,6 +342,8 @@ reserve_memory (void)
341 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 342 rsvd_region[n].end = (unsigned long) ia64_imva(_end);
342 n++; 343 n++;
343 344
345 n += paravirt_reserve_memory(&rsvd_region[n]);
346
344#ifdef CONFIG_BLK_DEV_INITRD 347#ifdef CONFIG_BLK_DEV_INITRD
345 if (ia64_boot_param->initrd_start) { 348 if (ia64_boot_param->initrd_start) {
346 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 349 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -519,6 +522,8 @@ setup_arch (char **cmdline_p)
519{ 522{
520 unw_init(); 523 unw_init();
521 524
525 paravirt_arch_setup_early();
526
522 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 527 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
523 528
524 *cmdline_p = __va(ia64_boot_param->command_line); 529 *cmdline_p = __va(ia64_boot_param->command_line);
@@ -583,6 +588,9 @@ setup_arch (char **cmdline_p)
583 acpi_boot_init(); 588 acpi_boot_init();
584#endif 589#endif
585 590
591 paravirt_banner();
592 paravirt_arch_setup_console(cmdline_p);
593
586#ifdef CONFIG_VT 594#ifdef CONFIG_VT
587 if (!conswitchp) { 595 if (!conswitchp) {
588# if defined(CONFIG_DUMMY_CONSOLE) 596# if defined(CONFIG_DUMMY_CONSOLE)
@@ -602,6 +610,8 @@ setup_arch (char **cmdline_p)
602#endif 610#endif
603 611
604 /* enable IA-64 Machine Check Abort Handling unless disabled */ 612 /* enable IA-64 Machine Check Abort Handling unless disabled */
613 if (paravirt_arch_setup_nomca())
614 nomca = 1;
605 if (!nomca) 615 if (!nomca)
606 ia64_mca_init(); 616 ia64_mca_init();
607 617
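
Each of the new setup.c call sites relies on the hooks collapsing to no-ops when no paravirt guest support is configured, so a native kernel pays nothing for them. A sketch of the stub pattern those hooks presumably follow (the stub body is an assumption; the real definitions are in include/asm-ia64/paravirt.h, added later in this patch):

    #include <stdio.h>

    #ifdef CONFIG_PARAVIRT_GUEST
    int paravirt_arch_setup_nomca(void);	/* provided by the guest code */
    #else
    static inline int paravirt_arch_setup_nomca(void) { return 0; } /* no-op */
    #endif

    int main(void)
    {
    	int nomca = 0;
    	if (paravirt_arch_setup_nomca())	/* mirrors the setup_arch() hunk */
    		nomca = 1;
    	printf("nomca = %d\n", nomca);
    	return 0;
    }
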
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9d1d429c6c59..03f1a9908afc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,6 +50,7 @@
50#include <asm/machvec.h> 50#include <asm/machvec.h>
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/page.h> 52#include <asm/page.h>
53#include <asm/paravirt.h>
53#include <asm/pgalloc.h> 54#include <asm/pgalloc.h>
54#include <asm/pgtable.h> 55#include <asm/pgtable.h>
55#include <asm/processor.h> 56#include <asm/processor.h>
@@ -642,6 +643,7 @@ void __devinit smp_prepare_boot_cpu(void)
642 cpu_set(smp_processor_id(), cpu_online_map); 643 cpu_set(smp_processor_id(), cpu_online_map);
643 cpu_set(smp_processor_id(), cpu_callin_map); 644 cpu_set(smp_processor_id(), cpu_callin_map);
644 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 645 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
646 paravirt_post_smp_prepare_boot_cpu();
645} 647}
646 648
647#ifdef CONFIG_HOTPLUG_CPU 649#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aad1b7b1fff9..65c10a42c88f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -24,6 +24,7 @@
24#include <asm/machvec.h> 24#include <asm/machvec.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/hw_irq.h> 26#include <asm/hw_irq.h>
27#include <asm/paravirt.h>
27#include <asm/ptrace.h> 28#include <asm/ptrace.h>
28#include <asm/sal.h> 29#include <asm/sal.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
@@ -48,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
48 49
49#endif 50#endif
50 51
52#ifdef CONFIG_PARAVIRT
53static void
54paravirt_clocksource_resume(void)
55{
56 if (pv_time_ops.clocksource_resume)
57 pv_time_ops.clocksource_resume();
58}
59#endif
60
51static struct clocksource clocksource_itc = { 61static struct clocksource clocksource_itc = {
52 .name = "itc", 62 .name = "itc",
53 .rating = 350, 63 .rating = 350,
@@ -56,6 +66,9 @@ static struct clocksource clocksource_itc = {
56 .mult = 0, /*to be calculated*/ 66 .mult = 0, /*to be calculated*/
57 .shift = 16, 67 .shift = 16,
58 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 68 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
69#ifdef CONFIG_PARAVIRT
70 .resume = paravirt_clocksource_resume,
71#endif
59}; 72};
60static struct clocksource *itc_clocksource; 73static struct clocksource *itc_clocksource;
61 74
@@ -157,6 +170,9 @@ timer_interrupt (int irq, void *dev_id)
157 170
158 profile_tick(CPU_PROFILING); 171 profile_tick(CPU_PROFILING);
159 172
173 if (paravirt_do_steal_accounting(&new_itm))
174 goto skip_process_time_accounting;
175
160 while (1) { 176 while (1) {
161 update_process_times(user_mode(get_irq_regs())); 177 update_process_times(user_mode(get_irq_regs()));
162 178
@@ -186,6 +202,8 @@ timer_interrupt (int irq, void *dev_id)
186 local_irq_disable(); 202 local_irq_disable();
187 } 203 }
188 204
205skip_process_time_accounting:
206
189 do { 207 do {
190 /* 208 /*
191 * If we're too close to the next clock tick for 209 * If we're too close to the next clock tick for
@@ -335,6 +353,11 @@ ia64_init_itm (void)
335 */ 353 */
336 clocksource_itc.rating = 50; 354 clocksource_itc.rating = 50;
337 355
356 paravirt_init_missing_ticks_accounting(smp_processor_id());
357
358 /* avoid soft-lockup messages when a cpu is unplugged and plugged again. */
359 touch_softlockup_watchdog();
360
338 /* Setup the CPU local timer tick */ 361 /* Setup the CPU local timer tick */
339 ia64_cpu_local_tick(); 362 ia64_cpu_local_tick();
340 363
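
The timer_interrupt() hunk gives do_steal_accounting a simple contract: return nonzero when the hypervisor stole the elapsed ticks, so the normal process-time accounting loop is skipped for them; the native implementation in paravirt.c above always returns 0. An illustrative hook honoring that contract (the body is an assumption, not Xen's code):

    #include <stdio.h>

    static unsigned long stolen_ticks;	/* stand-in for hypervisor-reported data */

    static int demo_do_steal_accounting(unsigned long *new_itm)
    {
    	if (stolen_ticks == 0)
    		return 0;		/* nothing stolen: account normally */
    	*new_itm += stolen_ticks;	/* push the next timer target out */
    	stolen_ticks = 0;
    	return 1;			/* caller skips process-time accounting */
    }

    int main(void)
    {
    	unsigned long itm = 100;
    	stolen_ticks = 3;
    	printf("skip=%d new_itm=%lu\n", demo_do_steal_accounting(&itm), itm);
    	return 0;
    }
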
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5929ab10a289..5a77206c2492 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -4,7 +4,6 @@
4#include <asm/system.h> 4#include <asm/system.h>
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6 6
7#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
8#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
9 8
10#define IVT_TEXT \ 9#define IVT_TEXT \
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index eb24a3f47caa..ccbe8ae47a61 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -5,12 +5,12 @@ header-y += fpu.h
5header-y += fpswa.h 5header-y += fpswa.h
6header-y += ia64regs.h 6header-y += ia64regs.h
7header-y += intel_intrin.h 7header-y += intel_intrin.h
8header-y += intrinsics.h
9header-y += perfmon_default_smpl.h 8header-y += perfmon_default_smpl.h
10header-y += ptrace_offsets.h 9header-y += ptrace_offsets.h
11header-y += rse.h 10header-y += rse.h
12header-y += ucontext.h 11header-y += ucontext.h
13 12
14unifdef-y += gcc_intrin.h 13unifdef-y += gcc_intrin.h
14unifdef-y += intrinsics.h
15unifdef-y += perfmon.h 15unifdef-y += perfmon.h
16unifdef-y += ustack.h 16unifdef-y += ustack.h
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 2fe292c275fe..0f5b55921758 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -32,7 +32,7 @@ extern void ia64_bad_param_for_getreg (void);
32register unsigned long ia64_r13 asm ("r13") __used; 32register unsigned long ia64_r13 asm ("r13") __used;
33#endif 33#endif
34 34
35#define ia64_setreg(regnum, val) \ 35#define ia64_native_setreg(regnum, val) \
36({ \ 36({ \
37 switch (regnum) { \ 37 switch (regnum) { \
38 case _IA64_REG_PSR_L: \ 38 case _IA64_REG_PSR_L: \
@@ -61,7 +61,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
61 } \ 61 } \
62}) 62})
63 63
64#define ia64_getreg(regnum) \ 64#define ia64_native_getreg(regnum) \
65({ \ 65({ \
66 __u64 ia64_intri_res; \ 66 __u64 ia64_intri_res; \
67 \ 67 \
@@ -385,7 +385,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
385 385
386#define ia64_invala() asm volatile ("invala" ::: "memory") 386#define ia64_invala() asm volatile ("invala" ::: "memory")
387 387
388#define ia64_thash(addr) \ 388#define ia64_native_thash(addr) \
389({ \ 389({ \
390 __u64 ia64_intri_res; \ 390 __u64 ia64_intri_res; \
391 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ 391 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
@@ -438,10 +438,10 @@ register unsigned long ia64_r13 asm ("r13") __used;
438#define ia64_set_pmd(index, val) \ 438#define ia64_set_pmd(index, val) \
439 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") 439 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
440 440
441#define ia64_set_rr(index, val) \ 441#define ia64_native_set_rr(index, val) \
442 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); 442 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
443 443
444#define ia64_get_cpuid(index) \ 444#define ia64_native_get_cpuid(index) \
445({ \ 445({ \
446 __u64 ia64_intri_res; \ 446 __u64 ia64_intri_res; \
447 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ 447 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
@@ -477,33 +477,33 @@ register unsigned long ia64_r13 asm ("r13") __used;
477}) 477})
478 478
479 479
480#define ia64_get_pmd(index) \ 480#define ia64_native_get_pmd(index) \
481({ \ 481({ \
482 __u64 ia64_intri_res; \ 482 __u64 ia64_intri_res; \
483 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ 483 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
484 ia64_intri_res; \ 484 ia64_intri_res; \
485}) 485})
486 486
487#define ia64_get_rr(index) \ 487#define ia64_native_get_rr(index) \
488({ \ 488({ \
489 __u64 ia64_intri_res; \ 489 __u64 ia64_intri_res; \
490 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ 490 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
491 ia64_intri_res; \ 491 ia64_intri_res; \
492}) 492})
493 493
494#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") 494#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
495 495
496 496
497#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") 497#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
498 498
499#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") 499#define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
500#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") 500#define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
501#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") 501#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
502#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") 502#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
503 503
504#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) 504#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
505 505
506#define ia64_ptcga(addr, size) \ 506#define ia64_native_ptcga(addr, size) \
507do { \ 507do { \
508 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ 508 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
509 ia64_dv_serialize_data(); \ 509 ia64_dv_serialize_data(); \
@@ -608,7 +608,7 @@ do { \
608 } \ 608 } \
609}) 609})
610 610
611#define ia64_intrin_local_irq_restore(x) \ 611#define ia64_native_intrin_local_irq_restore(x) \
612do { \ 612do { \
613 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ 613 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
614 "(p6) ssm psr.i;" \ 614 "(p6) ssm psr.i;" \
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 76366dc9c1a0..5c99cbcb8a0d 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -15,7 +15,11 @@
15#include <asm/ptrace.h> 15#include <asm/ptrace.h>
16#include <asm/smp.h> 16#include <asm/smp.h>
17 17
18#ifndef CONFIG_PARAVIRT
18typedef u8 ia64_vector; 19typedef u8 ia64_vector;
20#else
21typedef u16 ia64_vector;
22#endif
19 23
20/* 24/*
21 * 0 special 25 * 0 special
@@ -104,13 +108,24 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
104 108
105extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ 109extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
106 110
111#ifdef CONFIG_PARAVIRT_GUEST
112#include <asm/paravirt.h>
113#else
114#define ia64_register_ipi ia64_native_register_ipi
115#define assign_irq_vector ia64_native_assign_irq_vector
116#define free_irq_vector ia64_native_free_irq_vector
117#define register_percpu_irq ia64_native_register_percpu_irq
118#define ia64_resend_irq ia64_native_resend_irq
119#endif
120
121extern void ia64_native_register_ipi(void);
107extern int bind_irq_vector(int irq, int vector, cpumask_t domain); 122extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
108extern int assign_irq_vector (int irq); /* allocate a free vector */ 123extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */
109extern void free_irq_vector (int vector); 124extern void ia64_native_free_irq_vector (int vector);
110extern int reserve_irq_vector (int vector); 125extern int reserve_irq_vector (int vector);
111extern void __setup_vector_irq(int cpu); 126extern void __setup_vector_irq(int cpu);
112extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); 127extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
113extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); 128extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
114extern int check_irq_used (int irq); 129extern int check_irq_used (int irq);
115extern void destroy_and_reserve_irq (unsigned int irq); 130extern void destroy_and_reserve_irq (unsigned int irq);
116 131
@@ -122,7 +137,7 @@ static inline int irq_prepare_move(int irq, int cpu) { return 0; }
122static inline void irq_complete_move(unsigned int irq) {} 137static inline void irq_complete_move(unsigned int irq) {}
123#endif 138#endif
124 139
125static inline void ia64_resend_irq(unsigned int vector) 140static inline void ia64_native_resend_irq(unsigned int vector)
126{ 141{
127 platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); 142 platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
128} 143}
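
The hw_irq.h hunk uses the same aliasing idiom as iosapic.h below: the implementation is renamed to ia64_native_*, and with CONFIG_PARAVIRT_GUEST unset the old name becomes a plain #define for the native function, so the native build keeps a direct call with no pointer hop. A compilable sketch of the idiom (the pv branch is an assumption about what asm/paravirt.h provides):

    #include <stdio.h>

    static int ia64_native_assign_irq_vector_demo(int irq)
    {
    	return irq + 0x30;	/* stand-in for real vector allocation */
    }

    #ifdef CONFIG_PARAVIRT_GUEST
    /* asm/paravirt.h would route this through pv_irq_ops instead */
    #else
    #define assign_irq_vector_demo ia64_native_assign_irq_vector_demo
    #endif

    int main(void)
    {
    	printf("vector = 0x%x\n", assign_irq_vector_demo(4));
    	return 0;
    }
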
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a520d103d808..53cec577558a 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -16,8 +16,8 @@
16 * intrinsic 16 * intrinsic
17 */ 17 */
18 18
19#define ia64_getreg __getReg 19#define ia64_native_getreg __getReg
20#define ia64_setreg __setReg 20#define ia64_native_setreg __setReg
21 21
22#define ia64_hint __hint 22#define ia64_hint __hint
23#define ia64_hint_pause __hint_pause 23#define ia64_hint_pause __hint_pause
@@ -39,10 +39,10 @@
39#define ia64_invala_fr __invala_fr 39#define ia64_invala_fr __invala_fr
40#define ia64_nop __nop 40#define ia64_nop __nop
41#define ia64_sum __sum 41#define ia64_sum __sum
42#define ia64_ssm __ssm 42#define ia64_native_ssm __ssm
43#define ia64_rum __rum 43#define ia64_rum __rum
44#define ia64_rsm __rsm 44#define ia64_native_rsm __rsm
45#define ia64_fc __fc 45#define ia64_native_fc __fc
46 46
47#define ia64_ldfs __ldfs 47#define ia64_ldfs __ldfs
48#define ia64_ldfd __ldfd 48#define ia64_ldfd __ldfd
@@ -88,16 +88,17 @@
88 __setIndReg(_IA64_REG_INDR_PMC, index, val) 88 __setIndReg(_IA64_REG_INDR_PMC, index, val)
89#define ia64_set_pmd(index, val) \ 89#define ia64_set_pmd(index, val) \
90 __setIndReg(_IA64_REG_INDR_PMD, index, val) 90 __setIndReg(_IA64_REG_INDR_PMD, index, val)
91#define ia64_set_rr(index, val) \ 91#define ia64_native_set_rr(index, val) \
92 __setIndReg(_IA64_REG_INDR_RR, index, val) 92 __setIndReg(_IA64_REG_INDR_RR, index, val)
93 93
94#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) 94#define ia64_native_get_cpuid(index) \
95#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) 95 __getIndReg(_IA64_REG_INDR_CPUID, index)
96#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) 96#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
97#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) 97#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
98#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) 98#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
99#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) 99#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
100#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) 100#define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
101#define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
101 102
102#define ia64_srlz_d __dsrlz 103#define ia64_srlz_d __dsrlz
103#define ia64_srlz_i __isrlz 104#define ia64_srlz_i __isrlz
@@ -119,16 +120,16 @@
119#define ia64_ld8_acq __ld8_acq 120#define ia64_ld8_acq __ld8_acq
120 121
121#define ia64_sync_i __synci 122#define ia64_sync_i __synci
122#define ia64_thash __thash 123#define ia64_native_thash __thash
123#define ia64_ttag __ttag 124#define ia64_native_ttag __ttag
124#define ia64_itcd __itcd 125#define ia64_itcd __itcd
125#define ia64_itci __itci 126#define ia64_itci __itci
126#define ia64_itrd __itrd 127#define ia64_itrd __itrd
127#define ia64_itri __itri 128#define ia64_itri __itri
128#define ia64_ptce __ptce 129#define ia64_ptce __ptce
129#define ia64_ptcl __ptcl 130#define ia64_ptcl __ptcl
130#define ia64_ptcg __ptcg 131#define ia64_native_ptcg __ptcg
131#define ia64_ptcga __ptcga 132#define ia64_native_ptcga __ptcga
132#define ia64_ptri __ptri 133#define ia64_ptri __ptri
133#define ia64_ptrd __ptrd 134#define ia64_ptrd __ptrd
134#define ia64_dep_mi _m64_dep_mi 135#define ia64_dep_mi _m64_dep_mi
@@ -145,13 +146,13 @@
145#define ia64_lfetch_fault __lfetch_fault 146#define ia64_lfetch_fault __lfetch_fault
146#define ia64_lfetch_fault_excl __lfetch_fault_excl 147#define ia64_lfetch_fault_excl __lfetch_fault_excl
147 148
148#define ia64_intrin_local_irq_restore(x) \ 149#define ia64_native_intrin_local_irq_restore(x) \
149do { \ 150do { \
150 if ((x) != 0) { \ 151 if ((x) != 0) { \
151 ia64_ssm(IA64_PSR_I); \ 152 ia64_native_ssm(IA64_PSR_I); \
152 ia64_srlz_d(); \ 153 ia64_srlz_d(); \
153 } else { \ 154 } else { \
154 ia64_rsm(IA64_PSR_I); \ 155 ia64_native_rsm(IA64_PSR_I); \
155 } \ 156 } \
156} while (0) 157} while (0)
157 158
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index f1135b5b94c3..47d686dba1eb 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
@@ -18,6 +18,17 @@
 # include <asm/gcc_intrin.h>
 #endif
 
+#define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
+
+#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
+do {								\
+	ia64_native_set_rr(0x0000000000000000UL, (val0));	\
+	ia64_native_set_rr(0x2000000000000000UL, (val1));	\
+	ia64_native_set_rr(0x4000000000000000UL, (val2));	\
+	ia64_native_set_rr(0x6000000000000000UL, (val3));	\
+	ia64_native_set_rr(0x8000000000000000UL, (val4));	\
+} while (0)
+
 /*
  * Force an unresolved reference if someone tries to use
  * ia64_fetch_and_add() with a bad value.
@@ -183,4 +194,48 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
 #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
 
 #endif
+
+#ifdef __KERNEL__
+#include <asm/paravirt_privop.h>
+#endif
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
+#define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
+#else
+#define IA64_INTRINSIC_API(name)	ia64_native_ ## name
+#define IA64_INTRINSIC_MACRO(name)	ia64_native_ ## name
+#endif
+
+/************************************************/
+/* Instructions paravirtualized for correctness */
+/************************************************/
+/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ */
+#define ia64_fc				IA64_INTRINSIC_API(fc)
+#define ia64_thash			IA64_INTRINSIC_API(thash)
+#define ia64_get_cpuid			IA64_INTRINSIC_API(get_cpuid)
+#define ia64_get_pmd			IA64_INTRINSIC_API(get_pmd)
+
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+#define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
+#define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
+#define ia64_getreg			IA64_INTRINSIC_API(getreg)
+#define ia64_setreg			IA64_INTRINSIC_API(setreg)
+#define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
+#define ia64_get_rr			IA64_INTRINSIC_API(get_rr)
+#define ia64_ptcga			IA64_INTRINSIC_API(ptcga)
+#define ia64_get_psr_i			IA64_INTRINSIC_API(get_psr_i)
+#define ia64_intrin_local_irq_restore	\
+	IA64_INTRINSIC_API(intrin_local_irq_restore)
+#define ia64_set_rr0_to_rr4		IA64_INTRINSIC_API(set_rr0_to_rr4)
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_IA64_INTRINSICS_H */
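
The two hunks above are the pivot of the series: every intrinsic listed here keeps a single spelling at its call sites, and the kernel configuration decides what that spelling expands to. A minimal sketch of how one call site resolves follows; the wrapper function is hypothetical and not part of the patch, only the macro names are from the hunks above.

/* Sketch only: a hypothetical call site.  The source is identical
 * in both configurations; only the macro expansion differs. */
static unsigned long example_vhpt_lookup(unsigned long addr)
{
	/* CONFIG_PARAVIRT:  expands to pv_cpu_ops.thash(addr),
	 *                   an indirect call the pv layer can patch
	 * native build:     expands to ia64_native_thash(addr),
	 *                   i.e. the raw thash instruction */
	return ia64_thash(addr);
}

The IA64_INTRINSIC_API names go through a function pointer; the IA64_INTRINSIC_MACRO names (ssm/rsm) must stay macros because their mask operand has to remain a compile-time constant, as paravirt_privop.h below spells out.
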
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index a3a4288daae8..b9c102e15f22 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -55,13 +55,27 @@
 
 #define NR_IOSAPICS			256
 
-static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
+#ifdef CONFIG_PARAVIRT_GUEST
+#include <asm/paravirt.h>
+#else
+#define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
+#define __iosapic_read			__ia64_native_iosapic_read
+#define __iosapic_write			__ia64_native_iosapic_write
+#define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
+#endif
+
+extern void __init ia64_native_iosapic_pcat_compat_init(void);
+extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
+
+static inline unsigned int
+__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	return readl(iosapic + IOSAPIC_WINDOW);
 }
 
-static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+static inline void
+__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	writel(val, iosapic + IOSAPIC_WINDOW);
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index a66d26827cbb..3627116fb0e2 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -13,14 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/cpumask.h>
-
-#define NR_VECTORS	256
-
-#if (NR_VECTORS + 32 * NR_CPUS) < 1024
-#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
-#else
-#define NR_IRQS 1024
-#endif
+#include <asm-ia64/nr-irqs.h>
 
 static __inline__ int
 irq_canonicalize (int irq)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index cef2400983fa..040bc87db930 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -152,11 +152,7 @@ reload_context (nv_mm_context_t context)
 # endif
 #endif
 
-	ia64_set_rr(0x0000000000000000UL, rr0);
-	ia64_set_rr(0x2000000000000000UL, rr1);
-	ia64_set_rr(0x4000000000000000UL, rr2);
-	ia64_set_rr(0x6000000000000000UL, rr3);
-	ia64_set_rr(0x8000000000000000UL, rr4);
+	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
 
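
reload_context() now issues one logical operation for all five region registers instead of five separate ia64_set_rr() calls. Natively this expands to the ia64_native_set_rr0_to_rr4() loop added to intrinsics.h above; for a guest it lets the hypervisor batch the five privileged updates. A hedged sketch of such a backend, where HYPERVISOR_set_rr0_to_rr4() is a purely hypothetical single hypercall and not something this patch defines:

/* Hypothetical guest backend for pv_cpu_ops.set_rr0_to_rr4. */
static void
example_guest_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
			     unsigned long val2, unsigned long val3,
			     unsigned long val4)
{
	/* one guest/host transition instead of five trapped mov-to-rr */
	HYPERVISOR_set_rr0_to_rr4(val0, val1, val2, val3, val4);
}
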
diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h
new file mode 100644
index 000000000000..c953a2ca4fce
--- /dev/null
+++ b/include/asm-ia64/native/inst.h
@@ -0,0 +1,175 @@
+/******************************************************************************
+ * include/asm-ia64/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
+
+#define __paravirt_switch_to			ia64_native_switch_to
+#define __paravirt_leave_syscall		ia64_native_leave_syscall
+#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
+#define __paravirt_leave_kernel			ia64_native_leave_kernel
+#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+						ia64_work_processed_syscall
+
+#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
+# define PARAVIRT_POISON	0xdeadbeefbaadf00d
+# define CLOBBER(clob)				\
+	;;					\
+	movl clob = PARAVIRT_POISON;		\
+	;;
+#else
+# define CLOBBER(clob)		/* nothing */
+#endif
+
+#define MOV_FROM_IFA(reg)	\
+	mov reg = cr.ifa
+
+#define MOV_FROM_ITIR(reg)	\
+	mov reg = cr.itir
+
+#define MOV_FROM_ISR(reg)	\
+	mov reg = cr.isr
+
+#define MOV_FROM_IHA(reg)	\
+	mov reg = cr.iha
+
+#define MOV_FROM_IPSR(pred, reg)	\
+(pred)	mov reg = cr.ipsr
+
+#define MOV_FROM_IIM(reg)	\
+	mov reg = cr.iim
+
+#define MOV_FROM_IIP(reg)	\
+	mov reg = cr.iip
+
+#define MOV_FROM_IVR(reg, clob)	\
+	mov reg = cr.ivr	\
+	CLOBBER(clob)
+
+#define MOV_FROM_PSR(pred, reg, clob)	\
+(pred)	mov reg = psr		\
+	CLOBBER(clob)
+
+#define MOV_TO_IFA(reg, clob)	\
+	mov cr.ifa = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_ITIR(pred, reg, clob)	\
+(pred)	mov cr.itir = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_IHA(pred, reg, clob)	\
+(pred)	mov cr.iha = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_IPSR(pred, reg, clob)	\
+(pred)	mov cr.ipsr = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_IFS(pred, reg, clob)	\
+(pred)	mov cr.ifs = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_IIP(reg, clob)	\
+	mov cr.iip = reg	\
+	CLOBBER(clob)
+
+#define MOV_TO_KR(kr, reg, clob0, clob1)	\
+	mov IA64_KR(kr) = reg	\
+	CLOBBER(clob0)		\
+	CLOBBER(clob1)
+
+#define ITC_I(pred, reg, clob)	\
+(pred)	itc.i reg		\
+	CLOBBER(clob)
+
+#define ITC_D(pred, reg, clob)	\
+(pred)	itc.d reg		\
+	CLOBBER(clob)
+
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
+(pred_i) itc.i reg;				\
+(pred_d) itc.d reg				\
+	CLOBBER(clob)
+
+#define THASH(pred, reg0, reg1, clob)	\
+(pred)	thash reg0 = reg1		\
+	CLOBBER(clob)
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)		\
+	ssm psr.ic | PSR_DEFAULT_BITS					\
+	CLOBBER(clob0)							\
+	CLOBBER(clob1)							\
+	;;								\
+	srlz.i	/* guarantee that interruption collection is on */	\
+	;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
+	ssm psr.ic				\
+	CLOBBER(clob0)				\
+	CLOBBER(clob1)				\
+	;;					\
+	srlz.d
+
+#define RSM_PSR_IC(clob)	\
+	rsm psr.ic		\
+	CLOBBER(clob)
+
+#define SSM_PSR_I(pred, pred_clob, clob)	\
+(pred)	ssm psr.i				\
+	CLOBBER(clob)
+
+#define RSM_PSR_I(pred, clob0, clob1)	\
+(pred)	rsm psr.i			\
+	CLOBBER(clob0)			\
+	CLOBBER(clob1)
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
+	rsm psr.i | psr.ic			\
+	CLOBBER(clob0)				\
+	CLOBBER(clob1)				\
+	CLOBBER(clob2)
+
+#define RSM_PSR_DT		\
+	rsm psr.dt
+
+#define SSM_PSR_DT_AND_SRLZ_I	\
+	ssm psr.dt		\
+	;;			\
+	srlz.i
+
+#define BSW_0(clob0, clob1, clob2)	\
+	bsw.0				\
+	CLOBBER(clob0)			\
+	CLOBBER(clob1)			\
+	CLOBBER(clob2)
+
+#define BSW_1(clob0, clob1)	\
+	bsw.1			\
+	CLOBBER(clob0)		\
+	CLOBBER(clob1)
+
+#define COVER	\
+	cover
+
+#define RFI	\
+	rfi
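
The CLOBBER() machinery deserves a note: a paravirtualized replacement for one of these single-instruction macros may need scratch registers, so each macro explicitly names the registers it is allowed to destroy. With CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK the native build deliberately poisons those registers, so any caller that wrongly relies on a value surviving the macro fails loudly. Expanding the macros above by hand, for illustration:

/* MOV_FROM_IVR(r8, r9) with the clobber check enabled becomes:
 *
 *	mov r8 = cr.ivr
 *	;;
 *	movl r9 = 0xdeadbeefbaadf00d	// PARAVIRT_POISON
 *	;;
 *
 * A caller that still reads r9 afterwards sees the poison value,
 * mimicking a pv replacement that used r9 as scratch.
 */
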
diff --git a/include/asm-ia64/native/irq.h b/include/asm-ia64/native/irq.h
new file mode 100644
index 000000000000..efe9ff74a3c4
--- /dev/null
+++ b/include/asm-ia64/native/irq.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ * include/asm-ia64/native/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * moved from linux/include/asm-ia64/irq.h.
+ */
+
+#ifndef _ASM_IA64_NATIVE_IRQ_H
+#define _ASM_IA64_NATIVE_IRQ_H
+
+#define NR_VECTORS	256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define IA64_NATIVE_NR_IRQS	(NR_VECTORS + 32 * NR_CPUS)
+#else
+#define IA64_NATIVE_NR_IRQS	1024
+#endif
+
+#endif /* _ASM_IA64_NATIVE_IRQ_H */
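
The constants are unchanged from the old irq.h; only the result is renamed, so that the generated nr-irqs.h pulled in by irq.h above can be built as the maximum over all supported execution environments. The arithmetic, worked for a few assumed NR_CPUS values:

/* IA64_NATIVE_NR_IRQS = min(NR_VECTORS + 32 * NR_CPUS, 1024)
 *
 *	NR_CPUS =  4:	256 + 32 *  4 =  384
 *	NR_CPUS = 16:	256 + 32 * 16 =  768
 *	NR_CPUS = 64:	256 + 32 * 64 = 2304  ->  capped at 1024
 */
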
diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h
new file mode 100644
index 000000000000..1b4df129f579
--- /dev/null
+++ b/include/asm-ia64/paravirt.h
@@ -0,0 +1,255 @@
+/******************************************************************************
+ * include/asm-ia64/paravirt.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT_GUEST
+
+#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0
+#define PARAVIRT_HYPERVISOR_TYPE_XEN		1
+
+#ifndef __ASSEMBLY__
+
+#include <asm/hw_irq.h>
+#include <asm/meminit.h>
+
+/******************************************************************************
+ * general info
+ */
+struct pv_info {
+	unsigned int kernel_rpl;
+	int paravirt_enabled;
+	const char *name;
+};
+
+extern struct pv_info pv_info;
+
+static inline int paravirt_enabled(void)
+{
+	return pv_info.paravirt_enabled;
+}
+
+static inline unsigned int get_kernel_rpl(void)
+{
+	return pv_info.kernel_rpl;
+}
+
+/******************************************************************************
+ * initialization hooks.
+ */
+struct rsvd_region;
+
+struct pv_init_ops {
+	void (*banner)(void);
+
+	int (*reserve_memory)(struct rsvd_region *region);
+
+	void (*arch_setup_early)(void);
+	void (*arch_setup_console)(char **cmdline_p);
+	int (*arch_setup_nomca)(void);
+
+	void (*post_smp_prepare_boot_cpu)(void);
+};
+
+extern struct pv_init_ops pv_init_ops;
+
+static inline void paravirt_banner(void)
+{
+	if (pv_init_ops.banner)
+		pv_init_ops.banner();
+}
+
+static inline int paravirt_reserve_memory(struct rsvd_region *region)
+{
+	if (pv_init_ops.reserve_memory)
+		return pv_init_ops.reserve_memory(region);
+	return 0;
+}
+
+static inline void paravirt_arch_setup_early(void)
+{
+	if (pv_init_ops.arch_setup_early)
+		pv_init_ops.arch_setup_early();
+}
+
+static inline void paravirt_arch_setup_console(char **cmdline_p)
+{
+	if (pv_init_ops.arch_setup_console)
+		pv_init_ops.arch_setup_console(cmdline_p);
+}
+
+static inline int paravirt_arch_setup_nomca(void)
+{
+	if (pv_init_ops.arch_setup_nomca)
+		return pv_init_ops.arch_setup_nomca();
+	return 0;
+}
+
+static inline void paravirt_post_smp_prepare_boot_cpu(void)
+{
+	if (pv_init_ops.post_smp_prepare_boot_cpu)
+		pv_init_ops.post_smp_prepare_boot_cpu();
+}
+
+/******************************************************************************
+ * replacement of iosapic operations.
+ */
+
+struct pv_iosapic_ops {
+	void (*pcat_compat_init)(void);
+
+	struct irq_chip *(*get_irq_chip)(unsigned long trigger);
+
+	unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
+	void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
+};
+
+extern struct pv_iosapic_ops pv_iosapic_ops;
+
+static inline void
+iosapic_pcat_compat_init(void)
+{
+	if (pv_iosapic_ops.pcat_compat_init)
+		pv_iosapic_ops.pcat_compat_init();
+}
+
+static inline struct irq_chip *
+iosapic_get_irq_chip(unsigned long trigger)
+{
+	return pv_iosapic_ops.get_irq_chip(trigger);
+}
+
+static inline unsigned int
+__iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+	return pv_iosapic_ops.__read(iosapic, reg);
+}
+
+static inline void
+__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+	return pv_iosapic_ops.__write(iosapic, reg, val);
+}
+
+/******************************************************************************
+ * replacement of irq operations.
+ */
+
+struct pv_irq_ops {
+	void (*register_ipi)(void);
+
+	int (*assign_irq_vector)(int irq);
+	void (*free_irq_vector)(int vector);
+
+	void (*register_percpu_irq)(ia64_vector vec,
+				    struct irqaction *action);
+
+	void (*resend_irq)(unsigned int vector);
+};
+
+extern struct pv_irq_ops pv_irq_ops;
+
+static inline void
+ia64_register_ipi(void)
+{
+	pv_irq_ops.register_ipi();
+}
+
+static inline int
+assign_irq_vector(int irq)
+{
+	return pv_irq_ops.assign_irq_vector(irq);
+}
+
+static inline void
+free_irq_vector(int vector)
+{
+	return pv_irq_ops.free_irq_vector(vector);
+}
+
+static inline void
+register_percpu_irq(ia64_vector vec, struct irqaction *action)
+{
+	pv_irq_ops.register_percpu_irq(vec, action);
+}
+
+static inline void
+ia64_resend_irq(unsigned int vector)
+{
+	pv_irq_ops.resend_irq(vector);
+}
+
+/******************************************************************************
+ * replacement of time operations.
+ */
+
+extern struct itc_jitter_data_t itc_jitter_data;
+extern volatile int time_keeper_id;
+
+struct pv_time_ops {
+	void (*init_missing_ticks_accounting)(int cpu);
+	int (*do_steal_accounting)(unsigned long *new_itm);
+
+	void (*clocksource_resume)(void);
+};
+
+extern struct pv_time_ops pv_time_ops;
+
+static inline void
+paravirt_init_missing_ticks_accounting(int cpu)
+{
+	if (pv_time_ops.init_missing_ticks_accounting)
+		pv_time_ops.init_missing_ticks_accounting(cpu);
+}
+
+static inline int
+paravirt_do_steal_accounting(unsigned long *new_itm)
+{
+	return pv_time_ops.do_steal_accounting(new_itm);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else
+/* fallback for native case */
+
+#ifndef __ASSEMBLY__
+
+#define paravirt_banner()				do { } while (0)
+#define paravirt_reserve_memory(region)			0
+
+#define paravirt_arch_setup_early()			do { } while (0)
+#define paravirt_arch_setup_console(cmdline_p)		do { } while (0)
+#define paravirt_arch_setup_nomca()			0
+#define paravirt_post_smp_prepare_boot_cpu()		do { } while (0)
+
+#define paravirt_init_missing_ticks_accounting(cpu)	do { } while (0)
+#define paravirt_do_steal_accounting(new_itm)		0
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __ASM_PARAVIRT_H */
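
Each ops table follows the same pattern: a hypervisor port fills in the hooks it cares about at early boot, and the inline wrappers above silently skip any hook left NULL. A hedged sketch of such a registration, where every example_* name is a hypothetical placeholder and not code from this patch:

/* Sketch: how a guest port might install its hooks at early boot. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/paravirt.h>

static void example_banner(void)
{
	printk(KERN_INFO "booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

void __init example_setup_pv_ops(void)
{
	pv_info.paravirt_enabled = 1;
	pv_info.name = "example hypervisor";

	pv_init_ops.banner = example_banner;
	/* hooks left NULL (e.g. reserve_memory) keep the native
	 * behaviour via the if-non-NULL checks in the wrappers */
}
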
diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h
new file mode 100644
index 000000000000..52482e6940ac
--- /dev/null
+++ b/include/asm-ia64/paravirt_privop.h
@@ -0,0 +1,114 @@
+/******************************************************************************
+ * include/asm-ia64/paravirt_privop.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
+#define _ASM_IA64_PARAVIRT_PRIVOP_H
+
+#ifdef CONFIG_PARAVIRT
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/kregs.h> /* for IA64_PSR_I */
+
+/******************************************************************************
+ * replacement of intrinsics operations.
+ */
+
+struct pv_cpu_ops {
+	void (*fc)(unsigned long addr);
+	unsigned long (*thash)(unsigned long addr);
+	unsigned long (*get_cpuid)(int index);
+	unsigned long (*get_pmd)(int index);
+	unsigned long (*getreg)(int reg);
+	void (*setreg)(int reg, unsigned long val);
+	void (*ptcga)(unsigned long addr, unsigned long size);
+	unsigned long (*get_rr)(unsigned long index);
+	void (*set_rr)(unsigned long index, unsigned long val);
+	void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
+			       unsigned long val2, unsigned long val3,
+			       unsigned long val4);
+	void (*ssm_i)(void);
+	void (*rsm_i)(void);
+	unsigned long (*get_psr_i)(void);
+	void (*intrin_local_irq_restore)(unsigned long flags);
+};
+
+extern struct pv_cpu_ops pv_cpu_ops;
+
+extern void ia64_native_setreg_func(int regnum, unsigned long val);
+extern unsigned long ia64_native_getreg_func(int regnum);
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+
+/* The mask for ia64_native_ssm/rsm() must be a compile-time constant
+ * (the "i" asm constraint); a static inline function cannot satisfy
+ * that, hence these macros. */
+#define paravirt_ssm(mask)			\
+	do {					\
+		if ((mask) == IA64_PSR_I)	\
+			pv_cpu_ops.ssm_i();	\
+		else				\
+			ia64_native_ssm(mask);	\
+	} while (0)
+
+#define paravirt_rsm(mask)			\
+	do {					\
+		if ((mask) == IA64_PSR_I)	\
+			pv_cpu_ops.rsm_i();	\
+		else				\
+			ia64_native_rsm(mask);	\
+	} while (0)
+
+/******************************************************************************
+ * replacement of hand-written assembly code.
+ */
+struct pv_cpu_asm_switch {
+	unsigned long switch_to;
+	unsigned long leave_syscall;
+	unsigned long work_processed_syscall;
+	unsigned long leave_kernel;
+};
+void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
+
+#endif /* __ASSEMBLY__ */
+
+#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name
+
+#else
+
+/* fallback for native case */
+#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name
+
+#endif /* CONFIG_PARAVIRT */
+
+/* these routines utilize privilege-sensitive or performance-sensitive
+ * privileged instructions, so the code must be replaced with
+ * paravirtualized versions */
+#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
+#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
+#define ia64_work_processed_syscall	\
+	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
+#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)
+
+#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
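
Note the shape of paravirt_ssm()/paravirt_rsm(): the comparison is against a compile-time constant, so for any given call site one branch is dead and the optimizer can emit either the indirect pv call or the plain native instruction, with no runtime test. For illustration, with call sites assumed:

/* Effective expansions after constant folding:
 *
 *	paravirt_ssm(IA64_PSR_I);	-> pv_cpu_ops.ssm_i();
 *	paravirt_ssm(IA64_PSR_DT);	-> ia64_native_ssm(IA64_PSR_DT);
 *
 * Only the interrupt-enable bit is routed through the ops table;
 * every other mask keeps the native instruction and its "i"
 * (immediate) asm constraint.
 */
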
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 27731e032ee9..12d96e0cd513 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/bitops.h>
+#include <linux/irqreturn.h>
 
 #include <asm/io.h>
 #include <asm/param.h>
@@ -120,6 +121,7 @@ extern void __init smp_build_cpu_map(void);
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
+extern irqreturn_t handle_IPI(int irq, void *dev_id);
 extern void smp_send_reschedule (int cpu);
 extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 26e250bfb912..927a381c20ca 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -26,6 +26,7 @@
  */
 #define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
+#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
@@ -122,10 +123,16 @@ extern struct ia64_boot_param {
  * write a floating-point register right before reading the PSR
  * and that writes to PSR.mfl
  */
+#ifdef CONFIG_PARAVIRT
+#define __local_save_flags()	ia64_get_psr_i()
+#else
+#define __local_save_flags()	ia64_getreg(_IA64_REG_PSR)
+#endif
+
 #define __local_irq_save(x)			\
 do {						\
 	ia64_stop();				\
-	(x) = ia64_getreg(_IA64_REG_PSR);	\
+	(x) = __local_save_flags();		\
 	ia64_stop();				\
 	ia64_rsm(IA64_PSR_I);			\
 } while (0)
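
Under CONFIG_PARAVIRT the saved flags therefore carry only the PSR.i state reported by pv_cpu_ops.get_psr_i(), not the full PSR image, so a hypervisor can answer the query without exposing the whole register. A sketch of the round trip, assuming the pvops definitions added earlier in this patch:

/* Sketch: interrupt-flag save/restore through the pv hooks. */
unsigned long flags;

__local_irq_save(flags);		/* flags = ia64_get_psr_i(); then rsm psr.i */
/* ... critical section, interrupts masked ... */
ia64_intrin_local_irq_restore(flags);	/* ssm psr.i iff flags had PSR.i set */
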
@@ -173,7 +180,7 @@ do { \
 #endif /* !CONFIG_IA64_DEBUG_IRQ */
 
 #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
+#define local_save_flags(flags)	({ ia64_stop(); (flags) = __local_save_flags(); })
 
 #define irqs_disabled()				\
 ({						\