Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile                |  44
-rw-r--r--  arch/ia64/kernel/acpi.c                  |   5
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c  |   4
-rw-r--r--  arch/ia64/kernel/entry.S                 | 115
-rw-r--r--  arch/ia64/kernel/head.S                  |  41
-rw-r--r--  arch/ia64/kernel/iosapic.c               |  45
-rw-r--r--  arch/ia64/kernel/irq_ia64.c              |  19
-rw-r--r--  arch/ia64/kernel/ivt.S                   | 462
-rw-r--r--  arch/ia64/kernel/minstate.h              |  13
-rw-r--r--  arch/ia64/kernel/module.c                |   3
-rw-r--r--  arch/ia64/kernel/nr-irqs.c               |  24
-rw-r--r--  arch/ia64/kernel/paravirt.c              | 369
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h         |  29
-rw-r--r--  arch/ia64/kernel/paravirtentry.S         |  60
-rw-r--r--  arch/ia64/kernel/setup.c                 |  10
-rw-r--r--  arch/ia64/kernel/smpboot.c               |   2
-rw-r--r--  arch/ia64/kernel/time.c                  |  23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S           |   1
18 files changed, 960 insertions(+), 309 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 13fd10e8699e..87fea11aecb7 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -36,6 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
+
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -70,3 +72,45 @@ $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
 # We must build gate.so before we can assemble it.
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/gate-data.o: $(obj)/gate.so
+
+# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
+define sed-y
+	"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+quiet_cmd_nr_irqs = GEN $@
+define cmd_nr_irqs
+	(set -e; \
+	 echo "#ifndef __ASM_NR_IRQS_H__"; \
+	 echo "#define __ASM_NR_IRQS_H__"; \
+	 echo "/*"; \
+	 echo " * DO NOT MODIFY."; \
+	 echo " *"; \
+	 echo " * This file was generated by Kbuild"; \
+	 echo " *"; \
+	 echo " */"; \
+	 echo ""; \
+	 sed -ne $(sed-y) $<; \
+	 echo ""; \
+	 echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
+				  $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+	$(Q)mkdir -p $(dir $@)
+	$(call if_changed_dep,cc_s_c)
+
+include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+	$(Q)mkdir -p $(dir $@)
+	$(call cmd,nr_irqs)
+
+clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
+
+#
+# native ivt.S and entry.S
+#
+ASM_PARAVIRT_OBJS = ivt.o entry.o
+define paravirtualized_native
+AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+endef
+$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
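
Note: the GEN rule above reuses the kbuild asm-offsets trick: nr-irqs.c is compiled only to assembly, and the sed-y script rewrites each "->NAME value comment" marker in the .s file into a #define in include/asm-ia64/nr-irqs.h. A sketch of the C side under that assumption (nr-irqs.c is added by this commit but its body is not visible on this page, so the union below is illustrative):

/* arch/ia64/kernel/nr-irqs.c, sketched */
#include <linux/kbuild.h>	/* DEFINE() emits "->NAME value comment" into the .s */

void foo(void)
{
	/*
	 * NR_IRQS must be the max over all enabled flavors; taking
	 * sizeof() of the largest member of a union computes that max
	 * at compile time.
	 */
	union paravirt_nr_irqs_max {
		char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
#ifdef CONFIG_XEN
		char xen_nr_irqs[XEN_NR_IRQS];
#endif
	};

	DEFINE(NR_IRQS, sizeof(union paravirt_nr_irqs_max));
}

sed then turns the marker into a line of the form "#define NR_IRQS <value> /* sizeof(union paravirt_nr_irqs_max) */" in the generated header.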
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 43687cc60dfb..5d1eb7ee2bf6 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -774,7 +774,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int pxm_id;
@@ -854,8 +854,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	union acpi_object *obj;
 	struct acpi_madt_local_sapic *lsapic;
 	cpumask_t tmp_map;
-	long physid;
-	int cpu;
+	int cpu, physid;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index b8498ea62068..7b435451b3dc 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -51,7 +51,7 @@ processor_set_pstate (
 	retval = ia64_pal_set_pstate((u64)value);
 
 	if (retval) {
-		dprintk("Failed to set freq to 0x%x, with error 0x%x\n",
+		dprintk("Failed to set freq to 0x%x, with error 0x%lx\n",
 			value, retval);
 		return -ENODEV;
 	}
@@ -74,7 +74,7 @@ processor_get_pstate (
 
 	if (retval)
 		dprintk("Failed to get current freq with "
-			"error 0x%x, idx 0x%x\n", retval, *value);
+			"error 0x%lx, idx 0x%x\n", retval, *value);
 
 	return (int)retval;
 }
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ca2bb95726de..56ab156c48ae 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -23,6 +23,11 @@
  * 11/07/2000
  */
 /*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ */
+/*
  * Global (preserved) predicate usage on syscall entry/exit path:
  *
  * pKStk: See entry.h.
@@ -45,6 +50,7 @@
 
 #include "minstate.h"
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 	/*
 	 * execve() is special because in case of success, we need to
 	 * setup a null register window frame.
@@ -173,6 +179,7 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -180,7 +187,7 @@ END(sys_clone)
  * called.  The code starting at .map relies on this.  The rest of the code
  * doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 	;;
 .done:
 	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=in0	// update "current" application register
+	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
 	mov r8=r13			// return pointer to previously running task
 	mov r13=in0			// set "current" pointer
 	;;
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
 	br.ret.sptk.many rp		// boogie on out in new context
 
 .map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
+	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
 	or r23=r25,r20			// construct PA | page properties
 	mov r25=IA64_GRANULE_SHIFT<<2
 	;;
-	mov cr.itir=r25
-	mov cr.ifa=in0			// VA of next task...
+	MOV_TO_ITIR(p0, r25, r8)
+	MOV_TO_IFA(in0, r8)		// VA of next task...
 	;;
 	mov r25=IA64_TR_CURRENT_STACK
-	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
+	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
-	ssm psr.ic			// reenable the psr.ic bit
-	;;
-	srlz.d
+	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
  * - b7 holds address to return to
  * - must not touch r8-r11
  */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
 	.prologue
 	.altrp b7
 
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
 (pUStk)	rsm psr.i				// disable interrupts
-	br.cond.sptk .work_pending_syscall_end
+	br.cond.sptk ia64_work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+	;;
+	br.cond.sptk.few ia64_leave_syscall
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  * need to switch to bank 0 and doesn't restore the scratch registers.
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
 	 * extra work.  We always check for extra work when returning to user-level.
 	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 	 * is 0.  After extra work processing has been completed, execution
-	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r2, r18)			// disable interrupts
 	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r2, r18)
 	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
-.work_processed_syscall:
+.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	adds r2=PT(LOADRS)+16,r12
 (pUStk)	mov.m r22=ar.itc			// fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
 (pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
 	;;
 	invala			// M0|1 invalidate ALAT
-	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
+	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
 	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
 	;;
 #endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
 	nop 0
 	;;
 	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)
 
 	srlz.d			// M0   ensure interruption collection is off (for cover)
 	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
-	cover			// B    add current frame into dirty partition & set cr.ifs
+	COVER			// B    add current frame into dirty partition & set cr.ifs
 	;;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mov r19=ar.bsp		// M2   get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
 	mov.m ar.ssd=r0		// M2   clear ar.ssd
 	mov f11=f0		// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 #ifdef CONFIG_IA32_SUPPORT
 GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
 #endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r17, r31)			// disable interrupts
 	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r17, r31)
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.csd=r30
 	mov ar.ssd=r31
 	;;
-	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
+	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
 	invala			// invalidate ALAT
 	;;
 	ld8.fill r22=[r2],24
@@ -942,7 +969,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.ccv=r15
 	;;
 	ldf.fill f11=[r2]
-	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
+	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
 	;;
 (pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
 	adds r16=PT(CR_IPSR)+16,r12
@@ -950,12 +977,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.pred.rel.mutex pUStk,pKStk
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 (pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
 	nop.i 0
 	;;
 #else
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * NOTE: alloc, loadrs, and cover can't be predicated.
 	 */
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover				// add current frame into dirty partition and set cr.ifs
+	COVER				// add current frame into dirty partition and set cr.ifs
 	;;
 	mov r19=ar.bsp			// get new backing store pointer
 rbs_switch:
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
 (pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
 (pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
 	;;
-	mov cr.ipsr=r29		// M2
+	MOV_TO_IPSR(p0, r29, r25)	// M2
 	mov ar.pfs=r26		// I0
 (pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
 
-(p9)	mov cr.ifs=r30		// M2
+	MOV_TO_IFS(p9, r30, r25)// M2
 	mov b0=r21		// I0
 (pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
 
 	mov ar.fpsr=r20		// M2
-	mov cr.iip=r28		// M2
+	MOV_TO_IIP(r28, r25)	// M2
 	nop 0
 	;;
 (pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
@@ -1148,7 +1175,7 @@ skip_rbs_switch:
 
 	mov ar.rsc=r27		// M2
 	mov pr=r31,-1		// I0
-	rfi			// B
+	RFI			// B
 
 	/*
 	 * On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
 	;;
 (pKStk)	st4 [r20]=r21
 #endif
-	ssm psr.i		// enable interrupts
+	SSM_PSR_I(p0, p6, r2)	// enable interrupts
 	br.call.spnt.many rp=schedule
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r2, r20)	// disable interrupts
 	;;
 #ifdef CONFIG_PREEMPT
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
 (pKStk)	st4 [r20]=r0		// preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
-.work_pending_syscall_end:
+.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
 	;;
 	ld8 r8=[r2]
 	ld8 r10=[r3]
-	br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+	br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 ENTRY(handle_syscall_error)
 	/*
 	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
  * We declare 8 input registers so the system call args get preserved,
  * in case we need to restart a system call.
  */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
 	mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_leave_kernel
+	mov.sptk b7=r8,ia64_native_leave_kernel
 	;;
 	mov ar.unat=r9
 	br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
 	data8 sys_timerfd_gettime
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
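
Note: entry.S is now assembled with -D__IA64_ASM_PARAVIRTUALIZED_NATIVE (set in the Makefile hunk above), so the neutral __paravirt_* entry points and the upper-case privileged-operation macros must be mapped back to their native spellings by a rename header; paravirt_inst.h in the diffstat is the visible part of that layer, but the defines themselves are outside this page. A sketch of what the native mapping presumably looks like (names follow the pattern used in the code; exact spellings are assumptions):

/* native rename layer, sketched */
#define __paravirt_switch_to			ia64_native_switch_to
#define __paravirt_leave_syscall		ia64_native_leave_syscall
#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
#define __paravirt_leave_kernel			ia64_native_leave_kernel

/* and each macro expands to the single instruction it replaced, e.g. */
#define RFI				rfi
#define COVER				cover
#define MOV_FROM_PSR(pred, reg, clob)	(pred) mov reg = psr

Under a hypervisor build the same macros would instead expand to hypercall or stub sequences, which is why each one takes explicit scratch/clobber registers.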
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index ddeab4e36fd5..db540e58c783 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -26,11 +26,14 @@
 #include <asm/mmu_context.h>
 #include <asm/asm-offsets.h>
 #include <asm/pal.h>
+#include <asm/paravirt.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/mca_asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define SAL_PSR_BITS_TO_SET	\
@@ -367,6 +370,44 @@ start_ap:
 	;;
 (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
 
+#ifdef CONFIG_PARAVIRT
+
+	movl r14=hypervisor_setup_hooks
+	movl r15=hypervisor_type
+	mov r16=num_hypervisor_hooks
+	;;
+	ld8 r2=[r15]
+	;;
+	cmp.ltu p7,p0=r2,r16	// array size check
+	shladd r8=r2,3,r14
+	;;
+(p7)	ld8 r9=[r8]
+	;;
+(p7)	mov b1=r9
+(p7)	cmp.ne.unc p7,p0=r9,r0	// no actual branch to NULL
+	;;
+(p7)	br.call.sptk.many rp=b1
+
+	__INITDATA
+
+default_setup_hook = 0		// Currently nothing needs to be done.
+
+	.weak xen_setup_hook
+
+	.global hypervisor_type
+hypervisor_type:
+	data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
+
+	// must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
+
+hypervisor_setup_hooks:
+	data8 default_setup_hook
+	data8 xen_setup_hook
+num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
+	.previous
+
+#endif
+
 #ifdef CONFIG_SMP
 (isAP)	br.call.sptk.many rp=start_secondary
 .ret0:
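
Note: the boot-time dispatch above loads hypervisor_type, bounds-checks it against num_hypervisor_hooks, and branches to hypervisor_setup_hooks[hypervisor_type], so the PARAVIRT_HYPERVISOR_TYPE_* constants from <asm/paravirt.h> must enumerate in table order. Presumably (the values are an assumption, but the table layout forces DEFAULT before XEN):

/* assumed to match the data8 entries above, in order */
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0	/* default_setup_hook */
#define PARAVIRT_HYPERVISOR_TYPE_XEN		1	/* xen_setup_hook */

A hypervisor-specific boot path would store its own type into hypervisor_type before this code runs; the weak xen_setup_hook resolves to 0 on non-Xen builds, and the cmp.ne against r0 above skips the call in that case.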
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 39752cdef6ff..3bc2fa64f87f 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -585,6 +585,15 @@ static inline int irq_is_shared (int irq)
 	return (iosapic_intr_info[irq].count > 1);
 }
 
+struct irq_chip*
+ia64_native_iosapic_get_irq_chip(unsigned long trigger)
+{
+	if (trigger == IOSAPIC_EDGE)
+		return &irq_type_iosapic_edge;
+	else
+		return &irq_type_iosapic_level;
+}
+
 static int
 register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
@@ -635,13 +644,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	iosapic_intr_info[irq].dmode	= delivery;
 	iosapic_intr_info[irq].trigger	= trigger;
 
-	if (trigger == IOSAPIC_EDGE)
-		irq_type = &irq_type_iosapic_edge;
-	else
-		irq_type = &irq_type_iosapic_level;
+	irq_type = iosapic_get_irq_chip(trigger);
 
 	idesc = irq_desc + irq;
-	if (idesc->chip != irq_type) {
+	if (irq_type != NULL && idesc->chip != irq_type) {
 		if (idesc->chip != &no_irq_type)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
@@ -974,6 +980,22 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 }
 
 void __init
+ia64_native_iosapic_pcat_compat_init(void)
+{
+	if (pcat_compat) {
+		/*
+		 * Disable the compatibility mode interrupts (8259 style),
+		 * needs IN/OUT support enabled.
+		 */
+		printk(KERN_INFO
+		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
+		       __func__);
+		outb(0xff, 0xA1);
+		outb(0xff, 0x21);
+	}
+}
+
+void __init
 iosapic_system_init (int system_pcat_compat)
 {
 	int irq;
@@ -987,17 +1009,8 @@ iosapic_system_init (int system_pcat_compat)
 	}
 
 	pcat_compat = system_pcat_compat;
-	if (pcat_compat) {
-		/*
-		 * Disable the compatibility mode interrupts (8259 style),
-		 * needs IN/OUT support enabled.
-		 */
-		printk(KERN_INFO
-		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
-		       __func__);
-		outb(0xff, 0xA1);
-		outb(0xff, 0x21);
-	}
+	if (pcat_compat)
+		iosapic_pcat_compat_init();
 }
 
 static inline int
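
Note: register_intr() and iosapic_system_init() now call iosapic_get_irq_chip() and iosapic_pcat_compat_init(), neither of which this file defines; the header that binds those generic names sits outside arch/ia64/kernel and is therefore not part of this diff. The non-paravirt binding is presumably a straight rename:

/* sketch, e.g. in include/asm-ia64/iosapic.h (assumed) */
#ifndef CONFIG_PARAVIRT
#define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
#define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
#endif

The new irq_type != NULL guard in register_intr() then gives a paravirtualized get_irq_chip a way to return NULL and keep whatever irq_chip the hypervisor layer already installed.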
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5538471e8d68..28d3d483db92 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -196,7 +196,7 @@ static void clear_irq_vector(int irq)
 }
 
 int
-assign_irq_vector (int irq)
+ia64_native_assign_irq_vector (int irq)
 {
 	unsigned long flags;
 	int vector, cpu;
@@ -222,7 +222,7 @@ assign_irq_vector (int irq)
 }
 
 void
-free_irq_vector (int vector)
+ia64_native_free_irq_vector (int vector)
 {
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
@@ -600,7 +600,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
-extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -623,7 +622,7 @@ static struct irqaction tlb_irqaction = {
 #endif
 
 void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
 	irq_desc_t *desc;
 	unsigned int irq;
@@ -638,13 +637,21 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 }
 
 void __init
-init_IRQ (void)
+ia64_native_register_ipi(void)
 {
-	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#endif
+}
+
+void __init
+init_IRQ (void)
+{
+	ia64_register_ipi();
+	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
 	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
 		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
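
Note: the same split applies here: assign_irq_vector(), free_irq_vector(), register_percpu_irq() and ia64_register_ipi() keep their generic names at the call sites, while the implementations gain an ia64_native_ prefix. The glue that reconnects them is again header-side and not visible in this diff; a sketch of the assumed non-paravirt case:

/* sketch, e.g. in include/asm-ia64/hw_irq.h (assumed) */
#ifndef CONFIG_PARAVIRT
#define assign_irq_vector	ia64_native_assign_irq_vector
#define free_irq_vector		ia64_native_free_irq_vector
#define register_percpu_irq	ia64_native_register_percpu_irq
#define ia64_register_ipi	ia64_native_register_ipi
#endif

With CONFIG_PARAVIRT these would instead dispatch through a pv_irq_ops structure (see paravirt.c in the diffstat), so a hypervisor port can route IPIs through its own mechanism rather than the native vector table.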
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 80b44ea052d7..c39627df3cde 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -12,6 +12,14 @@
12 * 12 *
13 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP 13 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
14 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. 14 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
15 *
16 * Copyright (C) 2005 Hewlett-Packard Co
17 * Dan Magenheimer <dan.magenheimer@hp.com>
18 * Xen paravirtualization
19 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
20 * VA Linux Systems Japan K.K.
21 * pv_ops.
22 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
15 */ 23 */
16/* 24/*
17 * This file defines the interruption vector table used by the CPU. 25 * This file defines the interruption vector table used by the CPU.
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss)
102 * - the faulting virtual address uses unimplemented address bits 110 * - the faulting virtual address uses unimplemented address bits
103 * - the faulting virtual address has no valid page table mapping 111 * - the faulting virtual address has no valid page table mapping
104 */ 112 */
105 mov r16=cr.ifa // get address that caused the TLB miss 113 MOV_FROM_IFA(r16) // get address that caused the TLB miss
106#ifdef CONFIG_HUGETLB_PAGE 114#ifdef CONFIG_HUGETLB_PAGE
107 movl r18=PAGE_SHIFT 115 movl r18=PAGE_SHIFT
108 mov r25=cr.itir 116 MOV_FROM_ITIR(r25)
109#endif 117#endif
110 ;; 118 ;;
111 rsm psr.dt // use physical addressing for data 119 RSM_PSR_DT // use physical addressing for data
112 mov r31=pr // save the predicate registers 120 mov r31=pr // save the predicate registers
113 mov r19=IA64_KR(PT_BASE) // get page table base address 121 mov r19=IA64_KR(PT_BASE) // get page table base address
114 shl r21=r16,3 // shift bit 60 into sign bit 122 shl r21=r16,3 // shift bit 60 into sign bit
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss)
168 dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) 176 dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
169 ;; 177 ;;
170(p7) ld8 r18=[r21] // read *pte 178(p7) ld8 r18=[r21] // read *pte
171 mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss 179 MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss
172 ;; 180 ;;
173(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? 181(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
174 mov r22=cr.iha // get the VHPT address that caused the TLB miss 182 MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss
175 ;; // avoid RAW on p7 183 ;; // avoid RAW on p7
176(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? 184(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
177 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address 185 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
178 ;; 186 ;;
179(p10) itc.i r18 // insert the instruction TLB entry 187 ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and
180(p11) itc.d r18 // insert the data TLB entry 188 // insert the data TLB entry
181(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) 189(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
182 mov cr.ifa=r22 190 MOV_TO_IFA(r22, r24)
183 191
184#ifdef CONFIG_HUGETLB_PAGE 192#ifdef CONFIG_HUGETLB_PAGE
185(p8) mov cr.itir=r25 // change to default page-size for VHPT 193 MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT
186#endif 194#endif
187 195
188 /* 196 /*
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss)
192 */ 200 */
193 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 201 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
194 ;; 202 ;;
195(p7) itc.d r24 203 ITC_D(p7, r24, r25)
196 ;; 204 ;;
197#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
198 /* 206 /*
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss)
234#endif 242#endif
235 243
236 mov pr=r31,-1 // restore predicate registers 244 mov pr=r31,-1 // restore predicate registers
237 rfi 245 RFI
238END(vhpt_miss) 246END(vhpt_miss)
239 247
240 .org ia64_ivt+0x400 248 .org ia64_ivt+0x400
@@ -248,11 +256,11 @@ ENTRY(itlb_miss)
248 * mode, walk the page table, and then re-execute the PTE read and 256 * mode, walk the page table, and then re-execute the PTE read and
249 * go on normally after that. 257 * go on normally after that.
250 */ 258 */
251 mov r16=cr.ifa // get virtual address 259 MOV_FROM_IFA(r16) // get virtual address
252 mov r29=b0 // save b0 260 mov r29=b0 // save b0
253 mov r31=pr // save predicates 261 mov r31=pr // save predicates
254.itlb_fault: 262.itlb_fault:
255 mov r17=cr.iha // get virtual address of PTE 263 MOV_FROM_IHA(r17) // get virtual address of PTE
256 movl r30=1f // load nested fault continuation point 264 movl r30=1f // load nested fault continuation point
257 ;; 265 ;;
2581: ld8 r18=[r17] // read *pte 2661: ld8 r18=[r17] // read *pte
@@ -261,7 +269,7 @@ ENTRY(itlb_miss)
261 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 269 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
262(p6) br.cond.spnt page_fault 270(p6) br.cond.spnt page_fault
263 ;; 271 ;;
264 itc.i r18 272 ITC_I(p0, r18, r19)
265 ;; 273 ;;
266#ifdef CONFIG_SMP 274#ifdef CONFIG_SMP
267 /* 275 /*
@@ -278,7 +286,7 @@ ENTRY(itlb_miss)
278(p7) ptc.l r16,r20 286(p7) ptc.l r16,r20
279#endif 287#endif
280 mov pr=r31,-1 288 mov pr=r31,-1
281 rfi 289 RFI
282END(itlb_miss) 290END(itlb_miss)
283 291
284 .org ia64_ivt+0x0800 292 .org ia64_ivt+0x0800
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss)
292 * mode, walk the page table, and then re-execute the PTE read and 300 * mode, walk the page table, and then re-execute the PTE read and
293 * go on normally after that. 301 * go on normally after that.
294 */ 302 */
295 mov r16=cr.ifa // get virtual address 303 MOV_FROM_IFA(r16) // get virtual address
296 mov r29=b0 // save b0 304 mov r29=b0 // save b0
297 mov r31=pr // save predicates 305 mov r31=pr // save predicates
298dtlb_fault: 306dtlb_fault:
299 mov r17=cr.iha // get virtual address of PTE 307 MOV_FROM_IHA(r17) // get virtual address of PTE
300 movl r30=1f // load nested fault continuation point 308 movl r30=1f // load nested fault continuation point
301 ;; 309 ;;
3021: ld8 r18=[r17] // read *pte 3101: ld8 r18=[r17] // read *pte
@@ -305,7 +313,7 @@ dtlb_fault:
305 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 313 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
306(p6) br.cond.spnt page_fault 314(p6) br.cond.spnt page_fault
307 ;; 315 ;;
308 itc.d r18 316 ITC_D(p0, r18, r19)
309 ;; 317 ;;
310#ifdef CONFIG_SMP 318#ifdef CONFIG_SMP
311 /* 319 /*
@@ -322,7 +330,7 @@ dtlb_fault:
322(p7) ptc.l r16,r20 330(p7) ptc.l r16,r20
323#endif 331#endif
324 mov pr=r31,-1 332 mov pr=r31,-1
325 rfi 333 RFI
326END(dtlb_miss) 334END(dtlb_miss)
327 335
328 .org ia64_ivt+0x0c00 336 .org ia64_ivt+0x0c00
@@ -330,9 +338,9 @@ END(dtlb_miss)
330// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) 338// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
331ENTRY(alt_itlb_miss) 339ENTRY(alt_itlb_miss)
332 DBG_FAULT(3) 340 DBG_FAULT(3)
333 mov r16=cr.ifa // get address that caused the TLB miss 341 MOV_FROM_IFA(r16) // get address that caused the TLB miss
334 movl r17=PAGE_KERNEL 342 movl r17=PAGE_KERNEL
335 mov r21=cr.ipsr 343 MOV_FROM_IPSR(p0, r21)
336 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 344 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
337 mov r31=pr 345 mov r31=pr
338 ;; 346 ;;
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss)
341 ;; 349 ;;
342 cmp.gt p8,p0=6,r22 // user mode 350 cmp.gt p8,p0=6,r22 // user mode
343 ;; 351 ;;
344(p8) thash r17=r16 352 THASH(p8, r17, r16, r23)
345 ;; 353 ;;
346(p8) mov cr.iha=r17 354 MOV_TO_IHA(p8, r17, r23)
347(p8) mov r29=b0 // save b0 355(p8) mov r29=b0 // save b0
348(p8) br.cond.dptk .itlb_fault 356(p8) br.cond.dptk .itlb_fault
349#endif 357#endif
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss)
358 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 366 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
359(p8) br.cond.spnt page_fault 367(p8) br.cond.spnt page_fault
360 ;; 368 ;;
361 itc.i r19 // insert the TLB entry 369 ITC_I(p0, r19, r18) // insert the TLB entry
362 mov pr=r31,-1 370 mov pr=r31,-1
363 rfi 371 RFI
364END(alt_itlb_miss) 372END(alt_itlb_miss)
365 373
366 .org ia64_ivt+0x1000 374 .org ia64_ivt+0x1000
@@ -368,11 +376,11 @@ END(alt_itlb_miss)
368// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) 376// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
369ENTRY(alt_dtlb_miss) 377ENTRY(alt_dtlb_miss)
370 DBG_FAULT(4) 378 DBG_FAULT(4)
371 mov r16=cr.ifa // get address that caused the TLB miss 379 MOV_FROM_IFA(r16) // get address that caused the TLB miss
372 movl r17=PAGE_KERNEL 380 movl r17=PAGE_KERNEL
373 mov r20=cr.isr 381 MOV_FROM_ISR(r20)
374 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 382 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
375 mov r21=cr.ipsr 383 MOV_FROM_IPSR(p0, r21)
376 mov r31=pr 384 mov r31=pr
377 mov r24=PERCPU_ADDR 385 mov r24=PERCPU_ADDR
378 ;; 386 ;;
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss)
381 ;; 389 ;;
382 cmp.gt p8,p0=6,r22 // access to region 0-5 390 cmp.gt p8,p0=6,r22 // access to region 0-5
383 ;; 391 ;;
384(p8) thash r17=r16 392 THASH(p8, r17, r16, r25)
385 ;; 393 ;;
386(p8) mov cr.iha=r17 394 MOV_TO_IHA(p8, r17, r25)
387(p8) mov r29=b0 // save b0 395(p8) mov r29=b0 // save b0
388(p8) br.cond.dptk dtlb_fault 396(p8) br.cond.dptk dtlb_fault
389#endif 397#endif
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss)
402 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? 410 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
403 ;; 411 ;;
404(p10) sub r19=r19,r26 412(p10) sub r19=r19,r26
405(p10) mov cr.itir=r25 413 MOV_TO_ITIR(p10, r25, r24)
406 cmp.ne p8,p0=r0,r23 414 cmp.ne p8,p0=r0,r23
407(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field 415(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
408(p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr 416(p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss)
411 dep r21=-1,r21,IA64_PSR_ED_BIT,1 419 dep r21=-1,r21,IA64_PSR_ED_BIT,1
412 ;; 420 ;;
413 or r19=r19,r17 // insert PTE control bits into r19 421 or r19=r19,r17 // insert PTE control bits into r19
414(p6) mov cr.ipsr=r21 422 MOV_TO_IPSR(p6, r21, r24)
415 ;; 423 ;;
416(p7) itc.d r19 // insert the TLB entry 424 ITC_D(p7, r19, r18) // insert the TLB entry
417 mov pr=r31,-1 425 mov pr=r31,-1
418 rfi 426 RFI
419END(alt_dtlb_miss) 427END(alt_dtlb_miss)
420 428
421 .org ia64_ivt+0x1400 429 .org ia64_ivt+0x1400
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss)
444 * 452 *
445 * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) 453 * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared)
446 */ 454 */
447 rsm psr.dt // switch to using physical data addressing 455 RSM_PSR_DT // switch to using physical data addressing
448 mov r19=IA64_KR(PT_BASE) // get the page table base address 456 mov r19=IA64_KR(PT_BASE) // get the page table base address
449 shl r21=r16,3 // shift bit 60 into sign bit 457 shl r21=r16,3 // shift bit 60 into sign bit
450 mov r18=cr.itir 458 MOV_FROM_ITIR(r18)
451 ;; 459 ;;
452 shr.u r17=r16,61 // get the region number into r17 460 shr.u r17=r16,61 // get the region number into r17
453 extr.u r18=r18,2,6 // get the faulting page size 461 extr.u r18=r18,2,6 // get the faulting page size
@@ -507,33 +515,6 @@ ENTRY(ikey_miss)
507 FAULT(6) 515 FAULT(6)
508END(ikey_miss) 516END(ikey_miss)
509 517
510 //-----------------------------------------------------------------------------------
511 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
512ENTRY(page_fault)
513 ssm psr.dt
514 ;;
515 srlz.i
516 ;;
517 SAVE_MIN_WITH_COVER
518 alloc r15=ar.pfs,0,0,3,0
519 mov out0=cr.ifa
520 mov out1=cr.isr
521 adds r3=8,r2 // set up second base pointer
522 ;;
523 ssm psr.ic | PSR_DEFAULT_BITS
524 ;;
525 srlz.i // guarantee that interruption collectin is on
526 ;;
527(p15) ssm psr.i // restore psr.i
528 movl r14=ia64_leave_kernel
529 ;;
530 SAVE_REST
531 mov rp=r14
532 ;;
533 adds out2=16,r12 // out2 = pointer to pt_regs
534 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
535END(page_fault)
536
537 .org ia64_ivt+0x1c00 518 .org ia64_ivt+0x1c00
538///////////////////////////////////////////////////////////////////////////////////////// 519/////////////////////////////////////////////////////////////////////////////////////////
539// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 520// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
@@ -556,10 +537,10 @@ ENTRY(dirty_bit)
556 * page table TLB entry isn't present, we take a nested TLB miss hit where we look 537 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
557 * up the physical address of the L3 PTE and then continue at label 1 below. 538 * up the physical address of the L3 PTE and then continue at label 1 below.
558 */ 539 */
559 mov r16=cr.ifa // get the address that caused the fault 540 MOV_FROM_IFA(r16) // get the address that caused the fault
560 movl r30=1f // load continuation point in case of nested fault 541 movl r30=1f // load continuation point in case of nested fault
561 ;; 542 ;;
562 thash r17=r16 // compute virtual address of L3 PTE 543 THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
563 mov r29=b0 // save b0 in case of nested fault 544 mov r29=b0 // save b0 in case of nested fault
564 mov r31=pr // save pr 545 mov r31=pr // save pr
565#ifdef CONFIG_SMP 546#ifdef CONFIG_SMP
@@ -576,7 +557,7 @@ ENTRY(dirty_bit)
576 ;; 557 ;;
577(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present 558(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
578 ;; 559 ;;
579(p6) itc.d r25 // install updated PTE 560 ITC_D(p6, r25, r18) // install updated PTE
580 ;; 561 ;;
581 /* 562 /*
582 * Tell the assemblers dependency-violation checker that the above "itc" instructions 563 * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -602,7 +583,7 @@ ENTRY(dirty_bit)
602 itc.d r18 // install updated PTE 583 itc.d r18 // install updated PTE
603#endif 584#endif
604 mov pr=r31,-1 // restore pr 585 mov pr=r31,-1 // restore pr
605 rfi 586 RFI
606END(dirty_bit) 587END(dirty_bit)
607 588
608 .org ia64_ivt+0x2400 589 .org ia64_ivt+0x2400
@@ -611,22 +592,22 @@ END(dirty_bit)
611ENTRY(iaccess_bit) 592ENTRY(iaccess_bit)
612 DBG_FAULT(9) 593 DBG_FAULT(9)
613 // Like Entry 8, except for instruction access 594 // Like Entry 8, except for instruction access
614 mov r16=cr.ifa // get the address that caused the fault 595 MOV_FROM_IFA(r16) // get the address that caused the fault
615 movl r30=1f // load continuation point in case of nested fault 596 movl r30=1f // load continuation point in case of nested fault
616 mov r31=pr // save predicates 597 mov r31=pr // save predicates
617#ifdef CONFIG_ITANIUM 598#ifdef CONFIG_ITANIUM
618 /* 599 /*
619 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. 600 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
620 */ 601 */
621 mov r17=cr.ipsr 602 MOV_FROM_IPSR(p0, r17)
622 ;; 603 ;;
623 mov r18=cr.iip 604 MOV_FROM_IIP(r18)
624 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? 605 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
625 ;; 606 ;;
626(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa 607(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
627#endif /* CONFIG_ITANIUM */ 608#endif /* CONFIG_ITANIUM */
628 ;; 609 ;;
629 thash r17=r16 // compute virtual address of L3 PTE 610 THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
630 mov r29=b0 // save b0 in case of nested fault) 611 mov r29=b0 // save b0 in case of nested fault)
631#ifdef CONFIG_SMP 612#ifdef CONFIG_SMP
632 mov r28=ar.ccv // save ar.ccv 613 mov r28=ar.ccv // save ar.ccv
@@ -642,7 +623,7 @@ ENTRY(iaccess_bit)
642 ;; 623 ;;
643(p6) cmp.eq p6,p7=r26,r18 // Only if page present 624(p6) cmp.eq p6,p7=r26,r18 // Only if page present
644 ;; 625 ;;
645(p6) itc.i r25 // install updated PTE 626 ITC_I(p6, r25, r26) // install updated PTE
646 ;; 627 ;;
647 /* 628 /*
648 * Tell the assemblers dependency-violation checker that the above "itc" instructions 629 * Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -668,7 +649,7 @@ ENTRY(iaccess_bit)
668 itc.i r18 // install updated PTE 649 itc.i r18 // install updated PTE
669#endif /* !CONFIG_SMP */ 650#endif /* !CONFIG_SMP */
670 mov pr=r31,-1 651 mov pr=r31,-1
671 rfi 652 RFI
672END(iaccess_bit) 653END(iaccess_bit)
673 654
674 .org ia64_ivt+0x2800 655 .org ia64_ivt+0x2800
@@ -677,10 +658,10 @@ END(iaccess_bit)
677ENTRY(daccess_bit) 658ENTRY(daccess_bit)
678 DBG_FAULT(10) 659 DBG_FAULT(10)
679 // Like Entry 8, except for data access 660 // Like Entry 8, except for data access
680 mov r16=cr.ifa // get the address that caused the fault 661 MOV_FROM_IFA(r16) // get the address that caused the fault
681 movl r30=1f // load continuation point in case of nested fault 662 movl r30=1f // load continuation point in case of nested fault
682 ;; 663 ;;
683 thash r17=r16 // compute virtual address of L3 PTE 664 THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
684 mov r31=pr 665 mov r31=pr
685 mov r29=b0 // save b0 in case of nested fault) 666 mov r29=b0 // save b0 in case of nested fault)
686#ifdef CONFIG_SMP 667#ifdef CONFIG_SMP
@@ -697,7 +678,7 @@ ENTRY(daccess_bit)
697 ;; 678 ;;
698(p6) cmp.eq p6,p7=r26,r18 // Only if page is present 679(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
699 ;; 680 ;;
700(p6) itc.d r25 // install updated PTE 681 ITC_D(p6, r25, r26) // install updated PTE
701 /* 682 /*
702 * Tell the assemblers dependency-violation checker that the above "itc" instructions 683 * Tell the assemblers dependency-violation checker that the above "itc" instructions
703 * cannot possibly affect the following loads: 684 * cannot possibly affect the following loads:
@@ -721,7 +702,7 @@ ENTRY(daccess_bit)
721#endif 702#endif
722 mov b0=r29 // restore b0 703 mov b0=r29 // restore b0
723 mov pr=r31,-1 704 mov pr=r31,-1
724 rfi 705 RFI
725END(daccess_bit) 706END(daccess_bit)
726 707
727 .org ia64_ivt+0x2c00 708 .org ia64_ivt+0x2c00
@@ -745,10 +726,10 @@ ENTRY(break_fault)
745 */ 726 */
746 DBG_FAULT(11) 727 DBG_FAULT(11)
747 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) 728 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
748 mov r29=cr.ipsr // M2 (12 cyc) 729 MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
749 mov r31=pr // I0 (2 cyc) 730 mov r31=pr // I0 (2 cyc)
750 731
751 mov r17=cr.iim // M2 (2 cyc) 732 MOV_FROM_IIM(r17) // M2 (2 cyc)
752 mov.m r27=ar.rsc // M2 (12 cyc) 733 mov.m r27=ar.rsc // M2 (12 cyc)
753 mov r18=__IA64_BREAK_SYSCALL // A 734 mov r18=__IA64_BREAK_SYSCALL // A
754 735
@@ -767,7 +748,7 @@ ENTRY(break_fault)
767 nop.m 0 748 nop.m 0
768 movl r30=sys_call_table // X 749 movl r30=sys_call_table // X
769 750
770 mov r28=cr.iip // M2 (2 cyc) 751 MOV_FROM_IIP(r28) // M2 (2 cyc)
771 cmp.eq p0,p7=r18,r17 // I0 is this a system call? 752 cmp.eq p0,p7=r18,r17 // I0 is this a system call?
772(p7) br.cond.spnt non_syscall // B no -> 753(p7) br.cond.spnt non_syscall // B no ->
773 // 754 //
@@ -864,18 +845,17 @@ ENTRY(break_fault)
864#endif 845#endif
865 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 846 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
866 nop 0 847 nop 0
867 bsw.1 // B (6 cyc) regs are saved, switch to bank 1 848 BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
868 ;; 849 ;;
869 850
870 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection 851 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
852 // M0 ensure interruption collection is on
871 movl r3=ia64_ret_from_syscall // X 853 movl r3=ia64_ret_from_syscall // X
872 ;; 854 ;;
873
874 srlz.i // M0 ensure interruption collection is on
875 mov rp=r3 // I0 set the real return addr 855 mov rp=r3 // I0 set the real return addr
876(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT 856(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
877 857
878(p15) ssm psr.i // M2 restore psr.i 858 SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
879(p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) 859(p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr)
880 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic 860 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
881 // NOT REACHED 861 // NOT REACHED
@@ -895,27 +875,8 @@ END(break_fault)
895///////////////////////////////////////////////////////////////////////////////////////// 875/////////////////////////////////////////////////////////////////////////////////////////
896// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 876// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
897ENTRY(interrupt) 877ENTRY(interrupt)
898 DBG_FAULT(12) 878 /* interrupt handler has become too big to fit this area. */
899 mov r31=pr // prepare to save predicates 879 br.sptk.many __interrupt
900 ;;
901 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
902 ssm psr.ic | PSR_DEFAULT_BITS
903 ;;
904 adds r3=8,r2 // set up second base pointer for SAVE_REST
905 srlz.i // ensure everybody knows psr.ic is back on
906 ;;
907 SAVE_REST
908 ;;
909 MCA_RECOVER_RANGE(interrupt)
910 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
911 mov out0=cr.ivr // pass cr.ivr as first arg
912 add out1=16,sp // pass pointer to pt_regs as second arg
913 ;;
914 srlz.d // make sure we see the effect of cr.ivr
915 movl r14=ia64_leave_kernel
916 ;;
917 mov rp=r14
918 br.call.sptk.many b6=ia64_handle_irq
919END(interrupt) 880END(interrupt)
920 881
921 .org ia64_ivt+0x3400 882 .org ia64_ivt+0x3400
@@ -978,6 +939,7 @@ END(interrupt)
978 * - ar.fpsr: set to kernel settings 939 * - ar.fpsr: set to kernel settings
979 * - b6: preserved (same as on entry) 940 * - b6: preserved (same as on entry)
980 */ 941 */
942#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
981GLOBAL_ENTRY(ia64_syscall_setup) 943GLOBAL_ENTRY(ia64_syscall_setup)
982#if PT(B6) != 0 944#if PT(B6) != 0
983# error This code assumes that b6 is the first field in pt_regs. 945# error This code assumes that b6 is the first field in pt_regs.
@@ -1069,6 +1031,7 @@ GLOBAL_ENTRY(ia64_syscall_setup)
1069(p10) mov r8=-EINVAL 1031(p10) mov r8=-EINVAL
1070 br.ret.sptk.many b7 1032 br.ret.sptk.many b7
1071END(ia64_syscall_setup) 1033END(ia64_syscall_setup)
1034#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
1072 1035
1073 .org ia64_ivt+0x3c00 1036 .org ia64_ivt+0x3c00
1074///////////////////////////////////////////////////////////////////////////////////////// 1037/////////////////////////////////////////////////////////////////////////////////////////
@@ -1082,7 +1045,7 @@ END(ia64_syscall_setup)
1082 DBG_FAULT(16) 1045 DBG_FAULT(16)
1083 FAULT(16) 1046 FAULT(16)
1084 1047
1085#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1048#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
1086 /* 1049 /*
1087 * There is no particular reason for this code to be here, other than 1050 * There is no particular reason for this code to be here, other than
1088 * that there happens to be space here that would go unused otherwise. 1051 * that there happens to be space here that would go unused otherwise.
@@ -1092,7 +1055,7 @@ END(ia64_syscall_setup)
1092 * account_sys_enter is called from SAVE_MIN* macros if accounting is 1055 * account_sys_enter is called from SAVE_MIN* macros if accounting is
1093 * enabled and if the macro is entered from user mode. 1056 * enabled and if the macro is entered from user mode.
1094 */ 1057 */
1095ENTRY(account_sys_enter) 1058GLOBAL_ENTRY(account_sys_enter)
1096 // mov.m r20=ar.itc is called in advance, and r13 is current 1059 // mov.m r20=ar.itc is called in advance, and r13 is current
1097 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 1060 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
1098 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 1061 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
@@ -1123,110 +1086,18 @@ END(account_sys_enter)
1123 DBG_FAULT(17) 1086 DBG_FAULT(17)
1124 FAULT(17) 1087 FAULT(17)
1125 1088
1126ENTRY(non_syscall)
1127 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1128 ;;
1129 SAVE_MIN_WITH_COVER
1130
1131 // There is no particular reason for this code to be here, other than that
1132 // there happens to be space here that would go unused otherwise. If this
1133 // fault ever gets "unreserved", simply move the following code to a more
1134 // suitable spot...
1135
1136 alloc r14=ar.pfs,0,0,2,0
1137 mov out0=cr.iim
1138 add out1=16,sp
1139 adds r3=8,r2 // set up second base pointer for SAVE_REST
1140
1141 ssm psr.ic | PSR_DEFAULT_BITS
1142 ;;
1143 srlz.i // guarantee that interruption collection is on
1144 ;;
1145(p15) ssm psr.i // restore psr.i
1146 movl r15=ia64_leave_kernel
1147 ;;
1148 SAVE_REST
1149 mov rp=r15
1150 ;;
1151 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1152END(non_syscall)
1153
1154 .org ia64_ivt+0x4800 1089 .org ia64_ivt+0x4800
1155///////////////////////////////////////////////////////////////////////////////////////// 1090/////////////////////////////////////////////////////////////////////////////////////////
1156// 0x4800 Entry 18 (size 64 bundles) Reserved 1091// 0x4800 Entry 18 (size 64 bundles) Reserved
1157 DBG_FAULT(18) 1092 DBG_FAULT(18)
1158 FAULT(18) 1093 FAULT(18)
1159 1094
1160 /*
1161 * There is no particular reason for this code to be here, other than that
1162 * there happens to be space here that would go unused otherwise. If this
1163 * fault ever gets "unreserved", simply move the following code to a more
1164 * suitable spot...
1165 */
1166
1167ENTRY(dispatch_unaligned_handler)
1168 SAVE_MIN_WITH_COVER
1169 ;;
1170 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1171 mov out0=cr.ifa
1172 adds out1=16,sp
1173
1174 ssm psr.ic | PSR_DEFAULT_BITS
1175 ;;
1176 srlz.i // guarantee that interruption collection is on
1177 ;;
1178(p15) ssm psr.i // restore psr.i
1179 adds r3=8,r2 // set up second base pointer
1180 ;;
1181 SAVE_REST
1182 movl r14=ia64_leave_kernel
1183 ;;
1184 mov rp=r14
1185 br.sptk.many ia64_prepare_handle_unaligned
1186END(dispatch_unaligned_handler)
1187
1188 .org ia64_ivt+0x4c00 1095 .org ia64_ivt+0x4c00
1189///////////////////////////////////////////////////////////////////////////////////////// 1096/////////////////////////////////////////////////////////////////////////////////////////
1190// 0x4c00 Entry 19 (size 64 bundles) Reserved 1097// 0x4c00 Entry 19 (size 64 bundles) Reserved
1191 DBG_FAULT(19) 1098 DBG_FAULT(19)
1192 FAULT(19) 1099 FAULT(19)
1193 1100
1194 /*
1195 * There is no particular reason for this code to be here, other than that
1196 * there happens to be space here that would go unused otherwise. If this
1197 * fault ever gets "unreserved", simply move the following code to a more
1198 * suitable spot...
1199 */
1200
1201ENTRY(dispatch_to_fault_handler)
1202 /*
1203 * Input:
1204 * psr.ic: off
1205 * r19: fault vector number (e.g., 24 for General Exception)
1206 * r31: contains saved predicates (pr)
1207 */
1208 SAVE_MIN_WITH_COVER_R19
1209 alloc r14=ar.pfs,0,0,5,0
1210 mov out0=r15
1211 mov out1=cr.isr
1212 mov out2=cr.ifa
1213 mov out3=cr.iim
1214 mov out4=cr.itir
1215 ;;
1216 ssm psr.ic | PSR_DEFAULT_BITS
1217 ;;
1218 srlz.i // guarantee that interruption collection is on
1219 ;;
1220(p15) ssm psr.i // restore psr.i
1221 adds r3=8,r2 // set up second base pointer for SAVE_REST
1222 ;;
1223 SAVE_REST
1224 movl r14=ia64_leave_kernel
1225 ;;
1226 mov rp=r14
1227 br.call.sptk.many b6=ia64_fault
1228END(dispatch_to_fault_handler)
1229
1230// 1101//
1231// --- End of long entries, Beginning of short entries 1102// --- End of long entries, Beginning of short entries
1232// 1103//
@@ -1236,8 +1107,8 @@ END(dispatch_to_fault_handler)
1236// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) 1107// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1237ENTRY(page_not_present) 1108ENTRY(page_not_present)
1238 DBG_FAULT(20) 1109 DBG_FAULT(20)
1239 mov r16=cr.ifa 1110 MOV_FROM_IFA(r16)
1240 rsm psr.dt 1111 RSM_PSR_DT
1241 /* 1112 /*
1242 * The Linux page fault handler doesn't expect non-present pages to be in 1113 * The Linux page fault handler doesn't expect non-present pages to be in
1243 * the TLB. Flush the existing entry now, so we meet that expectation. 1114 * the TLB. Flush the existing entry now, so we meet that expectation.
@@ -1256,8 +1127,8 @@ END(page_not_present)
1256// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) 1127// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1257ENTRY(key_permission) 1128ENTRY(key_permission)
1258 DBG_FAULT(21) 1129 DBG_FAULT(21)
1259 mov r16=cr.ifa 1130 MOV_FROM_IFA(r16)
1260 rsm psr.dt 1131 RSM_PSR_DT
1261 mov r31=pr 1132 mov r31=pr
1262 ;; 1133 ;;
1263 srlz.d 1134 srlz.d
@@ -1269,8 +1140,8 @@ END(key_permission)
1269// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 1140// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1270ENTRY(iaccess_rights) 1141ENTRY(iaccess_rights)
1271 DBG_FAULT(22) 1142 DBG_FAULT(22)
1272 mov r16=cr.ifa 1143 MOV_FROM_IFA(r16)
1273 rsm psr.dt 1144 RSM_PSR_DT
1274 mov r31=pr 1145 mov r31=pr
1275 ;; 1146 ;;
1276 srlz.d 1147 srlz.d
@@ -1282,8 +1153,8 @@ END(iaccess_rights)
1282// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 1153// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1283ENTRY(daccess_rights) 1154ENTRY(daccess_rights)
1284 DBG_FAULT(23) 1155 DBG_FAULT(23)
1285 mov r16=cr.ifa 1156 MOV_FROM_IFA(r16)
1286 rsm psr.dt 1157 RSM_PSR_DT
1287 mov r31=pr 1158 mov r31=pr
1288 ;; 1159 ;;
1289 srlz.d 1160 srlz.d
@@ -1295,7 +1166,7 @@ END(daccess_rights)
1295// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 1166// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1296ENTRY(general_exception) 1167ENTRY(general_exception)
1297 DBG_FAULT(24) 1168 DBG_FAULT(24)
1298 mov r16=cr.isr 1169 MOV_FROM_ISR(r16)
1299 mov r31=pr 1170 mov r31=pr
1300 ;; 1171 ;;
1301 cmp4.eq p6,p0=0,r16 1172 cmp4.eq p6,p0=0,r16
@@ -1324,8 +1195,8 @@ END(disabled_fp_reg)
1324ENTRY(nat_consumption) 1195ENTRY(nat_consumption)
1325 DBG_FAULT(26) 1196 DBG_FAULT(26)
1326 1197
1327 mov r16=cr.ipsr 1198 MOV_FROM_IPSR(p0, r16)
1328 mov r17=cr.isr 1199 MOV_FROM_ISR(r17)
1329 mov r31=pr // save PR 1200 mov r31=pr // save PR
1330 ;; 1201 ;;
1331 and r18=0xf,r17 // r18 = cr.isr.code{3:0} 1202 and r18=0xf,r17 // r18 = cr.isr.code{3:0}
@@ -1335,10 +1206,10 @@ ENTRY(nat_consumption)
1335 dep r16=-1,r16,IA64_PSR_ED_BIT,1 1206 dep r16=-1,r16,IA64_PSR_ED_BIT,1
1336(p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH) 1207(p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
1337 ;; 1208 ;;
1338 mov cr.ipsr=r16 // set cr.ipsr.ed 1209 MOV_TO_IPSR(p0, r16, r18)
1339 mov pr=r31,-1 1210 mov pr=r31,-1
1340 ;; 1211 ;;
1341 rfi 1212 RFI
1342 1213
13431: mov pr=r31,-1 12141: mov pr=r31,-1
1344 ;; 1215 ;;
@@ -1360,26 +1231,26 @@ ENTRY(speculation_vector)
1360 * 1231 *
1361 * cr.imm contains zero_ext(imm21) 1232 * cr.imm contains zero_ext(imm21)
1362 */ 1233 */
1363 mov r18=cr.iim 1234 MOV_FROM_IIM(r18)
1364 ;; 1235 ;;
1365 mov r17=cr.iip 1236 MOV_FROM_IIP(r17)
1366 shl r18=r18,43 // put sign bit in position (43=64-21) 1237 shl r18=r18,43 // put sign bit in position (43=64-21)
1367 ;; 1238 ;;
1368 1239
1369 mov r16=cr.ipsr 1240 MOV_FROM_IPSR(p0, r16)
1370 shr r18=r18,39 // sign extend (39=43-4) 1241 shr r18=r18,39 // sign extend (39=43-4)
1371 ;; 1242 ;;
1372 1243
1373 add r17=r17,r18 // now add the offset 1244 add r17=r17,r18 // now add the offset
1374 ;; 1245 ;;
1375 mov cr.iip=r17 1246 MOV_TO_IIP(r17, r19)
1376 dep r16=0,r16,41,2 // clear EI 1247 dep r16=0,r16,41,2 // clear EI
1377 ;; 1248 ;;
1378 1249
1379 mov cr.ipsr=r16 1250 MOV_TO_IPSR(p0, r16, r19)
1380 ;; 1251 ;;
1381 1252
1382 rfi // and go back 1253 RFI
1383END(speculation_vector) 1254END(speculation_vector)
1384 1255
1385 .org ia64_ivt+0x5800 1256 .org ia64_ivt+0x5800
@@ -1517,11 +1388,11 @@ ENTRY(ia32_intercept)
1517 DBG_FAULT(46) 1388 DBG_FAULT(46)
1518#ifdef CONFIG_IA32_SUPPORT 1389#ifdef CONFIG_IA32_SUPPORT
1519 mov r31=pr 1390 mov r31=pr
1520 mov r16=cr.isr 1391 MOV_FROM_ISR(r16)
1521 ;; 1392 ;;
1522 extr.u r17=r16,16,8 // get ISR.code 1393 extr.u r17=r16,16,8 // get ISR.code
1523 mov r18=ar.eflag 1394 mov r18=ar.eflag
1524 mov r19=cr.iim // old eflag value 1395 MOV_FROM_IIM(r19) // old eflag value
1525 ;; 1396 ;;
1526 cmp.ne p6,p0=2,r17 1397 cmp.ne p6,p0=2,r17
1527(p6) br.cond.spnt 1f // not a system flag fault 1398(p6) br.cond.spnt 1f // not a system flag fault
@@ -1533,7 +1404,7 @@ ENTRY(ia32_intercept)
1533(p6) br.cond.spnt 1f // eflags.ac bit didn't change 1404(p6) br.cond.spnt 1f // eflags.ac bit didn't change
1534 ;; 1405 ;;
1535 mov pr=r31,-1 // restore predicate registers 1406 mov pr=r31,-1 // restore predicate registers
1536 rfi 1407 RFI
1537 1408
15381: 14091:
1539#endif // CONFIG_IA32_SUPPORT 1410#endif // CONFIG_IA32_SUPPORT
@@ -1673,6 +1544,137 @@ END(ia32_interrupt)
1673 DBG_FAULT(67) 1544 DBG_FAULT(67)
1674 FAULT(67) 1545 FAULT(67)
1675 1546
1547 //-----------------------------------------------------------------------------------
1548 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
1549ENTRY(page_fault)
1550 SSM_PSR_DT_AND_SRLZ_I
1551 ;;
1552 SAVE_MIN_WITH_COVER
1553 alloc r15=ar.pfs,0,0,3,0
1554 MOV_FROM_IFA(out0)
1555 MOV_FROM_ISR(out1)
1556 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
1557 adds r3=8,r2 // set up second base pointer
1558 SSM_PSR_I(p15, p15, r14) // restore psr.i
1559 movl r14=ia64_leave_kernel
1560 ;;
1561 SAVE_REST
1562 mov rp=r14
1563 ;;
1564 adds out2=16,r12 // out2 = pointer to pt_regs
1565 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
1566END(page_fault)
1567
1568ENTRY(non_syscall)
1569 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1570 ;;
1571 SAVE_MIN_WITH_COVER
1572
1573 // There is no particular reason for this code to be here, other than that
1574 // there happens to be space here that would go unused otherwise. If this
1575 // fault ever gets "unreserved", simply move the following code to a more
1576 // suitable spot...
1577
1578 alloc r14=ar.pfs,0,0,2,0
1579 MOV_FROM_IIM(out0)
1580 add out1=16,sp
1581 adds r3=8,r2 // set up second base pointer for SAVE_REST
1582
1583 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
1584 // guarantee that interruption collection is on
1585 SSM_PSR_I(p15, p15, r15) // restore psr.i
1586 movl r15=ia64_leave_kernel
1587 ;;
1588 SAVE_REST
1589 mov rp=r15
1590 ;;
1591 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1592END(non_syscall)
1593
1594ENTRY(__interrupt)
1595 DBG_FAULT(12)
1596 mov r31=pr // prepare to save predicates
1597 ;;
1598 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1599 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
1600 // ensure everybody knows psr.ic is back on
1601 adds r3=8,r2 // set up second base pointer for SAVE_REST
1602 ;;
1603 SAVE_REST
1604 ;;
1605 MCA_RECOVER_RANGE(interrupt)
1606 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1607 MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
1608 add out1=16,sp // pass pointer to pt_regs as second arg
1609 ;;
1610 srlz.d // make sure we see the effect of cr.ivr
1611 movl r14=ia64_leave_kernel
1612 ;;
1613 mov rp=r14
1614 br.call.sptk.many b6=ia64_handle_irq
1615END(__interrupt)
1616
1617 /*
1618 * There is no particular reason for this code to be here, other than that
1619 * there happens to be space here that would go unused otherwise. If this
1620 * fault ever gets "unreserved", simply move the following code to a more
1621 * suitable spot...
1622 */
1623
1624ENTRY(dispatch_unaligned_handler)
1625 SAVE_MIN_WITH_COVER
1626 ;;
1627 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1628 MOV_FROM_IFA(out0)
1629 adds out1=16,sp
1630
1631 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1632 // guarantee that interruption collection is on
1633 SSM_PSR_I(p15, p15, r3) // restore psr.i
1634 adds r3=8,r2 // set up second base pointer
1635 ;;
1636 SAVE_REST
1637 movl r14=ia64_leave_kernel
1638 ;;
1639 mov rp=r14
1640 br.sptk.many ia64_prepare_handle_unaligned
1641END(dispatch_unaligned_handler)
1642
1643 /*
1644 * There is no particular reason for this code to be here, other than that
1645 * there happens to be space here that would go unused otherwise. If this
1646 * fault ever gets "unreserved", simply move the following code to a more
1647 * suitable spot...
1648 */
1649
1650ENTRY(dispatch_to_fault_handler)
1651 /*
1652 * Input:
1653 * psr.ic: off
1654 * r19: fault vector number (e.g., 24 for General Exception)
1655 * r31: contains saved predicates (pr)
1656 */
1657 SAVE_MIN_WITH_COVER_R19
1658 alloc r14=ar.pfs,0,0,5,0
1659 MOV_FROM_ISR(out1)
1660 MOV_FROM_IFA(out2)
1661 MOV_FROM_IIM(out3)
1662 MOV_FROM_ITIR(out4)
1663 ;;
1664 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
1665 // guarantee that interruption collection is on
1666 mov out0=r15
1667 ;;
1668 SSM_PSR_I(p15, p15, r3) // restore psr.i
1669 adds r3=8,r2 // set up second base pointer for SAVE_REST
1670 ;;
1671 SAVE_REST
1672 movl r14=ia64_leave_kernel
1673 ;;
1674 mov rp=r14
1675 br.call.sptk.many b6=ia64_fault
1676END(dispatch_to_fault_handler)
1677
1676 /* 1678 /*
1677 * Squatting in this space ... 1679 * Squatting in this space ...
1678 * 1680 *
@@ -1686,11 +1688,10 @@ ENTRY(dispatch_illegal_op_fault)
1686 .prologue 1688 .prologue
1687 .body 1689 .body
1688 SAVE_MIN_WITH_COVER 1690 SAVE_MIN_WITH_COVER
1689 ssm psr.ic | PSR_DEFAULT_BITS 1691 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1690 ;; 1692 // guarantee that interruption collection is on
1691 srlz.i // guarantee that interruption collection is on
1692 ;; 1693 ;;
1693(p15) ssm psr.i // restore psr.i 1694 SSM_PSR_I(p15, p15, r3) // restore psr.i
1694 adds r3=8,r2 // set up second base pointer for SAVE_REST 1695 adds r3=8,r2 // set up second base pointer for SAVE_REST
1695 ;; 1696 ;;
1696 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group 1697 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
@@ -1729,12 +1730,11 @@ END(dispatch_illegal_op_fault)
1729ENTRY(dispatch_to_ia32_handler) 1730ENTRY(dispatch_to_ia32_handler)
1730 SAVE_MIN 1731 SAVE_MIN
1731 ;; 1732 ;;
1732 mov r14=cr.isr 1733 MOV_FROM_ISR(r14)
1733 ssm psr.ic | PSR_DEFAULT_BITS 1734 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1734 ;; 1735 // guarantee that interruption collection is on
1735 srlz.i // guarantee that interruption collection is on
1736 ;; 1736 ;;
1737(p15) ssm psr.i 1737 SSM_PSR_I(p15, p15, r3)
1738 adds r3=8,r2 // Base pointer for SAVE_REST 1738 adds r3=8,r2 // Base pointer for SAVE_REST
1739 ;; 1739 ;;
1740 SAVE_REST 1740 SAVE_REST
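
The pattern in every ivt.S hunk above is the same: raw privileged operations (mov from/to cr.*, ssm/rsm of psr bits, cover, rfi) are replaced by macros pulled in through paravirt_inst.h, so one assembly source can be assembled once per execution environment. As a sketch of what the native expansions might look like, modeled on asm/native/inst.h (the macro names and arities are taken from the hunks above; the bodies here are assumptions, not part of this diff):

/* Illustrative native expansions; each macro collapses back to the
 * bare instruction it replaced in the hunks above. */
#define MOV_FROM_IFA(reg)			mov reg = cr.ifa
#define MOV_FROM_ISR(reg)			mov reg = cr.isr
#define MOV_FROM_IPSR(pred, reg)		(pred) mov reg = cr.ipsr
#define MOV_TO_IPSR(pred, reg, clob)		(pred) mov cr.ipsr = reg
#define RSM_PSR_DT				rsm psr.dt
#define SSM_PSR_I(pred, pred_clob, clob)	(pred) ssm psr.i
#define COVER					cover
#define RFI					rfi

A Xen build instead includes asm/xen/inst.h (see paravirt_inst.h below), where the same names expand to hypervisor-aware sequences; the extra clobber arguments exist so that those longer sequences have scratch registers to work with, while the native build simply ignores them.
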
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 74b6d670aaef..292e214a3b84 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -2,6 +2,7 @@
2#include <asm/cache.h> 2#include <asm/cache.h>
3 3
4#include "entry.h" 4#include "entry.h"
5#include "paravirt_inst.h"
5 6
6#ifdef CONFIG_VIRT_CPU_ACCOUNTING 7#ifdef CONFIG_VIRT_CPU_ACCOUNTING
7/* read ar.itc in advance, and use it before leaving bank 0 */ 8/* read ar.itc in advance, and use it before leaving bank 0 */
@@ -43,16 +44,16 @@
43 * Note that psr.ic is NOT turned on by this macro. This is so that 44 * Note that psr.ic is NOT turned on by this macro. This is so that
44 * we can pass interruption state as arguments to a handler. 45 * we can pass interruption state as arguments to a handler.
45 */ 46 */
46#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \ 47#define IA64_NATIVE_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
47 mov r16=IA64_KR(CURRENT); /* M */ \ 48 mov r16=IA64_KR(CURRENT); /* M */ \
48 mov r27=ar.rsc; /* M */ \ 49 mov r27=ar.rsc; /* M */ \
49 mov r20=r1; /* A */ \ 50 mov r20=r1; /* A */ \
50 mov r25=ar.unat; /* M */ \ 51 mov r25=ar.unat; /* M */ \
51 mov r29=cr.ipsr; /* M */ \ 52 MOV_FROM_IPSR(p0,r29); /* M */ \
52 mov r26=ar.pfs; /* I */ \ 53 mov r26=ar.pfs; /* I */ \
53 mov r28=cr.iip; /* M */ \ 54 MOV_FROM_IIP(r28); /* M */ \
54 mov r21=ar.fpsr; /* M */ \ 55 mov r21=ar.fpsr; /* M */ \
55 COVER; /* B;; (or nothing) */ \ 56 __COVER; /* B;; (or nothing) */ \
56 ;; \ 57 ;; \
57 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ 58 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
58 ;; \ 59 ;; \
@@ -244,6 +245,6 @@
2441: \ 2451: \
245 .pred.rel "mutex", pKStk, pUStk 246 .pred.rel "mutex", pKStk, pUStk
246 247
247#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND) 248#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(COVER, mov r30=cr.ifs, , RSE_WORKAROUND)
248#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND) 249#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(COVER, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
249#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , ) 250#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e83e2ea3b3e0..29aad349e0c4 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -321,7 +321,8 @@ module_alloc (unsigned long size)
321void 321void
322module_free (struct module *mod, void *module_region) 322module_free (struct module *mod, void *module_region)
323{ 323{
324 if (mod->arch.init_unw_table && module_region == mod->module_init) { 324 if (mod && mod->arch.init_unw_table &&
325 module_region == mod->module_init) {
325 unw_remove_unwind_table(mod->arch.init_unw_table); 326 unw_remove_unwind_table(mod->arch.init_unw_table);
326 mod->arch.init_unw_table = NULL; 327 mod->arch.init_unw_table = NULL;
327 } 328 }
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
new file mode 100644
index 000000000000..1ae049181e83
--- /dev/null
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -0,0 +1,24 @@
1/*
2 * calculate
3 * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...)
4 * depending on config.
5 * This must be calculated before processing asm-offset.c.
6 */
7
8#define ASM_OFFSETS_C 1
9
10#include <linux/kbuild.h>
11#include <linux/threads.h>
12#include <asm-ia64/native/irq.h>
13
14void foo(void)
15{
16 union paravirt_nr_irqs_max {
17 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
18#ifdef CONFIG_XEN
19 char xen_nr_irqs[XEN_NR_IRQS];
20#endif
21 };
22
23 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
24}
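
The union in foo() is the whole trick: a union is as large as its largest member, so sizeof() yields a compile-time maximum over whichever variants the config enables, with no preprocessor arithmetic. A self-contained illustration of the idiom (the FOO_/BAR_ constants are invented for the example):

#include <stdio.h>

#define FOO_NR_IRQS	256
#define BAR_NR_IRQS	1024

/* sizeof(union) is the size of its largest member, so this is
 * max(FOO_NR_IRQS, BAR_NR_IRQS), known at compile time. */
union nr_irqs_max {
	char foo[FOO_NR_IRQS];
	char bar[BAR_NR_IRQS];
};

int main(void)
{
	printf("NR_IRQS = %zu\n", sizeof(union nr_irqs_max));	/* 1024 */
	return 0;
}

DEFINE() from linux/kbuild.h then emits the computed value into the assembler output, from which the generated nr-irqs.h header is produced.
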
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
new file mode 100644
index 000000000000..afaf5b9a2cf0
--- /dev/null
+++ b/arch/ia64/kernel/paravirt.c
@@ -0,0 +1,369 @@
1/******************************************************************************
2 * arch/ia64/kernel/paravirt.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/init.h>
25
26#include <linux/compiler.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/types.h>
31
32#include <asm/iosapic.h>
33#include <asm/paravirt.h>
34
35/***************************************************************************
36 * general info
37 */
38struct pv_info pv_info = {
39 .kernel_rpl = 0,
40 .paravirt_enabled = 0,
41 .name = "bare hardware"
42};
43
44/***************************************************************************
45 * pv_init_ops
46 * initialization hooks.
47 */
48
49struct pv_init_ops pv_init_ops;
50
51/***************************************************************************
52 * pv_cpu_ops
53 * intrinsics hooks.
54 */
55
56/* ia64_native_xxx are macros, so we have to make them real functions */
57
58#define DEFINE_VOID_FUNC1(name) \
59 static void \
60 ia64_native_ ## name ## _func(unsigned long arg) \
61 { \
62 ia64_native_ ## name(arg); \
63 } \
64
65#define DEFINE_VOID_FUNC2(name) \
66 static void \
67 ia64_native_ ## name ## _func(unsigned long arg0, \
68 unsigned long arg1) \
69 { \
70 ia64_native_ ## name(arg0, arg1); \
71 } \
72
73#define DEFINE_FUNC0(name) \
74 static unsigned long \
75 ia64_native_ ## name ## _func(void) \
76 { \
77 return ia64_native_ ## name(); \
78 }
79
80#define DEFINE_FUNC1(name, type) \
81 static unsigned long \
82 ia64_native_ ## name ## _func(type arg) \
83 { \
84 return ia64_native_ ## name(arg); \
85 } \
86
87DEFINE_VOID_FUNC1(fc);
88DEFINE_VOID_FUNC1(intrin_local_irq_restore);
89
90DEFINE_VOID_FUNC2(ptcga);
91DEFINE_VOID_FUNC2(set_rr);
92
93DEFINE_FUNC0(get_psr_i);
94
95DEFINE_FUNC1(thash, unsigned long);
96DEFINE_FUNC1(get_cpuid, int);
97DEFINE_FUNC1(get_pmd, int);
98DEFINE_FUNC1(get_rr, unsigned long);
99
100static void
101ia64_native_ssm_i_func(void)
102{
103 ia64_native_ssm(IA64_PSR_I);
104}
105
106static void
107ia64_native_rsm_i_func(void)
108{
109 ia64_native_rsm(IA64_PSR_I);
110}
111
112static void
113ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
114 unsigned long val2, unsigned long val3,
115 unsigned long val4)
116{
117 ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
118}
119
120#define CASE_GET_REG(id) \
121 case _IA64_REG_ ## id: \
122 res = ia64_native_getreg(_IA64_REG_ ## id); \
123 break;
124#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
125#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
126
127unsigned long
128ia64_native_getreg_func(int regnum)
129{
130 unsigned long res = -1;
131 switch (regnum) {
132 CASE_GET_REG(GP);
133 CASE_GET_REG(IP);
134 CASE_GET_REG(PSR);
135 CASE_GET_REG(TP);
136 CASE_GET_REG(SP);
137
138 CASE_GET_AR(KR0);
139 CASE_GET_AR(KR1);
140 CASE_GET_AR(KR2);
141 CASE_GET_AR(KR3);
142 CASE_GET_AR(KR4);
143 CASE_GET_AR(KR5);
144 CASE_GET_AR(KR6);
145 CASE_GET_AR(KR7);
146 CASE_GET_AR(RSC);
147 CASE_GET_AR(BSP);
148 CASE_GET_AR(BSPSTORE);
149 CASE_GET_AR(RNAT);
150 CASE_GET_AR(FCR);
151 CASE_GET_AR(EFLAG);
152 CASE_GET_AR(CSD);
153 CASE_GET_AR(SSD);
154 CASE_GET_AR(CFLAG);
155 CASE_GET_AR(FSR);
156 CASE_GET_AR(FIR);
157 CASE_GET_AR(FDR);
158 CASE_GET_AR(CCV);
159 CASE_GET_AR(UNAT);
160 CASE_GET_AR(FPSR);
161 CASE_GET_AR(ITC);
162 CASE_GET_AR(PFS);
163 CASE_GET_AR(LC);
164 CASE_GET_AR(EC);
165
166 CASE_GET_CR(DCR);
167 CASE_GET_CR(ITM);
168 CASE_GET_CR(IVA);
169 CASE_GET_CR(PTA);
170 CASE_GET_CR(IPSR);
171 CASE_GET_CR(ISR);
172 CASE_GET_CR(IIP);
173 CASE_GET_CR(IFA);
174 CASE_GET_CR(ITIR);
175 CASE_GET_CR(IIPA);
176 CASE_GET_CR(IFS);
177 CASE_GET_CR(IIM);
178 CASE_GET_CR(IHA);
179 CASE_GET_CR(LID);
180 CASE_GET_CR(IVR);
181 CASE_GET_CR(TPR);
182 CASE_GET_CR(EOI);
183 CASE_GET_CR(IRR0);
184 CASE_GET_CR(IRR1);
185 CASE_GET_CR(IRR2);
186 CASE_GET_CR(IRR3);
187 CASE_GET_CR(ITV);
188 CASE_GET_CR(PMV);
189 CASE_GET_CR(CMCV);
190 CASE_GET_CR(LRR0);
191 CASE_GET_CR(LRR1);
192
193 default:
194 printk(KERN_CRIT "wrong getreg %d\n", regnum);
195 break;
196 }
197 return res;
198}
199
200#define CASE_SET_REG(id) \
201 case _IA64_REG_ ## id: \
202 ia64_native_setreg(_IA64_REG_ ## id, val); \
203 break;
204#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
205#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
206
207void
208ia64_native_setreg_func(int regnum, unsigned long val)
209{
210 switch (regnum) {
211 case _IA64_REG_PSR_L:
212 ia64_native_setreg(_IA64_REG_PSR_L, val);
213 ia64_dv_serialize_data();
214 break;
215 CASE_SET_REG(SP);
216 CASE_SET_REG(GP);
217
218 CASE_SET_AR(KR0);
219 CASE_SET_AR(KR1);
220 CASE_SET_AR(KR2);
221 CASE_SET_AR(KR3);
222 CASE_SET_AR(KR4);
223 CASE_SET_AR(KR5);
224 CASE_SET_AR(KR6);
225 CASE_SET_AR(KR7);
226 CASE_SET_AR(RSC);
227 CASE_SET_AR(BSP);
228 CASE_SET_AR(BSPSTORE);
229 CASE_SET_AR(RNAT);
230 CASE_SET_AR(FCR);
231 CASE_SET_AR(EFLAG);
232 CASE_SET_AR(CSD);
233 CASE_SET_AR(SSD);
234 CASE_SET_AR(CFLAG);
235 CASE_SET_AR(FSR);
236 CASE_SET_AR(FIR);
237 CASE_SET_AR(FDR);
238 CASE_SET_AR(CCV);
239 CASE_SET_AR(UNAT);
240 CASE_SET_AR(FPSR);
241 CASE_SET_AR(ITC);
242 CASE_SET_AR(PFS);
243 CASE_SET_AR(LC);
244 CASE_SET_AR(EC);
245
246 CASE_SET_CR(DCR);
247 CASE_SET_CR(ITM);
248 CASE_SET_CR(IVA);
249 CASE_SET_CR(PTA);
250 CASE_SET_CR(IPSR);
251 CASE_SET_CR(ISR);
252 CASE_SET_CR(IIP);
253 CASE_SET_CR(IFA);
254 CASE_SET_CR(ITIR);
255 CASE_SET_CR(IIPA);
256 CASE_SET_CR(IFS);
257 CASE_SET_CR(IIM);
258 CASE_SET_CR(IHA);
259 CASE_SET_CR(LID);
260 CASE_SET_CR(IVR);
261 CASE_SET_CR(TPR);
262 CASE_SET_CR(EOI);
263 CASE_SET_CR(IRR0);
264 CASE_SET_CR(IRR1);
265 CASE_SET_CR(IRR2);
266 CASE_SET_CR(IRR3);
267 CASE_SET_CR(ITV);
268 CASE_SET_CR(PMV);
269 CASE_SET_CR(CMCV);
270 CASE_SET_CR(LRR0);
271 CASE_SET_CR(LRR1);
272 default:
273 printk(KERN_CRIT "wrong setreg %d\n", regnum);
274 break;
275 }
276}
277
278struct pv_cpu_ops pv_cpu_ops = {
279 .fc = ia64_native_fc_func,
280 .thash = ia64_native_thash_func,
281 .get_cpuid = ia64_native_get_cpuid_func,
282 .get_pmd = ia64_native_get_pmd_func,
283 .ptcga = ia64_native_ptcga_func,
284 .get_rr = ia64_native_get_rr_func,
285 .set_rr = ia64_native_set_rr_func,
286 .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
287 .ssm_i = ia64_native_ssm_i_func,
288 .getreg = ia64_native_getreg_func,
289 .setreg = ia64_native_setreg_func,
290 .rsm_i = ia64_native_rsm_i_func,
291 .get_psr_i = ia64_native_get_psr_i_func,
292 .intrin_local_irq_restore
293 = ia64_native_intrin_local_irq_restore_func,
294};
295EXPORT_SYMBOL(pv_cpu_ops);
296
297/******************************************************************************
298 * replacement of hand-written assembly code.
299 */
300
301void
302paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
303{
304 extern unsigned long paravirt_switch_to_targ;
305 extern unsigned long paravirt_leave_syscall_targ;
306 extern unsigned long paravirt_work_processed_syscall_targ;
307 extern unsigned long paravirt_leave_kernel_targ;
308
309 paravirt_switch_to_targ = cpu_asm_switch->switch_to;
310 paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
311 paravirt_work_processed_syscall_targ =
312 cpu_asm_switch->work_processed_syscall;
313 paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
314}
315
316/***************************************************************************
317 * pv_iosapic_ops
318 * iosapic read/write hooks.
319 */
320
321static unsigned int
322ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
323{
324 return __ia64_native_iosapic_read(iosapic, reg);
325}
326
327static void
328ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
329{
330 __ia64_native_iosapic_write(iosapic, reg, val);
331}
332
333struct pv_iosapic_ops pv_iosapic_ops = {
334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
335 .get_irq_chip = ia64_native_iosapic_get_irq_chip,
336
337 .__read = ia64_native_iosapic_read,
338 .__write = ia64_native_iosapic_write,
339};
340
341/***************************************************************************
342 * pv_irq_ops
343 * irq operations
344 */
345
346struct pv_irq_ops pv_irq_ops = {
347 .register_ipi = ia64_native_register_ipi,
348
349 .assign_irq_vector = ia64_native_assign_irq_vector,
350 .free_irq_vector = ia64_native_free_irq_vector,
351 .register_percpu_irq = ia64_native_register_percpu_irq,
352
353 .resend_irq = ia64_native_resend_irq,
354};
355
356/***************************************************************************
357 * pv_time_ops
358 * time operations
359 */
360
361static int
362ia64_native_do_steal_accounting(unsigned long *new_itm)
363{
364 return 0;
365}
366
367struct pv_time_ops pv_time_ops = {
368 .do_steal_accounting = ia64_native_do_steal_accounting,
369};
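
pv_cpu_ops is the C-level indirection point: paravirtualized intrinsics call through this table, and the ia64_native_*_func wrappers above are only its default targets. A hedged sketch of how a guest port might repoint a hook at early boot (the xen_* names are placeholders invented for this example, not code from this patch):

/* Sketch only: a guest would typically read an interrupt-enable flag
 * from memory shared with the hypervisor rather than access psr.i. */
static unsigned long xen_get_psr_i(void)
{
	return 0;	/* placeholder for a shared-info read */
}

static void __init xen_setup_pv_cpu_ops(void)
{
	pv_info.paravirt_enabled = 1;
	pv_info.name = "Xen/ia64";

	pv_cpu_ops.get_psr_i = xen_get_psr_i;
	/* ...ssm_i, rsm_i, getreg, setreg, etc. in the same way */
}

Since pv_cpu_ops is exported, modules that use the intrinsics observe the override as well.
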
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
new file mode 100644
index 000000000000..5cad6fb2ed19
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirt_inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
24#include <asm/xen/inst.h>
25#include <asm/xen/minstate.h>
26#else
27#include <asm/native/inst.h>
28#endif
29
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
new file mode 100644
index 000000000000..2f42fcb9776a
--- /dev/null
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -0,0 +1,60 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirtentry.S
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/asmmacro.h>
24#include <asm/asm-offsets.h>
25#include "entry.h"
26
27#define DATA8(sym, init_value) \
28 .pushsection .data.read_mostly ; \
29 .align 8 ; \
30 .global sym ; \
31 sym: ; \
32 data8 init_value ; \
33 .popsection
34
35#define BRANCH(targ, reg, breg) \
36 movl reg=targ ; \
37 ;; \
38 ld8 reg=[reg] ; \
39 ;; \
40 mov breg=reg ; \
41 br.cond.sptk.many breg
42
43#define BRANCH_PROC(sym, reg, breg) \
44 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
45 GLOBAL_ENTRY(paravirt_ ## sym) ; \
46 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
47 END(paravirt_ ## sym)
48
49#define BRANCH_PROC_UNWINFO(sym, reg, breg) \
50 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
51 GLOBAL_ENTRY(paravirt_ ## sym) ; \
52 PT_REGS_UNWIND_INFO(0) ; \
53 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
54 END(paravirt_ ## sym)
55
56
57BRANCH_PROC(switch_to, r22, b7)
58BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
59BRANCH_PROC(work_processed_syscall, r2, b7)
60BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
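
Each BRANCH_PROC above emits two artifacts: a data8 slot (paravirt_<sym>_targ) preloaded with the address of the native routine, and a paravirt_<sym> trampoline that loads the slot and branches through it. paravirt_cpu_asm_init() in paravirt.c is the writer side, storing a hypervisor's entry points into those four slots. A sketch of a caller, with field names taken from the assignments in paravirt_cpu_asm_init() and invented xen_* entry points:

/* Hypothetical guest-side switch table; only the field names come
 * from this patch, the xen_* symbols are placeholders. */
extern char xen_switch_to[], xen_leave_syscall[],
	    xen_work_processed_syscall[], xen_leave_kernel[];

static const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long) xen_switch_to,
	.leave_syscall		= (unsigned long) xen_leave_syscall,
	.work_processed_syscall	= (unsigned long) xen_work_processed_syscall,
	.leave_kernel		= (unsigned long) xen_leave_kernel,
};

static void __init xen_setup_asm_switch(void)
{
	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}

Only these four heavyweight context-switch and kernel-exit paths are patched this way, so native execution pays just one load plus one indirect branch at each of them.
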
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 632cda8f2e76..e5c2de9b29a5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -51,6 +51,7 @@
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/meminit.h> 52#include <asm/meminit.h>
53#include <asm/page.h> 53#include <asm/page.h>
54#include <asm/paravirt.h>
54#include <asm/patch.h> 55#include <asm/patch.h>
55#include <asm/pgtable.h> 56#include <asm/pgtable.h>
56#include <asm/processor.h> 57#include <asm/processor.h>
@@ -341,6 +342,8 @@ reserve_memory (void)
341 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 342 rsvd_region[n].end = (unsigned long) ia64_imva(_end);
342 n++; 343 n++;
343 344
345 n += paravirt_reserve_memory(&rsvd_region[n]);
346
344#ifdef CONFIG_BLK_DEV_INITRD 347#ifdef CONFIG_BLK_DEV_INITRD
345 if (ia64_boot_param->initrd_start) { 348 if (ia64_boot_param->initrd_start) {
346 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 349 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -519,6 +522,8 @@ setup_arch (char **cmdline_p)
519{ 522{
520 unw_init(); 523 unw_init();
521 524
525 paravirt_arch_setup_early();
526
522 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 527 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
523 528
524 *cmdline_p = __va(ia64_boot_param->command_line); 529 *cmdline_p = __va(ia64_boot_param->command_line);
@@ -583,6 +588,9 @@ setup_arch (char **cmdline_p)
583 acpi_boot_init(); 588 acpi_boot_init();
584#endif 589#endif
585 590
591 paravirt_banner();
592 paravirt_arch_setup_console(cmdline_p);
593
586#ifdef CONFIG_VT 594#ifdef CONFIG_VT
587 if (!conswitchp) { 595 if (!conswitchp) {
588# if defined(CONFIG_DUMMY_CONSOLE) 596# if defined(CONFIG_DUMMY_CONSOLE)
@@ -602,6 +610,8 @@ setup_arch (char **cmdline_p)
602#endif 610#endif
603 611
604 /* enable IA-64 Machine Check Abort Handling unless disabled */ 612 /* enable IA-64 Machine Check Abort Handling unless disabled */
613 if (paravirt_arch_setup_nomca())
614 nomca = 1;
605 if (!nomca) 615 if (!nomca)
606 ia64_mca_init(); 616 ia64_mca_init();
607 617
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9d1d429c6c59..03f1a9908afc 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,6 +50,7 @@
50#include <asm/machvec.h> 50#include <asm/machvec.h>
51#include <asm/mca.h> 51#include <asm/mca.h>
52#include <asm/page.h> 52#include <asm/page.h>
53#include <asm/paravirt.h>
53#include <asm/pgalloc.h> 54#include <asm/pgalloc.h>
54#include <asm/pgtable.h> 55#include <asm/pgtable.h>
55#include <asm/processor.h> 56#include <asm/processor.h>
@@ -642,6 +643,7 @@ void __devinit smp_prepare_boot_cpu(void)
642 cpu_set(smp_processor_id(), cpu_online_map); 643 cpu_set(smp_processor_id(), cpu_online_map);
643 cpu_set(smp_processor_id(), cpu_callin_map); 644 cpu_set(smp_processor_id(), cpu_callin_map);
644 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 645 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
646 paravirt_post_smp_prepare_boot_cpu();
645} 647}
646 648
647#ifdef CONFIG_HOTPLUG_CPU 649#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aad1b7b1fff9..65c10a42c88f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -24,6 +24,7 @@
24#include <asm/machvec.h> 24#include <asm/machvec.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/hw_irq.h> 26#include <asm/hw_irq.h>
27#include <asm/paravirt.h>
27#include <asm/ptrace.h> 28#include <asm/ptrace.h>
28#include <asm/sal.h> 29#include <asm/sal.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
@@ -48,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
48 49
49#endif 50#endif
50 51
52#ifdef CONFIG_PARAVIRT
53static void
54paravirt_clocksource_resume(void)
55{
56 if (pv_time_ops.clocksource_resume)
57 pv_time_ops.clocksource_resume();
58}
59#endif
60
51static struct clocksource clocksource_itc = { 61static struct clocksource clocksource_itc = {
52 .name = "itc", 62 .name = "itc",
53 .rating = 350, 63 .rating = 350,
@@ -56,6 +66,9 @@ static struct clocksource clocksource_itc = {
56 .mult = 0, /*to be calculated*/ 66 .mult = 0, /*to be calculated*/
57 .shift = 16, 67 .shift = 16,
58 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 68 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
69#ifdef CONFIG_PARAVIRT
70 .resume = paravirt_clocksource_resume,
71#endif
59}; 72};
60static struct clocksource *itc_clocksource; 73static struct clocksource *itc_clocksource;
61 74
@@ -157,6 +170,9 @@ timer_interrupt (int irq, void *dev_id)
157 170
158 profile_tick(CPU_PROFILING); 171 profile_tick(CPU_PROFILING);
159 172
173 if (paravirt_do_steal_accounting(&new_itm))
174 goto skip_process_time_accounting;
175
160 while (1) { 176 while (1) {
161 update_process_times(user_mode(get_irq_regs())); 177 update_process_times(user_mode(get_irq_regs()));
162 178
@@ -186,6 +202,8 @@ timer_interrupt (int irq, void *dev_id)
186 local_irq_disable(); 202 local_irq_disable();
187 } 203 }
188 204
205skip_process_time_accounting:
206
189 do { 207 do {
190 /* 208 /*
191 * If we're too close to the next clock tick for 209 * If we're too close to the next clock tick for
@@ -335,6 +353,11 @@ ia64_init_itm (void)
335 */ 353 */
336 clocksource_itc.rating = 50; 354 clocksource_itc.rating = 50;
337 355
356 paravirt_init_missing_ticks_accounting(smp_processor_id());
357
358 /* avoid softlockup message when a cpu is unplugged and plugged again. */
359 touch_softlockup_watchdog();
360
338 /* Setup the CPU local timer tick */ 361 /* Setup the CPU local timer tick */
339 ia64_cpu_local_tick(); 362 ia64_cpu_local_tick();
340 363
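
timer_interrupt() now asks paravirt_do_steal_accounting() whether the hypervisor already accounted the elapsed ticks as stolen time; a non-zero return jumps to skip_process_time_accounting, bypassing the update_process_times() loop, while the native stub in paravirt.c always returns 0. A hedged sketch of a guest-side override (xen_steal_ticks() is an invented placeholder for however a hypervisor reports stolen time):

/* Sketch only: how a port might satisfy the do_steal_accounting hook. */
static int xen_do_steal_accounting(unsigned long *new_itm)
{
	unsigned long stolen = xen_steal_ticks();	/* placeholder */

	if (!stolen)
		return 0;	/* 0: run the normal accounting loop */

	/* charge the stolen ticks and, if needed, adjust *new_itm ... */
	return 1;	/* non-zero: skip normal process-time accounting */
}

static void __init xen_setup_pv_time_ops(void)
{
	pv_time_ops.do_steal_accounting = xen_do_steal_accounting;
	/* clocksource_resume is optional: paravirt_clocksource_resume()
	 * above only calls it when the hook is non-NULL. */
}
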
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5929ab10a289..5a77206c2492 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -4,7 +4,6 @@
4#include <asm/system.h> 4#include <asm/system.h>
5#include <asm/pgtable.h> 5#include <asm/pgtable.h>
6 6
7#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
8#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
9 8
10#define IVT_TEXT \ 9#define IVT_TEXT \