author		Tony Luck <tony.luck@intel.com>		2007-04-30 16:56:00 -0400
committer	Tony Luck <tony.luck@intel.com>		2007-04-30 16:56:00 -0400
commit		b643b0fdbc59cf6bbb086974b29d2571e9e9f646 (patch)
tree		59d1a1298ffc98877183a8def5c180c0f74e568b /arch/ia64
parent		e0cc09e295f346b7921e921f385fe5213472316a (diff)
parent		00b65985fb2fc542b855b03fcda0d0f2bab4f442 (diff)
Pull percpu-dtc into release branch
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/entry.S	 7
-rw-r--r--	arch/ia64/kernel/ivt.S		19
-rw-r--r--	arch/ia64/kernel/mca_asm.S	24
-rw-r--r--	arch/ia64/kernel/patch.c	20
-rw-r--r--	arch/ia64/kernel/setup.c	 7
-rw-r--r--	arch/ia64/kernel/vmlinux.lds.S	 7
-rw-r--r--	arch/ia64/mm/init.c		11
7 files changed, 49 insertions, 46 deletions
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e7873eeae448..55fd2d5471e1 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -767,7 +767,7 @@ ENTRY(ia64_leave_syscall)
 	ld8.fill r15=[r3]			// M0|1 restore r15
 	mov b6=r18				// I0   restore b6
 
-	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0	// A
+	LOAD_PHYS_STACK_REG_SIZE(r17)
 	mov f9=f0				// F    clear f9
 (pKStk) br.cond.dpnt.many skip_rbs_switch	// B
 
@@ -775,7 +775,6 @@ ENTRY(ia64_leave_syscall)
 	shr.u r18=r19,16			// I0|1 get byte size of existing "dirty" partition
 	cover					// B    add current frame into dirty partition & set cr.ifs
 	;;
-(pUStk) ld4 r17=[r17]				// M0|1 r17 = cpu_data->phys_stacked_size_p8
 	mov r19=ar.bsp				// M2   get new backing store pointer
 	mov f10=f0				// F    clear f10
 
@@ -953,9 +952,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	shr.u r18=r19,16	// get byte size of existing "dirty" partition
 	;;
 	mov r16=ar.bsp		// get existing backing store pointer
-	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-	;;
-	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
+	LOAD_PHYS_STACK_REG_SIZE(r17)
 (pKStk)	br.cond.dpnt skip_rbs_switch
 
 	/*
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6b7fcbd3f6f1..34f44d8be00d 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -374,6 +374,7 @@ ENTRY(alt_dtlb_miss)
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	mov r21=cr.ipsr
 	mov r31=pr
+	mov r24=PERCPU_ADDR
 	;;
 #ifdef CONFIG_DISABLE_VHPT
 	shr.u r22=r16,61			// get the region number into r21
@@ -386,22 +387,30 @@ ENTRY(alt_dtlb_miss)
 (p8)	mov r29=b0				// save b0
 (p8)	br.cond.dptk dtlb_fault
 #endif
+	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
+	tbit.z p12,p0=r16,61			// access to region 6?
+	mov r25=PERCPU_PAGE_SHIFT << 2
+	mov r26=PERCPU_PAGE_SIZE
+	nop.m 0
+	nop.b 0
+	;;
+(p10)	mov r19=IA64_KR(PER_CPU_DATA)
+(p11)	and r19=r19,r16				// clear non-ppn fields
 	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
 	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
 	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
-	shr.u r18=r16,57			// move address bit 61 to bit 4
-	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
 	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
 	;;
-	andcm r18=0x10,r18			// bit 4=~address-bit(61)
+(p10)	sub r19=r19,r26
+(p10)	mov cr.itir=r25
 	cmp.ne p8,p0=r0,r23
 (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
+(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
 (p8)	br.cond.spnt page_fault
 
 	dep r21=-1,r21,IA64_PSR_ED_BIT,1
-	or r19=r19,r17				// insert PTE control bits into r19
 	;;
-	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
+	or r19=r19,r17				// insert PTE control bits into r19
 (p6)	mov cr.ipsr=r21
 	;;
 (p7)	itc.d r19				// insert the TLB entry
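For readers who do not speak ia64 assembly, the per-CPU handling added to alt_dtlb_miss above can be read roughly as the C below. This is an editorial transliteration, not part of the patch: the helper alt_dtlb_pte() and the constant values (PERCPU_PAGE_SHIFT = 16, PERCPU_ADDR at the top of the address space) are illustrative assumptions; the real logic is the bundled assembly above.

```c
/*
 * Hedged C transliteration of the new alt_dtlb_miss path.
 * Constants are illustrative stand-ins for the kernel headers, and
 * alt_dtlb_pte() is a made-up helper, not a kernel function.
 */
#include <stdint.h>

#define PERCPU_PAGE_SHIFT	16			/* assumed per-CPU page size: 64KB */
#define PERCPU_PAGE_SIZE	(1UL << PERCPU_PAGE_SHIFT)
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)	/* assumed: per-CPU page at the top of the VA space */
#define IA64_MAX_PHYS_BITS	50

/* Returns the PTE that itc.d would insert; *itir is set only on the per-CPU path. */
static uint64_t alt_dtlb_pte(uint64_t ifa, uint64_t pte_ctl_bits /* r17 */,
			     uint64_t kr_per_cpu_data /* IA64_KR(PER_CPU_DATA) */,
			     uint64_t *itir)
{
	uint64_t ppn;

	if ((int64_t)ifa >= (int64_t)PERCPU_ADDR) {	/* cmp.ge is a signed compare */
		/* p10: access to per_cpu_data -- map one PERCPU_PAGE_SIZE page
		 * whose PTE comes from the PER_CPU_DATA kernel register */
		ppn = kr_per_cpu_data - PERCPU_PAGE_SIZE;
		*itir = (uint64_t)PERCPU_PAGE_SHIFT << 2;
	} else {
		/* p11: identity mapping -- keep only the ppn field of the address */
		ppn = ifa & (((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL);
	}
	if (!((ifa >> 61) & 1))
		pte_ctl_bits |= 1UL << 4;	/* p12: region 6 -> memory attribute UC */
	return ppn | pte_ctl_bits;		/* or r19=r19,r17; itc.d r19 */
}
```

The net effect of the hunk is that per-CPU data accesses are now resolved by this miss handler instead of by a pinned translation register, which is why the DTR purge/reload code disappears from mca_asm.S below.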
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index c6b607c00dee..8c9c26aa6ae0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -101,14 +101,6 @@ ia64_do_tlb_purge:
 	;;
 	srlz.d
 	;;
-	// 2. Purge DTR for PERCPU data.
-	movl r16=PERCPU_ADDR
-	mov r18=PERCPU_PAGE_SHIFT<<2
-	;;
-	ptr.d r16,r18
-	;;
-	srlz.d
-	;;
 	// 3. Purge ITR for PAL code.
 	GET_THIS_PADDR(r2, ia64_mca_pal_base)
 	;;
@@ -196,22 +188,6 @@ ia64_reload_tr:
 	srlz.i
 	srlz.d
 	;;
-	// 2. Reload DTR register for PERCPU data.
-	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
-	;;
-	movl r16=PERCPU_ADDR		// vaddr
-	movl r18=PERCPU_PAGE_SHIFT<<2
-	;;
-	mov cr.itir=r18
-	mov cr.ifa=r16
-	;;
-	ld8 r18=[r2]			// load per-CPU PTE
-	mov r16=IA64_TR_PERCPU_DATA;
-	;;
-	itr.d dtr[r16]=r18
-	;;
-	srlz.d
-	;;
 	// 3. Reload ITR for PAL code.
 	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
 	;;
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index bc11bb096f58..e796e29f8e15 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -195,3 +195,23 @@ ia64_patch_gate (void)
 	ia64_patch_vtop(START(vtop), END(vtop));
 	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
 }
+
+void ia64_patch_phys_stack_reg(unsigned long val)
+{
+	s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
+	s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
+	u64 ip, mask, imm;
+
+	/* see instruction format A4: adds r1 = imm13, r3 */
+	mask = (0x3fUL << 27) | (0x7f << 13);
+	imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;
+
+	while (offp < end) {
+		ip = (u64) offp + *offp;
+		ia64_patch(ip, mask, imm);
+		ia64_fc(ip);
+		++offp;
+	}
+	ia64_sync_i();
+	ia64_srlz_i();
+}
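The new ia64_patch_phys_stack_reg() rewrites the immediate of an `adds` instruction in place: the low 7 bits of the value land at instruction bits 13..19 and the next 6 bits at 27..32, which is exactly what the mask covers. Below is a standalone sketch of that arithmetic using the same mask/imm expressions as the patch; the demo wrapper, printed field labels, and the choice of 96*8+8 as an example value (the fallback in cpu_init() further down) are editorial, not kernel code.

```c
/* Standalone demo of the immediate split used by ia64_patch_phys_stack_reg(). */
#include <stdio.h>
#include <stdint.h>

static void show_phys_stack_imm(uint64_t val)
{
	/* same arithmetic as the patch above */
	uint64_t mask = (0x3fUL << 27) | (0x7fUL << 13);
	uint64_t imm  = (((val >> 7) & 0x3f) << 27) | ((val & 0x7f) << 13);

	printf("val=%4llu  low7=%#4llx  next6=%#4llx  imm=%#11llx  mask=%#11llx\n",
	       (unsigned long long)val,
	       (unsigned long long)(val & 0x7f),
	       (unsigned long long)((val >> 7) & 0x3f),
	       (unsigned long long)imm,
	       (unsigned long long)mask);
}

int main(void)
{
	/* e.g. 96 stacked registers -> 96*8 + 8 = 776 bytes */
	show_phys_stack_imm(96 * 8 + 8);
	return 0;
}
```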
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index dc7dd7648ec5..6e19da122ae3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -75,7 +75,6 @@ extern void ia64_setup_printk_clock(void);
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
-DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;
@@ -869,6 +868,7 @@ void __cpuinit
 cpu_init (void)
 {
 	extern void __cpuinit ia64_mmu_init (void *);
+	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
 	unsigned int max_ctx;
@@ -982,7 +982,10 @@ cpu_init (void)
 		num_phys_stacked = 96;
 	}
 	/* size of physical stacked register partition plus 8 bytes: */
-	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+	if (num_phys_stacked > max_num_phys_stacked) {
+		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
+		max_num_phys_stacked = num_phys_stacked;
+	}
 	platform_cpu_init();
 	pm_idle = default_idle;
 }
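With the per-CPU variable gone, the `adds` emitted by LOAD_PHYS_STACK_REG_SIZE() presumably already encodes a build-time default (the macro itself is defined outside this diff), so cpu_init() only re-patches when a CPU reports more stacked registers than anything seen so far, leaving the patched immediate at the maximum across all CPUs. A minimal sketch of that decision, with an assumed value of 96 for IA64_NUM_PHYS_STACK_REG and a stub standing in for ia64_patch_phys_stack_reg():

```c
/* Minimal sketch of the new tail of cpu_init(); stub and constant are editorial. */
#include <stdio.h>

#define IA64_NUM_PHYS_STACK_REG	96	/* assumed build-time default */

static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;

static void patch_phys_stack_reg(unsigned long bytes)	/* stand-in for ia64_patch_phys_stack_reg() */
{
	printf("re-patching phys stack size to %lu bytes\n", bytes);
}

static void cpu_init_phys_stack(unsigned long num_phys_stacked)
{
	/* Re-patch only when this CPU has more stacked registers than any
	 * CPU brought up so far (or than the build-time default). */
	if (num_phys_stacked > max_num_phys_stacked) {
		patch_phys_stack_reg(num_phys_stacked * 8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
}

int main(void)
{
	cpu_init_phys_stack(96);	/* matches the default: no patching */
	cpu_init_phys_stack(128);	/* hypothetical larger register file: patches once */
	cpu_init_phys_stack(128);	/* second CPU of the same kind: no further patching */
	return 0;
}
```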
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 25dd55e4db24..692382642118 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -78,6 +78,13 @@ SECTIONS
 	  __stop___mca_table = .;
 	}
 
+  .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+	{
+	  __start___phys_stack_reg_patchlist = .;
+	  *(.data.patch.phys_stack_reg)
+	  __end___phys_stack_reg_patchlist = .;
+	}
+
   /* Global data */
   _data = .;
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 4f36987eea72..5b70241741b4 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -355,7 +355,7 @@ setup_gate (void)
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-	unsigned long psr, pta, impl_va_bits;
+	unsigned long pta, impl_va_bits;
 	extern void __devinit tlb_init (void);
 
 #ifdef CONFIG_DISABLE_VHPT
@@ -364,15 +364,6 @@ ia64_mmu_init (void *my_cpu_data)
 # define VHPT_ENABLE_BIT	1
 #endif
 
-	/* Pin mapping for percpu area into TLB */
-	psr = ia64_clear_ic();
-	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-		 PERCPU_PAGE_SHIFT);
-
-	ia64_set_psr(psr);
-	ia64_srlz_i();
-
 	/*
 	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
 	 * address space.  The IA-64 architecture guarantees that at least 50 bits of