-rw-r--r--  arch/ia64/Kconfig                      3
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c       56
-rw-r--r--  arch/ia64/kernel/asm-offsets.c         7
-rw-r--r--  arch/ia64/kernel/crash.c              56
-rw-r--r--  arch/ia64/kernel/fsys.S               34
-rw-r--r--  arch/ia64/kernel/irq_ia64.c            2
-rw-r--r--  arch/ia64/kernel/kprobes.c           133
-rw-r--r--  arch/ia64/kernel/mca.c                11
-rw-r--r--  arch/ia64/kernel/perfmon.c             4
-rw-r--r--  arch/ia64/kernel/setup.c              23
-rw-r--r--  arch/ia64/kernel/smpboot.c             2
-rw-r--r--  arch/ia64/kernel/unaligned.c           3
-rw-r--r--  arch/ia64/mm/contig.c                  4
-rw-r--r--  arch/ia64/mm/discontig.c               4
-rw-r--r--  arch/ia64/mm/init.c                   14
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c         8
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c    2
-rw-r--r--  include/asm-ia64/kprobes.h             7
-rw-r--r--  include/asm-ia64/meminit.h             3
-rw-r--r--  include/asm-ia64/pal.h                72
-rw-r--r--  include/asm-ia64/pgtable.h             2
21 files changed, 338 insertions, 112 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eef457fda08f..ed21737a00c5 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -622,6 +622,9 @@ config IRQ_PER_CPU
 	bool
 	default y
 
+config IOMMU_HELPER
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 523eae6d3e49..9409de5c9441 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
 	return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
 {
 	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;
+
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
 	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		if (likely(*res_ptr != ~0UL)) {
 			bitshiftcnt = ffz(*res_ptr);
 			*res_ptr |= (1UL << bitshiftcnt);
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
-			pide += bitshiftcnt;
+			pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
 			ioc->res_bitshift = bitshiftcnt + bits_wanted;
 			goto found_it;
 		}
@@ -535,11 +551,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 			ASSERT(0 != mask);
 			for (; mask ; mask <<= o, bitshiftcnt += o) {
-				if(0 == ((*res_ptr) & mask)) {
+				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+				ret = iommu_is_span_boundary(tpide, bits_wanted,
+							     shift,
+							     boundary_size);
+				if ((0 == ((*res_ptr) & mask)) && !ret) {
 					*res_ptr |= mask;     /* mark resources busy! */
-					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-					pide <<= 3;	/* convert to bit address */
-					pide += bitshiftcnt;
+					pide = tpide;
 					ioc->res_bitshift = bitshiftcnt + bits_wanted;
 					goto found_it;
 				}
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 		end = res_end - qwords;
 
 		for (; res_ptr < end; res_ptr++) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift, boundary_size);
+			if (ret)
+				goto next_ptr;
 			for (i = 0 ; i < qwords ; i++) {
 				if (res_ptr[i] != 0)
 					goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 				res_ptr[i] = ~0UL;
 			res_ptr[i] |= RESMAP_MASK(bits);
 
-			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-			pide <<= 3;	/* convert to bit address */
+			pide = tpide;
 			res_ptr += qwords;
 			ioc->res_bitshift = bits;
 			goto found_it;
@@ -605,7 +627,7 @@ found_it:
  * resource bit map.
 */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
 			unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 			}
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed, 0);
+			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -936,7 +958,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1373,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
 		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
 			| dma_offset);
 		n_mappings++;
 	}
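
For reference, iommu_is_span_boundary() rejects candidate allocations that would cross a device's DMA segment boundary. A standalone sketch of the check (assuming the lib/iommu-helper implementation of this era; names outside the patch are illustrative):

#include <assert.h>
#include <stdio.h>

/* Does a run of nr map entries starting at bitmap index `index` cross a
 * boundary of `boundary_size` pages?  `shift` is the IOVA base in pages
 * (ioc->ibase >> iovp_shift above), so the test applies to the final
 * I/O addresses rather than raw bitmap indices.  boundary_size must be
 * a power of two. */
static int is_span_boundary(unsigned int index, unsigned int nr,
			    unsigned long shift, unsigned long boundary_size)
{
	assert((boundary_size & (boundary_size - 1)) == 0);
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	/* 4 pages at index 62 cross a 64-page boundary; at 60 they fit */
	printf("%d %d\n", is_span_boundary(62, 4, 0, 64),
	       is_span_boundary(60, 4, 0, 64));
	return 0;
}
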
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 5865130b0a92..230a6f92367f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,6 +35,9 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -51,6 +55,9 @@ void foo(void)
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fbe742ad2fde..90ef338cf46f 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		return NOTIFY_DONE;
 
 	switch (val) {
 	case DIE_INIT_MONARCH_PROCESS:
+		if (kdump_on_init) {
 			atomic_set(&kdump_in_progress, 1);
 			*(nd->monarch_cpu) = -1;
-		break;
-	case DIE_INIT_MONARCH_LEAVE:
+		}
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
+		if (kdump_on_init)
 			machine_kdump_on_init();
 		break;
 	case DIE_INIT_SLAVE_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
 		/* die_register->signr indicates whether the MCA is recoverable */
-		if (!args->signr)
-			machine_kdump_on_init();
-		break;
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
+		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
 		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
-		.child = kdump_on_init_table,
+		.child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 357b7e2adc63..c1625c7e1779 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -61,13 +61,29 @@ ENTRY(fsys_getpid)
 	.prologue
 	.altrp b6
 	.body
+	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	;;
+	ld8 r17=[r17]				// r17 = current->group_leader
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 	ld4 r9=[r9]
-	add r8=IA64_TASK_TGID_OFFSET,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	ld4 r8=[r8]				// r8 = current->tgid
+	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	;;
+	add r8=IA64_PID_LEVEL_OFFSET,r17
+	;;
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
+	mov r17=0
 	;;
 	cmp.ne p8,p0=0,r9
 (p8)	br.spnt.many fsys_fallback_syscall
@@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address)
 	.altrp b6
 	.body
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
 	;;
 	ld4 r9=[r9]
 	tnat.z p6,p7=r32		// check argument register for being NaT
+	ld8 r17=[r17]				// r17 = current->pids[PIDTYPE_PID].pid
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	add r8=IA64_TASK_PID_OFFSET,r16
+	add r8=IA64_PID_LEVEL_OFFSET,r17
 	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
 	;;
-	ld4 r8=[r8]
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
 	cmp.ne p8,p0=0,r9
 	mov r17=-1
 	;;
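
The assembly above is the fast-syscall rendering of the namespace-aware pid lookup. In C terms (a toy userspace model; field names mirror the 2.6.25-era structures, layouts are only illustrative), what fsys_getpid now computes is:

#include <stdio.h>

/* On 64-bit, struct upid is 32 bytes, which is why the assembly can
 * index numbers[] with level << IA64_UPID_SHIFT (a shift of 5). */
struct upid { int nr; void *ns; void *pid_chain[2]; };
struct pid { int level; struct upid numbers[2]; };
struct pid_link { struct pid *pid; };
struct task_struct { struct task_struct *group_leader; struct pid_link pids[1]; };

/* What fsys_getpid now computes: the group leader's pid number at the
 * task's own (deepest) namespace level. */
static int fsys_getpid_equiv(struct task_struct *t)
{
	struct pid *pid = t->group_leader->pids[0].pid;	/* pids[PIDTYPE_PID] */
	return pid->numbers[pid->level].nr;
}

int main(void)
{
	struct pid p = { .level = 1,
			 .numbers = { { .nr = 100 }, { .nr = 7 } } };
	struct task_struct leader = { .group_leader = &leader,
				      .pids = { { &p } } };
	/* pid 100 in the init namespace appears as pid 7 one level down */
	printf("tgid = %d\n", fsys_getpid_equiv(&leader));
	return 0;
}
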
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index d8be23fbe6bc..5538471e8d68 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	static unsigned char count;
 	static long last_time;
 
-	if (jiffies - last_time > 5*HZ)
+	if (time_after(jiffies, last_time + 5 * HZ))
 		count = 0;
 	if (++count < 5) {
 		last_time = jiffies;
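
This conversion (repeated below in mca.c, unaligned.c and the xpc files) exists because a plain `jiffies - last_time > 5*HZ` misbehaves around counter wraparound, while time_after() compares via a signed difference. A minimal userspace demonstration, with the macro body taken from <linux/jiffies.h>:

#include <stdio.h>

/* wraparound-safe "a is after b", as defined in <linux/jiffies.h> */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long b = ~0UL - 10;	/* jiffies shortly before wraparound */
	unsigned long a = b + 20;	/* 20 ticks later: counter has wrapped */

	printf("naive a > b      : %d\n", a > b);		/* 0 - wrong */
	printf("time_after(a, b) : %d\n", time_after(a, b));	/* 1 - right */
	return 0;
}
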
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 8d9a446a0d17..233434f4f88f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
 	{ u, u, u },  			/* 1F */
 };
 
+/* Insert a long branch code */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check whether the instruction in the given slot is a break */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot */
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * In this function, we check whether the target bundle modifies IP or
+ * it triggers an exception. If so, it cannot be boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle */
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare the long-jump bundle and disable other boosters if needed */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disable boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
 	return 0;
 }
 
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
-			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+			(unsigned long)p->ainsn.insn +
+			sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
+
 	switch (p->ainsn.slot) {
 	case 0:
 		dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
 * the ip to point back to the original stack address. To set the IP address
 * to original stack address, handle the case where we need to fixup the
 * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
 	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
 	bundle_t bundle;
 
 	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
 
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
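
The booster's long branch must squeeze a 60-bit bundle displacement into an MLX bundle, which is what the bit-slicing in set_brl_inst()/BRL_INST() above does: bit 59 of `rel` becomes the `i` bit, bits 20..58 the 39-bit immediate in slot 1, and bits 0..19 the 20-bit immediate in slot 2. A userspace round-trip check of that split (slot packing details such as the extra <<2 are abstracted away):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t rel = -0x123456789abcdLL;	/* (target - source) >> 4 */

	uint64_t i     = ((uint64_t)rel >> 59) & 1;		  /* sign bit */
	uint64_t imm39 = ((uint64_t)rel >> 20) & 0x7fffffffffULL; /* slot 1 */
	uint64_t imm20 = (uint64_t)rel & 0xfffff;		  /* slot 2 */

	/* reassemble the 60-bit field and sign-extend from bit 59 */
	uint64_t raw = (i << 59) | (imm39 << 20) | imm20;
	int64_t back = (int64_t)(raw << 4) >> 4;

	printf("rel=%lld reassembled=%lld\n", (long long)rel, (long long)back);
	assert(back == rel);
	return 0;
}
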
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 607006a6a976..e51bced3b0fa 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *	       Support multiple cpus going through OS_MCA in the same event.
  */
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -295,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void)
 	if (mlogbuf_finished)
 		return;
 
-	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+	if (mlogbuf_timestamp &&
+	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
 		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
 			" and the system seems to be messed up.\n");
 		ia64_mlogbuf_finish(0);
@@ -1311,20 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
-#ifdef CONFIG_KEXEC
-		atomic_set(&kdump_in_progress, 1);
-		monarch_cpu = -1;
-#endif
 	}
+
 	if (__get_cpu_var(ia64_mca_tr_reload)) {
 		mca_insert_tr(0x1); /*Reload dynamic itrs*/
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
+
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 	    == NOTIFY_STOP)
 		ia64_mca_spin(__func__);
 
-
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index a2aabfdc80d9..d1d24f4598da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx)
 	do_each_thread (g, t) {
 		if (t->thread.pfm_context == ctx) {
 			ret = 0;
-			break;
+			goto out;
 		}
 	} while_each_thread (g, t);
-
+out:
 	read_unlock(&tasklist_lock);
 
 	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b86a072418a2..5015ca1275ca 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -177,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+	void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+	if (start == PAGE_OFFSET) {
+		printk(KERN_WARNING "warning: skipping physical page 0\n");
+		start += PAGE_SIZE;
+		if (start >= end)
+			return 0;
+	}
+#endif
+	func = arg;
+	if (start < end)
+		call_pernode_memory(__pa(start), end - start, func);
+	return 0;
+}
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {
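
filter_memory() is an adapter: efi_memmap_walk() hands it (start, end, arg) ranges, and the real per-node consumer travels in `arg`, which is how `efi_memmap_walk(filter_memory, register_active_ranges)` works in the mm/ changes below. A standalone model of the pattern (names are illustrative; the function-pointer-through-void* cast is the same trick the kernel code relies on, non-standard but fine on these platforms):

#include <stdio.h>

typedef void (*pernode_fn)(unsigned long paddr, unsigned long len, int nid);

static void register_range(unsigned long paddr, unsigned long len, int nid)
{
	printf("node %d: %#lx + %#lx\n", nid, paddr, len);
}

/* adapter: generic (start, end, arg) walker callback -> per-node call */
static int filter_memory(unsigned long start, unsigned long end, void *arg)
{
	pernode_fn func = (pernode_fn)arg;	/* consumer rides in arg */

	if (start < end)
		func(start, end - start, 0);	/* nid lookup elided */
	return 0;
}

static void memmap_walk(int (*cb)(unsigned long, unsigned long, void *),
			void *arg)
{
	cb(0x4000, 0x10000, arg);	/* one fake EFI descriptor */
}

int main(void)
{
	memmap_walk(filter_memory, (void *)register_range);
	return 0;
}
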
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 32ee5979a042..16483be18c0b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -400,9 +400,9 @@ smp_callin (void)
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
+	unlock_ipi_calllock();
 
 	smp_setup_percpu_timer();
 
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6903361d11a5..ff0e7c10faa7 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -13,6 +13,7 @@
  * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes.
  * 2001/01/17 Add support emulation of unaligned kernel accesses.
  */
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
@@ -1290,7 +1291,7 @@ within_logging_rate_limit (void)
 {
 	static unsigned long count, last_time;
 
-	if (jiffies - last_time > 5*HZ)
+	if (time_after(jiffies, last_time + 5 * HZ))
 		count = 0;
 	if (count < 5) {
 		last_time = jiffies;
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 344f64eca7a9..798bf9835a51 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -45,8 +45,6 @@ void show_mem(void)
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
@@ -255,7 +253,7 @@ paging_init (void)
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 6136a4c6df11..544dc420c65e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -445,7 +445,7 @@ void __init find_memory(void)
 		mem_data[node].min_pfn = ~0UL;
 	}
 
-	efi_memmap_walk(register_active_ranges, NULL);
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
@@ -519,8 +519,6 @@ void show_mem(void)
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk(KERN_INFO "Free swap:       %6ldkB\n",
-	       nr_swap_pages<<(PAGE_SHIFT-10));
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index a4ca657c72c6..5c1de53c8c1c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -58,7 +58,6 @@ __ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
-	unsigned long order;
 
 	page = pte_page(pte);
 	addr = (unsigned long) page_address(page);
@@ -66,12 +65,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	if (PageCompound(page)) {
-		order = compound_order(page);
-		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
-	}
-	else
-		flush_icache_range(addr, addr + PAGE_SIZE);
+	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
@@ -553,12 +547,10 @@ find_largest_hole (u64 start, u64 end, void *arg)
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init
-register_active_ranges(u64 start, u64 end, void *arg)
+register_active_ranges(u64 start, u64 len, int nid)
 {
-	int nid = paddr_to_nid(__pa(start));
+	u64 end = start + len;
 
-	if (nid < 0)
-		nid = 0;
 #ifdef CONFIG_KEXEC
 	if (start > crashk_res.start && start < crashk_res.end)
 		start = crashk_res.end;
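
The __ia64_sync_icache_dcache() simplification leans on compound_order() returning 0 for a non-compound page, so one expression covers both arms of the old if/else:

#include <stdio.h>

#define PAGE_SHIFT	14		/* a common ia64 setting; illustrative */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* length to flush for a page of the given compound order */
static unsigned long flush_len(unsigned int order)
{
	return PAGE_SIZE << order;	/* order 0 == plain PAGE_SIZE */
}

int main(void)
{
	printf("order 0: %lu bytes\n", flush_len(0));	/* single page */
	printf("order 2: %lu bytes\n", flush_len(2));	/* 4-page compound page */
	return 0;
}
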
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 81785b78bc1e..9e0b164da9c2 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -199,7 +199,7 @@ xpc_timeout_partition_disengage_request(unsigned long data)
 	struct xpc_partition *part = (struct xpc_partition *) data;
 
 
-	DBUG_ON(jiffies < part->disengage_request_timeout);
+	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
 	(void) xpc_partition_disengaged(part);
 
@@ -230,7 +230,7 @@ xpc_hb_beater(unsigned long dummy)
 {
 	xpc_vars->heartbeat++;
 
-	if (jiffies >= xpc_hb_check_timeout) {
+	if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 		wake_up_interruptible(&xpc_act_IRQ_wq);
 	}
 
@@ -270,7 +270,7 @@ xpc_hb_checker(void *ignore)
 
 
 		/* checking of remote heartbeats is skewed by IRQ handling */
-		if (jiffies >= xpc_hb_check_timeout) {
+		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 			dev_dbg(xpc_part, "checking remote heartbeats\n");
 			xpc_check_remote_hb();
 
@@ -305,7 +305,7 @@ xpc_hb_checker(void *ignore)
 		/* wait for IRQ or timeout */
 		(void) wait_event_interruptible(xpc_act_IRQ_wq,
 			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-			jiffies >= xpc_hb_check_timeout ||
+			time_after_eq(jiffies, xpc_hb_check_timeout) ||
 			(volatile int) xpc_exiting));
 	}
 
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 7ba403232cb8..9e97c2684832 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -877,7 +877,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
 	if (part->disengage_request_timeout) {
 		if (!disengaged) {
-			if (jiffies < part->disengage_request_timeout) {
+			if (time_before(jiffies, part->disengage_request_timeout)) {
 				/* timelimit hasn't been reached yet */
 				return 0;
 			}
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9ff68e3..ef71b57fc2f4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
 #include <asm/break.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE	1
+#define MAX_INSN_SIZE	2	/* last half is for kprobe-booster */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
+				(0x1L << 12) |	/* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
 
 typedef union cmp_inst {
 	struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
 #define INST_FLAG_FIX_BRANCH_REG		2
 #define INST_FLAG_BREAK_INST			4
+#define INST_FLAG_BOOSTABLE			8
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
 	unsigned short slot;
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b61..7245a5781594 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
 extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
 
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
 
 #define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
 
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000	/* Use virtual mem map if hole is > than this */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3407d2..67b02901ead4 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
  *
  * 99/10/01	davidm	Make sure we pass zero for reserved parameters.
  * 00/03/07	davidm	Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
 #define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
 #define PAL_GET_HW_POLICY	48	/* Get current hardware resource sharing policy */
 #define PAL_SET_HW_POLICY	49	/* Set current hardware resource sharing policy */
+#define PAL_VP_INFO		50	/* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING	51	/* Hardware tracking status */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
 			wiv		: 1,	/* Way field valid */
 			reserved2	: 1,
 			dp		: 1,	/* Data poisoned on MBE */
-			reserved3	: 8,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
 
 			index		: 20,	/* Cache line index */
 			reserved4	: 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
 			dtc		: 1,	/* Fail in data TC */
 			itc		: 1,	/* Fail in inst. TC */
 			op		: 4,	/* Cache operation */
-			reserved3	: 30,
+			reserved3	: 6,
+			hlth		: 2,	/* Health indicator */
+			reserved4	: 22,
 
 			is		: 1,	/* instruction set (1 == ia32) */
 			iv		: 1,	/* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
 			way		: 6,	/* Way of structure */
 			wv		: 1,	/* way valid */
 			xv		: 1,	/* index valid */
-			reserved1	: 8,
+			reserved1	: 6,
+			hlth		: 2,	/* Health indicator */
 			index		: 8,	/* Index or set of the uarch
 						 * structure that failed.
 						 */
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
 
 /* Return the machine check dynamic processor state */
 static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
 	if (size)
 		*size = iprv.v0;
-	if (pds)
-		*pds = iprv.v1;
 	return iprv.status;
 }
 
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
 	return iprv.status;
 }
 
+typedef union pal_hw_tracking_u {
+	u64 pht_data;
+	struct {
+		u64 itc	:4,	/* Instruction cache tracking */
+		    dct	:4,	/* Data cache tracking */
+		    itt	:4,	/* Instruction TLB tracking */
+		    ddt	:4,	/* Data TLB tracking */
+		    reserved:48;
+	} pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+	if (status)
+		*status = iprv.v0;
+	return iprv.status;
+}
+
 /* Register a platform dependent location with PAL to which it can save
  * minimal processor state in the event of a machine check or initialization
  * event.
  */
 static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+	if (req_size)
+		*req_size = iprv.v0;
 	return iprv.status;
 }
 
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
 	return iprv.status;
 }
 
+typedef union pal_vp_info_u {
+	u64 pvi_val;
+	struct {
+		u64 index:	48,	/* virtual feature set info */
+		    vmm_id:	16;	/* feature set id */
+	} pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+	if (vp_info)
+		*vp_info = iprv.v0;
+	if (vmm_id)
+		*vmm_id = iprv.v1;
+	return iprv.status;
+}
+
 typedef union pal_itr_valid_u {
 	u64 piv_val;
 	struct {
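
With the extended ia64_pal_mc_register_mem() signature, a caller can learn how large a save area PAL actually wants when registration fails. A hypothetical caller (error-handling convention and names here are illustrative, not part of the patch):

/* hypothetical: register a min-state save area, report PAL's requirement */
static int __init register_mca_save_area(u64 pa, u64 size)
{
	u64 req_size = 0;

	if (ia64_pal_mc_register_mem(pa, size, &req_size) == 0)
		return 0;
	printk(KERN_WARNING "PAL_MC_REGISTER_MEM failed; "
	       "PAL reports %llu bytes required\n",
	       (unsigned long long)req_size);
	return -1;
}
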
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f14f614..ed70862ea247 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -371,7 +371,7 @@ pgd_index (unsigned long address)
 /* The offset in the 1-level directory is given by the 3 region bits
    (61..63) and the level-1 bits.  */
 static inline pgd_t*
-pgd_offset (struct mm_struct *mm, unsigned long address)
+pgd_offset (const struct mm_struct *mm, unsigned long address)
 {
 	return mm->pgd + pgd_index(address);
 }