author		Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 12:44:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 12:44:55 -0400
commit		4786b4ee22de6304e841b12ee22b849230d7fba3 (patch)
tree		08793b8fbcd63204d5d3355ac755745adcfef170 /arch/ia64/kernel
parent		253ba4e79edc695b2925bd2ef34de06ff4d4070c (diff)
parent		71b264f85ff50c14fe945ffff06ae0d5e9a9124e (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
[IA64] kdump: Add crash_save_vmcoreinfo for INIT
[IA64] Fix NUMA configuration issue
[IA64] Itanium Spec updates
[IA64] Untangle sync_icache_dcache() page size determination
[IA64] arch/ia64/kernel/: use time_* macros
[IA64] remove redundant display of free swap space in show_mem()
[IA64] make IOMMU respect the segment boundary limits
[IA64] kprobes: kprobe-booster for ia64
[IA64] fix getpid and set_tid_address fast system calls for pid namespaces
[IA64] Replace explicit jiffies tests with time_* macros.
[IA64] use goto to jump out do/while_each_thread
[IA64] Fix unlock ordering in smp_callin
[IA64] pgd_offset() constfication.
[IA64] kdump: crash.c coding style fix
[IA64] kdump: add kdump_on_fatal_mca
[IA64] Minimize per_cpu reservations.
[IA64] Correct pernodesize calculation.
[IA64] Kernel parameter for max number of concurrent global TLB purges
[IA64] Multiple outstanding ptc.g instruction support
[IA64] Implement smp_call_function_mask for ia64
...
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/acpi.c		|    4
-rw-r--r--	arch/ia64/kernel/asm-offsets.c	|   13
-rw-r--r--	arch/ia64/kernel/crash.c	|   56
-rw-r--r--	arch/ia64/kernel/efi.c		|   46
-rw-r--r--	arch/ia64/kernel/entry.S	|   65
-rw-r--r--	arch/ia64/kernel/fsys.S		|   88
-rw-r--r--	arch/ia64/kernel/head.S		|   20
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	|    2
-rw-r--r--	arch/ia64/kernel/ivt.S		|   69
-rw-r--r--	arch/ia64/kernel/kprobes.c	|  133
-rw-r--r--	arch/ia64/kernel/mca.c		|   60
-rw-r--r--	arch/ia64/kernel/mca_asm.S	|    5
-rw-r--r--	arch/ia64/kernel/minstate.h	|   14
-rw-r--r--	arch/ia64/kernel/numa.c		|    2
-rw-r--r--	arch/ia64/kernel/patch.c	|    8
-rw-r--r--	arch/ia64/kernel/perfmon.c	|    4
-rw-r--r--	arch/ia64/kernel/process.c	|   30
-rw-r--r--	arch/ia64/kernel/ptrace.c	| 1217
-rw-r--r--	arch/ia64/kernel/setup.c	|   31
-rw-r--r--	arch/ia64/kernel/smp.c		|   82
-rw-r--r--	arch/ia64/kernel/smpboot.c	|    2
-rw-r--r--	arch/ia64/kernel/time.c		|   78
-rw-r--r--	arch/ia64/kernel/unaligned.c	|    3
23 files changed, 1595 insertions(+), 437 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 78f28d825f30..c7467f863c7a 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -423,6 +423,7 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
+cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
 static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
@@ -482,6 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		(pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
+	cpu_set(srat_num_cpus, early_cpu_possible_map);
 	srat_num_cpus++;
 }
 
@@ -559,7 +561,7 @@ void __init acpi_numa_arch_fixup(void)
 	}
 
 	/* set logical node id in cpu structure */
-	for (i = 0; i < srat_num_cpus; i++)
+	for_each_possible_early_cpu(i)
 		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
 	printk(KERN_INFO "Number of logical nodes in system = %d\n",
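(The for_each_possible_early_cpu() iterator used in the last hunk walks only the CPUs recorded in early_cpu_possible_map during the SRAT scan, rather than all srat_num_cpus slots. A minimal sketch of how such an iterator can be defined over the new mask; the real definition lives in the ia64 ACPI headers, outside this diff:)

/* Illustrative only -- not part of the diff above.
 * Iterate over the CPUs that were seen while parsing the SRAT. */
#define for_each_possible_early_cpu(cpu) \
	for_each_cpu_mask((cpu), early_cpu_possible_map)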
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 0aebc6f79e95..230a6f92367f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,17 +35,29 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
+	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
+	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
+	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
+#endif
 
 	BLANK();
 
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fbe742ad2fde..90ef338cf46f 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		return NOTIFY_DONE;
 
 	switch (val) {
 	case DIE_INIT_MONARCH_PROCESS:
-		atomic_set(&kdump_in_progress, 1);
-		*(nd->monarch_cpu) = -1;
-		break;
-	case DIE_INIT_MONARCH_LEAVE:
-		machine_kdump_on_init();
+		if (kdump_on_init) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+		}
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
+		if (kdump_on_init)
+			machine_kdump_on_init();
 		break;
 	case DIE_INIT_SLAVE_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
 		/* die_register->signr indicate if MCA is recoverable */
-		if (!args->signr)
-			machine_kdump_on_init();
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
 		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
 		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
-		.child = kdump_on_init_table,
+		.child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
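(The new table entry exposes the flag as /proc/sys/kernel/kdump_on_fatal_mca next to the existing kdump_on_init, so dumps triggered by a fatal MCA can be disabled at run time independently of INIT-triggered dumps. A sketch of how such a ctl_table tree is typically hooked up; the actual registration call sits elsewhere in crash.c, outside the hunks shown:)

/* Illustrative only -- crash.c already registers sys_table in code
 * not shown in this diff. */
static struct ctl_table_header *kdump_sysctl_hdr;

static int __init kdump_sysctl_init(void)
{
	kdump_sysctl_hdr = register_sysctl_table(sys_table);
	return 0;
}
__initcall(kdump_sysctl_init);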
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 728d7247a1a6..d45f215bc8fc 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,6 +37,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mca.h>
+#include <asm/tlbflush.h>
 
 #define EFI_DEBUG	0
 
@@ -403,6 +404,41 @@ efi_get_pal_addr (void)
 	return NULL;
 }
 
+
+static u8 __init palo_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+
+	return sum;
+}
+
+/*
+ * Parse and handle PALO table which is published at:
+ * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
+ */
+static void __init handle_palo(unsigned long palo_phys)
+{
+	struct palo_table *palo = __va(palo_phys);
+	u8 checksum;
+
+	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
+		printk(KERN_INFO "PALO signature incorrect.\n");
+		return;
+	}
+
+	checksum = palo_checksum((u8 *)palo, palo->length);
+	if (checksum) {
+		printk(KERN_INFO "PALO checksum incorrect.\n");
+		return;
+	}
+
+	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
+}
+
 void
 efi_map_pal_code (void)
 {
@@ -432,6 +468,7 @@ efi_init (void)
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
 	int i;
+	unsigned long palo_phys;
 
 	/*
 	 * It's too early to be able to use the standard kernel command line
@@ -496,6 +533,8 @@ efi_init (void)
 	efi.hcdp = EFI_INVALID_TABLE_ADDR;
 	efi.uga = EFI_INVALID_TABLE_ADDR;
 
+	palo_phys = EFI_INVALID_TABLE_ADDR;
+
 	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = config_tables[i].table;
@@ -515,10 +554,17 @@ efi_init (void)
 		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx", config_tables[i].table);
+		} else if (efi_guidcmp(config_tables[i].guid,
+			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
+			palo_phys = config_tables[i].table;
+			printk(" PALO=0x%lx", config_tables[i].table);
 		}
 	}
 	printk("\n");
 
+	if (palo_phys != EFI_INVALID_TABLE_ADDR)
+		handle_palo(palo_phys);
+
 	runtime = __va(efi.systab->runtime);
 	efi.get_time = phys_get_time;
 	efi.set_time = phys_set_time;
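(handle_palo() accepts the table only when the byte-wise sum of the whole table, checksum field included, is zero modulo 256 -- which is why a nonzero palo_checksum() return rejects it. A standalone illustration of the same validity rule, not kernel code:)

#include <stdint.h>

/* A PALO table is valid when all of its bytes, including the stored
 * checksum byte, sum to 0 (mod 256). */
static int palo_table_ok(const uint8_t *table, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum = (uint8_t)(sum + table[i]);
	return sum == 0;
}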
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3c331c464b40..b0be4a280174 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -710,6 +710,16 @@ ENTRY(ia64_leave_syscall)
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
 .work_processed_syscall:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	adds r2=PT(LOADRS)+16,r12
+(pUStk)	mov.m r22=ar.itc			// fetch time at leave
+	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
+	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
+	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
+	;;
+#else
 	adds r2=PT(LOADRS)+16,r12
 	adds r3=PT(AR_BSPSTORE)+16,r12
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -718,6 +728,7 @@ ENTRY(ia64_leave_syscall)
 	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 	nop.i 0
 	;;
+#endif
 	mov r16=ar.bsp				// M2  get existing backing store pointer
 	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
@@ -737,12 +748,21 @@ ENTRY(ia64_leave_syscall)
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
 	ld8 r28=[r3],16		// M0|1 load cr.iip
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r30=[r2],16		// M0|1 load cr.ifs
+	ld8 r25=[r3],16		// M0|1 load ar.unat
+(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+#else
 	mov r22=r0		// A    clear r22
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
 	ld8 r25=[r3],16		// M0|1 load ar.unat
 (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
+#endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
 	nop 0
@@ -759,7 +779,11 @@ ENTRY(ia64_leave_syscall)
 	ld8.fill r1=[r3],16		// M0|1 load r1
 (pUStk) mov r17=1			// A
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk) st1 [r15]=r17			// M2|3
+#else
 (pUStk) st1 [r14]=r17			// M2|3
+#endif
 	ld8.fill r13=[r3],16		// M0|1
 	mov f8=f0			// F    clear f8
 	;;
@@ -775,12 +799,22 @@ ENTRY(ia64_leave_syscall)
 	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
 	cover			// B    add current frame into dirty partition & set cr.ifs
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov r19=ar.bsp		// M2   get new backing store pointer
+	st8 [r14]=r22		// M	save time at leave
+	mov f10=f0		// F    clear f10
+
+	mov r22=r0		// A	clear r22
+	movl r14=__kernel_syscall_via_epc // X
+	;;
+#else
 	mov r19=ar.bsp		// M2   get new backing store pointer
 	mov f10=f0		// F    clear f10
 
 	nop.m 0
 	movl r14=__kernel_syscall_via_epc // X
 	;;
+#endif
 	mov.m ar.csd=r0		// M2   clear ar.csd
 	mov.m ar.ccv=r0		// M2   clear ar.ccv
 	mov b7=r14		// I0   clear b7 (hint with __kernel_syscall_via_epc)
@@ -913,10 +947,18 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.pred.rel.mutex pUStk,pKStk
+(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+(pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
+	nop.i 0
+	;;
+#else
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
+#endif
 	ld8 r29=[r16],16	// load cr.ipsr
 	ld8 r28=[r17],16	// load cr.iip
 	;;
@@ -938,15 +980,37 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	ld8.fill r12=[r16],16
 	ld8.fill r13=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
+#else
 (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
+#endif
 	;;
 	ld8 r20=[r16],16	// ar.fpsr
 	ld8.fill r15=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
+#endif
 	;;
 	ld8.fill r14=[r16],16
 	ld8.fill r2=[r17]
 (pUStk)	mov r17=1
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	//  mmi_ :  ld8 st1 shr;;         mmi_ : st8 st1 shr;;
+	//  mib  :  mov add br        ->  mib  : ld8 add br
+	//  bbb_ :  br  nop cover;;       mbb_ : mov br  cover;;
+	//
+	//  no one require bsp in r16 if (pKStk) branch is selected.
+(pUStk)	st8 [r3]=r22		// save time at leave
+(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
+	shr.u r18=r19,16	// get byte size of existing "dirty" partition
+	;;
+	ld8.fill r3=[r16]	// deferred
+	LOAD_PHYS_STACK_REG_SIZE(r17)
+(pKStk)	br.cond.dpnt skip_rbs_switch
+	mov r16=ar.bsp		// get existing backing store pointer
+#else
 	ld8.fill r3=[r16]
 (pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
 	shr.u r18=r19,16	// get byte size of existing "dirty" partition
@@ -954,6 +1018,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov r16=ar.bsp		// get existing backing store pointer
 	LOAD_PHYS_STACK_REG_SIZE(r17)
 (pKStk)	br.cond.dpnt skip_rbs_switch
+#endif
 
 	/*
 	 * Restore user backing store.
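(All the CONFIG_VIRT_CPU_ACCOUNTING blocks above implement one scheme: ar.itc is sampled on every kernel entry and exit, and the deltas are folded into per-thread ac_stime/ac_utime totals via the ac_stamp/ac_leave timestamps. In C terms, the entry-side update is roughly the following sketch -- the names mirror the thread_info fields, while the assembly interleaves these operations with the register restore for bundle-scheduling reasons:)

/* Sketch of the entry-side accounting; 'now' is the ar.itc value
 * sampled early in the entry path (r30/r20 in the assembly above). */
struct ti_acct {
	unsigned long ac_stamp;	/* last accounting point inside the kernel */
	unsigned long ac_leave;	/* timestamp taken when leaving the kernel */
	unsigned long ac_stime;	/* accumulated kernel-mode cycles */
	unsigned long ac_utime;	/* accumulated user-mode cycles */
};

static void account_sys_enter_c(struct ti_acct *ti, unsigned long now)
{
	ti->ac_stime += ti->ac_leave - ti->ac_stamp; /* kernel time up to last leave */
	ti->ac_utime += now - ti->ac_leave;          /* user time since last leave */
	ti->ac_stamp = now;                          /* restart the stopwatch */
}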
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 44841971f077..c1625c7e1779 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -61,13 +61,29 @@ ENTRY(fsys_getpid)
	.prologue
	.altrp b6
	.body
+	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	;;
+	ld8 r17=[r17]				// r17 = current->group_leader
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
	;;
	ld4 r9=[r9]
-	add r8=IA64_TASK_TGID_OFFSET,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
	;;
	and r9=TIF_ALLWORK_MASK,r9
-	ld4 r8=[r8]				// r8 = current->tgid
+	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	;;
+	add r8=IA64_PID_LEVEL_OFFSET,r17
+	;;
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
+	mov r17=0
	;;
	cmp.ne p8,p0=0,r9
 (p8)	br.spnt.many fsys_fallback_syscall
@@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address)
	.altrp b6
	.body
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
	;;
	ld4 r9=[r9]
	tnat.z p6,p7=r32		// check argument register for being NaT
+	ld8 r17=[r17]			// r17 = current->pids[PIDTYPE_PID].pid
	;;
	and r9=TIF_ALLWORK_MASK,r9
-	add r8=IA64_TASK_PID_OFFSET,r16
+	add r8=IA64_PID_LEVEL_OFFSET,r17
	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
	;;
-	ld4 r8=[r8]
+	ld4 r8=[r8]			// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8			// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]			// r8 = pid->numbers[pid->level].nr
+	;;
	cmp.ne p8,p0=0,r9
	mov r17=-1
	;;
@@ -210,27 +236,25 @@ ENTRY(fsys_gettimeofday)
	// Note that instructions are optimized for McKinley. McKinley can
	// process two bundles simultaneously and therefore we continuously
	// try to feed the CPU two bundles and then a stop.
-	//
-	// Additional note that code has changed a lot. Optimization is TBD.
-	// Comments begin with "?" are maybe outdated.
-	tnat.nz p6,p0 = r31	// ? branch deferred to fit later bundle
-	mov pr = r30,0xc000	// Set predicates according to function
+
	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
+	tnat.nz p6,p0 = r31		// guard against Nat argument
+(p6)	br.cond.spnt.few .fail_einval
	movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
	;;
+	ld4 r2 = [r2]			// process work pending flags
	movl r29 = itc_jitter_data	// itc_jitter
	add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20	// wall_time
-	ld4 r2 = [r2]			// process work pending flags
-	;;
-(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
	add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
-	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
+	mov pr = r30,0xc000	// Set predicates according to function
+	;;
	and r2 = TIF_ALLWORK_MASK,r2
-(p6)	br.cond.spnt.few .fail_einval	// ? deferred branch
+	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
+(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
	;;
	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20	// clksrc_cycle_last
	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
 (p6)	br.cond.spnt.many fsys_fallback_syscall
	;;
	// Begin critical section
 .time_redo:
@@ -258,7 +282,6 @@ ENTRY(fsys_gettimeofday)
 (p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
 (p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
 (p13)	ld8 r25 = [r19]		// get itc_lastcycle value
-	;;		// ? could be removed by moving the last add upward
	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
	;;
	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
@@ -285,13 +308,12 @@ ENTRY(fsys_gettimeofday)
 EX(.fail_efault, probe.w.fault r31, 3)
	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
	;;
-	// ? simulate tbit.nz.or p7,p0 = r28,0
	getf.sig r2 = f8
	mf
	;;
	ld4 r10 = [r20]		// gtod_lock.sequence
	shr.u r2 = r2,r23	// shift by factor
-	;;		// ? overloaded 3 bundles!
+	;;
	add r8 = r8,r2		// Add xtime.nsecs
	cmp4.ne p7,p0 = r28,r10
 (p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
@@ -319,9 +341,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
 EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
 (p14)	xmpy.hu f8 = f8, f7		// xmpy has 5 cycles latency so use it
	;;
-	mov r8 = r0
 (p14)	getf.sig r2 = f8
	;;
+	mov r8 = r0
 (p14)	shr.u r21 = r2, 4
	;;
 EX(.fail_efault, st8 [r31] = r9)
@@ -660,7 +682,11 @@ GLOBAL_ENTRY(fsys_bubble_down)
	nop.i 0
	;;
	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
	nop.m 0
+#endif
	nop.i 0
	;;
	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
@@ -682,6 +708,28 @@ GLOBAL_ENTRY(fsys_bubble_down)
	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
	br.call.sptk.many b7=ia64_syscall_setup	// B
	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at leave kernel
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime before leave kernel
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r30,r19				// elapsed time in user mode
+	;;
+	add r20=r20,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r20				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+#endif
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	mov rp=r14				// I0   set the real return addr
	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
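(For orientation: the rewritten fsys_getpid fast path is the assembly form of a pid-namespace-aware tgid lookup; expressed in C it is roughly the sketch below. It has to be open-coded because an fsyscall runs on the user stack and may not sleep or take locks:)

/* C sketch of the lookup the fast path performs (illustrative only). */
static pid_t fast_getpid_equivalent(struct task_struct *tsk)
{
	struct pid *pid = tsk->group_leader->pids[PIDTYPE_PID].pid;

	/* numbers[pid->level] is the upid for the task's own namespace;
	 * IA64_UPID_SHIFT (5) encodes sizeof(struct upid) == 32 so the
	 * assembly can index the array with a shift instead of a multiply. */
	return pid->numbers[pid->level].nr;
}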
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index d3a41d5f8d12..ddeab4e36fd5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1002,6 +1002,26 @@ GLOBAL_ENTRY(sched_clock)
	br.ret.sptk.many rp
 END(sched_clock)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+GLOBAL_ENTRY(cycle_to_cputime)
+	alloc r16=ar.pfs,1,0,0,0
+	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	;;
+	ldf8 f8=[r8]
+	;;
+	setf.sig f9=r32
+	;;
+	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
+	xmpy.hu f11=f9,f8	// calculate high 64 bits of 128-bit product
+	;;
+	getf.sig r8=f10		//						(5 cyc)
+	getf.sig r9=f11
+	;;
+	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+	br.ret.sptk.many rp
+END(cycle_to_cputime)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 GLOBAL_ENTRY(start_kernel_thread)
	.prologue
	.save rp, r0				// this is the end of the call-chain
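(cycle_to_cputime scales an ITC cycle count to nanoseconds as (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT, using xmpy.lu/xmpy.hu to form the full 128-bit product and shrp to extract the shifted result, so the fixed-point multiply cannot overflow. The same computation as a C sketch, assuming a compiler with __int128:)

/* Sketch of the fixed-point scaling done by cycle_to_cputime above. */
static unsigned long cycle_to_cputime_c(unsigned long cyc,
					unsigned long nsec_per_cyc,
					unsigned int shift)
{
	/* full 64x64 -> 128-bit product, then shift, as xmpy + shrp do */
	unsigned __int128 prod = (unsigned __int128)cyc * nsec_per_cyc;

	return (unsigned long)(prod >> shift);
}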
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index d8be23fbe6bc..5538471e8d68 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
			static unsigned char count;
			static long last_time;
 
-			if (jiffies - last_time > 5*HZ)
+			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
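(time_after() is the idiomatic way to compare jiffies values: it reduces the test to the sign of a difference, so it stays correct across counter wrap-around and documents the intent better than the open-coded subtraction it replaces. Its definition in <linux/jiffies.h> is essentially the following; the real macro also typechecks its arguments:)

/* Simplified from <linux/jiffies.h>: true iff a is after b,
 * correct even when the jiffies counter wraps. */
#define time_after(a, b)	((long)((b) - (a)) < 0)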
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 34f44d8be00d..6678c49daba3 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -805,8 +805,13 @@ ENTRY(break_fault)
 
 (p8)	adds r28=16,r28			// A    switch cr.iip to next bundle
 (p9)	adds r8=1,r8			// A    increment ei to next slot
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	;;
+	mov b6=r30			// I0   setup syscall handler branch reg early
+#else
	nop.i 0
	;;
+#endif
 
	mov.m r25=ar.unat		// M2 (5 cyc)
	dep r29=r8,r29,41,2		// I0   insert new ei into cr.ipsr
@@ -817,7 +822,11 @@ ENTRY(break_fault)
	//
	///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0			// M2|3 clear current->thread.on_ustack flag
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc		// M    get cycle for accounting
+#else
	mov b6=r30			// I0   setup syscall handler branch reg early
+#endif
	cmp.eq pKStk,pUStk=r0,r17	// A    were we on kernel stacks already?
 
	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
@@ -829,6 +838,30 @@ ENTRY(break_fault)
	cmp.eq p14,p0=r9,r0		// A    are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
+(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M  get last stamp
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M  time at leave
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M  cumulated stime
+	ld8 r21=[r17]				// M  cumulated utime
+	sub r22=r19,r18				// A  stime before leave
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M  update stamp
+	sub r18=r30,r19				// A  elapsed time in user
+	;;
+	add r20=r20,r22				// A  sum stime
+	add r21=r21,r18				// A  sum utime
+	;;
+	st8 [r16]=r20				// M  update stime
+	st8 [r17]=r21				// M  update utime
+	;;
+.skip_accounting:
+#endif
	mov ar.rsc=0x3			// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
	bsw.1				// B (6 cyc) regs are saved, switch to bank 1
@@ -928,6 +961,7 @@ END(interrupt)
 *	- r27: saved ar.rsc
 *	- r28: saved cr.iip
 *	- r29: saved cr.ipsr
+ *	- r30: ar.itc for accounting (don't touch)
 *	- r31: saved pr
 *	- b0: original contents (to be saved)
 * On exit:
@@ -1090,6 +1124,41 @@ END(dispatch_illegal_op_fault)
	DBG_FAULT(16)
	FAULT(16)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	/*
+	 * There is no particular reason for this code to be here, other than
+	 * that there happens to be space here that would go unused otherwise.
+	 * If this fault ever gets "unreserved", simply moved the following
+	 * code to a more suitable spot...
+	 *
+	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
+	 * enabled and if the macro is entered from user mode.
+	 */
+ENTRY(account_sys_enter)
+	// mov.m r20=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at left from kernel
+	;;
+	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime before leave kernel
+	;;
+	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r20,r19				// elapsed time in user mode
+	;;
+	add r23=r23,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r23				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+	br.ret.sptk.many rp
+END(account_sys_enter)
+#endif
+
	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 8d9a446a0d17..233434f4f88f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
	{ u, u, u },			/* 1F */
 };
 
+/* Insert a long branch code */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check the instruction in the slot is break */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot*/
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * In this function, we check whether the target bundle modifies IP or
+ * it triggers an exception. If so, it cannot be boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle*/
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare long jump bundle and disables other boosters if need */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disables boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
	return 0;
 }
 
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
	src = &p->opcode.bundle;
 
	flush_icache_range((unsigned long)p->ainsn.insn,
-			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+			   (unsigned long)p->ainsn.insn +
+			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
+
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;
 
	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
 
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
	return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;
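(The point of the booster: a boosted probe resumes by executing the copied bundle at p->ainsn.insn[0] and then falling into the brl planted in p->ainsn.insn[1], which jumps straight back to the bundle after the probe point -- one break trap per hit instead of a break plus a single-step trap. Sketched layout, illustrative only:)

/* Per-probe instruction buffer after prepare_booster() (sketch):
 *
 *   p->ainsn.insn[0].bundle : copy of the displaced original bundle
 *   p->ainsn.insn[1].bundle : brl back to (probed address + 16)
 *
 * pre_kprobes_handler() points cr.iip at insn[0] with single-stepping
 * disabled, so the CPU runs the copy and the brl in one go.
 */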
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6c18221dba36..e51bced3b0fa 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -69,6 +69,7 @@ | |||
69 | * 2007-04-27 Russ Anderson <rja@sgi.com> | 69 | * 2007-04-27 Russ Anderson <rja@sgi.com> |
70 | * Support multiple cpus going through OS_MCA in the same event. | 70 | * Support multiple cpus going through OS_MCA in the same event. |
71 | */ | 71 | */ |
72 | #include <linux/jiffies.h> | ||
72 | #include <linux/types.h> | 73 | #include <linux/types.h> |
73 | #include <linux/init.h> | 74 | #include <linux/init.h> |
74 | #include <linux/sched.h> | 75 | #include <linux/sched.h> |
@@ -97,6 +98,7 @@ | |||
97 | 98 | ||
98 | #include <asm/irq.h> | 99 | #include <asm/irq.h> |
99 | #include <asm/hw_irq.h> | 100 | #include <asm/hw_irq.h> |
101 | #include <asm/tlb.h> | ||
100 | 102 | ||
101 | #include "mca_drv.h" | 103 | #include "mca_drv.h" |
102 | #include "entry.h" | 104 | #include "entry.h" |
@@ -112,6 +114,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ | |||
112 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ | 114 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ |
113 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ | 115 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ |
114 | DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ | 116 | DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ |
117 | DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */ | ||
115 | 118 | ||
116 | unsigned long __per_cpu_mca[NR_CPUS]; | 119 | unsigned long __per_cpu_mca[NR_CPUS]; |
117 | 120 | ||
@@ -293,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void) | |||
293 | if (mlogbuf_finished) | 296 | if (mlogbuf_finished) |
294 | return; | 297 | return; |
295 | 298 | ||
296 | if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) { | 299 | if (mlogbuf_timestamp && |
300 | time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) { | ||
297 | printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " | 301 | printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " |
298 | " and the system seems to be messed up.\n"); | 302 | " and the system seems to be messed up.\n"); |
299 | ia64_mlogbuf_finish(0); | 303 | ia64_mlogbuf_finish(0); |
@@ -1182,6 +1186,49 @@ all_in: | |||
1182 | return; | 1186 | return; |
1183 | } | 1187 | } |
1184 | 1188 | ||
1189 | /* mca_insert_tr | ||
1190 | * | ||
1191 | * Switch rid when TR reload and needed! | ||
1192 | * iord: 1: itr, 2: itr; | ||
1193 | * | ||
1194 | */ | ||
1195 | static void mca_insert_tr(u64 iord) | ||
1196 | { | ||
1197 | |||
1198 | int i; | ||
1199 | u64 old_rr; | ||
1200 | struct ia64_tr_entry *p; | ||
1201 | unsigned long psr; | ||
1202 | int cpu = smp_processor_id(); | ||
1203 | |||
1204 | psr = ia64_clear_ic(); | ||
1205 | for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { | ||
1206 | p = &__per_cpu_idtrs[cpu][iord-1][i]; | ||
1207 | if (p->pte & 0x1) { | ||
1208 | old_rr = ia64_get_rr(p->ifa); | ||
1209 | if (old_rr != p->rr) { | ||
1210 | ia64_set_rr(p->ifa, p->rr); | ||
1211 | ia64_srlz_d(); | ||
1212 | } | ||
1213 | ia64_ptr(iord, p->ifa, p->itir >> 2); | ||
1214 | ia64_srlz_i(); | ||
1215 | if (iord & 0x1) { | ||
1216 | ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2); | ||
1217 | ia64_srlz_i(); | ||
1218 | } | ||
1219 | if (iord & 0x2) { | ||
1220 | ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2); | ||
1221 | ia64_srlz_i(); | ||
1222 | } | ||
1223 | if (old_rr != p->rr) { | ||
1224 | ia64_set_rr(p->ifa, old_rr); | ||
1225 | ia64_srlz_d(); | ||
1226 | } | ||
1227 | } | ||
1228 | } | ||
1229 | ia64_set_psr(psr); | ||
1230 | } | ||
1231 | |||
1185 | /* | 1232 | /* |
1186 | * ia64_mca_handler | 1233 | * ia64_mca_handler |
1187 | * | 1234 | * |
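mca_insert_tr() above walks __per_cpu_idtrs[], the per-CPU table of dynamically registered translation registers, and reinserts every valid entry after the MCA. Entries presumably land in that table through the dynamic-TR allocation API added in the same series; a hedged usage sketch (ia64_itr_entry() and its signature are assumptions, not shown in this diff):

/*
 * Hypothetical caller: pin a kernel mapping with a dynamic data TR.
 * The target mask matches the iord encoding above: 0x1 = itr,
 * 0x2 = dtr, 0x3 = both.  After an MCA, mca_insert_tr() restores the
 * pin along with the rest of __per_cpu_idtrs[].
 */
static int pin_mapping(u64 vaddr, u64 pte, u64 log_size)
{
	int slot = ia64_itr_entry(0x2, vaddr, pte, log_size);

	return slot < 0 ? slot : 0;	/* negative: no free TR slot */
}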
@@ -1266,16 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1266 | } else { | 1313 | } else { |
1267 | /* Dump buffered message to console */ | 1314 | /* Dump buffered message to console */ |
1268 | ia64_mlogbuf_finish(1); | 1315 | ia64_mlogbuf_finish(1); |
1269 | #ifdef CONFIG_KEXEC | ||
1270 | atomic_set(&kdump_in_progress, 1); | ||
1271 | monarch_cpu = -1; | ||
1272 | #endif | ||
1273 | } | 1316 | } |
1317 | |||
1318 | if (__get_cpu_var(ia64_mca_tr_reload)) { | ||
1319 | mca_insert_tr(0x1); /* Reload dynamic itrs */ | ||
1320 | mca_insert_tr(0x2); /* Reload dynamic dtrs */ | ||
1321 | } | ||
1322 | |||
1274 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) | 1323 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) |
1275 | == NOTIFY_STOP) | 1324 | == NOTIFY_STOP) |
1276 | ia64_mca_spin(__func__); | 1325 | ia64_mca_spin(__func__); |
1277 | 1326 | ||
1278 | |||
1279 | if (atomic_dec_return(&mca_count) > 0) { | 1327 | if (atomic_dec_return(&mca_count) > 0) { |
1280 | int i; | 1328 | int i; |
1281 | 1329 | ||
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index 8bc7d259e0c6..a06d46548ff9 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
@@ -219,8 +219,13 @@ ia64_reload_tr: | |||
219 | mov r20=IA64_TR_CURRENT_STACK | 219 | mov r20=IA64_TR_CURRENT_STACK |
220 | ;; | 220 | ;; |
221 | itr.d dtr[r20]=r16 | 221 | itr.d dtr[r20]=r16 |
222 | GET_THIS_PADDR(r2, ia64_mca_tr_reload) | ||
223 | mov r18 = 1 | ||
222 | ;; | 224 | ;; |
223 | srlz.d | 225 | srlz.d |
226 | ;; | ||
227 | st8 [r2] =r18 | ||
228 | ;; | ||
224 | 229 | ||
225 | done_tlb_purge_and_reload: | 230 | done_tlb_purge_and_reload: |
226 | 231 | ||
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index c9ac8bada786..7c548ac52bbc 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h | |||
@@ -3,6 +3,18 @@ | |||
3 | 3 | ||
4 | #include "entry.h" | 4 | #include "entry.h" |
5 | 5 | ||
6 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
7 | /* read ar.itc in advance, and use it before leaving bank 0 */ | ||
8 | #define ACCOUNT_GET_STAMP \ | ||
9 | (pUStk) mov.m r20=ar.itc; | ||
10 | #define ACCOUNT_SYS_ENTER \ | ||
11 | (pUStk) br.call.spnt rp=account_sys_enter \ | ||
12 | ;; | ||
13 | #else | ||
14 | #define ACCOUNT_GET_STAMP | ||
15 | #define ACCOUNT_SYS_ENTER | ||
16 | #endif | ||
17 | |||
6 | /* | 18 | /* |
7 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 19 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
8 | * the minimum state necessary that allows us to turn psr.ic back | 20 | * the minimum state necessary that allows us to turn psr.ic back |
@@ -122,11 +134,13 @@ | |||
122 | ;; \ | 134 | ;; \ |
123 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | 135 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ |
124 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | 136 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ |
137 | ACCOUNT_GET_STAMP \ | ||
125 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | 138 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ |
126 | ;; \ | 139 | ;; \ |
127 | EXTRA; \ | 140 | EXTRA; \ |
128 | movl r1=__gp; /* establish kernel global pointer */ \ | 141 | movl r1=__gp; /* establish kernel global pointer */ \ |
129 | ;; \ | 142 | ;; \ |
143 | ACCOUNT_SYS_ENTER \ | ||
130 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ | 144 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ |
131 | ;; | 145 | ;; |
132 | 146 | ||
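ACCOUNT_GET_STAMP reads ar.itc while still in bank 0 so the kernel-entry timestamp is captured as early as possible, and ACCOUNT_SYS_ENTER then charges the elapsed user time. A simplified sketch of the accounting split (names and layout are hypothetical, not the series' actual account_sys_enter):

/*
 * Cycle-based time accounting: the ar.itc value captured at kernel
 * entry is the boundary between user and system time for this task.
 */
struct vtime_sketch {
	unsigned long last_stamp;	/* ar.itc at the previous boundary */
	unsigned long utime_cycles;
	unsigned long stime_cycles;
};

static void account_enter_sketch(struct vtime_sketch *vt, unsigned long itc)
{
	vt->utime_cycles += itc - vt->last_stamp; /* user ran until 'itc' */
	vt->last_stamp = itc;			  /* system time starts here */
}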
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index a78b45f5fe2f..c93420c97409 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c | |||
@@ -73,7 +73,7 @@ void __init build_cpu_to_node_map(void) | |||
73 | for(node=0; node < MAX_NUMNODES; node++) | 73 | for(node=0; node < MAX_NUMNODES; node++) |
74 | cpus_clear(node_to_cpu_mask[node]); | 74 | cpus_clear(node_to_cpu_mask[node]); |
75 | 75 | ||
76 | for(cpu = 0; cpu < NR_CPUS; ++cpu) { | 76 | for_each_possible_early_cpu(cpu) { |
77 | node = -1; | 77 | node = -1; |
78 | for (i = 0; i < NR_CPUS; ++i) | 78 | for (i = 0; i < NR_CPUS; ++i) |
79 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { | 79 | if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { |
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index 2cb9425e0421..e0dca8743dbb 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c | |||
@@ -135,10 +135,10 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) | |||
135 | 135 | ||
136 | while (offp < (s32 *) end) { | 136 | while (offp < (s32 *) end) { |
137 | wp = (u64 *) ia64_imva((char *) offp + *offp); | 137 | wp = (u64 *) ia64_imva((char *) offp + *offp); |
138 | wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ | 138 | wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ |
139 | wp[1] = 0x0004000000000200UL; | 139 | wp[1] = 0x0084006880000200UL; |
140 | wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ | 140 | wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ |
141 | wp[3] = 0x0084006880000200UL; | 141 | wp[3] = 0x0004000000000200UL; |
142 | ia64_fc(wp); ia64_fc(wp + 2); | 142 | ia64_fc(wp); ia64_fc(wp + 2); |
143 | ++offp; | 143 | ++offp; |
144 | } | 144 | } |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index a2aabfdc80d9..d1d24f4598da 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx) | |||
4204 | do_each_thread (g, t) { | 4204 | do_each_thread (g, t) { |
4205 | if (t->thread.pfm_context == ctx) { | 4205 | if (t->thread.pfm_context == ctx) { |
4206 | ret = 0; | 4206 | ret = 0; |
4207 | break; | 4207 | goto out; |
4208 | } | 4208 | } |
4209 | } while_each_thread (g, t); | 4209 | } while_each_thread (g, t); |
4210 | 4210 | out: | |
4211 | read_unlock(&tasklist_lock); | 4211 | read_unlock(&tasklist_lock); |
4212 | 4212 | ||
4213 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); | 4213 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); |
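The break-to-goto change is needed because do_each_thread()/while_each_thread() expand to two nested loops, so a bare break only leaves the inner per-process thread walk. Roughly, per the sched.h of this era:

#define do_each_thread(g, t) \
	for (g = t = &init_task; (g = t = next_task(g)) != &init_task; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

/*
 * A "break" in the body exits only the do/while over one process's
 * threads; the outer for keeps iterating processes.  Jumping to the
 * label after the construct is the reliable way out.
 */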
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 49937a383b23..a5ea817cbcbf 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -625,21 +625,6 @@ do_dump_fpu (struct unw_frame_info *info, void *arg) | |||
625 | do_dump_task_fpu(current, info, arg); | 625 | do_dump_task_fpu(current, info, arg); |
626 | } | 626 | } |
627 | 627 | ||
628 | int | ||
629 | dump_task_regs(struct task_struct *task, elf_gregset_t *regs) | ||
630 | { | ||
631 | struct unw_frame_info tcore_info; | ||
632 | |||
633 | if (current == task) { | ||
634 | unw_init_running(do_copy_regs, regs); | ||
635 | } else { | ||
636 | memset(&tcore_info, 0, sizeof(tcore_info)); | ||
637 | unw_init_from_blocked_task(&tcore_info, task); | ||
638 | do_copy_task_regs(task, &tcore_info, regs); | ||
639 | } | ||
640 | return 1; | ||
641 | } | ||
642 | |||
643 | void | 628 | void |
644 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) | 629 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) |
645 | { | 630 | { |
@@ -647,21 +632,6 @@ ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) | |||
647 | } | 632 | } |
648 | 633 | ||
649 | int | 634 | int |
650 | dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) | ||
651 | { | ||
652 | struct unw_frame_info tcore_info; | ||
653 | |||
654 | if (current == task) { | ||
655 | unw_init_running(do_dump_fpu, dst); | ||
656 | } else { | ||
657 | memset(&tcore_info, 0, sizeof(tcore_info)); | ||
658 | unw_init_from_blocked_task(&tcore_info, task); | ||
659 | do_dump_task_fpu(task, &tcore_info, dst); | ||
660 | } | ||
661 | return 1; | ||
662 | } | ||
663 | |||
664 | int | ||
665 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) | 635 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) |
666 | { | 636 | { |
667 | unw_init_running(do_dump_fpu, dst); | 637 | unw_init_running(do_dump_fpu, dst); |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index ab784ec4319d..2a9943b5947f 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1999-2005 Hewlett-Packard Co | 4 | * Copyright (C) 1999-2005 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * Copyright (C) 2006 Intel Co | ||
7 | * 2006-08-12 - IA64 Native Utrace implementation support added by | ||
8 | * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | ||
6 | * | 9 | * |
7 | * Derived from the x86 and Alpha versions. | 10 | * Derived from the x86 and Alpha versions. |
8 | */ | 11 | */ |
@@ -17,6 +20,8 @@ | |||
17 | #include <linux/security.h> | 20 | #include <linux/security.h> |
18 | #include <linux/audit.h> | 21 | #include <linux/audit.h> |
19 | #include <linux/signal.h> | 22 | #include <linux/signal.h> |
23 | #include <linux/regset.h> | ||
24 | #include <linux/elf.h> | ||
20 | 25 | ||
21 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
22 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
@@ -740,25 +745,6 @@ ia64_sync_fph (struct task_struct *task) | |||
740 | psr->dfh = 1; | 745 | psr->dfh = 1; |
741 | } | 746 | } |
742 | 747 | ||
743 | static int | ||
744 | access_fr (struct unw_frame_info *info, int regnum, int hi, | ||
745 | unsigned long *data, int write_access) | ||
746 | { | ||
747 | struct ia64_fpreg fpval; | ||
748 | int ret; | ||
749 | |||
750 | ret = unw_get_fr(info, regnum, &fpval); | ||
751 | if (ret < 0) | ||
752 | return ret; | ||
753 | |||
754 | if (write_access) { | ||
755 | fpval.u.bits[hi] = *data; | ||
756 | ret = unw_set_fr(info, regnum, fpval); | ||
757 | } else | ||
758 | *data = fpval.u.bits[hi]; | ||
759 | return ret; | ||
760 | } | ||
761 | |||
762 | /* | 748 | /* |
763 | * Change the machine-state of CHILD such that it will return via the normal | 749 | * Change the machine-state of CHILD such that it will return via the normal |
764 | * kernel exit-path, rather than the syscall-exit path. | 750 | * kernel exit-path, rather than the syscall-exit path. |
@@ -860,309 +846,7 @@ access_nat_bits (struct task_struct *child, struct pt_regs *pt, | |||
860 | 846 | ||
861 | static int | 847 | static int |
862 | access_uarea (struct task_struct *child, unsigned long addr, | 848 | access_uarea (struct task_struct *child, unsigned long addr, |
863 | unsigned long *data, int write_access) | 849 | unsigned long *data, int write_access); |
864 | { | ||
865 | unsigned long *ptr, regnum, urbs_end, cfm; | ||
866 | struct switch_stack *sw; | ||
867 | struct pt_regs *pt; | ||
868 | # define pt_reg_addr(pt, reg) ((void *) \ | ||
869 | ((unsigned long) (pt) \ | ||
870 | + offsetof(struct pt_regs, reg))) | ||
871 | |||
872 | |||
873 | pt = task_pt_regs(child); | ||
874 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
875 | |||
876 | if ((addr & 0x7) != 0) { | ||
877 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | ||
878 | return -1; | ||
879 | } | ||
880 | |||
881 | if (addr < PT_F127 + 16) { | ||
882 | /* accessing fph */ | ||
883 | if (write_access) | ||
884 | ia64_sync_fph(child); | ||
885 | else | ||
886 | ia64_flush_fph(child); | ||
887 | ptr = (unsigned long *) | ||
888 | ((unsigned long) &child->thread.fph + addr); | ||
889 | } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) { | ||
890 | /* scratch registers untouched by kernel (saved in pt_regs) */ | ||
891 | ptr = pt_reg_addr(pt, f10) + (addr - PT_F10); | ||
892 | } else if (addr >= PT_F12 && addr < PT_F15 + 16) { | ||
893 | /* | ||
894 | * Scratch registers untouched by kernel (saved in | ||
895 | * switch_stack). | ||
896 | */ | ||
897 | ptr = (unsigned long *) ((long) sw | ||
898 | + (addr - PT_NAT_BITS - 32)); | ||
899 | } else if (addr < PT_AR_LC + 8) { | ||
900 | /* preserved state: */ | ||
901 | struct unw_frame_info info; | ||
902 | char nat = 0; | ||
903 | int ret; | ||
904 | |||
905 | unw_init_from_blocked_task(&info, child); | ||
906 | if (unw_unwind_to_user(&info) < 0) | ||
907 | return -1; | ||
908 | |||
909 | switch (addr) { | ||
910 | case PT_NAT_BITS: | ||
911 | return access_nat_bits(child, pt, &info, | ||
912 | data, write_access); | ||
913 | |||
914 | case PT_R4: case PT_R5: case PT_R6: case PT_R7: | ||
915 | if (write_access) { | ||
916 | /* read NaT bit first: */ | ||
917 | unsigned long dummy; | ||
918 | |||
919 | ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, | ||
920 | &dummy, &nat); | ||
921 | if (ret < 0) | ||
922 | return ret; | ||
923 | } | ||
924 | return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, | ||
925 | &nat, write_access); | ||
926 | |||
927 | case PT_B1: case PT_B2: case PT_B3: | ||
928 | case PT_B4: case PT_B5: | ||
929 | return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, | ||
930 | write_access); | ||
931 | |||
932 | case PT_AR_EC: | ||
933 | return unw_access_ar(&info, UNW_AR_EC, data, | ||
934 | write_access); | ||
935 | |||
936 | case PT_AR_LC: | ||
937 | return unw_access_ar(&info, UNW_AR_LC, data, | ||
938 | write_access); | ||
939 | |||
940 | default: | ||
941 | if (addr >= PT_F2 && addr < PT_F5 + 16) | ||
942 | return access_fr(&info, (addr - PT_F2)/16 + 2, | ||
943 | (addr & 8) != 0, data, | ||
944 | write_access); | ||
945 | else if (addr >= PT_F16 && addr < PT_F31 + 16) | ||
946 | return access_fr(&info, | ||
947 | (addr - PT_F16)/16 + 16, | ||
948 | (addr & 8) != 0, | ||
949 | data, write_access); | ||
950 | else { | ||
951 | dprintk("ptrace: rejecting access to register " | ||
952 | "address 0x%lx\n", addr); | ||
953 | return -1; | ||
954 | } | ||
955 | } | ||
956 | } else if (addr < PT_F9+16) { | ||
957 | /* scratch state */ | ||
958 | switch (addr) { | ||
959 | case PT_AR_BSP: | ||
960 | /* | ||
961 | * By convention, we use PT_AR_BSP to refer to | ||
962 | * the end of the user-level backing store. | ||
963 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | ||
964 | * to get the real value of ar.bsp at the time | ||
965 | * the kernel was entered. | ||
966 | * | ||
967 | * Furthermore, when changing the contents of | ||
968 | * PT_AR_BSP (or PT_CFM) while the task is | ||
969 | * blocked in a system call, convert the state | ||
970 | * so that the non-system-call exit | ||
971 | * path is used. This ensures that the proper | ||
972 | * state will be picked up when resuming | ||
973 | * execution. However, it *also* means that | ||
974 | * once we write PT_AR_BSP/PT_CFM, it won't be | ||
975 | * possible to modify the syscall arguments of | ||
976 | * the pending system call any longer. This | ||
977 | * shouldn't be an issue because modifying | ||
978 | * PT_AR_BSP/PT_CFM generally implies that | ||
979 | * we're either abandoning the pending system | ||
980 | * call or that we defer its re-execution | ||
981 | * (e.g., due to GDB doing an inferior | ||
982 | * function call). | ||
983 | */ | ||
984 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
985 | if (write_access) { | ||
986 | if (*data != urbs_end) { | ||
987 | if (in_syscall(pt)) | ||
988 | convert_to_non_syscall(child, | ||
989 | pt, | ||
990 | cfm); | ||
991 | /* | ||
992 | * Simulate user-level write | ||
993 | * of ar.bsp: | ||
994 | */ | ||
995 | pt->loadrs = 0; | ||
996 | pt->ar_bspstore = *data; | ||
997 | } | ||
998 | } else | ||
999 | *data = urbs_end; | ||
1000 | return 0; | ||
1001 | |||
1002 | case PT_CFM: | ||
1003 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
1004 | if (write_access) { | ||
1005 | if (((cfm ^ *data) & PFM_MASK) != 0) { | ||
1006 | if (in_syscall(pt)) | ||
1007 | convert_to_non_syscall(child, | ||
1008 | pt, | ||
1009 | cfm); | ||
1010 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | ||
1011 | | (*data & PFM_MASK)); | ||
1012 | } | ||
1013 | } else | ||
1014 | *data = cfm; | ||
1015 | return 0; | ||
1016 | |||
1017 | case PT_CR_IPSR: | ||
1018 | if (write_access) { | ||
1019 | unsigned long tmp = *data; | ||
1020 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | ||
1021 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | ||
1022 | tmp &= ~IA64_PSR_RI; | ||
1023 | pt->cr_ipsr = ((tmp & IPSR_MASK) | ||
1024 | | (pt->cr_ipsr & ~IPSR_MASK)); | ||
1025 | } else | ||
1026 | *data = (pt->cr_ipsr & IPSR_MASK); | ||
1027 | return 0; | ||
1028 | |||
1029 | case PT_AR_RSC: | ||
1030 | if (write_access) | ||
1031 | pt->ar_rsc = *data | (3 << 2); /* force PL3 */ | ||
1032 | else | ||
1033 | *data = pt->ar_rsc; | ||
1034 | return 0; | ||
1035 | |||
1036 | case PT_AR_RNAT: | ||
1037 | ptr = pt_reg_addr(pt, ar_rnat); | ||
1038 | break; | ||
1039 | case PT_R1: | ||
1040 | ptr = pt_reg_addr(pt, r1); | ||
1041 | break; | ||
1042 | case PT_R2: case PT_R3: | ||
1043 | ptr = pt_reg_addr(pt, r2) + (addr - PT_R2); | ||
1044 | break; | ||
1045 | case PT_R8: case PT_R9: case PT_R10: case PT_R11: | ||
1046 | ptr = pt_reg_addr(pt, r8) + (addr - PT_R8); | ||
1047 | break; | ||
1048 | case PT_R12: case PT_R13: | ||
1049 | ptr = pt_reg_addr(pt, r12) + (addr - PT_R12); | ||
1050 | break; | ||
1051 | case PT_R14: | ||
1052 | ptr = pt_reg_addr(pt, r14); | ||
1053 | break; | ||
1054 | case PT_R15: | ||
1055 | ptr = pt_reg_addr(pt, r15); | ||
1056 | break; | ||
1057 | case PT_R16: case PT_R17: case PT_R18: case PT_R19: | ||
1058 | case PT_R20: case PT_R21: case PT_R22: case PT_R23: | ||
1059 | case PT_R24: case PT_R25: case PT_R26: case PT_R27: | ||
1060 | case PT_R28: case PT_R29: case PT_R30: case PT_R31: | ||
1061 | ptr = pt_reg_addr(pt, r16) + (addr - PT_R16); | ||
1062 | break; | ||
1063 | case PT_B0: | ||
1064 | ptr = pt_reg_addr(pt, b0); | ||
1065 | break; | ||
1066 | case PT_B6: | ||
1067 | ptr = pt_reg_addr(pt, b6); | ||
1068 | break; | ||
1069 | case PT_B7: | ||
1070 | ptr = pt_reg_addr(pt, b7); | ||
1071 | break; | ||
1072 | case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8: | ||
1073 | case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8: | ||
1074 | ptr = pt_reg_addr(pt, f6) + (addr - PT_F6); | ||
1075 | break; | ||
1076 | case PT_AR_BSPSTORE: | ||
1077 | ptr = pt_reg_addr(pt, ar_bspstore); | ||
1078 | break; | ||
1079 | case PT_AR_UNAT: | ||
1080 | ptr = pt_reg_addr(pt, ar_unat); | ||
1081 | break; | ||
1082 | case PT_AR_PFS: | ||
1083 | ptr = pt_reg_addr(pt, ar_pfs); | ||
1084 | break; | ||
1085 | case PT_AR_CCV: | ||
1086 | ptr = pt_reg_addr(pt, ar_ccv); | ||
1087 | break; | ||
1088 | case PT_AR_FPSR: | ||
1089 | ptr = pt_reg_addr(pt, ar_fpsr); | ||
1090 | break; | ||
1091 | case PT_CR_IIP: | ||
1092 | ptr = pt_reg_addr(pt, cr_iip); | ||
1093 | break; | ||
1094 | case PT_PR: | ||
1095 | ptr = pt_reg_addr(pt, pr); | ||
1096 | break; | ||
1097 | /* scratch register */ | ||
1098 | |||
1099 | default: | ||
1100 | /* disallow accessing anything else... */ | ||
1101 | dprintk("ptrace: rejecting access to register " | ||
1102 | "address 0x%lx\n", addr); | ||
1103 | return -1; | ||
1104 | } | ||
1105 | } else if (addr <= PT_AR_SSD) { | ||
1106 | ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD); | ||
1107 | } else { | ||
1108 | /* access debug registers */ | ||
1109 | |||
1110 | if (addr >= PT_IBR) { | ||
1111 | regnum = (addr - PT_IBR) >> 3; | ||
1112 | ptr = &child->thread.ibr[0]; | ||
1113 | } else { | ||
1114 | regnum = (addr - PT_DBR) >> 3; | ||
1115 | ptr = &child->thread.dbr[0]; | ||
1116 | } | ||
1117 | |||
1118 | if (regnum >= 8) { | ||
1119 | dprintk("ptrace: rejecting access to register " | ||
1120 | "address 0x%lx\n", addr); | ||
1121 | return -1; | ||
1122 | } | ||
1123 | #ifdef CONFIG_PERFMON | ||
1124 | /* | ||
1125 | * Check if debug registers are used by perfmon. This | ||
1126 | * test must be done once we know that we can do the | ||
1127 | * operation, i.e. the arguments are all valid, but | ||
1128 | * before we start modifying the state. | ||
1129 | * | ||
1130 | * Perfmon needs to keep a count of how many processes | ||
1131 | * are trying to modify the debug registers for system | ||
1132 | * wide monitoring sessions. | ||
1133 | * | ||
1134 | * We also include read access here, because they may | ||
1135 | * cause the PMU-installed debug register state | ||
1136 | * (dbr[], ibr[]) to be reset. The two arrays are also | ||
1137 | * used by perfmon, but we do not use | ||
1138 | * IA64_THREAD_DBG_VALID. The registers are restored | ||
1139 | * by the PMU context switch code. | ||
1140 | */ | ||
1141 | if (pfm_use_debug_registers(child)) return -1; | ||
1142 | #endif | ||
1143 | |||
1144 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | ||
1145 | child->thread.flags |= IA64_THREAD_DBG_VALID; | ||
1146 | memset(child->thread.dbr, 0, | ||
1147 | sizeof(child->thread.dbr)); | ||
1148 | memset(child->thread.ibr, 0, | ||
1149 | sizeof(child->thread.ibr)); | ||
1150 | } | ||
1151 | |||
1152 | ptr += regnum; | ||
1153 | |||
1154 | if ((regnum & 1) && write_access) { | ||
1155 | /* don't let the user set kernel-level breakpoints: */ | ||
1156 | *ptr = *data & ~(7UL << 56); | ||
1157 | return 0; | ||
1158 | } | ||
1159 | } | ||
1160 | if (write_access) | ||
1161 | *ptr = *data; | ||
1162 | else | ||
1163 | *data = *ptr; | ||
1164 | return 0; | ||
1165 | } | ||
1166 | 850 | ||
1167 | static long | 851 | static long |
1168 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | 852 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) |
@@ -1626,3 +1310,892 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |||
1626 | if (test_thread_flag(TIF_RESTORE_RSE)) | 1310 | if (test_thread_flag(TIF_RESTORE_RSE)) |
1627 | ia64_sync_krbs(); | 1311 | ia64_sync_krbs(); |
1628 | } | 1312 | } |
1313 | |||
1314 | /* Utrace implementation starts here */ | ||
1315 | struct regset_get { | ||
1316 | void *kbuf; | ||
1317 | void __user *ubuf; | ||
1318 | }; | ||
1319 | |||
1320 | struct regset_set { | ||
1321 | const void *kbuf; | ||
1322 | const void __user *ubuf; | ||
1323 | }; | ||
1324 | |||
1325 | struct regset_getset { | ||
1326 | struct task_struct *target; | ||
1327 | const struct user_regset *regset; | ||
1328 | union { | ||
1329 | struct regset_get get; | ||
1330 | struct regset_set set; | ||
1331 | } u; | ||
1332 | unsigned int pos; | ||
1333 | unsigned int count; | ||
1334 | int ret; | ||
1335 | }; | ||
1336 | |||
1337 | static int | ||
1338 | access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, | ||
1339 | unsigned long addr, unsigned long *data, int write_access) | ||
1340 | { | ||
1341 | struct pt_regs *pt; | ||
1342 | unsigned long *ptr = NULL; | ||
1343 | int ret; | ||
1344 | char nat = 0; | ||
1345 | |||
1346 | pt = task_pt_regs(target); | ||
1347 | switch (addr) { | ||
1348 | case ELF_GR_OFFSET(1): | ||
1349 | ptr = &pt->r1; | ||
1350 | break; | ||
1351 | case ELF_GR_OFFSET(2): | ||
1352 | case ELF_GR_OFFSET(3): | ||
1353 | ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2)); | ||
1354 | break; | ||
1355 | case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7): | ||
1356 | if (write_access) { | ||
1357 | /* read NaT bit first: */ | ||
1358 | unsigned long dummy; | ||
1359 | |||
1360 | ret = unw_get_gr(info, addr/8, &dummy, &nat); | ||
1361 | if (ret < 0) | ||
1362 | return ret; | ||
1363 | } | ||
1364 | return unw_access_gr(info, addr/8, data, &nat, write_access); | ||
1365 | case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11): | ||
1366 | ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8); | ||
1367 | break; | ||
1368 | case ELF_GR_OFFSET(12): | ||
1369 | case ELF_GR_OFFSET(13): | ||
1370 | ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12); | ||
1371 | break; | ||
1372 | case ELF_GR_OFFSET(14): | ||
1373 | ptr = &pt->r14; | ||
1374 | break; | ||
1375 | case ELF_GR_OFFSET(15): | ||
1376 | ptr = &pt->r15; | ||
1377 | } | ||
1378 | if (write_access) | ||
1379 | *ptr = *data; | ||
1380 | else | ||
1381 | *data = *ptr; | ||
1382 | return 0; | ||
1383 | } | ||
1384 | |||
1385 | static int | ||
1386 | access_elf_breg(struct task_struct *target, struct unw_frame_info *info, | ||
1387 | unsigned long addr, unsigned long *data, int write_access) | ||
1388 | { | ||
1389 | struct pt_regs *pt; | ||
1390 | unsigned long *ptr = NULL; | ||
1391 | |||
1392 | pt = task_pt_regs(target); | ||
1393 | switch (addr) { | ||
1394 | case ELF_BR_OFFSET(0): | ||
1395 | ptr = &pt->b0; | ||
1396 | break; | ||
1397 | case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5): | ||
1398 | return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8, | ||
1399 | data, write_access); | ||
1400 | case ELF_BR_OFFSET(6): | ||
1401 | ptr = &pt->b6; | ||
1402 | break; | ||
1403 | case ELF_BR_OFFSET(7): | ||
1404 | ptr = &pt->b7; | ||
1405 | } | ||
1406 | if (write_access) | ||
1407 | *ptr = *data; | ||
1408 | else | ||
1409 | *data = *ptr; | ||
1410 | return 0; | ||
1411 | } | ||
1412 | |||
1413 | static int | ||
1414 | access_elf_areg(struct task_struct *target, struct unw_frame_info *info, | ||
1415 | unsigned long addr, unsigned long *data, int write_access) | ||
1416 | { | ||
1417 | struct pt_regs *pt; | ||
1418 | unsigned long cfm, urbs_end; | ||
1419 | unsigned long *ptr = NULL; | ||
1420 | |||
1421 | pt = task_pt_regs(target); | ||
1422 | if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) { | ||
1423 | switch (addr) { | ||
1424 | case ELF_AR_RSC_OFFSET: | ||
1425 | /* force PL3 */ | ||
1426 | if (write_access) | ||
1427 | pt->ar_rsc = *data | (3 << 2); | ||
1428 | else | ||
1429 | *data = pt->ar_rsc; | ||
1430 | return 0; | ||
1431 | case ELF_AR_BSP_OFFSET: | ||
1432 | /* | ||
1433 | * By convention, we use PT_AR_BSP to refer to | ||
1434 | * the end of the user-level backing store. | ||
1435 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | ||
1436 | * to get the real value of ar.bsp at the time | ||
1437 | * the kernel was entered. | ||
1438 | * | ||
1439 | * Furthermore, when changing the contents of | ||
1440 | * PT_AR_BSP (or PT_CFM) while the task is | ||
1441 | * blocked in a system call, convert the state | ||
1442 | * so that the non-system-call exit | ||
1443 | * path is used. This ensures that the proper | ||
1444 | * state will be picked up when resuming | ||
1445 | * execution. However, it *also* means that | ||
1446 | * once we write PT_AR_BSP/PT_CFM, it won't be | ||
1447 | * possible to modify the syscall arguments of | ||
1448 | * the pending system call any longer. This | ||
1449 | * shouldn't be an issue because modifying | ||
1450 | * PT_AR_BSP/PT_CFM generally implies that | ||
1451 | * we're either abandoning the pending system | ||
1452 | * call or that we defer its re-execution | ||
1453 | * (e.g., due to GDB doing an inferior | ||
1454 | * function call). | ||
1455 | */ | ||
1456 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | ||
1457 | if (write_access) { | ||
1458 | if (*data != urbs_end) { | ||
1459 | if (in_syscall(pt)) | ||
1460 | convert_to_non_syscall(target, | ||
1461 | pt, | ||
1462 | cfm); | ||
1463 | /* | ||
1464 | * Simulate user-level write | ||
1465 | * of ar.bsp: | ||
1466 | */ | ||
1467 | pt->loadrs = 0; | ||
1468 | pt->ar_bspstore = *data; | ||
1469 | } | ||
1470 | } else | ||
1471 | *data = urbs_end; | ||
1472 | return 0; | ||
1473 | case ELF_AR_BSPSTORE_OFFSET: | ||
1474 | ptr = &pt->ar_bspstore; | ||
1475 | break; | ||
1476 | case ELF_AR_RNAT_OFFSET: | ||
1477 | ptr = &pt->ar_rnat; | ||
1478 | break; | ||
1479 | case ELF_AR_CCV_OFFSET: | ||
1480 | ptr = &pt->ar_ccv; | ||
1481 | break; | ||
1482 | case ELF_AR_UNAT_OFFSET: | ||
1483 | ptr = &pt->ar_unat; | ||
1484 | break; | ||
1485 | case ELF_AR_FPSR_OFFSET: | ||
1486 | ptr = &pt->ar_fpsr; | ||
1487 | break; | ||
1488 | case ELF_AR_PFS_OFFSET: | ||
1489 | ptr = &pt->ar_pfs; | ||
1490 | break; | ||
1491 | case ELF_AR_LC_OFFSET: | ||
1492 | return unw_access_ar(info, UNW_AR_LC, data, | ||
1493 | write_access); | ||
1494 | case ELF_AR_EC_OFFSET: | ||
1495 | return unw_access_ar(info, UNW_AR_EC, data, | ||
1496 | write_access); | ||
1497 | case ELF_AR_CSD_OFFSET: | ||
1498 | ptr = &pt->ar_csd; | ||
1499 | break; | ||
1500 | case ELF_AR_SSD_OFFSET: | ||
1501 | ptr = &pt->ar_ssd; | ||
1502 | } | ||
1503 | } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) { | ||
1504 | switch (addr) { | ||
1505 | case ELF_CR_IIP_OFFSET: | ||
1506 | ptr = &pt->cr_iip; | ||
1507 | break; | ||
1508 | case ELF_CFM_OFFSET: | ||
1509 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | ||
1510 | if (write_access) { | ||
1511 | if (((cfm ^ *data) & PFM_MASK) != 0) { | ||
1512 | if (in_syscall(pt)) | ||
1513 | convert_to_non_syscall(target, | ||
1514 | pt, | ||
1515 | cfm); | ||
1516 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | ||
1517 | | (*data & PFM_MASK)); | ||
1518 | } | ||
1519 | } else | ||
1520 | *data = cfm; | ||
1521 | return 0; | ||
1522 | case ELF_CR_IPSR_OFFSET: | ||
1523 | if (write_access) { | ||
1524 | unsigned long tmp = *data; | ||
1525 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | ||
1526 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | ||
1527 | tmp &= ~IA64_PSR_RI; | ||
1528 | pt->cr_ipsr = ((tmp & IPSR_MASK) | ||
1529 | | (pt->cr_ipsr & ~IPSR_MASK)); | ||
1530 | } else | ||
1531 | *data = (pt->cr_ipsr & IPSR_MASK); | ||
1532 | return 0; | ||
1533 | } | ||
1534 | } else if (addr == ELF_NAT_OFFSET) | ||
1535 | return access_nat_bits(target, pt, info, | ||
1536 | data, write_access); | ||
1537 | else if (addr == ELF_PR_OFFSET) | ||
1538 | ptr = &pt->pr; | ||
1539 | else | ||
1540 | return -1; | ||
1541 | |||
1542 | if (write_access) | ||
1543 | *ptr = *data; | ||
1544 | else | ||
1545 | *data = *ptr; | ||
1546 | |||
1547 | return 0; | ||
1548 | } | ||
1549 | |||
1550 | static int | ||
1551 | access_elf_reg(struct task_struct *target, struct unw_frame_info *info, | ||
1552 | unsigned long addr, unsigned long *data, int write_access) | ||
1553 | { | ||
1554 | if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15)) | ||
1555 | return access_elf_gpreg(target, info, addr, data, write_access); | ||
1556 | else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) | ||
1557 | return access_elf_breg(target, info, addr, data, write_access); | ||
1558 | else | ||
1559 | return access_elf_areg(target, info, addr, data, write_access); | ||
1560 | } | ||
1561 | |||
1562 | void do_gpregs_get(struct unw_frame_info *info, void *arg) | ||
1563 | { | ||
1564 | struct pt_regs *pt; | ||
1565 | struct regset_getset *dst = arg; | ||
1566 | elf_greg_t tmp[16]; | ||
1567 | unsigned int i, index, min_copy; | ||
1568 | |||
1569 | if (unw_unwind_to_user(info) < 0) | ||
1570 | return; | ||
1571 | |||
1572 | /* | ||
1573 | * coredump format: | ||
1574 | * r0-r31 | ||
1575 | * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) | ||
1576 | * predicate registers (p0-p63) | ||
1577 | * b0-b7 | ||
1578 | * ip cfm user-mask | ||
1579 | * ar.rsc ar.bsp ar.bspstore ar.rnat | ||
1580 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec | ||
1581 | */ | ||
1582 | |||
1583 | |||
1584 | /* Skip r0 */ | ||
1585 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | ||
1586 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | ||
1587 | &dst->u.get.kbuf, | ||
1588 | &dst->u.get.ubuf, | ||
1589 | 0, ELF_GR_OFFSET(1)); | ||
1590 | if (dst->ret || dst->count == 0) | ||
1591 | return; | ||
1592 | } | ||
1593 | |||
1594 | /* gr1 - gr15 */ | ||
1595 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | ||
1596 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | ||
1597 | min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ? | ||
1598 | (dst->pos + dst->count) : ELF_GR_OFFSET(16); | ||
1599 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
1600 | index++) | ||
1601 | if (access_elf_reg(dst->target, info, i, | ||
1602 | &tmp[index], 0) < 0) { | ||
1603 | dst->ret = -EIO; | ||
1604 | return; | ||
1605 | } | ||
1606 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
1607 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
1608 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | ||
1609 | if (dst->ret || dst->count == 0) | ||
1610 | return; | ||
1611 | } | ||
1612 | |||
1613 | /* r16-r31 */ | ||
1614 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | ||
1615 | pt = task_pt_regs(dst->target); | ||
1616 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
1617 | &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16, | ||
1618 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | ||
1619 | if (dst->ret || dst->count == 0) | ||
1620 | return; | ||
1621 | } | ||
1622 | |||
1623 | /* nat, pr, b0 - b7 */ | ||
1624 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | ||
1625 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | ||
1626 | min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ? | ||
1627 | (dst->pos + dst->count) : ELF_CR_IIP_OFFSET; | ||
1628 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
1629 | index++) | ||
1630 | if (access_elf_reg(dst->target, info, i, | ||
1631 | &tmp[index], 0) < 0) { | ||
1632 | dst->ret = -EIO; | ||
1633 | return; | ||
1634 | } | ||
1635 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
1636 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
1637 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | ||
1638 | if (dst->ret || dst->count == 0) | ||
1639 | return; | ||
1640 | } | ||
1641 | |||
1642 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | ||
1643 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | ||
1644 | */ | ||
1645 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | ||
1646 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | ||
1647 | min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ? | ||
1648 | (dst->pos + dst->count) : ELF_AR_END_OFFSET; | ||
1649 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | ||
1650 | index++) | ||
1651 | if (access_elf_reg(dst->target, info, i, | ||
1652 | &tmp[index], 0) < 0) { | ||
1653 | dst->ret = -EIO; | ||
1654 | return; | ||
1655 | } | ||
1656 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
1657 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
1658 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | ||
1659 | } | ||
1660 | } | ||
1661 | |||
1662 | void do_gpregs_set(struct unw_frame_info *info, void *arg) | ||
1663 | { | ||
1664 | struct pt_regs *pt; | ||
1665 | struct regset_getset *dst = arg; | ||
1666 | elf_greg_t tmp[16]; | ||
1667 | unsigned int i, index; | ||
1668 | |||
1669 | if (unw_unwind_to_user(info) < 0) | ||
1670 | return; | ||
1671 | |||
1672 | /* Skip r0 */ | ||
1673 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | ||
1674 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | ||
1675 | &dst->u.set.kbuf, | ||
1676 | &dst->u.set.ubuf, | ||
1677 | 0, ELF_GR_OFFSET(1)); | ||
1678 | if (dst->ret || dst->count == 0) | ||
1679 | return; | ||
1680 | } | ||
1681 | |||
1682 | /* gr1-gr15 */ | ||
1683 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | ||
1684 | i = dst->pos; | ||
1685 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | ||
1686 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1687 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
1688 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | ||
1689 | if (dst->ret) | ||
1690 | return; | ||
1691 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
1692 | if (access_elf_reg(dst->target, info, i, | ||
1693 | &tmp[index], 1) < 0) { | ||
1694 | dst->ret = -EIO; | ||
1695 | return; | ||
1696 | } | ||
1697 | if (dst->count == 0) | ||
1698 | return; | ||
1699 | } | ||
1700 | |||
1701 | /* gr16-gr31 */ | ||
1702 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | ||
1703 | pt = task_pt_regs(dst->target); | ||
1704 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1705 | &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16, | ||
1706 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | ||
1707 | if (dst->ret || dst->count == 0) | ||
1708 | return; | ||
1709 | } | ||
1710 | |||
1711 | /* nat, pr, b0 - b7 */ | ||
1712 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | ||
1713 | i = dst->pos; | ||
1714 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | ||
1715 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1716 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
1717 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | ||
1718 | if (dst->ret) | ||
1719 | return; | ||
1720 | for (; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
1721 | if (access_elf_reg(dst->target, info, i, | ||
1722 | &tmp[index], 1) < 0) { | ||
1723 | dst->ret = -EIO; | ||
1724 | return; | ||
1725 | } | ||
1726 | if (dst->count == 0) | ||
1727 | return; | ||
1728 | } | ||
1729 | |||
1730 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | ||
1731 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | ||
1732 | */ | ||
1733 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | ||
1734 | i = dst->pos; | ||
1735 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | ||
1736 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1737 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
1738 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | ||
1739 | if (dst->ret) | ||
1740 | return; | ||
1741 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | ||
1742 | if (access_elf_reg(dst->target, info, i, | ||
1743 | &tmp[index], 1) < 0) { | ||
1744 | dst->ret = -EIO; | ||
1745 | return; | ||
1746 | } | ||
1747 | } | ||
1748 | } | ||
1749 | |||
1750 | #define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t)) | ||
1751 | |||
1752 | void do_fpregs_get(struct unw_frame_info *info, void *arg) | ||
1753 | { | ||
1754 | struct regset_getset *dst = arg; | ||
1755 | struct task_struct *task = dst->target; | ||
1756 | elf_fpreg_t tmp[30]; | ||
1757 | int index, min_copy, i; | ||
1758 | |||
1759 | if (unw_unwind_to_user(info) < 0) | ||
1760 | return; | ||
1761 | |||
1762 | /* Skip pos 0 and 1 */ | ||
1763 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | ||
1764 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | ||
1765 | &dst->u.get.kbuf, | ||
1766 | &dst->u.get.ubuf, | ||
1767 | 0, ELF_FP_OFFSET(2)); | ||
1768 | if (dst->count == 0 || dst->ret) | ||
1769 | return; | ||
1770 | } | ||
1771 | |||
1772 | /* fr2-fr31 */ | ||
1773 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | ||
1774 | index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t); | ||
1775 | |||
1776 | min_copy = min(((unsigned int)ELF_FP_OFFSET(32)), | ||
1777 | dst->pos + dst->count); | ||
1778 | for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), | ||
1779 | index++) | ||
1780 | if (unw_get_fr(info, i / sizeof(elf_fpreg_t), | ||
1781 | &tmp[index])) { | ||
1782 | dst->ret = -EIO; | ||
1783 | return; | ||
1784 | } | ||
1785 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | ||
1786 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | ||
1787 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | ||
1788 | if (dst->count == 0 || dst->ret) | ||
1789 | return; | ||
1790 | } | ||
1791 | |||
1792 | /* fph */ | ||
1793 | if (dst->count > 0) { | ||
1794 | ia64_flush_fph(dst->target); | ||
1795 | if (task->thread.flags & IA64_THREAD_FPH_VALID) | ||
1796 | dst->ret = user_regset_copyout( | ||
1797 | &dst->pos, &dst->count, | ||
1798 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
1799 | &dst->target->thread.fph, | ||
1800 | ELF_FP_OFFSET(32), -1); | ||
1801 | else | ||
1802 | /* Zero fill instead. */ | ||
1803 | dst->ret = user_regset_copyout_zero( | ||
1804 | &dst->pos, &dst->count, | ||
1805 | &dst->u.get.kbuf, &dst->u.get.ubuf, | ||
1806 | ELF_FP_OFFSET(32), -1); | ||
1807 | } | ||
1808 | } | ||
1809 | |||
1810 | void do_fpregs_set(struct unw_frame_info *info, void *arg) | ||
1811 | { | ||
1812 | struct regset_getset *dst = arg; | ||
1813 | elf_fpreg_t fpreg, tmp[30]; | ||
1814 | int index, start, end; | ||
1815 | |||
1816 | if (unw_unwind_to_user(info) < 0) | ||
1817 | return; | ||
1818 | |||
1819 | /* Skip pos 0 and 1 */ | ||
1820 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | ||
1821 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | ||
1822 | &dst->u.set.kbuf, | ||
1823 | &dst->u.set.ubuf, | ||
1824 | 0, ELF_FP_OFFSET(2)); | ||
1825 | if (dst->count == 0 || dst->ret) | ||
1826 | return; | ||
1827 | } | ||
1828 | |||
1829 | /* fr2-fr31 */ | ||
1830 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | ||
1831 | start = dst->pos; | ||
1832 | end = min(((unsigned int)ELF_FP_OFFSET(32)), | ||
1833 | dst->pos + dst->count); | ||
1834 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1835 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | ||
1836 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | ||
1837 | if (dst->ret) | ||
1838 | return; | ||
1839 | |||
1840 | if (start & 0xF) { /* only write high part */ | ||
1841 | if (unw_get_fr(info, start / sizeof(elf_fpreg_t), | ||
1842 | &fpreg)) { | ||
1843 | dst->ret = -EIO; | ||
1844 | return; | ||
1845 | } | ||
1846 | tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] | ||
1847 | = fpreg.u.bits[0]; | ||
1848 | start &= ~0xFUL; | ||
1849 | } | ||
1850 | if (end & 0xF) { /* only write low part */ | ||
1851 | if (unw_get_fr(info, end / sizeof(elf_fpreg_t), | ||
1852 | &fpreg)) { | ||
1853 | dst->ret = -EIO; | ||
1854 | return; | ||
1855 | } | ||
1856 | tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1] | ||
1857 | = fpreg.u.bits[1]; | ||
1858 | end = (end + 0xF) & ~0xFUL; | ||
1859 | } | ||
1860 | |||
1861 | for ( ; start < end ; start += sizeof(elf_fpreg_t)) { | ||
1862 | index = start / sizeof(elf_fpreg_t); | ||
1863 | if (unw_set_fr(info, index, tmp[index - 2])) { | ||
1864 | dst->ret = -EIO; | ||
1865 | return; | ||
1866 | } | ||
1867 | } | ||
1868 | if (dst->ret || dst->count == 0) | ||
1869 | return; | ||
1870 | } | ||
1871 | |||
1872 | /* fph */ | ||
1873 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) { | ||
1874 | ia64_sync_fph(dst->target); | ||
1875 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | ||
1876 | &dst->u.set.kbuf, | ||
1877 | &dst->u.set.ubuf, | ||
1878 | &dst->target->thread.fph, | ||
1879 | ELF_FP_OFFSET(32), -1); | ||
1880 | } | ||
1881 | } | ||
1882 | |||
1883 | static int | ||
1884 | do_regset_call(void (*call)(struct unw_frame_info *, void *), | ||
1885 | struct task_struct *target, | ||
1886 | const struct user_regset *regset, | ||
1887 | unsigned int pos, unsigned int count, | ||
1888 | const void *kbuf, const void __user *ubuf) | ||
1889 | { | ||
1890 | struct regset_getset info = { .target = target, .regset = regset, | ||
1891 | .pos = pos, .count = count, | ||
1892 | .u.set = { .kbuf = kbuf, .ubuf = ubuf }, | ||
1893 | .ret = 0 }; | ||
1894 | |||
1895 | if (target == current) | ||
1896 | unw_init_running(call, &info); | ||
1897 | else { | ||
1898 | struct unw_frame_info ufi; | ||
1899 | memset(&ufi, 0, sizeof(ufi)); | ||
1900 | unw_init_from_blocked_task(&ufi, target); | ||
1901 | (*call)(&ufi, &info); | ||
1902 | } | ||
1903 | |||
1904 | return info.ret; | ||
1905 | } | ||
1906 | |||
1907 | static int | ||
1908 | gpregs_get(struct task_struct *target, | ||
1909 | const struct user_regset *regset, | ||
1910 | unsigned int pos, unsigned int count, | ||
1911 | void *kbuf, void __user *ubuf) | ||
1912 | { | ||
1913 | return do_regset_call(do_gpregs_get, target, regset, pos, count, | ||
1914 | kbuf, ubuf); | ||
1915 | } | ||
1916 | |||
1917 | static int gpregs_set(struct task_struct *target, | ||
1918 | const struct user_regset *regset, | ||
1919 | unsigned int pos, unsigned int count, | ||
1920 | const void *kbuf, const void __user *ubuf) | ||
1921 | { | ||
1922 | return do_regset_call(do_gpregs_set, target, regset, pos, count, | ||
1923 | kbuf, ubuf); | ||
1924 | } | ||
1925 | |||
1926 | static void do_gpregs_writeback(struct unw_frame_info *info, void *arg) | ||
1927 | { | ||
1928 | do_sync_rbs(info, ia64_sync_user_rbs); | ||
1929 | } | ||
1930 | |||
1931 | /* | ||
1932 | * This is called to write back the register backing store. | ||
1933 | * ptrace does this before it stops, so that a tracer reading the user | ||
1934 | * memory after the thread stops will get the current register data. | ||
1935 | */ | ||
1936 | static int | ||
1937 | gpregs_writeback(struct task_struct *target, | ||
1938 | const struct user_regset *regset, | ||
1939 | int now) | ||
1940 | { | ||
1941 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) | ||
1942 | return 0; | ||
1943 | tsk_set_notify_resume(target); | ||
1944 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, | ||
1945 | NULL, NULL); | ||
1946 | } | ||
1947 | |||
1948 | static int | ||
1949 | fpregs_active(struct task_struct *target, const struct user_regset *regset) | ||
1950 | { | ||
1951 | return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32; | ||
1952 | } | ||
1953 | |||
1954 | static int fpregs_get(struct task_struct *target, | ||
1955 | const struct user_regset *regset, | ||
1956 | unsigned int pos, unsigned int count, | ||
1957 | void *kbuf, void __user *ubuf) | ||
1958 | { | ||
1959 | return do_regset_call(do_fpregs_get, target, regset, pos, count, | ||
1960 | kbuf, ubuf); | ||
1961 | } | ||
1962 | |||
1963 | static int fpregs_set(struct task_struct *target, | ||
1964 | const struct user_regset *regset, | ||
1965 | unsigned int pos, unsigned int count, | ||
1966 | const void *kbuf, const void __user *ubuf) | ||
1967 | { | ||
1968 | return do_regset_call(do_fpregs_set, target, regset, pos, count, | ||
1969 | kbuf, ubuf); | ||
1970 | } | ||
1971 | |||
1972 | static int | ||
1973 | access_uarea(struct task_struct *child, unsigned long addr, | ||
1974 | unsigned long *data, int write_access) | ||
1975 | { | ||
1976 | unsigned int pos = -1; /* an invalid value */ | ||
1977 | int ret; | ||
1978 | unsigned long *ptr, regnum; | ||
1979 | |||
1980 | if ((addr & 0x7) != 0) { | ||
1981 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | ||
1982 | return -1; | ||
1983 | } | ||
1984 | if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) || | ||
1985 | (addr >= PT_R7 + 8 && addr < PT_B1) || | ||
1986 | (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) || | ||
1987 | (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) { | ||
1988 | dprintk("ptrace: rejecting access to register " | ||
1989 | "address 0x%lx\n", addr); | ||
1990 | return -1; | ||
1991 | } | ||
1992 | |||
1993 | switch (addr) { | ||
1994 | case PT_F32 ... (PT_F127 + 15): | ||
1995 | pos = addr - PT_F32 + ELF_FP_OFFSET(32); | ||
1996 | break; | ||
1997 | case PT_F2 ... (PT_F5 + 15): | ||
1998 | pos = addr - PT_F2 + ELF_FP_OFFSET(2); | ||
1999 | break; | ||
2000 | case PT_F10 ... (PT_F31 + 15): | ||
2001 | pos = addr - PT_F10 + ELF_FP_OFFSET(10); | ||
2002 | break; | ||
2003 | case PT_F6 ... (PT_F9 + 15): | ||
2004 | pos = addr - PT_F6 + ELF_FP_OFFSET(6); | ||
2005 | break; | ||
2006 | } | ||
2007 | |||
2008 | if (pos != -1) { | ||
2009 | if (write_access) | ||
2010 | ret = fpregs_set(child, NULL, pos, | ||
2011 | sizeof(unsigned long), data, NULL); | ||
2012 | else | ||
2013 | ret = fpregs_get(child, NULL, pos, | ||
2014 | sizeof(unsigned long), data, NULL); | ||
2015 | if (ret != 0) | ||
2016 | return -1; | ||
2017 | return 0; | ||
2018 | } | ||
2019 | |||
2020 | switch (addr) { | ||
2021 | case PT_NAT_BITS: | ||
2022 | pos = ELF_NAT_OFFSET; | ||
2023 | break; | ||
2024 | case PT_R4 ... PT_R7: | ||
2025 | pos = addr - PT_R4 + ELF_GR_OFFSET(4); | ||
2026 | break; | ||
2027 | case PT_B1 ... PT_B5: | ||
2028 | pos = addr - PT_B1 + ELF_BR_OFFSET(1); | ||
2029 | break; | ||
2030 | case PT_AR_EC: | ||
2031 | pos = ELF_AR_EC_OFFSET; | ||
2032 | break; | ||
2033 | case PT_AR_LC: | ||
2034 | pos = ELF_AR_LC_OFFSET; | ||
2035 | break; | ||
2036 | case PT_CR_IPSR: | ||
2037 | pos = ELF_CR_IPSR_OFFSET; | ||
2038 | break; | ||
2039 | case PT_CR_IIP: | ||
2040 | pos = ELF_CR_IIP_OFFSET; | ||
2041 | break; | ||
2042 | case PT_CFM: | ||
2043 | pos = ELF_CFM_OFFSET; | ||
2044 | break; | ||
2045 | case PT_AR_UNAT: | ||
2046 | pos = ELF_AR_UNAT_OFFSET; | ||
2047 | break; | ||
2048 | case PT_AR_PFS: | ||
2049 | pos = ELF_AR_PFS_OFFSET; | ||
2050 | break; | ||
2051 | case PT_AR_RSC: | ||
2052 | pos = ELF_AR_RSC_OFFSET; | ||
2053 | break; | ||
2054 | case PT_AR_RNAT: | ||
2055 | pos = ELF_AR_RNAT_OFFSET; | ||
2056 | break; | ||
2057 | case PT_AR_BSPSTORE: | ||
2058 | pos = ELF_AR_BSPSTORE_OFFSET; | ||
2059 | break; | ||
2060 | case PT_PR: | ||
2061 | pos = ELF_PR_OFFSET; | ||
2062 | break; | ||
2063 | case PT_B6: | ||
2064 | pos = ELF_BR_OFFSET(6); | ||
2065 | break; | ||
2066 | case PT_AR_BSP: | ||
2067 | pos = ELF_AR_BSP_OFFSET; | ||
2068 | break; | ||
2069 | case PT_R1 ... PT_R3: | ||
2070 | pos = addr - PT_R1 + ELF_GR_OFFSET(1); | ||
2071 | break; | ||
2072 | case PT_R12 ... PT_R15: | ||
2073 | pos = addr - PT_R12 + ELF_GR_OFFSET(12); | ||
2074 | break; | ||
2075 | case PT_R8 ... PT_R11: | ||
2076 | pos = addr - PT_R8 + ELF_GR_OFFSET(8); | ||
2077 | break; | ||
2078 | case PT_R16 ... PT_R31: | ||
2079 | pos = addr - PT_R16 + ELF_GR_OFFSET(16); | ||
2080 | break; | ||
2081 | case PT_AR_CCV: | ||
2082 | pos = ELF_AR_CCV_OFFSET; | ||
2083 | break; | ||
2084 | case PT_AR_FPSR: | ||
2085 | pos = ELF_AR_FPSR_OFFSET; | ||
2086 | break; | ||
2087 | case PT_B0: | ||
2088 | pos = ELF_BR_OFFSET(0); | ||
2089 | break; | ||
2090 | case PT_B7: | ||
2091 | pos = ELF_BR_OFFSET(7); | ||
2092 | break; | ||
2093 | case PT_AR_CSD: | ||
2094 | pos = ELF_AR_CSD_OFFSET; | ||
2095 | break; | ||
2096 | case PT_AR_SSD: | ||
2097 | pos = ELF_AR_SSD_OFFSET; | ||
2098 | break; | ||
2099 | } | ||
2100 | |||
2101 | if (pos != -1) { | ||
2102 | if (write_access) | ||
2103 | ret = gpregs_set(child, NULL, pos, | ||
2104 | sizeof(unsigned long), data, NULL); | ||
2105 | else | ||
2106 | ret = gpregs_get(child, NULL, pos, | ||
2107 | sizeof(unsigned long), data, NULL); | ||
2108 | if (ret != 0) | ||
2109 | return -1; | ||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | /* access debug registers */ | ||
2114 | if (addr >= PT_IBR) { | ||
2115 | regnum = (addr - PT_IBR) >> 3; | ||
2116 | ptr = &child->thread.ibr[0]; | ||
2117 | } else { | ||
2118 | regnum = (addr - PT_DBR) >> 3; | ||
2119 | ptr = &child->thread.dbr[0]; | ||
2120 | } | ||
2121 | |||
2122 | if (regnum >= 8) { | ||
2123 | dprintk("ptrace: rejecting access to register " | ||
2124 | "address 0x%lx\n", addr); | ||
2125 | return -1; | ||
2126 | } | ||
2127 | #ifdef CONFIG_PERFMON | ||
2128 | /* | ||
2129 | * Check if debug registers are used by perfmon. This | ||
2130 | * test must be done once we know that we can do the | ||
2131 | * operation, i.e. the arguments are all valid, but | ||
2132 | * before we start modifying the state. | ||
2133 | * | ||
2134 | * Perfmon needs to keep a count of how many processes | ||
2135 | * are trying to modify the debug registers for system | ||
2136 | * wide monitoring sessions. | ||
2137 | * | ||
2138 | * We also include read access here, because they may | ||
2139 | * cause the PMU-installed debug register state | ||
2140 | * (dbr[], ibr[]) to be reset. The two arrays are also | ||
2141 | * used by perfmon, but we do not use | ||
2142 | * IA64_THREAD_DBG_VALID. The registers are restored | ||
2143 | * by the PMU context switch code. | ||
2144 | */ | ||
2145 | if (pfm_use_debug_registers(child)) | ||
2146 | return -1; | ||
2147 | #endif | ||
2148 | |||
2149 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | ||
2150 | child->thread.flags |= IA64_THREAD_DBG_VALID; | ||
2151 | memset(child->thread.dbr, 0, | ||
2152 | sizeof(child->thread.dbr)); | ||
2153 | memset(child->thread.ibr, 0, | ||
2154 | sizeof(child->thread.ibr)); | ||
2155 | } | ||
2156 | |||
2157 | ptr += regnum; | ||
2158 | |||
2159 | if ((regnum & 1) && write_access) { | ||
2160 | /* don't let the user set kernel-level breakpoints: */ | ||
2161 | *ptr = *data & ~(7UL << 56); | ||
2162 | return 0; | ||
2163 | } | ||
2164 | if (write_access) | ||
2165 | *ptr = *data; | ||
2166 | else | ||
2167 | *data = *ptr; | ||
2168 | return 0; | ||
2169 | } | ||
2170 | |||
2171 | static const struct user_regset native_regsets[] = { | ||
2172 | { | ||
2173 | .core_note_type = NT_PRSTATUS, | ||
2174 | .n = ELF_NGREG, | ||
2175 | .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), | ||
2176 | .get = gpregs_get, .set = gpregs_set, | ||
2177 | .writeback = gpregs_writeback | ||
2178 | }, | ||
2179 | { | ||
2180 | .core_note_type = NT_PRFPREG, | ||
2181 | .n = ELF_NFPREG, | ||
2182 | .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), | ||
2183 | .get = fpregs_get, .set = fpregs_set, .active = fpregs_active | ||
2184 | }, | ||
2185 | }; | ||
2186 | |||
2187 | static const struct user_regset_view user_ia64_view = { | ||
2188 | .name = "ia64", | ||
2189 | .e_machine = EM_IA_64, | ||
2190 | .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) | ||
2191 | }; | ||
2192 | |||
2193 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | ||
2194 | { | ||
2195 | #ifdef CONFIG_IA32_SUPPORT | ||
2196 | extern const struct user_regset_view user_ia32_view; | ||
2197 | if (IS_IA32_PROCESS(task_pt_regs(tsk))) | ||
2198 | return &user_ia32_view; | ||
2199 | #endif | ||
2200 | return &user_ia64_view; | ||
2201 | } | ||
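With the regset tables in place, generic code reaches the ia64 registers uniformly through task_user_regset_view(). A hedged consumer sketch (the index-0 assumption mirrors the NT_PRSTATUS slot in native_regsets above):

static int fetch_gregs_sketch(struct task_struct *task, elf_gregset_t *dst)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *rs = &view->regsets[0];	/* NT_PRSTATUS */

	/* ->get() resolves to gpregs_get(), which unwinds the target
	 * and assembles the coredump-format register block. */
	return rs->get(task, rs, 0, sizeof(*dst), dst, NULL);
}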
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4aa9eaea76c3..5015ca1275ca 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include <asm/setup.h> | 59 | #include <asm/setup.h> |
60 | #include <asm/smp.h> | 60 | #include <asm/smp.h> |
61 | #include <asm/system.h> | 61 | #include <asm/system.h> |
62 | #include <asm/tlbflush.h> | ||
62 | #include <asm/unistd.h> | 63 | #include <asm/unistd.h> |
63 | #include <asm/hpsim.h> | 64 | #include <asm/hpsim.h> |
64 | 65 | ||
@@ -176,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) | |||
176 | return 0; | 177 | return 0; |
177 | } | 178 | } |
178 | 179 | ||
180 | /* | ||
181 | * Similar to "filter_rsvd_memory()", but the reserved memory ranges | ||
182 | * are not filtered out. | ||
183 | */ | ||
184 | int __init | ||
185 | filter_memory(unsigned long start, unsigned long end, void *arg) | ||
186 | { | ||
187 | void (*func)(unsigned long, unsigned long, int); | ||
188 | |||
189 | #if IGNORE_PFN0 | ||
190 | if (start == PAGE_OFFSET) { | ||
191 | printk(KERN_WARNING "warning: skipping physical page 0\n"); | ||
192 | start += PAGE_SIZE; | ||
193 | if (start >= end) | ||
194 | return 0; | ||
195 | } | ||
196 | #endif | ||
197 | func = arg; | ||
198 | if (start < end) | ||
199 | call_pernode_memory(__pa(start), end - start, func); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
179 | static void __init | 203 | static void __init |
180 | sort_regions (struct rsvd_region *rsvd_region, int max) | 204 | sort_regions (struct rsvd_region *rsvd_region, int max) |
181 | { | 205 | { |
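filter_memory() has the shape of an efi_memmap_walk() callback: it receives a [start, end) virtual range and forwards it, reserved regions included, to the function passed through 'arg'. A hypothetical caller (the contig/discontig mem_init paths are the assumed users):

/* Register every usable range per node, without punching out the
 * reserved regions the way filter_rsvd_memory() would. */
static void register_range(unsigned long paddr, unsigned long len, int node)
{
	/* record [paddr, paddr + len) against 'node' */
}

static void __init scan_all_memory(void)
{
	efi_memmap_walk(filter_memory, register_range);
}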
@@ -493,6 +517,8 @@ setup_arch (char **cmdline_p) | |||
493 | acpi_table_init(); | 517 | acpi_table_init(); |
494 | # ifdef CONFIG_ACPI_NUMA | 518 | # ifdef CONFIG_ACPI_NUMA |
495 | acpi_numa_init(); | 519 | acpi_numa_init(); |
520 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? | ||
521 | 32 : cpus_weight(early_cpu_possible_map)), additional_cpus); | ||
496 | # endif | 522 | # endif |
497 | #else | 523 | #else |
498 | # ifdef CONFIG_SMP | 524 | # ifdef CONFIG_SMP |
@@ -946,9 +972,10 @@ cpu_init (void) | |||
946 | #endif | 972 | #endif |
947 | 973 | ||
948 | /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ | 974 | /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ |
949 | if (ia64_pal_vm_summary(NULL, &vmi) == 0) | 975 | if (ia64_pal_vm_summary(NULL, &vmi) == 0) { |
950 | max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; | 976 | max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; |
951 | else { | 977 | setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); |
978 | } else { | ||
952 | printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); | 979 | printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); |
953 | max_ctx = (1U << 15) - 1; /* use architected minimum */ | 980 | max_ctx = (1U << 15) - 1; /* use architected minimum */ |
954 | } | 981 | } |
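setup_ptcg_sem() records how many global TLB purges (ptc.g) may be outstanding at once, here taken from PAL's max_purges. The purge path presumably throttles on that count; a hedged sketch of the idea, not the series' actual implementation:

static atomic_t ptcg_in_flight = ATOMIC_INIT(0);
static int nptcg = 1;	/* filled in by setup_ptcg_sem() */

static void ptcg_acquire(void)
{
	/* Spin until we are one of the at-most-nptcg purgers. */
	while (atomic_add_return(1, &ptcg_in_flight) > nptcg) {
		atomic_dec(&ptcg_in_flight);
		cpu_relax();
	}
}

static void ptcg_release(void)
{
	atomic_dec(&ptcg_in_flight);
}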
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 4e446aa5f4ac..9a9d4c489330 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
@@ -213,6 +213,19 @@ send_IPI_allbutself (int op) | |||
213 | * Called with preemption disabled. | 213 | * Called with preemption disabled. |
214 | */ | 214 | */ |
215 | static inline void | 215 | static inline void |
216 | send_IPI_mask(cpumask_t mask, int op) | ||
217 | { | ||
218 | unsigned int cpu; | ||
219 | |||
220 | for_each_cpu_mask(cpu, mask) { | ||
221 | send_IPI_single(cpu, op); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * Called with preemption disabled. | ||
227 | */ | ||
228 | static inline void | ||
216 | send_IPI_all (int op) | 229 | send_IPI_all (int op) |
217 | { | 230 | { |
218 | int i; | 231 | int i; |
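send_IPI_mask() above just fans one IPI out per bit in the mask; it exists so the new smp_call_function_mask() below can target an arbitrary subset instead of choosing between a single CPU and all-but-self. A trivial usage sketch (the CPU numbers are illustrative):

cpumask_t mask = CPU_MASK_NONE;

cpu_set(1, mask);
cpu_set(2, mask);
send_IPI_mask(mask, IPI_CALL_FUNC);   /* kick CPUs 1 and 2 */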
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int | |||
401 | } | 414 | } |
402 | EXPORT_SYMBOL(smp_call_function_single); | 415 | EXPORT_SYMBOL(smp_call_function_single); |
403 | 416 | ||
417 | /** | ||
418 | * smp_call_function_mask - Run a function on a set of other CPUs. | ||
419 | * @mask: The set of cpus to run on.  Must not include the current cpu. | ||
420 | * @func: The function to run.  This must be fast and non-blocking. | ||
421 | * @info: An arbitrary pointer to pass to the function. | ||
422 | * @wait: If true, wait (atomically) until the function | ||
423 | * has completed on the other CPUs. | ||
424 | * | ||
425 | * Returns 0 on success, else a negative status code. | ||
426 | * | ||
427 | * If @wait is true, then returns once @func has returned; otherwise | ||
428 | * it returns just before the target cpu calls @func. | ||
429 | * | ||
430 | * You must not call this function with interrupts disabled, from a | ||
431 | * hardware interrupt handler, or from a bottom half handler. | ||
432 | */ | ||
433 | int smp_call_function_mask(cpumask_t mask, | ||
434 | void (*func)(void *), void *info, | ||
435 | int wait) | ||
436 | { | ||
437 | struct call_data_struct data; | ||
438 | cpumask_t allbutself; | ||
439 | int cpus; | ||
440 | |||
441 | spin_lock(&call_lock); | ||
442 | allbutself = cpu_online_map; | ||
443 | cpu_clear(smp_processor_id(), allbutself); | ||
444 | |||
445 | cpus_and(mask, mask, allbutself); | ||
446 | cpus = cpus_weight(mask); | ||
447 | if (!cpus) { | ||
448 | spin_unlock(&call_lock); | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | /* Can deadlock when called with interrupts disabled */ | ||
453 | WARN_ON(irqs_disabled()); | ||
454 | |||
455 | data.func = func; | ||
456 | data.info = info; | ||
457 | atomic_set(&data.started, 0); | ||
458 | data.wait = wait; | ||
459 | if (wait) | ||
460 | atomic_set(&data.finished, 0); | ||
461 | |||
462 | call_data = &data; | ||
463 | mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ | ||
464 | |||
465 | /* Send a message to other CPUs */ | ||
466 | if (cpus_equal(mask, allbutself)) | ||
467 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
468 | else | ||
469 | send_IPI_mask(mask, IPI_CALL_FUNC); | ||
470 | |||
471 | /* Wait for response */ | ||
472 | while (atomic_read(&data.started) != cpus) | ||
473 | cpu_relax(); | ||
474 | |||
475 | if (wait) | ||
476 | while (atomic_read(&data.finished) != cpus) | ||
477 | cpu_relax(); | ||
478 | call_data = NULL; | ||
479 | |||
480 | spin_unlock(&call_lock); | ||
481 | return 0; | ||
482 | |||
483 | } | ||
484 | EXPORT_SYMBOL(smp_call_function_mask); | ||
485 | |||
404 | /* | 486 | /* |
405 | * this function sends a 'generic call function' IPI to all other CPUs | 487 | * this function sends a 'generic call function' IPI to all other CPUs |
406 | * in the system. | 488 | * in the system. |
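A usage sketch for the new export, observing the rules its kernel-doc spells out -- process context, interrupts enabled, calling CPU excluded from the mask. do_drain() is a hypothetical callback:

static void do_drain(void *info)
{
	/* runs on every CPU in the mask; must be fast and non-blocking */
}

static void drain_others(cpumask_t mask)
{
	cpu_clear(smp_processor_id(), mask);    /* never include self */
	smp_call_function_mask(mask, do_drain, NULL, 1 /* wait */);
}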
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 32ee5979a042..16483be18c0b 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -400,9 +400,9 @@ smp_callin (void) | |||
400 | /* Setup the per cpu irq handling data structures */ | 400 | /* Setup the per cpu irq handling data structures */ |
401 | __setup_vector_irq(cpuid); | 401 | __setup_vector_irq(cpuid); |
402 | cpu_set(cpuid, cpu_online_map); | 402 | cpu_set(cpuid, cpu_online_map); |
403 | unlock_ipi_calllock(); | ||
404 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; | 403 | per_cpu(cpu_state, cpuid) = CPU_ONLINE; |
405 | spin_unlock(&vector_lock); | 404 | spin_unlock(&vector_lock); |
405 | unlock_ipi_calllock(); | ||
406 | 406 | ||
407 | smp_setup_percpu_timer(); | 407 | smp_setup_percpu_timer(); |
408 | 408 | ||
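The smp_callin() hunk above is purely an ordering fix: ipi_calllock is taken before vector_lock earlier in the function, so the unlocks must nest the other way around. In outline, the shape the fix restores:

lock_ipi_calllock();            /* taken first ...    */
spin_lock(&vector_lock);        /* ... nested inside  */
/* set up the vector->irq table, mark the CPU online */
spin_unlock(&vector_lock);      /* released in reverse order */
unlock_ipi_calllock();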
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 17fda5293c67..48e15a51782f 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -59,6 +59,84 @@ static struct clocksource clocksource_itc = { | |||
59 | }; | 59 | }; |
60 | static struct clocksource *itc_clocksource; | 60 | static struct clocksource *itc_clocksource; |
61 | 61 | ||
62 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
63 | |||
64 | #include <linux/kernel_stat.h> | ||
65 | |||
66 | extern cputime_t cycle_to_cputime(u64 cyc); | ||
67 | |||
68 | /* | ||
69 | * Called from the context-switch path with interrupts disabled, to charge | ||
70 | * all accumulated times to the outgoing task and to prepare accounting | ||
71 | * for the incoming one. | ||
72 | */ | ||
73 | void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) | ||
74 | { | ||
75 | struct thread_info *pi = task_thread_info(prev); | ||
76 | struct thread_info *ni = task_thread_info(next); | ||
77 | cputime_t delta_stime, delta_utime; | ||
78 | __u64 now; | ||
79 | |||
80 | now = ia64_get_itc(); | ||
81 | |||
82 | delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp)); | ||
83 | account_system_time(prev, 0, delta_stime); | ||
84 | account_system_time_scaled(prev, delta_stime); | ||
85 | |||
86 | if (pi->ac_utime) { | ||
87 | delta_utime = cycle_to_cputime(pi->ac_utime); | ||
88 | account_user_time(prev, delta_utime); | ||
89 | account_user_time_scaled(prev, delta_utime); | ||
90 | } | ||
91 | |||
92 | pi->ac_stamp = ni->ac_stamp = now; | ||
93 | ni->ac_stime = ni->ac_utime = 0; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Account time for a transition between system, hard irq or soft irq state. | ||
98 | * Note that this function is called with interrupts enabled. | ||
99 | */ | ||
100 | void account_system_vtime(struct task_struct *tsk) | ||
101 | { | ||
102 | struct thread_info *ti = task_thread_info(tsk); | ||
103 | unsigned long flags; | ||
104 | cputime_t delta_stime; | ||
105 | __u64 now; | ||
106 | |||
107 | local_irq_save(flags); | ||
108 | |||
109 | now = ia64_get_itc(); | ||
110 | |||
111 | delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); | ||
112 | account_system_time(tsk, 0, delta_stime); | ||
113 | account_system_time_scaled(tsk, delta_stime); | ||
114 | ti->ac_stime = 0; | ||
115 | |||
116 | ti->ac_stamp = now; | ||
117 | |||
118 | local_irq_restore(flags); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Called from the timer interrupt handler to charge accumulated user time | ||
123 | * to the current process. Must be called with interrupts disabled. | ||
124 | */ | ||
125 | void account_process_tick(struct task_struct *p, int user_tick) | ||
126 | { | ||
127 | struct thread_info *ti = task_thread_info(p); | ||
128 | cputime_t delta_utime; | ||
129 | |||
130 | if (ti->ac_utime) { | ||
131 | delta_utime = cycle_to_cputime(ti->ac_utime); | ||
132 | account_user_time(p, delta_utime); | ||
133 | account_user_time_scaled(p, delta_utime); | ||
134 | ti->ac_utime = 0; | ||
135 | } | ||
136 | } | ||
137 | |||
138 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
139 | |||
62 | static irqreturn_t | 140 | static irqreturn_t |
63 | timer_interrupt (int irq, void *dev_id) | 141 | timer_interrupt (int irq, void *dev_id) |
64 | { | 142 | { |
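All three hooks above drain the per-thread ac_stime/ac_utime cycle accumulators through cycle_to_cputime(). The accumulation itself is done by the low-level entry/exit paths (assembly touched elsewhere in this merge); a C-level sketch of that side, with vtime_mark() as an illustrative stand-in name:

/* Sketch only: the real bookkeeping lives in the ivt/minstate assembly. */
static inline void vtime_mark(struct thread_info *ti, int entering_kernel)
{
	__u64 now = ia64_get_itc();

	if (entering_kernel)
		ti->ac_utime += now - ti->ac_stamp;   /* user-mode cycles   */
	else
		ti->ac_stime += now - ti->ac_stamp;   /* kernel-mode cycles */
	ti->ac_stamp = now;
}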
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6903361d11a5..ff0e7c10faa7 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. | 13 | * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. |
14 | * 2001/01/17 Add support emulation of unaligned kernel accesses. | 14 | * 2001/01/17 Add support emulation of unaligned kernel accesses. |
15 | */ | 15 | */ |
16 | #include <linux/jiffies.h> | ||
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
18 | #include <linux/tty.h> | 19 | #include <linux/tty.h> |
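The include above feeds the hunk below, which swaps a raw jiffies comparison for time_after(). The old unsigned subtraction was in fact wrap-safe, but the macro states the intent and stays correct even if the test is later rewritten in the "jiffies > last_time + delta" form, which does break at wraparound:

/* time_after(a, b) is roughly ((long)(b) - (long)(a) < 0) */
unsigned long last = ULONG_MAX - 2;   /* counter about to wrap */
unsigned long now  = ULONG_MAX;       /* only 2 ticks later    */

/* naive:  now > last + 15  ->  ULONG_MAX > 12  ->  true  (wrong)   */
/* safe:   time_after(now, last + 15)           ->  false (correct) */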
@@ -1290,7 +1291,7 @@ within_logging_rate_limit (void) | |||
1290 | { | 1291 | { |
1291 | static unsigned long count, last_time; | 1292 | static unsigned long count, last_time; |
1292 | 1293 | ||
1293 | if (jiffies - last_time > 5*HZ) | 1294 | if (time_after(jiffies, last_time + 5 * HZ)) |
1294 | count = 0; | 1295 | count = 0; |
1295 | if (count < 5) { | 1296 | if (count < 5) { |
1296 | last_time = jiffies; | 1297 | last_time = jiffies; |