author		Al Viro <viro@ftp.linux.org.uk>		2006-01-12 04:06:06 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-12 12:08:58 -0500
commit		6450578f32cdca587ae5f148e2118b2fcc36bb11
tree		91ad424aae66d72fc370dc624fca3f42d830675b /arch/ia64/kernel
parent		ab03591db110e8d195d381a68692eb37da981cdf
[PATCH] ia64: task_pt_regs()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
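
The change is mechanical: every caller that used ia64_task_regs() to reach the pt_regs block saved at the top of a task's kernel stack now goes through task_pt_regs() instead. As a rough orientation only (not part of this patch), an ia64 accessor of this kind can be sketched as a macro over the kernel-stack offset; the real definition lives in the ia64 headers and may differ in detail:

	/* Sketch only -- assumes IA64_STK_OFFSET marks the top of the
	 * task's kernel stack; the in-tree definition may differ. */
	#define task_pt_regs(t) \
		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)

Callers then read the saved user registers through the returned pointer, e.g. struct pt_regs *regs = task_pt_regs(current);, as in the hunks below.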
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/perfmon.c	32
-rw-r--r--	arch/ia64/kernel/process.c	12
-rw-r--r--	arch/ia64/kernel/ptrace.c	24
-rw-r--r--	arch/ia64/kernel/setup.c	2
-rw-r--r--	arch/ia64/kernel/sys_ia64.c	2
5 files changed, 36 insertions, 36 deletions
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c026ac1142a6..bd87cb6b7a81 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1710,7 +1710,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t *ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1815,7 +1815,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1945,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -4052,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4134,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * when not current, task MUST be stopped, so this is safe
 	 */
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	/* force a full reload */
 	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * per-task mode
 	 */
-	tregs = task == current ? regs : ia64_task_regs(task);
+	tregs = task == current ? regs : task_pt_regs(task);
 
 	if (task == current) {
 		/*
@@ -4593,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
 {
 	pfm_context_t *ctx;
 	unsigned long flags;
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 	int ret, state;
 	int free_ok = 0;
 
@@ -4926,7 +4926,7 @@ restart_args:
 	if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+	ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
 	call_made = 1;
 
@@ -5050,7 +5050,7 @@ pfm_handle_work(void)
 
 	pfm_clear_task_notify();
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	/*
 	 * extract reason for being here and clear
@@ -5794,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
 	 * on every CPU, so we can rely on the pid to identify the idle task.
 	 */
 	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
 		return;
 	}
@@ -5877,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
 	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@ dump_pmu_state(const char *from)
 	local_irq_save(flags);
 
 	this_cpu = smp_processor_id();
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 	info = PFM_CPUINFO_GET();
 	dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2ba..309d59658e5f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
 	 * If we're cloning an IA32 task then save the IA32 extra
 	 * state from the current task to the new task
 	 */
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_save_state(p);
 		if (clone_flags & CLONE_SETTLS)
 			retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		/* A kernel thread is always a 64-bit process. */
 		current->thread.map_base = DEFAULT_MAP_BASE;
 		current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_drop_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
 	if (current->thread.flags & IA64_THREAD_DBG_VALID)
 		pfm_release_debug_registers(current);
 #endif
-	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+	if (IS_IA32_PROCESS(task_pt_regs(current)))
 		ia32_drop_partial_page_list(current);
 }
 
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 8d88eeea02d1..eaed14aac6aa 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
 	long num_regs, nbits;
 	struct pt_regs *pt;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 	struct pt_regs *pt;
 	unsigned long cfm, *urbs_kargs;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 
 	urbs_end = (long *) user_rbs_end;
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 	struct pt_regs *child_regs;
 
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
 		 */
 		return 0;
 
-	thread_regs = ia64_task_regs(thread);
+	thread_regs = task_pt_regs(thread);
 	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
 	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
 		return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	/*
 	 * Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
 void
 ia64_sync_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	ia64_flush_fph(task);
 	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
 					  + offsetof(struct pt_regs, reg)))
 
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 void
 ptrace_disable (struct task_struct *child)
 {
-	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
 	child_psr->ss = 0;
@@ -1456,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 	if (ret < 0)
 		goto out_tsk;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	switch (request) {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 0daa8fa9ef32..c0766575a3a2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -801,7 +801,7 @@ cpu_init (void)
 #endif
 
 	/* Clear the stack memory reserved for pt_regs: */
-	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
 	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d4..c7b943f10199 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
 asmlinkage long
 sys_pipe (void)
 {
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	int fd[2];
 	int retval;
 