-rw-r--r--  arch/ia64/ia32/elfcore32.h     |  3
-rw-r--r--  arch/ia64/ia32/ia32_signal.c   |  4
-rw-r--r--  arch/ia64/ia32/ia32_support.c  |  4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c      | 12
-rw-r--r--  arch/ia64/kernel/perfmon.c     | 32
-rw-r--r--  arch/ia64/kernel/process.c     | 12
-rw-r--r--  arch/ia64/kernel/ptrace.c      | 24
-rw-r--r--  arch/ia64/kernel/setup.c       |  2
-rw-r--r--  arch/ia64/kernel/sys_ia64.c    |  2
-rw-r--r--  drivers/input/evdev.c          |  2
-rw-r--r--  include/asm-ia64/compat.h      |  2
-rw-r--r--  include/asm-ia64/processor.h   |  2
-rw-r--r--  include/asm-ia64/ptrace.h      |  4
-rw-r--r--  include/asm-ia64/system.h      |  8
14 files changed, 56 insertions(+), 57 deletions(-)
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index b73b8b6b10c1..a47f63b204fb 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -95,8 +95,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
 static inline int elf_core_copy_task_regs(struct task_struct *t,
 					  elf_gregset_t* elfregs)
 {
-	struct pt_regs *pp = ia64_task_regs(t);
-	ELF_CORE_COPY_REGS((*elfregs), pp);
+	ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
 	return 1;
 }
 
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index aa891c9bc9b6..5856510210fa 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -255,7 +255,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	 */
 	fp_tos = (fsr>>11)&0x7;
 	fr8_st_map = (8-fp_tos)&0x7;
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 	ia64f2ia32f(fpregp, &ptp->f8);
 	copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	fr8_st_map = (8-fp_tos)&0x7;
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
 	ia32f2ia64f(&ptp->f8, fpregp);
 	copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index 4f630043b3ae..c187743965a0 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -58,7 +58,7 @@ load_desc (u16 selector)
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 
 	/* Setup the segment descriptors */
 	regs->r24 = load_desc(regs->r16 >> 16);	/* ESD */
@@ -113,7 +113,7 @@ void
 ia32_load_state (struct task_struct *t)
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-	struct pt_regs *regs = ia64_task_regs(t);
+	struct pt_regs *regs = task_pt_regs(t);
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0668b2b7714d..3945d378bd7e 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1482,7 +1482,7 @@ getreg (struct task_struct *child, int regno)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	      case PT_EBX: return child_regs->r11;
 	      case PT_ECX: return child_regs->r9;
@@ -1510,7 +1510,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	      case PT_EBX: child_regs->r11 = value; break;
 	      case PT_ECX: child_regs->r9 = value; break;
@@ -1626,7 +1626,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user
 	 *  Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1659,7 +1659,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __us
 	 *  Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1690,7 +1690,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user
 	 *  Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1734,7 +1734,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
 	 *  Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c026ac1142a6..bd87cb6b7a81 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1710,7 +1710,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t   *ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1815,7 +1815,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1945,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -4052,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4134,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * when not current, task MUST be stopped, so this is safe
 		 */
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 
 		/* force a full reload */
 		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * per-task mode
 	 */
-	tregs = task == current ? regs : ia64_task_regs(task);
+	tregs = task == current ? regs : task_pt_regs(task);
 
 	if (task == current) {
 		/*
@@ -4593,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
 {
 	pfm_context_t *ctx;
 	unsigned long flags;
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 	int ret, state;
 	int free_ok = 0;
 
@@ -4926,7 +4926,7 @@ restart_args:
 	if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+	ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
 	call_made = 1;
 
@@ -5050,7 +5050,7 @@ pfm_handle_work(void)
 
 	pfm_clear_task_notify();
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	/*
 	 * extract reason for being here and clear
@@ -5794,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
 	 * on every CPU, so we can rely on the pid to identify the idle task.
 	 */
 	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
 		return;
 	}
@@ -5877,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
 	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@ dump_pmu_state(const char *from)
 	local_irq_save(flags);
 
 	this_cpu = smp_processor_id();
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 	info = PFM_CPUINFO_GET();
 	dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2ba..309d59658e5f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
 	 * If we're cloning an IA32 task then save the IA32 extra
 	 * state from the current task to the new task
 	 */
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_save_state(p);
 		if (clone_flags & CLONE_SETTLS)
 			retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		/* A kernel thread is always a 64-bit process. */
 		current->thread.map_base  = DEFAULT_MAP_BASE;
 		current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_drop_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
 	if (current->thread.flags & IA64_THREAD_DBG_VALID)
 		pfm_release_debug_registers(current);
 #endif
-	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+	if (IS_IA32_PROCESS(task_pt_regs(current)))
 		ia32_drop_partial_page_list(current);
 }
 
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 8d88eeea02d1..eaed14aac6aa 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
 	long num_regs, nbits;
 	struct pt_regs *pt;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 	struct pt_regs *pt;
 	unsigned long cfm, *urbs_kargs;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 
 	urbs_end = (long *) user_rbs_end;
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 	struct pt_regs *child_regs;
 
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
 		 */
 		return 0;
 
-	thread_regs = ia64_task_regs(thread);
+	thread_regs = task_pt_regs(thread);
 	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
 	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
 		return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	/*
 	 * Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
 void
 ia64_sync_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	ia64_flush_fph(task);
 	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
 		 + offsetof(struct pt_regs, reg)))
 
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 void
 ptrace_disable (struct task_struct *child)
 {
-	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
 	child_psr->ss = 0;
@@ -1456,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 	if (ret < 0)
 		goto out_tsk;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	switch (request) {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 0daa8fa9ef32..c0766575a3a2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -801,7 +801,7 @@ cpu_init (void)
 #endif
 
 	/* Clear the stack memory reserved for pt_regs: */
-	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
 	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d4..c7b943f10199 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
 asmlinkage long
 sys_pipe (void)
 {
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	int fd[2];
 	int retval;
 
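The sys_pipe() hunk shows why this function reaches for the saved pt_regs at all: pipe() has to hand two descriptors back, and only one fits in the normal return value. Below is a hedged sketch of that pattern using the names visible in the hunk; the body is reconstructed for illustration (it is not quoted from the patch), and the choice of r9 for the second value follows the ia64 syscall return convention.

asmlinkage long
example_sys_pipe (void)
{
	struct pt_regs *regs = task_pt_regs(current);	/* renamed accessor */
	int fd[2];
	int retval;

	retval = do_pipe(fd);
	if (retval)
		return retval;
	regs->r9 = fd[1];	/* second descriptor via a scratch result register */
	return fd[0];		/* first descriptor via the usual return path */
}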
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 362b33556b1a..745979f33dc2 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -159,7 +159,7 @@ struct input_event_compat {
 #ifdef CONFIG_X86_64
 #  define COMPAT_TEST is_compat_task()
 #elif defined(CONFIG_IA64)
-#  define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
+#  define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
 #elif defined(CONFIG_S390)
 #  define COMPAT_TEST test_thread_flag(TIF_31BIT)
 #elif defined(CONFIG_MIPS)
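COMPAT_TEST above asks one question per architecture: is the calling task a 32-bit process on a 64-bit kernel? On ia64 that means checking the saved pt_regs, which is why this driver is touched by the rename. A minimal sketch of the kind of decision evdev makes with it, assuming the structures named in the hunk header; evdev_event_size() is an illustrative helper, not code from the driver.

static size_t evdev_event_size(void)
{
	/* 32-bit callers exchange the packed input_event_compat layout */
	return COMPAT_TEST ? sizeof(struct input_event_compat)
			   : sizeof(struct input_event);
}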
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index aaf11f4e9169..c0b19106665c 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
 }
 
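compat_alloc_user_space() above carves its scratch area directly out of the 32-bit caller's user stack: r12 in the saved pt_regs is the IA-32 stack pointer, truncated to 32 bits, rounded down to a 16-byte boundary, then moved down by len. A hedged usage sketch follows; copy_to_scratch64() and the u64 payload are illustrative names, not part of the patch.

static long copy_to_scratch64(u32 val)
{
	/* 16-byte-aligned area just below the IA-32 user stack pointer */
	u64 __user *scratch = compat_alloc_user_space(sizeof(*scratch));

	if (put_user((u64) val, scratch))
		return -EFAULT;
	return 0;
}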
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 94e07e727395..8c648bf72bbd 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
 /* Return instruction pointer of blocked task TSK.  */
 #define KSTK_EIP(tsk)					\
   ({							\
-	struct pt_regs *_regs = ia64_task_regs(tsk);	\
+	struct pt_regs *_regs = task_pt_regs(tsk);	\
 	_regs->cr_iip + ia64_psr(_regs)->ri;		\
   })
 
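KSTK_EIP() needs both fields because an ia64 instruction pointer is really a pair: cr_iip holds the 16-byte bundle address and psr.ri selects the slot (0-2) within that bundle, so the macro simply adds the two. A small hedged illustration; print_blocked_ip() is a hypothetical helper, not part of the patch.

static void print_blocked_ip(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);	/* new spelling */

	printk("bundle %lx slot %u -> KSTK_EIP %lx\n",
	       regs->cr_iip, ia64_psr(regs)->ri, KSTK_EIP(tsk));
}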
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 2c703d6e0c86..9471cdc3f4c0 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@ struct switch_stack {
   })
 
 /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
 # define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
 # define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
  *
  * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
  */
-# define force_successful_syscall_return()	(ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return()	(task_pt_regs(current)->r8 = 0)
 
 struct task_struct;			/* forward decl */
 struct unw_frame_info;		/* forward decl */
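The definition above is the whole story behind the rename: task_pt_regs(t) is pure pointer arithmetic on the layout ia64 already uses, where the task_struct and its kernel stack share one IA64_STK_OFFSET-sized block and the user-level pt_regs sits at the very top of it. An open-coded restatement of the macro under that same assumption; the function name is illustrative only.

static struct pt_regs *open_coded_task_pt_regs(struct task_struct *t)
{
	/* top of the task's combined task_struct + kernel-stack block */
	char *stack_top = (char *) t + IA64_STK_OFFSET;

	/* the user's register frame is the last pt_regs below that top */
	return ((struct pt_regs *) stack_top) - 1;
}

force_successful_syscall_return() then just pokes r8 in that frame, exactly as the second hunk shows.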
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 510c31c50723..80c5a234e259 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
 
 #define IA64_HAS_EXTRA_STATE(t)							\
 	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
-	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+	 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {						\
 	if (IA64_HAS_EXTRA_STATE(prev))							\
 		ia64_save_extra(prev);							\
 	if (IA64_HAS_EXTRA_STATE(next))							\
 		ia64_load_extra(next);							\
-	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);		\
+	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);		\
 	(last) = ia64_switch_to((next));						\
 } while (0)
 
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
 # define switch_to(prev,next,last) do {							\
-	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
-		ia64_psr(ia64_task_regs(prev))->mfh = 0;				\
+	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
+		ia64_psr(task_pt_regs(prev))->mfh = 0;					\
 		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;				\
 		__ia64_save_fpu((prev)->thread.fph);					\
 	} \
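The switch_to() hunk above is the "eager save, lazy restore" policy the comment describes: if the outgoing task modified the high floating-point partition (psr.mfh) and still owns the FPU on this CPU, its fph is saved immediately and marked valid; restoring is deferred to a later disabled-FP fault. A plain-function restatement of that macro's first half, using only names that appear in the hunk (the wrapper itself is illustrative, not kernel code):

static inline void fph_eager_save(struct task_struct *prev)
{
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {
		ia64_psr(task_pt_regs(prev))->mfh = 0;		/* fph now clean */
		prev->thread.flags |= IA64_THREAD_FPH_VALID;	/* thread.fph holds the latest state */
		__ia64_save_fpu(prev->thread.fph);		/* eager save */
	}
}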