-rw-r--r--  arch/ia64/kernel/entry.S          5
-rw-r--r--  arch/ia64/kernel/mca.c           55
-rw-r--r--  arch/ia64/kernel/perfmon.c       21
-rw-r--r--  arch/ia64/kernel/process.c       59
-rw-r--r--  arch/ia64/kernel/ptrace.c       139
-rw-r--r--  include/asm-ia64/ptrace.h        11
-rw-r--r--  include/asm-ia64/thread_info.h   11
-rw-r--r--  include/asm-ia64/unistd.h         5
8 files changed, 225 insertions(+), 81 deletions(-)
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5d3efbfbeda..3c331c464b40 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1573,7 +1573,7 @@ sys_call_table:
 	data8 sys_fchmodat
 	data8 sys_faccessat
 	data8 sys_pselect6
-	data8 sys_ppoll
+	data8 sys_ppoll				// 1295
 	data8 sys_unshare
 	data8 sys_splice
 	data8 sys_set_robust_list
@@ -1588,5 +1588,8 @@ sys_call_table:
 	data8 sys_signalfd
 	data8 sys_ni_syscall
 	data8 sys_eventfd
+	data8 sys_timerfd_create	// 1310
+	data8 sys_timerfd_settime
+	data8 sys_timerfd_gettime
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
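
A note on the numbering above: ia64 syscall numbers start at 1024, so a table slot is simply the syscall number minus 1024, and each data8 entry is 8 bytes (hence the 8*NR_syscalls guard). A quick standalone check of the arithmetic behind the three new entries and the NR_syscalls bump in the unistd.h hunk below (plain userspace C, not kernel code):

#include <stdio.h>

/* First syscall number on ia64; the syscall ABI starts here. */
#define IA64_FIRST_SYSCALL 1024

int main(void)
{
	/* Numbers taken from the unistd.h hunk at the end of this patch. */
	int nr_timerfd_create  = 1310;
	int nr_timerfd_gettime = 1312;

	/* Slot = number - 1024, so 1310 lands in slot 286 -- exactly where
	 * the old table ended (NR_syscalls was 286). */
	printf("first new slot: %d\n",
	       nr_timerfd_create - IA64_FIRST_SYSCALL);

	/* The last new entry is slot 288, hence NR_syscalls becomes 289. */
	printf("new NR_syscalls: %d\n",
	       nr_timerfd_gettime - IA64_FIRST_SYSCALL + 1);
	return 0;
}
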
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 846e7e036b13..6e17aed53135 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -17,7 +17,7 @@
  * Copyright (C) 2000 Intel
  * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
  *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  *
  * Copyright (C) 2006 FUJITSU LIMITED
@@ -1762,11 +1762,8 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __init_refok mca_bootmem(void)
 {
-	void *p;
-
-	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
-			  KERNEL_STACK_SIZE);
-	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
+			       KERNEL_STACK_SIZE, 0);
 }
 
 /* Do per-CPU MCA-related initialization. */
@@ -1774,33 +1771,33 @@ void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
+	void *data;
+	long sz = sizeof(struct ia64_mca_cpu);
+	int cpu = smp_processor_id();
 	static int first_time = 1;
 
-	if (first_time) {
-		void *mca_data;
-		int cpu;
-
-		first_time = 0;
-		mca_data = mca_bootmem();
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, mca_stack),
-					"MCA", cpu);
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, init_stack),
-					"INIT", cpu);
-			__per_cpu_mca[cpu] = __pa(mca_data);
-			mca_data += sizeof(struct ia64_mca_cpu);
-		}
-	}
-
 	/*
-	 * The MCA info structure was allocated earlier and its
-	 * physical address saved in __per_cpu_mca[cpu].  Copy that
-	 * address * to ia64_mca_data so we can access it as a per-CPU
-	 * variable.
+	 * Structure will already be allocated if cpu has been online,
+	 * then offlined.
 	 */
-	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+	if (__per_cpu_mca[cpu]) {
+		data = __va(__per_cpu_mca[cpu]);
+	} else {
+		if (first_time) {
+			data = mca_bootmem();
+			first_time = 0;
+		} else
+			data = page_address(alloc_pages_node(numa_node_id(),
+					GFP_KERNEL, get_order(sz)));
+		if (!data)
+			panic("Could not allocate MCA memory for cpu %d\n",
+					cpu);
+	}
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
+		"MCA", cpu);
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
+		"INIT", cpu);
+	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
 
 	/*
 	 * Stash away a copy of the PTE needed to map the per-CPU page.
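
The new ia64_mca_cpu_init() replaces an up-front NR_CPUS-sized bootmem allocation with per-CPU allocation on first online, caching the physical address in __per_cpu_mca[] so a CPU that goes offline and comes back reuses its old area rather than leaking it. A userspace sketch of that allocation policy (names such as percpu_area and cpu_area_init are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 4
#define AREA_SZ  4096		/* stand-in for sizeof(struct ia64_mca_cpu) */

static void *percpu_area[MAX_CPUS];	/* analogue of __per_cpu_mca[] */

/* Analogue of ia64_mca_cpu_init(): allocate on first online, reuse on
 * re-online, so repeated offline/online cycles never leak memory. */
static void *cpu_area_init(int cpu)
{
	if (percpu_area[cpu])		/* cpu was online before: reuse */
		return percpu_area[cpu];

	percpu_area[cpu] = malloc(AREA_SZ);	/* kernel: alloc_pages_node() */
	if (!percpu_area[cpu]) {
		fprintf(stderr, "no MCA memory for cpu %d\n", cpu);
		exit(1);		/* kernel: panic() */
	}
	return percpu_area[cpu];
}

int main(void)
{
	void *a = cpu_area_init(1);
	void *b = cpu_area_init(1);	/* simulated offline/online cycle */
	printf("same area reused: %s\n", a == b ? "yes" : "no");
	return 0;
}
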
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 78acd9fe97e9..f6b99719f10f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -586,21 +586,6 @@ pfm_put_task(struct task_struct *task)
 }
 
 static inline void
-pfm_set_task_notify(struct task_struct *task)
-{
-	struct thread_info *info;
-
-	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
-	set_bit(TIF_PERFMON_WORK, &info->flags);
-}
-
-static inline void
-pfm_clear_task_notify(void)
-{
-	clear_thread_flag(TIF_PERFMON_WORK);
-}
-
-static inline void
 pfm_reserve_page(unsigned long a)
 {
 	SetPageReserved(vmalloc_to_page((void *)a));
@@ -3724,7 +3709,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	PFM_SET_WORK_PENDING(task, 1);
 
-	pfm_set_task_notify(task);
+	tsk_set_notify_resume(task);
 
 	/*
 	 * XXX: send reschedule if task runs on another CPU
@@ -5082,7 +5067,7 @@ pfm_handle_work(void)
 
 	PFM_SET_WORK_PENDING(current, 0);
 
-	pfm_clear_task_notify();
+	tsk_clear_notify_resume(current);
 
 	regs = task_pt_regs(current);
 
@@ -5450,7 +5435,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 * when coming from ctxsw, current still points to the
 	 * previous task, therefore we must work with task and not current.
 	 */
-		pfm_set_task_notify(task);
+		tsk_set_notify_resume(task);
 	}
 	/*
 	 * defer until state is changed (shorten spin window). the context is locked
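
The perfmon-private TIF_PERFMON_WORK bit is folded into the generic TIF_NOTIFY_RESUME, which is now shared between perfmon and the new RSE sync. Clearing a shared bit is only safe when no other consumer still needs the callback, which is exactly what tsk_clear_notify_resume() in process.c checks. A compilable toy model of that rule (all names here are stand-ins):

#include <stdio.h>

/* Toy model of the shared TIF_NOTIFY_RESUME flag: several consumers
 * (perfmon, RSE sync) share one "call me on return to user" bit, and the
 * bit may only be cleared when no consumer still has work pending. */
enum { NOTIFY_RESUME = 1 << 0 };

struct task {
	unsigned flags;
	int pfm_needs_checking;	/* perfmon's own pending-work marker */
	int restore_rse;	/* stand-in for TIF_RESTORE_RSE */
};

static void tsk_set_notify(struct task *t)
{
	t->flags |= NOTIFY_RESUME;
}

static void tsk_clear_notify(struct task *t)
{
	if (t->pfm_needs_checking)	/* perfmon still wants the callback */
		return;
	if (t->restore_rse)		/* RSE sync still wants it */
		return;
	t->flags &= ~NOTIFY_RESUME;
}

int main(void)
{
	struct task t = { 0, 1, 0 };	/* perfmon work pending */
	tsk_set_notify(&t);
	tsk_clear_notify(&t);		/* refused: a consumer still needs it */
	printf("flag still set: %s\n",
	       (t.flags & NOTIFY_RESUME) ? "yes" : "no");
	return 0;
}
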
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7377d323131d..49937a383b23 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -52,7 +52,6 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -157,6 +156,17 @@ show_regs (struct pt_regs *regs)
 	show_stack(NULL, NULL);
 }
 
+void tsk_clear_notify_resume(struct task_struct *tsk)
+{
+#ifdef CONFIG_PERFMON
+	if (tsk->thread.pfm_needs_checking)
+		return;
+#endif
+	if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
+		return;
+	clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
+}
+
 void
 do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
@@ -175,6 +185,10 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
 	/* deal with pending signal delivery */
 	if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
 		ia64_do_signal(scr, in_syscall);
+
+	/* copy user rbs to kernel rbs */
+	if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
+		ia64_sync_krbs();
 }
 
 static int pal_halt = 1;
@@ -239,33 +253,23 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void cpu_idle_wait(void)
+static void do_nothing(void *unused)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
-	cpumask_t tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
+}
 
-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-	} while (!cpus_empty(map));
-	set_cpus_allowed(current, tmp);
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -293,9 +297,6 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
 			min_xtp();
 #endif
-			if (__get_cpu_var(cpu_idle_state))
-				__get_cpu_var(cpu_idle_state) = 0;
-
 			rmb();
 			if (mark_idle)
 				(*mark_idle)(1);
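
The old cpu_idle_wait() polled a per-CPU cpu_idle_state flag, sleeping a second per round; the new one publishes the handler change, issues a full barrier, and kicks every CPU with a do-nothing IPI so each of them leaves its current pm_idle invocation before the call returns. A userspace analogue of that publish-then-kick pattern using C11 atomics (idle_wait and the other names are invented for this sketch):

#include <stdatomic.h>
#include <stdio.h>

typedef void (*idle_fn)(void);

static void old_idle(void) { }
static void new_idle(void) { }

static _Atomic(idle_fn) pm_idle = old_idle;

static void do_nothing(void)
{
	/* IPI payload: exists only to kick a cpu out of its idle loop */
}

static void idle_wait(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: smp_mb() */
	/* kernel: smp_call_function(do_nothing, NULL, 0, 1) -- wait=1 makes
	 * the caller block until every cpu has run do_nothing(), i.e. has
	 * left whatever pm_idle it was executing when the IPI arrived. */
	do_nothing();
}

int main(void)
{
	atomic_store(&pm_idle, new_idle);	/* caller changes pm_idle first */
	idle_wait();
	printf("no cpu can still be running the old idle handler\n");
	return 0;
}
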
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 2e96f17b2f3b..331d6768b5d5 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -547,6 +547,129 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 	return 0;
 }
 
+static long
+ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
+		unsigned long user_rbs_start, unsigned long user_rbs_end)
+{
+	unsigned long addr, val;
+	long ret;
+
+	/* now copy word for word from user rbs to kernel rbs: */
+	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
+		if (access_process_vm(child, addr, &val, sizeof(val), 0)
+				!= sizeof(val))
+			return -EIO;
+
+		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
+		unsigned long, unsigned long);
+
+static void do_sync_rbs(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	unsigned long urbs_end;
+	syncfunc_t fn = arg;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+	pt = task_pt_regs(info->task);
+	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
+
+	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
+}
+
+/*
+ * When a thread is stopped (ptraced), a debugger may change the thread's
+ * user stack (by writing its memory directly), so the RSE state held in
+ * the kernel must not overwrite the user stack (user space's RSE is newer
+ * than the kernel's in that case).  To work around this, we copy the
+ * kernel RSE to the user RSE before the task stops, so the user RSE holds
+ * up-to-date data.  After the task resumes from the traced stop, we copy
+ * the user RSE back to the kernel, and the kernel returns to user space
+ * with the newer RSE.  TIF_RESTORE_RSE is the flag indicating that the
+ * user RSE needs to be synchronized back to the kernel.
+ */
+void ia64_ptrace_stop(void)
+{
+	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
+		return;
+	tsk_set_notify_resume(current);
+	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
+}
+
+/*
+ * This is called to read back the register backing store.
+ */
+void ia64_sync_krbs(void)
+{
+	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
+	tsk_clear_notify_resume(current);
+
+	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
+}
+
+/*
+ * After PTRACE_ATTACH, a thread's register backing store area in user
+ * space is assumed to contain correct data whenever the thread is
+ * stopped.  arch_ptrace_stop takes care of this on tracing stops.
+ * But if the child was already stopped for job control when we attach
+ * to it, then it might not ever get into ptrace_stop by the time we
+ * want to examine the user memory containing the RBS.
+ */
+void
+ptrace_attach_sync_user_rbs (struct task_struct *child)
+{
+	int stopped = 0;
+	struct unw_frame_info info;
+
+	/*
+	 * If the child is in TASK_STOPPED, we need to change that to
+	 * TASK_TRACED momentarily while we operate on it.  This ensures
+	 * that the child won't be woken up and return to user mode while
+	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
+	 */
+
+	read_lock(&tasklist_lock);
+	if (child->signal) {
+		spin_lock_irq(&child->sighand->siglock);
+		if (child->state == TASK_STOPPED &&
+		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
+			tsk_set_notify_resume(child);
+
+			child->state = TASK_TRACED;
+			stopped = 1;
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+
+	if (!stopped)
+		return;
+
+	unw_init_from_blocked_task(&info, child);
+	do_sync_rbs(&info, ia64_sync_user_rbs);
+
+	/*
+	 * Now move the child back into TASK_STOPPED if it should be in a
+	 * job control stop, so that SIGCONT can be used to wake it up.
+	 */
+	read_lock(&tasklist_lock);
+	if (child->signal) {
+		spin_lock_irq(&child->sighand->siglock);
+		if (child->state == TASK_TRACED &&
+		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
+			child->state = TASK_STOPPED;
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+	}
+	read_unlock(&tasklist_lock);
+}
+
 static inline int
 thread_matches (struct task_struct *thread, unsigned long addr)
 {
@@ -1422,6 +1545,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 	struct task_struct *child;
 	struct switch_stack *sw;
 	long ret;
+	struct unw_frame_info info;
 
 	lock_kernel();
 	ret = -EPERM;
@@ -1453,6 +1577,8 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
+		if (!ret)
+			arch_ptrace_attach(child);
 		goto out_tsk;
 	}
 
@@ -1481,6 +1607,11 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 		/* write the word at location addr */
 		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
 		ret = ia64_poke(child, sw, urbs_end, addr, data);
+
+		/* Make sure user RBS has the latest data */
+		unw_init_from_blocked_task(&info, child);
+		do_sync_rbs(&info, ia64_sync_user_rbs);
+
 		goto out_tsk;
 
 	case PTRACE_PEEKUSR:
@@ -1634,6 +1765,10 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 	    && (current->ptrace & PT_PTRACED))
 		syscall_trace();
 
+	/* copy user rbs to kernel rbs */
+	if (test_thread_flag(TIF_RESTORE_RSE))
+		ia64_sync_krbs();
+
 	if (unlikely(current->audit_context)) {
 		long syscall;
 		int arch;
@@ -1671,4 +1806,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 	    || test_thread_flag(TIF_SINGLESTEP))
 	    && (current->ptrace & PT_PTRACED))
 		syscall_trace();
+
+	/* copy user rbs to kernel rbs */
+	if (test_thread_flag(TIF_RESTORE_RSE))
+		ia64_sync_krbs();
 }
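
From a tracer's point of view nothing in the API changes; the patch only guarantees that PTRACE_ATTACH and PTRACE_POKEDATA now see and update a coherent register backing store. A minimal generic tracer exercising those two requests (ordinary ptrace(2) usage, nothing ia64-specific):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t pid;
	unsigned long addr;
	long word;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
		return 1;
	}
	pid  = (pid_t)strtol(argv[1], NULL, 0);
	addr = strtoul(argv[2], NULL, 16);

	/* In-kernel, a successful attach now also runs arch_ptrace_attach()
	 * so the user RBS is valid even for a job-control-stopped child. */
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	waitpid(pid, NULL, 0);		/* wait for the tracee to stop */

	/* Read a word, flip its low bit, write it back.  On ia64 the write
	 * may land in the tracee's register backing store; the patch keeps
	 * such writes from being overwritten by stale kernel RSE state. */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)(word ^ 1));

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}
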
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index f4ef87a36236..0bdce7dde1b0 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -292,6 +292,7 @@ struct switch_stack {
 			       unsigned long, long);
 extern void ia64_flush_fph (struct task_struct *);
 extern void ia64_sync_fph (struct task_struct *);
+extern void ia64_sync_krbs(void);
 extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
 				unsigned long, unsigned long);
 
@@ -303,6 +304,16 @@ struct switch_stack {
 extern void ia64_increment_ip (struct pt_regs *pt);
 extern void ia64_decrement_ip (struct pt_regs *pt);
 
+extern void ia64_ptrace_stop(void);
+#define arch_ptrace_stop(code, info) \
+	ia64_ptrace_stop()
+#define arch_ptrace_stop_needed(code, info) \
+	(!test_thread_flag(TIF_RESTORE_RSE))
+
+extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+#define arch_ptrace_attach(child) \
+	ptrace_attach_sync_user_rbs(child)
+
 #endif /* !__KERNEL__ */
 
 /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
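
These macros are consumed by the generic ptrace stop path, which lives outside this diff; the core is expected to test arch_ptrace_stop_needed() before calling arch_ptrace_stop(), so the RBS sync runs at most once per stop. A compilable userspace model of that expected call sequence (ptrace_stop_model and ia64_ptrace_stop_model are invented stand-ins, not kernel code):

#include <stdio.h>

static int restore_rse;		/* stand-in for TIF_RESTORE_RSE */

static void ia64_ptrace_stop_model(void)
{
	restore_rse = 1;
	printf("syncing kernel RBS to user RBS before stop\n");
}

/* the arch supplies these, exactly as in the hunk above */
#define arch_ptrace_stop_needed()	(!restore_rse)
#define arch_ptrace_stop()		ia64_ptrace_stop_model()

/* model of the generic stop path: check "needed" first, sync, then stop */
static void ptrace_stop_model(void)
{
	if (arch_ptrace_stop_needed())
		arch_ptrace_stop();
	/* ... then mark the task TASK_TRACED and notify the tracer ... */
}

int main(void)
{
	ptrace_stop_model();	/* first stop: sync happens */
	ptrace_stop_model();	/* second stop: flag already set, skipped */
	return 0;
}
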
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index d16031e72efa..93d83cbe0c8c 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -71,6 +71,9 @@ struct thread_info {
 #define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
+#define tsk_set_notify_resume(tsk) \
+	set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
+extern void tsk_clear_notify_resume(struct task_struct *tsk);
 #endif /* !__ASSEMBLY */
 
 /*
@@ -85,28 +88,30 @@ struct thread_info {
 #define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_PERFMON_WORK	6	/* work for pfm_handle_work() */
+#define TIF_NOTIFY_RESUME	6	/* resumption notification requested */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
 #define TIF_FREEZE		20	/* is freezing for suspend */
+#define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_PERFMON_WORK	(1 << TIF_PERFMON_WORK)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
+#define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_PERFMON_WORK|_TIF_SYSCALL_AUDIT|\
+#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
 				 _TIF_NEED_RESCHED| _TIF_SYSCALL_TRACE|\
 				 _TIF_RESTORE_SIGMASK)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
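
TIF_NOTIFY_RESUME replaces TIF_PERFMON_WORK inside TIF_ALLWORK_MASK because the return-to-user fast path tests the whole mask with a single AND and only takes the slow path into do_notify_resume_user() when some bit is set; a flag left out of the mask would never be serviced. A small model of that test (bit positions copied from the hunk above, the rest is illustrative):

#include <stdio.h>

#define TIF_SIGPENDING		1
#define TIF_NOTIFY_RESUME	6

#define _TIF_SIGPENDING		(1u << TIF_SIGPENDING)
#define _TIF_NOTIFY_RESUME	(1u << TIF_NOTIFY_RESUME)

/* trimmed-down mask: one AND decides fast path vs. slow path */
#define TIF_ALLWORK_MASK	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)

int main(void)
{
	unsigned flags = _TIF_NOTIFY_RESUME;	/* set by tsk_set_notify_resume() */

	if (flags & TIF_ALLWORK_MASK)
		printf("slow path taken: do_notify_resume_user() runs\n");
	else
		printf("fast path: straight back to user mode\n");
	return 0;
}
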
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 315f8de950a2..e60314716122 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -299,11 +299,14 @@
 #define __NR_signalfd			1307
 #define __NR_timerfd			1308
 #define __NR_eventfd			1309
+#define __NR_timerfd_create		1310
+#define __NR_timerfd_settime		1311
+#define __NR_timerfd_gettime		1312
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls			286 /* length of syscall table */
+#define NR_syscalls			289 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
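
With the table slots and NR_syscalls in place, userspace can reach the new entries by raw number even before libc grows wrappers. A sketch using the ia64 number from the hunk above (on other architectures the number differs):

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* ia64 value of __NR_timerfd_create from this patch */
#define IA64_NR_timerfd_create 1310

int main(void)
{
	/* timerfd_create(clockid, flags) via the raw syscall number */
	long fd = syscall(IA64_NR_timerfd_create, CLOCK_MONOTONIC, 0);
	if (fd < 0) {
		perror("timerfd_create");
		return 1;
	}
	printf("timerfd fd = %ld\n", fd);
	return 0;
}
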