Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/aio.h         |   1
-rw-r--r--  include/linux/crash_dump.h  |   6
-rw-r--r--  include/linux/fs.h          |   2
-rw-r--r--  include/linux/i2o.h         |   2
-rw-r--r--  include/linux/init.h        |   7
-rw-r--r--  include/linux/kexec.h       |  17
-rw-r--r--  include/linux/memstick.h    |   6
-rw-r--r--  include/linux/mm.h          |  34
-rw-r--r--  include/linux/pagemap.h     | 111
-rw-r--r--  include/linux/parport.h     |   3
-rw-r--r--  include/linux/percpu.h      |  29
-rw-r--r--  include/linux/ptrace.h      |  72
-rw-r--r--  include/linux/radix-tree.h  |  12
-rw-r--r--  include/linux/relay.h       |   5
-rw-r--r--  include/linux/rtc.h         |   2
-rw-r--r--  include/linux/sched.h       |  28
-rw-r--r--  include/linux/slab.h        |   2
-rw-r--r--  include/linux/slub_def.h    |   2
-rw-r--r--  include/linux/smp.h         |   5
-rw-r--r--  include/linux/ssb/ssb.h     |   4
-rw-r--r--  include/linux/suspend.h     |   2
-rw-r--r--  include/linux/swap.h        |   3
-rw-r--r--  include/linux/tracehook.h   | 575
23 files changed, 855 insertions, 75 deletions
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b51ddd28444e..09b276c35227 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -7,7 +7,6 @@
 #include <linux/uio.h>
 
 #include <asm/atomic.h>
-#include <linux/uio.h>
 
 #define AIO_MAXSEGS		4
 #define AIO_KIOGRP_NR_ATOMIC	8
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 6cd39a927e1f..025e4f575103 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,7 +8,13 @@
 #include <linux/proc_fs.h>
 
 #define ELFCORE_ADDR_MAX	(-1ULL)
+
+#ifdef CONFIG_PROC_VMCORE
 extern unsigned long long elfcorehdr_addr;
+#else
+static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#endif
+
 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
 						unsigned long, int);
 extern const struct file_operations proc_vmcore_operations;
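
With elfcorehdr_addr now defined unconditionally (a real variable under
CONFIG_PROC_VMCORE, a compile-time constant otherwise), callers can test for a
capture kernel without an #ifdef and let the compiler discard the dead branch.
A minimal sketch of that idiom; the helper name is illustrative, not part of
this patch:

#include <linux/crash_dump.h>

/* True when running in a kdump capture kernel, i.e. when the boot loader
 * passed in a valid ELF core header address.  With CONFIG_PROC_VMCORE=n
 * this folds to constant false at compile time. */
static inline int in_crash_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
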
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 49d8eb7a71be..53d2edb709b3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -499,7 +499,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
+	spinlock_t		tree_lock;	/* and lock protecting it */
 	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 7d51cbca49ab..75ae6d8aba4f 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
 	}
 
 	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-	if (!dma_mapping_error(dma_addr)) {
+	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
 		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
 			*mptr++ = cpu_to_le32(0x7C020002);
diff --git a/include/linux/init.h b/include/linux/init.h
index 42ae95411a93..11b84e106053 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -170,6 +170,13 @@ extern void (*late_time_init)(void);
 	__attribute__((__section__(".initcall" level ".init"))) = fn
 
 /*
+ * Early initcalls run before initializing SMP.
+ *
+ * Only for built-in code, not modules.
+ */
+#define early_initcall(fn)		__define_initcall("early",fn,early)
+
+/*
  * A "pure" initcall has no dependencies on anything else, and purely
  * initializes variables that couldn't be statically initialized.
  *
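
Registering an early initcall looks like any other initcall level; a minimal
sketch (the function name is illustrative):

#include <linux/init.h>

/* Runs during boot before SMP is initialized, so it must not rely on
 * secondary CPUs being online.  Built-in code only; never in a module. */
static int __init probe_boot_resources(void)
{
	return 0;
}
early_initcall(probe_boot_resources);
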
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 3265968cd2cd..82f88a8a827b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -83,6 +83,7 @@ struct kimage {
 
 	unsigned long start;
 	struct page *control_code_page;
+	struct page *swap_page;
 
 	unsigned long nr_segments;
 	struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@@ -98,18 +99,20 @@ struct kimage {
 	unsigned int type : 1;
 #define KEXEC_TYPE_DEFAULT 0
 #define KEXEC_TYPE_CRASH   1
+	unsigned int preserve_context : 1;
 };
 
 
 
 /* kexec interface functions */
-extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+extern void machine_kexec(struct kimage *image);
 extern int machine_kexec_prepare(struct kimage *image);
 extern void machine_kexec_cleanup(struct kimage *image);
 extern asmlinkage long sys_kexec_load(unsigned long entry,
 					unsigned long nr_segments,
 					struct kexec_segment __user *segments,
 					unsigned long flags);
+extern int kernel_kexec(void);
 #ifdef CONFIG_COMPAT
 extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
 					unsigned long nr_segments,
@@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image;
 #define kexec_flush_icache_page(page)
 #endif
 
 #define KEXEC_ON_CRASH		0x00000001
-#define KEXEC_ARCH_MASK		0xffff0000
+#define KEXEC_PRESERVE_CONTEXT	0x00000002
+#define KEXEC_ARCH_MASK		0xffff0000
 
 /* These values match the ELF architecture values.
  * Unless there is a good reason that should continue to be the case.
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image;
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
 
-#define KEXEC_FLAGS    (KEXEC_ON_CRASH)  /* List of defined/legal kexec flags */
+/* List of defined/legal kexec flags */
+#ifndef CONFIG_KEXEC_JUMP
+#define KEXEC_FLAGS    KEXEC_ON_CRASH
+#else
+#define KEXEC_FLAGS    (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#endif
 
 #define VMCOREINFO_BYTES           (4096)
 #define VMCOREINFO_NOTE_NAME       "VMCOREINFO"
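
KEXEC_FLAGS is the whitelist that sys_kexec_load() validates user-supplied
flags against, so KEXEC_PRESERVE_CONTEXT is only accepted on CONFIG_KEXEC_JUMP
kernels. An illustrative check in that spirit (a sketch, not the exact syscall
code):

/* Reject any flag bit that is neither a defined kexec flag nor part of
 * the architecture field. */
static int kexec_flags_valid(unsigned long flags)
{
	return (flags & ~(KEXEC_FLAGS | KEXEC_ARCH_MASK)) == 0;
}
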
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index 37a5cdb03918..a9f998a3f48b 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -263,6 +263,10 @@ struct memstick_dev {
 	/* Get next request from the media driver. */
 	int                 (*next_request)(struct memstick_dev *card,
 					    struct memstick_request **mrq);
+	/* Tell the media driver to stop doing things */
+	void                (*stop)(struct memstick_dev *card);
+	/* Allow the media driver to continue */
+	void                (*start)(struct memstick_dev *card);
 
 	struct device       dev;
 };
@@ -284,7 +288,7 @@ struct memstick_host {
 	/* Notify the host that some requests are pending. */
 	void                (*request)(struct memstick_host *host);
 	/* Set host IO parameters (power, clock, etc). */
-	void                (*set_param)(struct memstick_host *host,
+	int                 (*set_param)(struct memstick_host *host,
 					 enum memstick_param param,
 					 int value);
 	unsigned long       private[0] ____cacheline_aligned;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d87a5a5fe87d..6e695eaab4ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
-void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 		struct vm_area_struct **pprev, unsigned long start,
 		unsigned long end, unsigned long newflags);
 
+#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages);
+
+#else
+/*
+ * Should probably be moved to asm-generic, and architectures can include it if
+ * they don't implement their own get_user_pages_fast.
+ */
+#define get_user_pages_fast(start, nr_pages, write, pages)	\
+({								\
+	struct mm_struct *mm = current->mm;			\
+	int ret;						\
+								\
+	down_read(&mm->mmap_sem);				\
+	ret = get_user_pages(current, mm, start, nr_pages,	\
+					write, 0, pages, NULL);	\
+	up_read(&mm->mmap_sem);					\
+								\
+	ret;							\
+})
+#endif
+
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
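
Whichever branch of the #ifdef is compiled in, the calling convention is the
same. A minimal caller sketch (names and sizes illustrative):

#include <linux/mm.h>

/* Pin nr user pages starting at uaddr for writing.  Every page returned
 * in pages[] carries a reference the caller must drop with put_page(). */
static int pin_user_buffer(unsigned long uaddr, int nr, struct page **pages)
{
	int got = get_user_pages_fast(uaddr, nr, 1, pages);

	if (got < nr) {
		while (got > 0)			/* partial pin: roll back */
			put_page(pages[--got]);
		return -EFAULT;
	}
	return 0;
}
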
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee1ec2c7723c..a81d81890422 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -12,6 +12,7 @@
 #include <asm/uaccess.h>
 #include <linux/gfp.h>
 #include <linux/bitops.h>
+#include <linux/hardirq.h> /* for in_interrupt() */
 
 /*
  * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+/*
+ * speculatively take a reference to a page.
+ * If the page is free (_count == 0), then _count is untouched, and 0
+ * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ *
+ * This function must be called inside the same rcu_read_lock() section as has
+ * been used to lookup the page in the pagecache radix-tree (or page table):
+ * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ *
+ * Unless an RCU grace period has passed, the count of all pages coming out
+ * of the allocator must be considered unstable. page_count may return higher
+ * than expected, and put_page must be able to do the right thing when the
+ * page has been finished with, no matter what it is subsequently allocated
+ * for (because put_page is what is used here to drop an invalid speculative
+ * reference).
+ *
+ * This is the interesting part of the lockless pagecache (and lockless
+ * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
+ * has the following pattern:
+ * 1. find page in radix tree
+ * 2. conditionally increment refcount
+ * 3. check the page is still in pagecache (if no, goto 1)
+ *
+ * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * following (with tree_lock held for write):
+ * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
+ * B. remove page from pagecache
+ * C. free the page
+ *
+ * There are 2 critical interleavings that matter:
+ * - 2 runs before A: in this case, A sees elevated refcount and bails out
+ * - A runs before 2: in this case, 2 sees zero refcount and retries;
+ *   subsequently, B will complete and 1 will find no page, causing the
+ *   lookup to return NULL.
+ *
+ * It is possible that between 1 and 2, the page is removed then the exact same
+ * page is inserted into the same position in pagecache. That's OK: the
+ * old find_get_page using tree_lock could equally have run before or after
+ * such a re-insertion, depending on order that locks are granted.
+ *
+ * Lookups racing against pagecache insertion isn't a big problem: either 1
+ * will find the page or it will not. Likewise, the old find_get_page could run
+ * either before the insertion or afterwards, depending on timing.
+ */
+static inline int page_cache_get_speculative(struct page *page)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	/*
+	 * Preempt must be disabled here - we rely on rcu_read_lock doing
+	 * this for us.
+	 *
+	 * Pagecache won't be truncated from interrupt context, so if we have
+	 * found a page in the radix tree here, we have pinned its refcount by
+	 * disabling preempt, and hence no need for the "speculative get" that
+	 * SMP requires.
+	 */
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_inc(&page->_count);
+
+#else
+	if (unlikely(!get_page_unless_zero(page))) {
+		/*
+		 * Either the page has been freed, or will be freed.
+		 * In either case, retry here and the caller should
+		 * do the right thing (see comments above).
+		 */
+		return 0;
+	}
+#endif
+	VM_BUG_ON(PageTail(page));
+
+	return 1;
+}
+
+static inline int page_freeze_refs(struct page *page, int count)
+{
+	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+}
+
+static inline void page_unfreeze_refs(struct page *page, int count)
+{
+	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON(count == 0);
+
+	atomic_set(&page->_count, count);
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
@@ -133,7 +226,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
@@ -141,6 +234,22 @@ extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
 /*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run SetPageLocked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	SetPageLocked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		ClearPageLocked(page);
+	return error;
+}
+
+/*
  * Return byte-offset into filesystem object for page.
  */
 static inline loff_t page_offset(struct page *page)
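
To make the 1-2-3 lookup pattern above concrete, here is a sketch of the
lookup side, loosely modeled on what find_get_page() becomes in the lockless
pagecache series (not a verbatim copy of it):

#include <linux/pagemap.h>
#include <linux/rcupdate.h>

static struct page *lockless_lookup(struct address_space *mapping,
				    pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;	/* page was being freed */
		/* step 3: has the page moved?  Drop our ref and retry. */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}
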
diff --git a/include/linux/parport.h b/include/linux/parport.h
index dcb9e01a69ca..6a0d7cdb5774 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device);
 
 #endif /* !CONFIG_PARPORT_NOT_PC */
 
+extern unsigned long parport_default_timeslice;
+extern int parport_default_spintime;
+
 #endif /* __KERNEL__ */
 #endif /* _PARPORT_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4cdd393e71e1..fac3337547eb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -74,11 +74,6 @@ struct percpu_data {
 	(__typeof__(ptr))__p->ptrs[(cpu)];	          \
 })
 
-extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
-extern void percpu_depopulate(void *__pdata, int cpu);
-extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-				  cpumask_t *mask);
-extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
 extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
 extern void percpu_free(void *__pdata);
 
@@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata);
 
 #define percpu_ptr(ptr, cpu)              ({ (void)(cpu); (ptr); })
 
-static inline void percpu_depopulate(void *__pdata, int cpu)
-{
-}
-
-static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
-{
-}
-
-static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
-				    int cpu)
-{
-	return percpu_ptr(__pdata, cpu);
-}
-
-static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-					 cpumask_t *mask)
-{
-	return 0;
-}
-
 static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
 {
 	return kzalloc(size, gfp);
@@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata)
 
 #endif /* CONFIG_SMP */
 
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-#define percpu_depopulate_mask(__pdata, mask) \
-	__percpu_depopulate_mask((__pdata), &(mask))
 #define percpu_alloc_mask(size, gfp, mask) \
 	__percpu_alloc_mask((size), (gfp), &(mask))
 
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index c6f5f9dd0cee..fd31756e1a00 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child)
 int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
 
+/**
+ * task_ptrace - return %PT_* flags that apply to a task
+ * @task:	pointer to &task_struct in question
+ *
+ * Returns the %PT_* flags that apply to @task.
+ */
+static inline int task_ptrace(struct task_struct *task)
+{
+	return task->ptrace;
+}
+
+/**
+ * ptrace_event - possibly stop for a ptrace event notification
+ * @mask:	%PT_* bit to check in @current->ptrace
+ * @event:	%PTRACE_EVENT_* value to report if @mask is set
+ * @message:	value for %PTRACE_GETEVENTMSG to return
+ *
+ * This checks the @mask bit to see if ptrace wants stops for this event.
+ * If so we stop, reporting @event and @message to the ptrace parent.
+ *
+ * Returns nonzero if we did a ptrace notification, zero if not.
+ *
+ * Called without locks.
+ */
+static inline int ptrace_event(int mask, int event, unsigned long message)
+{
+	if (mask && likely(!(current->ptrace & mask)))
+		return 0;
+	current->ptrace_message = message;
+	ptrace_notify((event << 8) | SIGTRAP);
+	return 1;
+}
+
+/**
+ * ptrace_init_task - initialize ptrace state for a new child
+ * @child:	new child task
+ * @ptrace:	true if child should be ptrace'd by parent's tracer
+ *
+ * This is called immediately after adding @child to its parent's children
+ * list.  @ptrace is false in the normal case, and true to ptrace @child.
+ *
+ * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
+ */
+static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
+{
+	INIT_LIST_HEAD(&child->ptrace_entry);
+	INIT_LIST_HEAD(&child->ptraced);
+	child->parent = child->real_parent;
+	child->ptrace = 0;
+	if (unlikely(ptrace)) {
+		child->ptrace = current->ptrace;
+		__ptrace_link(child, current->parent);
+	}
+}
+
+/**
+ * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
+ * @task:	task in %EXIT_DEAD state
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+static inline void ptrace_release_task(struct task_struct *task)
+{
+	BUG_ON(!list_empty(&task->ptraced));
+	ptrace_unlink(task);
+	BUG_ON(!list_empty(&task->ptrace_entry));
+}
+
 #ifndef force_successful_syscall_return
 /*
  * System call handlers that, upon successful completion, need to return a
124#ifndef force_successful_syscall_return 192#ifndef force_successful_syscall_return
125/* 193/*
126 * System call handlers that, upon successful completion, need to return a 194 * System call handlers that, upon successful completion, need to return a
@@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task)
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
+extern int task_current_syscall(struct task_struct *target, long *callno,
+				unsigned long args[6], unsigned int maxargs,
+				unsigned long *sp, unsigned long *pc);
+
 #endif
 
 #endif
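
task_current_syscall() samples the register state of a blocked task;
/proc/PID/syscall is the in-tree user. A usage sketch (the output format is
illustrative):

#include <linux/ptrace.h>
#include <linux/kernel.h>

/* Report which syscall, if any, @target is blocked in.  The snapshot is
 * only meaningful if the task stays blocked; a nonzero return means it
 * would not settle, so the caller should retry or give up. */
static void show_blocked_syscall(struct task_struct *target)
{
	long nr;
	unsigned long args[6], sp, pc;

	if (task_current_syscall(target, &nr, args, 6, &sp, &pc))
		return;

	if (nr < 0)
		printk(KERN_DEBUG "%s: not in a syscall\n", target->comm);
	else
		printk(KERN_DEBUG "%s: blocked in syscall %ld\n",
		       target->comm, nr);
}
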
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index b8ce2b444bb5..a916c6660dfa 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -99,12 +99,15 @@ do { \
  *
  * The notable exceptions to this rule are the following functions:
  * radix_tree_lookup
+ * radix_tree_lookup_slot
  * radix_tree_tag_get
  * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_slot
  * radix_tree_gang_lookup_tag
+ * radix_tree_gang_lookup_tag_slot
  * radix_tree_tagged
  *
- * The first 4 functions are able to be called locklessly, using RCU. The
+ * The first 7 functions are able to be called locklessly, using RCU. The
  * caller must ensure calls to these functions are made within rcu_read_lock()
  * regions. Other readers (lock-free or otherwise) and modifications may be
  * running concurrently.
@@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+			unsigned long first_index, unsigned int max_items);
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
@@ -173,6 +179,10 @@ unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 		unsigned long first_index, unsigned int max_items,
 		unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+		unsigned long first_index, unsigned int max_items,
+		unsigned int tag);
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
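
The new _slot variants return pointers to the tree slots rather than the
stored items, so an RCU reader can re-check a slot's contents after taking a
speculative reference. A small sketch of batched lockless iteration (batch
size illustrative):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned int count_present(struct radix_tree_root *root,
				  unsigned long first)
{
	void **slots[16];
	unsigned int i, n, present = 0;

	rcu_read_lock();
	n = radix_tree_gang_lookup_slot(root, slots, first, 16);
	for (i = 0; i < n; i++)
		if (rcu_dereference(*slots[i]))
			present++;
	rcu_read_unlock();
	return present;
}
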
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 6cd8c4425fc7..953fc055e875 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -48,6 +48,7 @@ struct rchan_buf
 	size_t *padding;		/* padding counts per sub-buffer */
 	size_t prev_padding;		/* temporary variable */
 	size_t bytes_consumed;		/* bytes consumed in cur read subbuf */
+	size_t early_bytes;		/* bytes consumed before VFS inited */
 	unsigned int cpu;		/* this buf's cpu */
 } ____cacheline_aligned;
 
@@ -68,6 +69,7 @@ struct rchan
 	int is_global;			/* One global buffer ? */
 	struct list_head list;		/* for channel list */
 	struct dentry *parent;		/* parent dentry passed to open */
+	int has_base_filename;		/* has a filename associated? */
 	char base_filename[NAME_MAX];	/* saved base filename */
 };
 
@@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename,
 			 size_t n_subbufs,
 			 struct rchan_callbacks *cb,
 			 void *private_data);
+extern int relay_late_setup_files(struct rchan *chan,
+				  const char *base_filename,
+				  struct dentry *parent);
 extern void relay_close(struct rchan *chan);
 extern void relay_flush(struct rchan *chan);
 extern void relay_subbufs_consumed(struct rchan *chan,
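
relay_late_setup_files() serves clients that need a channel before the VFS
(or debugfs) is usable: open the channel early with a NULL filename, buffer
data into it, and attach the files later. A sketch, with all names
illustrative:

#include <linux/relay.h>
#include <linux/debugfs.h>

static struct dentry *buf_file_create(const char *filename,
				      struct dentry *parent, int mode,
				      struct rchan_buf *buf, int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int buf_file_remove(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks trace_cbs = {
	.create_buf_file	= buf_file_create,
	.remove_buf_file	= buf_file_remove,
};

static struct rchan *early_chan;

/* Early boot: buffer-only channel, hence the NULL base filename. */
static int __init trace_buf_init(void)
{
	early_chan = relay_open(NULL, NULL, 64 * 1024, 8, &trace_cbs, NULL);
	return early_chan ? 0 : -ENOMEM;
}

/* Later, once debugfs is up: give the channel its files. */
static int trace_buf_attach(struct dentry *dir)
{
	return relay_late_setup_files(early_chan, "trace", dir);
}
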
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index b01fe004cb5e..91f597ad6acc 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -225,8 +225,6 @@ typedef struct rtc_task {
 int rtc_register(rtc_task_t *task);
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-void rtc_get_rtc_time(struct rtc_time *rtc_tm);
-irqreturn_t rtc_interrupt(int irq, void *dev_id);
 
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42036ffe6b00..f59318a0099b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -292,7 +292,6 @@ extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int softlockup_panic;
@@ -1797,7 +1796,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
@@ -1883,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
@@ -2139,16 +2142,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
-#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	mm->mmap_base = TASK_UNMAPPED_BASE;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
-}
-#endif
 
 #ifdef CONFIG_TRACING
 extern void
@@ -2231,14 +2225,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
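
The reworked wait_task_inactive() fails (returns zero) unless the task goes
off-CPU while still in match_state, which lets callers close the race between
checking a task's state and acting on it. A sketch of the calling pattern,
simplified from the ptrace attach path:

/* Succeeds only once @child is truly off its CPU and still TASK_TRACED;
 * zero means it woke or changed state, so the caller must re-check. */
static int traced_child_settled(struct task_struct *child)
{
	return wait_task_inactive(child, TASK_TRACED) != 0;
}
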
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 41103910f8a2..9ff8e8499403 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -58,7 +58,7 @@ int slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
-			void (*)(struct kmem_cache *, void *));
+			void (*)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
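
Cache constructors now receive only the object pointer; callers simply drop
the old struct kmem_cache argument. An updated-style example (all names
illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_obj {
	spinlock_t lock;
	struct list_head link;
};

static void my_obj_ctor(void *ptr)	/* was (struct kmem_cache *, void *) */
{
	struct my_obj *o = ptr;

	spin_lock_init(&o->lock);
	INIT_LIST_HEAD(&o->link);
}

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_HWCACHE_ALIGN, my_obj_ctor);
	return my_cache ? 0 : -ENOMEM;
}
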
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d117ea2825a9..5bad61a93f65 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
 	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(struct kmem_cache *, void *);
+	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 48262f86c969..66484d4a8459 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data);
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void init_call_single_data(void);
 void ipi_call_lock(void);
 void ipi_call_unlock(void);
 void ipi_call_lock_irq(void);
 void ipi_call_unlock_irq(void);
-#else
-static inline void init_call_single_data(void)
-{
-}
 #endif
 
 /*
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 4bf8cade9dbc..e530026eedf7 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
 {
 	switch (dev->bus->bustype) {
 	case SSB_BUSTYPE_PCI:
-		return pci_dma_mapping_error(addr);
+		return pci_dma_mapping_error(dev->bus->host_pci, addr);
 	case SSB_BUSTYPE_SSB:
-		return dma_mapping_error(addr);
+		return dma_mapping_error(dev->dev, addr);
 	default:
 		__ssb_dma_not_implemented(dev);
 	}
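
This hunk and the i2o.h one above reflect the same tree-wide API change:
dma_mapping_error() (and pci_dma_mapping_error()) now take the device that
performed the mapping, so the error check can be bus- and device-specific.
The updated pattern as a generic sketch:

#include <linux/dma-mapping.h>

static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;		/* nothing was mapped; no unmap */
	return 0;
}
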
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index e8e69159af71..c63435095970 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
 }
 #endif
 
+extern struct mutex pm_mutex;
+
 #endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0b3377650c85..de40f169a4e4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 
 /* linux/mm/swapfile.c */
 extern long total_swap_pages;
-extern unsigned int nr_swapfiles;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
@@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
 struct backing_dev_info;
 
-extern spinlock_t swap_lock;
-
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
 extern void grab_swap_token(void);
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
new file mode 100644
index 000000000000..589f429619c9
--- /dev/null
+++ b/include/linux/tracehook.h
@@ -0,0 +1,575 @@
+/*
+ * Tracing hooks
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file defines hook entry points called by core code where
+ * user tracing/debugging support might need to do something.  These
+ * entry points are called tracehook_*().  Each hook declared below
+ * has a detailed kerneldoc comment giving the context (locking et
+ * al) from which it is called, and the meaning of its return value.
+ *
+ * Each function here typically has only one call site, so it is ok
+ * to have some nontrivial tracehook_*() inlines.  In all cases, the
+ * fast path when no tracing is enabled should be very short.
+ *
+ * The purpose of this file and the tracehook_* layer is to consolidate
+ * the interface that the kernel core and arch code uses to enable any
+ * user debugging or tracing facility (such as ptrace).  The interfaces
+ * here are carefully documented so that maintainers of core and arch
+ * code do not need to think about the implementation details of the
+ * tracing facilities.  Likewise, maintainers of the tracing code do not
+ * need to understand all the calling core or arch code in detail, just
+ * documented circumstances of each call, such as locking conditions.
+ *
+ * If the calling core code changes so that locking is different, then
+ * it is ok to change the interface documented here.  The maintainer of
+ * core code changing should notify the maintainers of the tracing code
+ * that they need to work out the change.
+ *
+ * Some tracehook_*() inlines take arguments that the current tracing
+ * implementations might not necessarily use.  These function signatures
+ * are chosen to pass in all the information that is on hand in the
+ * caller and might conceivably be relevant to a tracer, so that the
+ * core code won't have to be updated when tracing adds more features.
+ * If a call site changes so that some of those parameters are no longer
+ * already on hand without extra work, then the tracehook_* interface
+ * can change so there is no make-work burden on the core code.  The
+ * maintainer of core code changing should notify the maintainers of the
+ * tracing code that they need to work out the change.
+ */
+
+#ifndef _LINUX_TRACEHOOK_H
+#define _LINUX_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/security.h>
+struct linux_binprm;
+
+/**
+ * tracehook_expect_breakpoints - guess if task memory might be touched
+ * @task:	current task, making a new mapping
+ *
+ * Return nonzero if @task is expected to want breakpoint insertion in
+ * its memory at some point.  A zero return is no guarantee it won't
+ * be done, but this is a hint that it's known to be likely.
+ *
+ * May be called with @task->mm->mmap_sem held for writing.
+ */
+static inline int tracehook_expect_breakpoints(struct task_struct *task)
+{
+	return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline void ptrace_report_syscall(struct pt_regs *regs)
+{
+	int ptrace = task_ptrace(current);
+
+	if (!(ptrace & PT_PTRACED))
+		return;
+
+	ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
+
+/**
+ * tracehook_report_syscall_entry - task is about to attempt a system call
+ * @regs:	user register state of current task
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just entered the kernel for a system call.
+ * Full user register state is available here.  Changing the values
+ * in @regs can affect the system call number and arguments to be tried.
+ * It is safe to block here, preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call.  That must prevent normal entry so no system call is
+ * made.  If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return.  It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int tracehook_report_syscall_entry(
+	struct pt_regs *regs)
+{
+	ptrace_report_syscall(regs);
+	return 0;
+}
+
+/**
+ * tracehook_report_syscall_exit - task has just finished a system call
+ * @regs:	user register state of current task
+ * @step:	nonzero if simulating single-step or block-step
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just finished an attempted system call.  Full
+ * user register state is available here.  It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %TIF_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
+{
+	ptrace_report_syscall(regs);
+}
+
+/**
+ * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
+ * @task:	current task doing exec
+ *
+ * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
+ *
+ * Called with task_lock() held on @task.
+ */
+static inline int tracehook_unsafe_exec(struct task_struct *task)
+{
+	int unsafe = 0;
+	int ptrace = task_ptrace(task);
+	if (ptrace & PT_PTRACED) {
+		if (ptrace & PT_PTRACE_CAP)
+			unsafe |= LSM_UNSAFE_PTRACE_CAP;
+		else
+			unsafe |= LSM_UNSAFE_PTRACE;
+	}
+	return unsafe;
+}
+
+/**
+ * tracehook_tracer_task - return the task that is tracing the given task
+ * @tsk:	task to consider
+ *
+ * Returns NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock().  The pointer returned might be
+ * kept live only by RCU.  During exec, this may be called with task_lock()
+ * held on @task, still held from when tracehook_unsafe_exec() was called.
+ */
+static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
+{
+	if (task_ptrace(tsk) & PT_PTRACED)
+		return rcu_dereference(tsk->parent);
+	return NULL;
+}
+
+/**
+ * tracehook_report_exec - a successful exec was completed
+ * @fmt:	&struct linux_binfmt that performed the exec
+ * @bprm:	&struct linux_binprm containing exec details
+ * @regs:	user-mode register state
+ *
+ * An exec just completed, we are shortly going to return to user mode.
+ * The freshly initialized register state can be seen and changed in @regs.
+ * The name, file and other pointers in @bprm are still on hand to be
+ * inspected, but will be freed as soon as this returns.
+ *
+ * Called with no locks, but with some kernel resources held live
+ * and a reference on @fmt->module.
+ */
+static inline void tracehook_report_exec(struct linux_binfmt *fmt,
+					 struct linux_binprm *bprm,
+					 struct pt_regs *regs)
+{
+	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
+	    unlikely(task_ptrace(current) & PT_PTRACED))
+		send_sig(SIGTRAP, current, 0);
+}
+
+/**
+ * tracehook_report_exit - task has begun to exit
+ * @exit_code:	pointer to value destined for @current->exit_code
+ *
+ * @exit_code points to the value passed to do_exit(), which tracing
+ * might change here.  This is almost the first thing in do_exit(),
+ * before freeing any resources or setting the %PF_EXITING flag.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_exit(long *exit_code)
+{
+	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
+}
+
+/**
+ * tracehook_prepare_clone - prepare for new child to be cloned
+ * @clone_flags:	%CLONE_* flags from clone/fork/vfork system call
+ *
+ * This is called before a new user task is to be cloned.
+ * Its return value will be passed to tracehook_finish_clone().
+ *
+ * Called with no locks held.
+ */
+static inline int tracehook_prepare_clone(unsigned clone_flags)
+{
+	if (clone_flags & CLONE_UNTRACED)
+		return 0;
+
+	if (clone_flags & CLONE_VFORK) {
+		if (current->ptrace & PT_TRACE_VFORK)
+			return PTRACE_EVENT_VFORK;
+	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
+		if (current->ptrace & PT_TRACE_CLONE)
+			return PTRACE_EVENT_CLONE;
+	} else if (current->ptrace & PT_TRACE_FORK)
+		return PTRACE_EVENT_FORK;
+
+	return 0;
+}
+
+/**
+ * tracehook_finish_clone - new child created and being attached
+ * @child:		new child task
+ * @clone_flags:	%CLONE_* flags from clone/fork/vfork system call
+ * @trace:		return value from tracehook_prepare_clone()
+ *
+ * This is called immediately after adding @child to its parent's children
+ * list.  The @trace value is that returned by tracehook_prepare_clone().
+ *
+ * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
+ */
+static inline void tracehook_finish_clone(struct task_struct *child,
+					  unsigned long clone_flags, int trace)
+{
+	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
+}
+
+/**
+ * tracehook_report_clone - in parent, new child is about to start running
+ * @trace:		return value from tracehook_prepare_clone()
+ * @regs:		parent's user register state
+ * @clone_flags:	flags from parent's system call
+ * @pid:		new child's PID in the parent's namespace
+ * @child:		new child task
+ *
+ * Called after a child is set up, but before it has been started running.
+ * The @trace value is that returned by tracehook_prepare_clone().
+ * This is not a good place to block, because the child has not started yet.
+ * Suspend the child here if desired, and block in
+ * tracehook_report_clone_complete().  This must prevent the child from
+ * self-reaping if tracehook_report_clone_complete() uses the @child
+ * pointer; otherwise it might have died and been released by the time
+ * tracehook_report_clone_complete() is called.
+ *
+ * Called with no locks held, but the child cannot run until this returns.
+ */
+static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+					  unsigned long clone_flags,
+					  pid_t pid, struct task_struct *child)
+{
+	if (unlikely(trace)) {
+		/*
+		 * The child starts up with an immediate SIGSTOP.
+		 */
+		sigaddset(&child->pending.signal, SIGSTOP);
+		set_tsk_thread_flag(child, TIF_SIGPENDING);
+	}
+}
+
+/**
+ * tracehook_report_clone_complete - new child is running
+ * @trace:		return value from tracehook_prepare_clone()
+ * @regs:		parent's user register state
+ * @clone_flags:	flags from parent's system call
+ * @pid:		new child's PID in the parent's namespace
+ * @child:		child task, already running
+ *
+ * This is called just after the child has started running.  This is
+ * just before the clone/fork syscall returns, or blocks for vfork
+ * child completion if @clone_flags has the %CLONE_VFORK bit set.
+ * The @child pointer may be invalid if a self-reaping child died and
+ * tracehook_report_clone() took no action to prevent it from self-reaping.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_clone_complete(int trace,
+						   struct pt_regs *regs,
+						   unsigned long clone_flags,
+						   pid_t pid,
+						   struct task_struct *child)
+{
+	if (unlikely(trace))
+		ptrace_event(0, trace, pid);
+}
+
+/**
+ * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
+ * @child:	child task, already running
+ * @pid:	new child's PID in the parent's namespace
+ *
+ * Called after a %CLONE_VFORK parent has waited for the child to complete.
+ * The clone/vfork system call will return immediately after this.
+ * The @child pointer may be invalid if a self-reaping child died and
+ * tracehook_report_clone() took no action to prevent it from self-reaping.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_report_vfork_done(struct task_struct *child,
+					       pid_t pid)
+{
+	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
+}
+
+/**
+ * tracehook_prepare_release_task - task is being reaped, clean up tracing
+ * @task:	task in %EXIT_DEAD state
+ *
+ * This is called in release_task() just before @task gets finally reaped
+ * and freed.  This would be the ideal place to remove and clean up any
+ * tracing-related state for @task.
+ *
+ * Called with no locks held.
+ */
+static inline void tracehook_prepare_release_task(struct task_struct *task)
+{
+}
+
+/**
+ * tracehook_finish_release_task - task is being reaped, clean up tracing
+ * @task:	task in %EXIT_DEAD state
+ *
+ * This is called in release_task() when @task is in the middle of
+ * being reaped.  After this, there must be no tracing entanglements.
+ *
+ * Called with write_lock_irq(&tasklist_lock) held.
+ */
+static inline void tracehook_finish_release_task(struct task_struct *task)
+{
+	ptrace_release_task(task);
+}
+
+/**
+ * tracehook_signal_handler - signal handler setup is complete
+ * @sig:		number of signal being delivered
+ * @info:		siginfo_t of signal being delivered
+ * @ka:			sigaction setting that chose the handler
+ * @regs:		user register state
+ * @stepping:		nonzero if debugger single-step or block-step in use
+ *
+ * Called by the arch code after a signal handler has been set up.
+ * Register and stack state reflects the user handler about to run.
+ * Signal mask changes have already been made.
+ *
+ * Called without locks, shortly before returning to user mode
+ * (or handling more signals).
+ */
+static inline void tracehook_signal_handler(int sig, siginfo_t *info,
+					    const struct k_sigaction *ka,
+					    struct pt_regs *regs, int stepping)
+{
+	if (stepping)
+		ptrace_notify(SIGTRAP);
+}
+
+/**
+ * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
+ * @task:		task receiving the signal
+ * @sig:		signal number being sent
+ * @handler:		%SIG_IGN or %SIG_DFL
+ *
+ * Return zero iff tracing doesn't care to examine this ignored signal,
+ * so it can short-circuit normal delivery and never even get queued.
+ * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
+ *
+ * Called with @task->sighand->siglock held.
+ */
+static inline int tracehook_consider_ignored_signal(struct task_struct *task,
+						    int sig,
+						    void __user *handler)
+{
+	return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/**
+ * tracehook_consider_fatal_signal - suppress special handling of fatal signal
+ * @task:		task receiving the signal
+ * @sig:		signal number being sent
+ * @handler:		%SIG_DFL or %SIG_IGN
+ *
+ * Return nonzero to prevent special handling of this termination signal.
+ * Normally @handler is %SIG_DFL.  It can be %SIG_IGN if @sig is ignored,
+ * in which case force_sig() is about to reset it to %SIG_DFL.
+ * When this returns zero, this signal might cause a quick termination
+ * that does not give the debugger a chance to intercept the signal.
+ *
+ * Called with or without @task->sighand->siglock held.
+ */
+static inline int tracehook_consider_fatal_signal(struct task_struct *task,
+						  int sig,
+						  void __user *handler)
+{
+	return (task_ptrace(task) & PT_PTRACED) != 0;
+}
+
+/**
+ * tracehook_force_sigpending - let tracing force signal_pending(current) on
+ *
+ * Called when recomputing our signal_pending() flag.  Return nonzero
+ * to force the signal_pending() flag on, so that tracehook_get_signal()
+ * will be called before the next return to user mode.
+ *
+ * Called with @current->sighand->siglock held.
+ */
+static inline int tracehook_force_sigpending(void)
+{
+	return 0;
+}
+
+/**
+ * tracehook_get_signal - deliver synthetic signal to traced task
+ * @task:		@current
+ * @regs:		task_pt_regs(@current)
+ * @info:		details of synthetic signal
+ * @return_ka:		sigaction for synthetic signal
+ *
+ * Return zero to check for a real pending signal normally.
+ * Return -1 after releasing the siglock to repeat the check.
+ * Return a signal number to induce an artificial signal delivery,
+ * setting *@info and *@return_ka to specify its details and behavior.
+ *
+ * The @return_ka->sa_handler value controls the disposition of the
+ * signal, no matter the signal number.  For %SIG_DFL, the return value
+ * is a representative signal to indicate the behavior (e.g. %SIGTERM
+ * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
+ * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
+ * reported will be @info->si_signo instead.
+ *
+ * Called with @task->sighand->siglock held, before dequeuing pending signals.
+ */
+static inline int tracehook_get_signal(struct task_struct *task,
+				       struct pt_regs *regs,
+				       siginfo_t *info,
+				       struct k_sigaction *return_ka)
+{
+	return 0;
+}
+
+/**
+ * tracehook_notify_jctl - report about job control stop/continue
+ * @notify:	nonzero if this is the last thread in the group to stop
+ * @why:	%CLD_STOPPED or %CLD_CONTINUED
+ *
+ * This is called when we might call do_notify_parent_cldstop().
+ * It's called when about to stop for job control; we are already in
+ * %TASK_STOPPED state, about to call schedule().  It's also called when
+ * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
+ *
+ * Return nonzero to generate a %SIGCHLD with @why, which is
+ * normal if @notify is nonzero.
+ *
+ * Called with no locks held.
+ */
+static inline int tracehook_notify_jctl(int notify, int why)
+{
+	return notify || (current->ptrace & PT_PTRACED);
+}
+
+/**
+ * tracehook_notify_death - task is dead, ready to notify parent
+ * @task:		@current task now exiting
+ * @death_cookie:	value to pass to tracehook_report_death()
+ * @group_dead:		nonzero if this was the last thread in the group to die
+ *
+ * Return the signal number to send our parent with do_notify_parent(), or
+ * zero to send no signal and leave a zombie, or -1 to self-reap right now.
+ *
+ * Called with write_lock_irq(&tasklist_lock) held.
+ */
+static inline int tracehook_notify_death(struct task_struct *task,
+					 void **death_cookie, int group_dead)
+{
+	if (task->exit_signal == -1)
+		return task->ptrace ? SIGCHLD : -1;
+
+	/*
+	 * If something other than our normal parent is ptracing us, then
+	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
+	 * only has special meaning to our real parent.
+	 */
+	if (thread_group_empty(task) && !ptrace_reparented(task))
+		return task->exit_signal;
+
+	return task->ptrace ? SIGCHLD : 0;
+}
+
+/**
+ * tracehook_report_death - task is dead and ready to be reaped
+ * @task:		@current task now exiting
+ * @signal:		signal number sent to parent, or 0 or -1
+ * @death_cookie:	value passed back from tracehook_notify_death()
+ * @group_dead:		nonzero if this was the last thread in the group to die
+ *
+ * Thread has just become a zombie or is about to self-reap.  If positive,
+ * @signal is the signal number just sent to the parent (usually %SIGCHLD).
+ * If @signal is -1, this thread will self-reap.  If @signal is 0, this is
+ * a delayed_group_leader() zombie.  The @death_cookie was passed back by
+ * tracehook_notify_death().
+ *
+ * If normal reaping is not inhibited, @task->exit_state might be changing
+ * in parallel.
+ *
+ * Called without locks.
+ */
+static inline void tracehook_report_death(struct task_struct *task,
+					  int signal, void *death_cookie,
+					  int group_dead)
+{
+}
+
+#ifdef TIF_NOTIFY_RESUME
+/**
+ * set_notify_resume - cause tracehook_notify_resume() to be called
+ * @task:	task that will call tracehook_notify_resume()
+ *
+ * Calling this arranges that @task will call tracehook_notify_resume()
+ * before returning to user mode.  If it's already running in user mode,
+ * it will enter the kernel and call tracehook_notify_resume() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+	if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+		kick_process(task);
+}
+
+/**
+ * tracehook_notify_resume - report when about to return to user mode
+ * @regs:	user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set.  Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted.  The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call.  If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void tracehook_notify_resume(struct pt_regs *regs)
+{
+}
+#endif	/* TIF_NOTIFY_RESUME */
+
+#endif	/* <linux/tracehook.h> */
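
For orientation, this is roughly how arch code is expected to drive the two
syscall hooks above; a sketch in the spirit of the kerneldoc, not any
particular architecture's entry code:

#include <linux/tracehook.h>

/* From arch entry code when TIF_SYSCALL_TRACE is set, before the syscall
 * is dispatched.  A nonzero return from the hook means "abort the
 * syscall": skip the call and return something harmless like -ENOSYS. */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;
	return 0;
}

/* On the way out, just before checking for pending signals.  @step is
 * nonzero when single-step or block-step simulates the post-syscall trap. */
asmlinkage void syscall_trace_leave(struct pt_regs *regs, int step)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) || step)
		tracehook_report_syscall_exit(regs, step);
}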