about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-07 23:49:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-07 23:49:51 -0400
commit5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (patch)
tree65288caabc91fc04242acace38789a6dd5b86ed4 /include
parent9affd6becbfb2c3f0d04e554bb87234761b37aba (diff)
parenta27bb332c04cec8c4afd7912df0dc7890db27560 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge more incoming from Andrew Morton: - Various fixes which were stalled or which I picked up recently - A large rotorooting of the AIO code. Allegedly to improve performance but I don't really have good performance numbers (I might have lost the email) and I can't raise Kent today. I held this out of 3.9 and we could give it another cycle if it's all too late/scary. I ended up taking only the first two thirds of the AIO rotorooting. I left the percpu parts and the batch completion for later. - Linus * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (33 commits) aio: don't include aio.h in sched.h aio: kill ki_retry aio: kill ki_key aio: give shared kioctx fields their own cachelines aio: kill struct aio_ring_info aio: kill batch allocation aio: change reqs_active to include unreaped completions aio: use cancellation list lazily aio: use flush_dcache_page() aio: make aio_read_evt() more efficient, convert to hrtimers wait: add wait_event_hrtimeout() aio: refcounting cleanup aio: make aio_put_req() lockless aio: do fget() after aio_get_req() aio: dprintk() -> pr_debug() aio: move private stuff out of aio.h aio: add kiocb_cancel() aio: kill return value of aio_complete() char: add aio_{read,write} to /dev/{null,zero} aio: remove retry-based AIO ...
Diffstat (limited to 'include')
-rw-r--r--include/linux/aio.h178
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/hugetlb.h19
-rw-r--r--include/linux/mm.h20
-rw-r--r--include/linux/pid_namespace.h1
-rw-r--r--include/linux/random.h7
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/wait.h86
-rw-r--r--include/linux/writeback.h1
10 files changed, 141 insertions, 175 deletions
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 31ff6dba4872..1bdf965339f9 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -9,91 +9,32 @@
9 9
10#include <linux/atomic.h> 10#include <linux/atomic.h>
11 11
12#define AIO_MAXSEGS 4
13#define AIO_KIOGRP_NR_ATOMIC 8
14
15struct kioctx; 12struct kioctx;
13struct kiocb;
16 14
17/* Notes on cancelling a kiocb: 15#define KIOCB_KEY 0
18 * If a kiocb is cancelled, aio_complete may return 0 to indicate
19 * that cancel has not yet disposed of the kiocb. All cancel
20 * operations *must* call aio_put_req to dispose of the kiocb
21 * to guard against races with the completion code.
22 */
23#define KIOCB_C_CANCELLED 0x01
24#define KIOCB_C_COMPLETE 0x02
25
26#define KIOCB_SYNC_KEY (~0U)
27 16
28/* ki_flags bits */
29/* 17/*
30 * This may be used for cancel/retry serialization in the future, but 18 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
31 * for now it's unused and we probably don't want modules to even 19 * cancelled or completed (this makes a certain amount of sense because
32 * think they can use it. 20 * successful cancellation - io_cancel() - does deliver the completion to
21 * userspace).
22 *
23 * And since most things don't implement kiocb cancellation and we'd really like
24 * kiocb completion to be lockless when possible, we use ki_cancel to
25 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
26 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
33 */ 27 */
34/* #define KIF_LOCKED 0 */ 28#define KIOCB_CANCELLED ((void *) (~0ULL))
35#define KIF_KICKED 1
36#define KIF_CANCELLED 2
37
38#define kiocbTryLock(iocb) test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
39#define kiocbTryKick(iocb) test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
40 29
41#define kiocbSetLocked(iocb) set_bit(KIF_LOCKED, &(iocb)->ki_flags) 30typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
42#define kiocbSetKicked(iocb) set_bit(KIF_KICKED, &(iocb)->ki_flags)
43#define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
44 31
45#define kiocbClearLocked(iocb) clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
46#define kiocbClearKicked(iocb) clear_bit(KIF_KICKED, &(iocb)->ki_flags)
47#define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
48
49#define kiocbIsLocked(iocb) test_bit(KIF_LOCKED, &(iocb)->ki_flags)
50#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags)
51#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
52
53/* is there a better place to document function pointer methods? */
54/**
55 * ki_retry - iocb forward progress callback
56 * @kiocb: The kiocb struct to advance by performing an operation.
57 *
58 * This callback is called when the AIO core wants a given AIO operation
59 * to make forward progress. The kiocb argument describes the operation
60 * that is to be performed. As the operation proceeds, perhaps partially,
61 * ki_retry is expected to update the kiocb with progress made. Typically
62 * ki_retry is set in the AIO core and it itself calls file_operations
63 * helpers.
64 *
65 * ki_retry's return value determines when the AIO operation is completed
66 * and an event is generated in the AIO event ring. Except the special
67 * return values described below, the value that is returned from ki_retry
68 * is transferred directly into the completion ring as the operation's
69 * resulting status. Once this has happened ki_retry *MUST NOT* reference
70 * the kiocb pointer again.
71 *
72 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
73 * will be called on the kiocb pointer in the future. The AIO core will
74 * not ask the method again -- ki_retry must ensure forward progress.
75 * aio_complete() must be called once and only once in the future, multiple
76 * calls may result in undefined behaviour.
77 *
78 * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb()
79 * will be called on the kiocb pointer in the future. This may happen
80 * through generic helpers that associate kiocb->ki_wait with a wait
81 * queue head that ki_retry uses via current->io_wait. It can also happen
82 * with custom tracking and manual calls to kick_iocb(), though that is
83 * discouraged. In either case, kick_iocb() must be called once and only
84 * once. ki_retry must ensure forward progress, the AIO core will wait
85 * indefinitely for kick_iocb() to be called.
86 */
87struct kiocb { 32struct kiocb {
88 struct list_head ki_run_list; 33 atomic_t ki_users;
89 unsigned long ki_flags;
90 int ki_users;
91 unsigned ki_key; /* id of this request */
92 34
93 struct file *ki_filp; 35 struct file *ki_filp;
94 struct kioctx *ki_ctx; /* may be NULL for sync ops */ 36 struct kioctx *ki_ctx; /* NULL for sync ops */
95 int (*ki_cancel)(struct kiocb *, struct io_event *); 37 kiocb_cancel_fn *ki_cancel;
96 ssize_t (*ki_retry)(struct kiocb *);
97 void (*ki_dtor)(struct kiocb *); 38 void (*ki_dtor)(struct kiocb *);
98 39
99 union { 40 union {
@@ -117,7 +58,6 @@ struct kiocb {
117 58
118 struct list_head ki_list; /* the aio core uses this 59 struct list_head ki_list; /* the aio core uses this
119 * for cancellation */ 60 * for cancellation */
120 struct list_head ki_batch; /* batch allocation */
121 61
122 /* 62 /*
123 * If the aio_resfd field of the userspace iocb is not zero, 63 * If the aio_resfd field of the userspace iocb is not zero,
@@ -128,106 +68,40 @@ struct kiocb {
128 68
129static inline bool is_sync_kiocb(struct kiocb *kiocb) 69static inline bool is_sync_kiocb(struct kiocb *kiocb)
130{ 70{
131 return kiocb->ki_key == KIOCB_SYNC_KEY; 71 return kiocb->ki_ctx == NULL;
132} 72}
133 73
134static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) 74static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
135{ 75{
136 *kiocb = (struct kiocb) { 76 *kiocb = (struct kiocb) {
137 .ki_users = 1, 77 .ki_users = ATOMIC_INIT(1),
138 .ki_key = KIOCB_SYNC_KEY, 78 .ki_ctx = NULL,
139 .ki_filp = filp, 79 .ki_filp = filp,
140 .ki_obj.tsk = current, 80 .ki_obj.tsk = current,
141 }; 81 };
142} 82}
143 83
144#define AIO_RING_MAGIC 0xa10a10a1
145#define AIO_RING_COMPAT_FEATURES 1
146#define AIO_RING_INCOMPAT_FEATURES 0
147struct aio_ring {
148 unsigned id; /* kernel internal index number */
149 unsigned nr; /* number of io_events */
150 unsigned head;
151 unsigned tail;
152
153 unsigned magic;
154 unsigned compat_features;
155 unsigned incompat_features;
156 unsigned header_length; /* size of aio_ring */
157
158
159 struct io_event io_events[0];
160}; /* 128 bytes + ring size */
161
162#define AIO_RING_PAGES 8
163struct aio_ring_info {
164 unsigned long mmap_base;
165 unsigned long mmap_size;
166
167 struct page **ring_pages;
168 spinlock_t ring_lock;
169 long nr_pages;
170
171 unsigned nr, tail;
172
173 struct page *internal_pages[AIO_RING_PAGES];
174};
175
176static inline unsigned aio_ring_avail(struct aio_ring_info *info,
177 struct aio_ring *ring)
178{
179 return (ring->head + info->nr - 1 - ring->tail) % info->nr;
180}
181
182struct kioctx {
183 atomic_t users;
184 int dead;
185 struct mm_struct *mm;
186
187 /* This needs improving */
188 unsigned long user_id;
189 struct hlist_node list;
190
191 wait_queue_head_t wait;
192
193 spinlock_t ctx_lock;
194
195 int reqs_active;
196 struct list_head active_reqs; /* used for cancellation */
197 struct list_head run_list; /* used for kicked reqs */
198
199 /* sys_io_setup currently limits this to an unsigned int */
200 unsigned max_reqs;
201
202 struct aio_ring_info ring_info;
203
204 struct delayed_work wq;
205
206 struct rcu_head rcu_head;
207};
208
209/* prototypes */ 84/* prototypes */
210extern unsigned aio_max_size;
211
212#ifdef CONFIG_AIO 85#ifdef CONFIG_AIO
213extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); 86extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
214extern int aio_put_req(struct kiocb *iocb); 87extern void aio_put_req(struct kiocb *iocb);
215extern void kick_iocb(struct kiocb *iocb); 88extern void aio_complete(struct kiocb *iocb, long res, long res2);
216extern int aio_complete(struct kiocb *iocb, long res, long res2);
217struct mm_struct; 89struct mm_struct;
218extern void exit_aio(struct mm_struct *mm); 90extern void exit_aio(struct mm_struct *mm);
219extern long do_io_submit(aio_context_t ctx_id, long nr, 91extern long do_io_submit(aio_context_t ctx_id, long nr,
220 struct iocb __user *__user *iocbpp, bool compat); 92 struct iocb __user *__user *iocbpp, bool compat);
93void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
221#else 94#else
222static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } 95static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
223static inline int aio_put_req(struct kiocb *iocb) { return 0; } 96static inline void aio_put_req(struct kiocb *iocb) { }
224static inline void kick_iocb(struct kiocb *iocb) { } 97static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
225static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
226struct mm_struct; 98struct mm_struct;
227static inline void exit_aio(struct mm_struct *mm) { } 99static inline void exit_aio(struct mm_struct *mm) { }
228static inline long do_io_submit(aio_context_t ctx_id, long nr, 100static inline long do_io_submit(aio_context_t ctx_id, long nr,
229 struct iocb __user * __user *iocbpp, 101 struct iocb __user * __user *iocbpp,
230 bool compat) { return 0; } 102 bool compat) { return 0; }
103static inline void kiocb_set_cancel_fn(struct kiocb *req,
104 kiocb_cancel_fn *cancel) { }
231#endif /* CONFIG_AIO */ 105#endif /* CONFIG_AIO */
232 106
233static inline struct kiocb *list_kiocb(struct list_head *h) 107static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3bff9ce09cf7..5047355b9a0f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@ struct cgroup_subsys;
28struct inode; 28struct inode;
29struct cgroup; 29struct cgroup;
30struct css_id; 30struct css_id;
31struct eventfd_ctx;
31 32
32extern int cgroup_init_early(void); 33extern int cgroup_init_early(void);
33extern int cgroup_init(void); 34extern int cgroup_init(void);
diff --git a/include/linux/errno.h b/include/linux/errno.h
index f6bf082d4d4f..89627b9187f9 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,6 +28,5 @@
28#define EBADTYPE 527 /* Type not supported by server */ 28#define EBADTYPE 527 /* Type not supported by server */
29#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ 29#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
30#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ 30#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
31#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */
32 31
33#endif 32#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a62df310f2e..6b4890fa57e7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -189,8 +189,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
189 189
190extern const struct file_operations hugetlbfs_file_operations; 190extern const struct file_operations hugetlbfs_file_operations;
191extern const struct vm_operations_struct hugetlb_vm_ops; 191extern const struct vm_operations_struct hugetlb_vm_ops;
192struct file *hugetlb_file_setup(const char *name, unsigned long addr, 192struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
193 size_t size, vm_flags_t acct,
194 struct user_struct **user, int creat_flags, 193 struct user_struct **user, int creat_flags,
195 int page_size_log); 194 int page_size_log);
196 195
@@ -209,8 +208,8 @@ static inline int is_file_hugepages(struct file *file)
209 208
210#define is_file_hugepages(file) 0 209#define is_file_hugepages(file) 0
211static inline struct file * 210static inline struct file *
212hugetlb_file_setup(const char *name, unsigned long addr, size_t size, 211hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
213 vm_flags_t acctflag, struct user_struct **user, int creat_flags, 212 struct user_struct **user, int creat_flags,
214 int page_size_log) 213 int page_size_log)
215{ 214{
216 return ERR_PTR(-ENOSYS); 215 return ERR_PTR(-ENOSYS);
@@ -288,6 +287,13 @@ static inline struct hstate *hstate_file(struct file *f)
288 return hstate_inode(file_inode(f)); 287 return hstate_inode(file_inode(f));
289} 288}
290 289
290static inline struct hstate *hstate_sizelog(int page_size_log)
291{
292 if (!page_size_log)
293 return &default_hstate;
294 return size_to_hstate(1 << page_size_log);
295}
296
291static inline struct hstate *hstate_vma(struct vm_area_struct *vma) 297static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
292{ 298{
293 return hstate_file(vma->vm_file); 299 return hstate_file(vma->vm_file);
@@ -352,11 +358,12 @@ static inline int hstate_index(struct hstate *h)
352 return h - hstates; 358 return h - hstates;
353} 359}
354 360
355#else 361#else /* CONFIG_HUGETLB_PAGE */
356struct hstate {}; 362struct hstate {};
357#define alloc_huge_page_node(h, nid) NULL 363#define alloc_huge_page_node(h, nid) NULL
358#define alloc_bootmem_huge_page(h) NULL 364#define alloc_bootmem_huge_page(h) NULL
359#define hstate_file(f) NULL 365#define hstate_file(f) NULL
366#define hstate_sizelog(s) NULL
360#define hstate_vma(v) NULL 367#define hstate_vma(v) NULL
361#define hstate_inode(i) NULL 368#define hstate_inode(i) NULL
362#define huge_page_size(h) PAGE_SIZE 369#define huge_page_size(h) PAGE_SIZE
@@ -371,6 +378,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
371} 378}
372#define hstate_index_to_shift(index) 0 379#define hstate_index_to_shift(index) 0
373#define hstate_index(h) 0 380#define hstate_index(h) 0
374#endif 381#endif /* CONFIG_HUGETLB_PAGE */
375 382
376#endif /* _LINUX_HUGETLB_H */ 383#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1a7f19e7f1a0..e0c8528a41a4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -951,13 +951,19 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
951 * (see walk_page_range for more details) 951 * (see walk_page_range for more details)
952 */ 952 */
953struct mm_walk { 953struct mm_walk {
954 int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); 954 int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
955 int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); 955 unsigned long next, struct mm_walk *walk);
956 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); 956 int (*pud_entry)(pud_t *pud, unsigned long addr,
957 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); 957 unsigned long next, struct mm_walk *walk);
958 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); 958 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
959 int (*hugetlb_entry)(pte_t *, unsigned long, 959 unsigned long next, struct mm_walk *walk);
960 unsigned long, unsigned long, struct mm_walk *); 960 int (*pte_entry)(pte_t *pte, unsigned long addr,
961 unsigned long next, struct mm_walk *walk);
962 int (*pte_hole)(unsigned long addr, unsigned long next,
963 struct mm_walk *walk);
964 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
965 unsigned long addr, unsigned long next,
966 struct mm_walk *walk);
961 struct mm_struct *mm; 967 struct mm_struct *mm;
962 void *private; 968 void *private;
963}; 969};
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 731e4ecee3bd..e2772666f004 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/bug.h> 5#include <linux/bug.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/workqueue.h>
7#include <linux/threads.h> 8#include <linux/threads.h>
8#include <linux/nsproxy.h> 9#include <linux/nsproxy.h>
9#include <linux/kref.h> 10#include <linux/kref.h>
diff --git a/include/linux/random.h b/include/linux/random.h
index 347ce553a306..3b9377d6b7a5 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -29,13 +29,6 @@ u32 prandom_u32(void);
29void prandom_bytes(void *buf, int nbytes); 29void prandom_bytes(void *buf, int nbytes);
30void prandom_seed(u32 seed); 30void prandom_seed(u32 seed);
31 31
32/*
33 * These macros are preserved for backward compatibility and should be
34 * removed as soon as a transition is finished.
35 */
36#define random32() prandom_u32()
37#define srandom32(seed) prandom_seed(seed)
38
39u32 prandom_u32_state(struct rnd_state *); 32u32 prandom_u32_state(struct rnd_state *);
40void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); 33void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
41 34
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4800e9d1864c..022c085ac3c5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -313,8 +313,6 @@ extern void schedule_preempt_disabled(void);
313struct nsproxy; 313struct nsproxy;
314struct user_namespace; 314struct user_namespace;
315 315
316#include <linux/aio.h>
317
318#ifdef CONFIG_MMU 316#ifdef CONFIG_MMU
319extern void arch_pick_mmap_layout(struct mm_struct *mm); 317extern void arch_pick_mmap_layout(struct mm_struct *mm);
320extern unsigned long 318extern unsigned long
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 7cb64d4b499d..ac38be2692d8 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -330,6 +330,92 @@ do { \
330 __ret; \ 330 __ret; \
331}) 331})
332 332
333#define __wait_event_hrtimeout(wq, condition, timeout, state) \
334({ \
335 int __ret = 0; \
336 DEFINE_WAIT(__wait); \
337 struct hrtimer_sleeper __t; \
338 \
339 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
340 HRTIMER_MODE_REL); \
341 hrtimer_init_sleeper(&__t, current); \
342 if ((timeout).tv64 != KTIME_MAX) \
343 hrtimer_start_range_ns(&__t.timer, timeout, \
344 current->timer_slack_ns, \
345 HRTIMER_MODE_REL); \
346 \
347 for (;;) { \
348 prepare_to_wait(&wq, &__wait, state); \
349 if (condition) \
350 break; \
351 if (state == TASK_INTERRUPTIBLE && \
352 signal_pending(current)) { \
353 __ret = -ERESTARTSYS; \
354 break; \
355 } \
356 if (!__t.task) { \
357 __ret = -ETIME; \
358 break; \
359 } \
360 schedule(); \
361 } \
362 \
363 hrtimer_cancel(&__t.timer); \
364 destroy_hrtimer_on_stack(&__t.timer); \
365 finish_wait(&wq, &__wait); \
366 __ret; \
367})
368
369/**
370 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
371 * @wq: the waitqueue to wait on
372 * @condition: a C expression for the event to wait for
373 * @timeout: timeout, as a ktime_t
374 *
375 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
376 * @condition evaluates to true or a signal is received.
377 * The @condition is checked each time the waitqueue @wq is woken up.
378 *
379 * wake_up() has to be called after changing any variable that could
380 * change the result of the wait condition.
381 *
382 * The function returns 0 if @condition became true, or -ETIME if the timeout
383 * elapsed.
384 */
385#define wait_event_hrtimeout(wq, condition, timeout) \
386({ \
387 int __ret = 0; \
388 if (!(condition)) \
389 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
390 TASK_UNINTERRUPTIBLE); \
391 __ret; \
392})
393
394/**
395 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
396 * @wq: the waitqueue to wait on
397 * @condition: a C expression for the event to wait for
398 * @timeout: timeout, as a ktime_t
399 *
400 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
401 * @condition evaluates to true or a signal is received.
402 * The @condition is checked each time the waitqueue @wq is woken up.
403 *
404 * wake_up() has to be called after changing any variable that could
405 * change the result of the wait condition.
406 *
407 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
408 * interrupted by a signal, or -ETIME if the timeout elapsed.
409 */
410#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
411({ \
412 long __ret = 0; \
413 if (!(condition)) \
414 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
415 TASK_INTERRUPTIBLE); \
416 __ret; \
417})
418
333#define __wait_event_interruptible_exclusive(wq, condition, ret) \ 419#define __wait_event_interruptible_exclusive(wq, condition, ret) \
334do { \ 420do { \
335 DEFINE_WAIT(__wait); \ 421 DEFINE_WAIT(__wait); \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a9367c0c076..579a5007c696 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -5,6 +5,7 @@
5#define WRITEBACK_H 5#define WRITEBACK_H
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/workqueue.h>
8#include <linux/fs.h> 9#include <linux/fs.h>
9 10
10DECLARE_PER_CPU(int, dirty_throttle_leaks); 11DECLARE_PER_CPU(int, dirty_throttle_leaks);