94 files changed, 216 insertions, 213 deletions
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index da5c087462b1..c3c705591532 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -819,7 +819,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
    certain condition is true. They must be used carefully to ensure
    there is no race condition. You declare a
    <type>wait_queue_head_t</type>, and then processes which want to
-   wait for that condition declare a <type>wait_queue_t</type>
+   wait for that condition declare a <type>wait_queue_entry_t</type>
    referring to themselves, and place that in the queue.
   </para>

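The paragraph above describes the classic open-coded wait pattern; with the renamed type it looks roughly like the following (a minimal sketch for orientation, not taken from this patch; real code would normally just use wait_event()):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int condition;

/* Sketch only: open-coded sleep until "condition" becomes true. */
static void wait_for_condition(void)
{
        wait_queue_entry_t wait;        /* was wait_queue_t before this rename */

        init_waitqueue_entry(&wait, current);
        add_wait_queue(&my_waitq, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&my_waitq, &wait);
}
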
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt
index f10dd590f69f..8444dc3d57e8 100644
--- a/Documentation/filesystems/autofs4.txt
+++ b/Documentation/filesystems/autofs4.txt
@@ -316,7 +316,7 @@ For version 5, the format of the message is:
         struct autofs_v5_packet {
                 int proto_version;            /* Protocol version */
                 int type;                     /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 __u32 dev;
                 __u64 ino;
                 __u32 uid;
@@ -341,12 +341,12 @@ The pipe will be set to "packet mode" (equivalent to passing
 `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at
 most one packet, and any unread portion of a packet will be discarded.

-The `wait_queue_token` is a unique number which can identify a
+The `wait_queue_entry_token` is a unique number which can identify a
 particular request to be acknowledged.  When a message is sent over
 the pipe the affected dentry is marked as either "active" or
 "expiring" and other accesses to it block until the message is
 acknowledged using one of the ioctls below and the relevant
-`wait_queue_token`.
+`wait_queue_entry_token`.

 Communicating with autofs: root directory ioctls
 ------------------------------------------------
@@ -358,7 +358,7 @@ capability, or must be the automount daemon.
 The available ioctl commands are:

 - **AUTOFS_IOC_READY**: a notification has been handled.  The argument
-    to the ioctl command is the "wait_queue_token" number
+    to the ioctl command is the "wait_queue_entry_token" number
     corresponding to the notification being acknowledged.
 - **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
     the error code `ENOENT`.
@@ -382,14 +382,14 @@ The available ioctl commands are:
         struct autofs_packet_expire_multi {
                 int proto_version;            /* Protocol version */
                 int type;                     /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 int len;
                 char name[NAME_MAX+1];
         };

     is required.  This is filled in with the name of something
     that can be unmounted or removed.  If nothing can be expired,
-    `errno` is set to `EAGAIN`.  Even though a `wait_queue_token`
+    `errno` is set to `EAGAIN`.  Even though a `wait_queue_entry_token`
     is present in the structure, no "wait queue" is established
     and no acknowledgment is needed.
 - **AUTOFS_IOC_EXPIRE_MULTI**: This is similar to
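For orientation, the acknowledgement flow described above could look roughly like this from the daemon side (a hypothetical sketch, not part of this patch; it assumes the userspace auto_fs4.h header carries the same field rename):

/* Hypothetical daemon-side sketch: read one request packet from the
 * kernel pipe and acknowledge it via its wait_queue_entry_token. */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_fs4.h>

static int handle_one_request(int pipefd, int root_fd)
{
        struct autofs_v5_packet pkt;

        if (read(pipefd, &pkt, sizeof(pkt)) != sizeof(pkt))
                return -1;
        /* ... attempt the requested mount here ... */
        return ioctl(root_fd, AUTOFS_IOC_READY, pkt.wait_queue_entry_token);
}
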
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bb66c96850b1..a083f95e04b1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -926,7 +926,7 @@ static bool reorder_tags_to_front(struct list_head *list)
         return first != NULL;
 }

-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                                 void *key)
 {
         struct blk_mq_hw_ctx *hctx;
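The callbacks retyped throughout this patch share this wake-function signature; a hedged sketch of defining and registering such a custom callback (hypothetical names, not from the patch) is:

#include <linux/kernel.h>
#include <linux/wait.h>

struct my_waiter {
        wait_queue_entry_t wait;
        bool woken;
};

/* Called under the waitqueue lock when the head is woken up. */
static int my_wake_fn(wait_queue_entry_t *wait, unsigned mode, int flags,
                      void *key)
{
        struct my_waiter *w = container_of(wait, struct my_waiter, wait);

        w->woken = true;
        list_del_init(&wait->task_list);  /* member is still ->task_list at this point */
        return 1;
}

static void my_waiter_arm(wait_queue_head_t *wq, struct my_waiter *w)
{
        w->woken = false;
        init_waitqueue_func_entry(&w->wait, my_wake_fn);
        add_wait_queue(wq, &w->wait);
}
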
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 17676f4d7fd1..5f3a37c2784c 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 }

 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-                             wait_queue_t *wait, unsigned long rw)
+                             wait_queue_entry_t *wait, unsigned long rw)
 {
         /*
          * inc it here even if disabled, since we'll dec it at completion.
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b9faabc75fdb..b95d6bd714c0 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -99,7 +99,7 @@ struct kyber_hctx_data {
         struct list_head rqs[KYBER_NUM_DOMAINS];
         unsigned int cur_domain;
         unsigned int batching;
-        wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
         atomic_t wait_index[KYBER_NUM_DOMAINS];
 };

@@ -507,7 +507,7 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
         }
 }

-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                              void *key)
 {
         struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
@@ -523,7 +523,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
         unsigned int sched_domain = khd->cur_domain;
         struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-        wait_queue_t *wait = &khd->domain_wait[sched_domain];
+        wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
         struct sbq_wait_state *ws;
         int nr;

@@ -734,7 +734,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
 { \
         struct blk_mq_hw_ctx *hctx = data; \
         struct kyber_hctx_data *khd = hctx->sched_data; \
-        wait_queue_t *wait = &khd->domain_wait[domain]; \
+        wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
         \
         seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
         return 0; \
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index c38cb5b91291..fe850f0567cb 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data)
         struct btmrvl_thread *thread = data;
         struct btmrvl_private *priv = thread->priv;
         struct btmrvl_adapter *adapter = priv->adapter;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct sk_buff *skb;
         ulong flags;

diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index d165af8abe36..a5c6cfe71a8e 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file,
                          loff_t *ppos)
 {
         int rv = 0;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;

         if (count <= 0)
                 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 129c58bb4805..a4a920c4c454 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -123,7 +123,7 @@ struct drm_i915_gem_request {
          * It is used by the driver to then queue the request for execution.
          */
         struct i915_sw_fence submit;
-        wait_queue_t submitq;
+        wait_queue_entry_t submitq;
         wait_queue_head_t execute;

         /* A list of everyone we wait upon, and everyone who waits upon us.
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a277f8eb7beb..8669bfa33064 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
                                         struct list_head *continuation)
 {
         wait_queue_head_t *x = &fence->wait;
-        wait_queue_t *pos, *next;
+        wait_queue_entry_t *pos, *next;
         unsigned long flags;

         debug_fence_deactivate(fence);
@@ -254,7 +254,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
         __i915_sw_fence_commit(fence);
 }

-static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
 {
         list_del(&wq->task_list);
         __i915_sw_fence_complete(wq->private, key);
@@ -267,7 +267,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
 static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
                                            const struct i915_sw_fence * const signaler)
 {
-        wait_queue_t *wq;
+        wait_queue_entry_t *wq;

         if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
                 return false;
@@ -288,7 +288,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,

 static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
 {
-        wait_queue_t *wq;
+        wait_queue_entry_t *wq;

         if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
                 return;
@@ -320,7 +320,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,

 static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                           struct i915_sw_fence *signaler,
-                                          wait_queue_t *wq, gfp_t gfp)
+                                          wait_queue_entry_t *wq, gfp_t gfp)
 {
         unsigned long flags;
         int pending;
@@ -359,7 +359,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,

         spin_lock_irqsave(&signaler->wait.lock, flags);
         if (likely(!i915_sw_fence_done(signaler))) {
-                __add_wait_queue_tail(&signaler->wait, wq);
+                __add_wait_queue_entry_tail(&signaler->wait, wq);
                 pending = 1;
         } else {
                 i915_sw_fence_wake(wq, 0, 0, NULL);
@@ -372,7 +372,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,

 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                  struct i915_sw_fence *signaler,
-                                 wait_queue_t *wq)
+                                 wait_queue_entry_t *wq)
 {
         return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
 }
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index d31cefbbcc04..fd3c3bf6c8b7 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);

 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                  struct i915_sw_fence *after,
-                                 wait_queue_t *wq);
+                                 wait_queue_entry_t *wq);
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
                                      struct i915_sw_fence *after,
                                      gfp_t gfp);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c1c8e2208a21..e562a78510ff 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -375,7 +375,7 @@ struct radeon_fence {
         unsigned ring;
         bool is_vm_update;

-        wait_queue_t fence_wake;
+        wait_queue_entry_t fence_wake;
 };

 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ef09f0a63754..e86f2bd38410 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
 {
         struct radeon_fence *fence;
         u64 seq;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 92f1452dad57..76875f6299b8 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
         struct vga_device *vgadev, *conflict;
         unsigned long flags;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         int rc = 0;

         vga_check_first_use();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index a3f18a22f5ed..e0f47cc2effc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
 {
         struct i40iw_device *iwdev;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;

         iwdev = dev->back_dev;

diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 9b80417cd547..73da1f5626cb 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k);

 struct btree_op {
         /* for waiting on btree reserve in btree_split() */
-        wait_queue_t wait;
+        wait_queue_entry_t wait;

         /* Btree level at which we start taking write locks */
         short lock;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index bed9ef17bc26..7ccffbb0019e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -144,7 +144,7 @@ static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
         int errno = 0;
-        wait_queue_t we;
+        wait_queue_entry_t we;

         init_waitqueue_entry(&we, current);
         add_wait_queue(wait_queue, &we);
@@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
                    int *condition,
                    int timeout)
 {
-        wait_queue_t we;
+        wait_queue_entry_t we;

         init_waitqueue_entry(&we, current);
         add_wait_queue(wait_queue, &we);
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 1b7e125a28e2..6a13303af2b7 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3066,7 +3066,7 @@ static int airo_thread(void *data) {
                 if (ai->jobs) {
                         locked = down_interruptible(&ai->sem);
                 } else {
-                        wait_queue_t wait;
+                        wait_queue_entry_t wait;

                         init_waitqueue_entry(&wait, current);
                         add_wait_queue(&ai->thr_wait, &wait);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index b2c6b065b542..ff153ce29539 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
                 ret = -EINVAL;
         }
         if (local->iw_mode == IW_MODE_MASTER) {
-                wait_queue_t __wait;
+                wait_queue_entry_t __wait;
                 init_waitqueue_entry(&__wait, current);
                 add_wait_queue(&local->hostscan_wq, &__wait);
                 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index e3500203715c..dde065d0d5c1 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -453,7 +453,7 @@ static int lbs_thread(void *data)
 {
         struct net_device *dev = data;
         struct lbs_private *priv = dev->ml_priv;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;

         lbs_deb_enter(LBS_DEB_THREAD);

diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index bd9e31e16249..16fc380b5512 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -48,7 +48,7 @@
 #include <linux/wait.h>
 typedef wait_queue_head_t adpt_wait_queue_head_t;
 #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_t adpt_wait_queue_t;
+typedef wait_queue_entry_t adpt_wait_queue_entry_t;

 /*
  * message structures
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 3419e1bcdff6..67621308eb9c 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
 static uint32_t ips_statupd_morpheus(ips_ha_t *);
 static ips_scb_t *ips_getscb(ips_ha_t *);
 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
-static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
 static void ips_putq_copp_tail(ips_copp_queue_t *,
                                ips_copp_wait_item_t *);
 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
                                           struct scsi_cmnd *);
 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
                                               ips_copp_wait_item_t *);
@@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
 /* ASSUMED to be called from within the HA lock */
 /* */
 /****************************************************************************/
-static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
 {
         METHOD_TRACE("ips_putq_wait_tail", 1);

@@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
 /* ASSUMED to be called from within the HA lock */
 /* */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
 {
         struct scsi_cmnd *item;

@@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
 /* ASSUMED to be called from within the HA lock */
 /* */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
                                           struct scsi_cmnd *item)
 {
         struct scsi_cmnd *p;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b782bb60baf0..366be3b2f9b4 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -989,7 +989,7 @@ typedef struct ips_wait_queue {
         struct scsi_cmnd *head;
         struct scsi_cmnd *tail;
         int count;
-} ips_wait_queue_t;
+} ips_wait_queue_entry_t;

 typedef struct ips_copp_wait_item {
         struct scsi_cmnd *scsi_cmd;
@@ -1035,7 +1035,7 @@ typedef struct ips_ha {
         ips_stat_t sp;                   /* Status packer pointer */
         struct ips_scb *scbs;            /* Array of all CCBS */
         struct ips_scb *scb_freelist;    /* SCB free list */
-        ips_wait_queue_t scb_waitlist;   /* Pending SCB list */
+        ips_wait_queue_entry_t scb_waitlist;    /* Pending SCB list */
         ips_copp_queue_t copp_waitlist;  /* Pending PT list */
         ips_scb_queue_t scb_activelist;  /* Active SCB list */
         IPS_IO_CMD *dummy;               /* dummy command */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 0db662d6abdd..85b242ec5f9b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3267,7 +3267,7 @@ int
 kiblnd_connd(void *arg)
 {
         spinlock_t *lock = &kiblnd_data.kib_connd_lock;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         unsigned long flags;
         struct kib_conn *conn;
         int timeout;
@@ -3521,7 +3521,7 @@ kiblnd_scheduler(void *arg)
         long id = (long)arg;
         struct kib_sched_info *sched;
         struct kib_conn *conn;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         unsigned long flags;
         struct ib_wc wc;
         int did_something;
@@ -3656,7 +3656,7 @@ kiblnd_failover_thread(void *arg)
 {
         rwlock_t *glock = &kiblnd_data.kib_global_lock;
         struct kib_dev *dev;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         unsigned long flags;
         int rc;

diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 3ed3b08c122c..6b38d5a8fe92 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -2166,7 +2166,7 @@ ksocknal_connd(void *arg)
 {
         spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
         struct ksock_connreq *cr;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         int nloops = 0;
         int cons_retry = 0;

@@ -2554,7 +2554,7 @@ ksocknal_check_peer_timeouts(int idx)
 int
 ksocknal_reaper(void *arg)
 {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct ksock_conn *conn;
         struct ksock_sched *sched;
         struct list_head enomem_conns;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c56e9922cd5b..49deb448b044 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -361,7 +361,7 @@ static int libcfs_debug_dumplog_thread(void *arg)

 void libcfs_debug_dumplog(void)
 {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct task_struct *dumper;

         /* we're being careful to ensure that the kernel thread is
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 9599b7441feb..27082d2f7938 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -990,7 +990,7 @@ static int tracefiled(void *arg)
         complete(&tctl->tctl_start);

         while (1) {
-                wait_queue_t __wait;
+                wait_queue_entry_t __wait;

                 pc.pc_want_daemon_pages = 0;
                 collect_pages(&pc);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index ce4b83584e17..9ebba4ef5f90 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -312,7 +312,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
 {
         int tms = *timeout_ms;
         int wait;
-        wait_queue_t wl;
+        wait_queue_entry_t wl;
         unsigned long now;

         if (!tms)
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 9fca8d225ee0..f075706bba6d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -516,7 +516,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
 int
 lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct socket *newsock;
         int rc;

diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 999f250ceed0..bf31bc200d27 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -192,7 +192,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 }

 static int seq_fid_alloc_prep(struct lu_client_seq *seq,
-                              wait_queue_t *link)
+                              wait_queue_entry_t *link)
 {
         if (seq->lcs_update) {
                 add_wait_queue(&seq->lcs_waitq, link);
@@ -223,7 +223,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 int seq_client_alloc_fid(const struct lu_env *env,
                          struct lu_client_seq *seq, struct lu_fid *fid)
 {
-        wait_queue_t link;
+        wait_queue_entry_t link;
         int rc;

         LASSERT(seq);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
  */
 void seq_client_flush(struct lu_client_seq *seq)
 {
-        wait_queue_t link;
+        wait_queue_entry_t link;

         LASSERT(seq);
         init_waitqueue_entry(&link, current);
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index b04d613846ee..f24970da8323 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -201,7 +201,7 @@ struct l_wait_info {
                            sigmask(SIGALRM))

 /**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively
  * waiting threads, which is not always desirable because all threads will
  * be waken up again and again, even user only needs a few of them to be
  * active most time. This is not good for performance because cache can
@@ -228,7 +228,7 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \
 do { \
-        wait_queue_t __wait; \
+        wait_queue_entry_t __wait; \
         long __timeout = info->lwi_timeout; \
         sigset_t __blocked; \
         int __allow_intr = info->lwi_allow_intr; \
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 8af611033e12..96515b839436 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -207,7 +207,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
         struct lu_object_header *header = obj->co_lu.lo_header;
-        wait_queue_t waiter;
+        wait_queue_entry_t waiter;

         if (unlikely(atomic_read(&header->loh_ref) != 1)) {
                 struct lu_site *site = obj->co_lu.lo_dev->ld_site;
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 391c632365ae..e889d3a7de9c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -370,7 +370,7 @@ struct lov_thread_info {
         struct ost_lvb lti_lvb;
         struct cl_2queue lti_cl2q;
         struct cl_page_list lti_plist;
-        wait_queue_t lti_waiter;
+        wait_queue_entry_t lti_waiter;
         struct cl_attr lti_attr;
 };

diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index ab3ecfeeadc8..eddabbe31e5c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -371,7 +371,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
         struct lov_layout_raid0 *r0;
         struct lu_site *site;
         struct lu_site_bkt_data *bkt;
-        wait_queue_t *waiter;
+        wait_queue_entry_t *waiter;

         r0 = &lov->u.raid0;
         LASSERT(r0->lo_sub[idx] == los);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index abcf951208d2..76ae600ae2c8 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -556,7 +556,7 @@ EXPORT_SYMBOL(lu_object_print);
 static struct lu_object *htable_lookup(struct lu_site *s,
                                        struct cfs_hash_bd *bd,
                                        const struct lu_fid *f,
-                                       wait_queue_t *waiter,
+                                       wait_queue_entry_t *waiter,
                                        __u64 *version)
 {
         struct lu_site_bkt_data *bkt;
@@ -670,7 +670,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                             struct lu_device *dev,
                                             const struct lu_fid *f,
                                             const struct lu_object_conf *conf,
-                                            wait_queue_t *waiter)
+                                            wait_queue_entry_t *waiter)
 {
         struct lu_object *o;
         struct lu_object *shadow;
@@ -750,7 +750,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
 {
         struct lu_site_bkt_data *bkt;
         struct lu_object *obj;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;

         while (1) {
                 obj = lu_object_find_try(env, dev, f, conf, &wait);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 31885f20fc15..cc047de72e2a 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -184,7 +184,7 @@ static void hdlcdev_exit(struct slgt_info *info);
 struct cond_wait {
         struct cond_wait *next;
         wait_queue_head_t q;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         unsigned int data;
 };
 static void init_cond_wait(struct cond_wait *w, unsigned int data);
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 27c89cd5d70b..4797217e5e72 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -43,7 +43,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
         queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
 }

-static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
         unsigned long flags = (unsigned long)key;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 042030e5a035..e4613a3c362d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -165,7 +165,7 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
         add_wait_queue(wqh, &poll->wait);
 }

-static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                              void *key)
 {
         struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f55671d53f28..f72095868b93 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -31,7 +31,7 @@ struct vhost_work {
 struct vhost_poll {
         poll_table table;
         wait_queue_head_t *wqh;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct vhost_work work;
         unsigned long mask;
         struct vhost_dev *dev;
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index beef981aa54f..974f5346458a 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -83,7 +83,7 @@ struct autofs_info {
 struct autofs_wait_queue {
         wait_queue_head_t queue;
         struct autofs_wait_queue *next;
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         /* We use the following to see what we are waiting for */
         struct qstr name;
         u32 dev;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 24a58bf9ca72..7071895b0678 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
         size_t pktsz;

         pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
-                 (unsigned long) wq->wait_queue_token,
+                 (unsigned long) wq->wait_queue_entry_token,
                  wq->name.len, wq->name.name, type);

         memset(&pkt, 0, sizeof(pkt)); /* For security reasons */
@@ -120,7 +120,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,

                 pktsz = sizeof(*mp);

-                mp->wait_queue_token = wq->wait_queue_token;
+                mp->wait_queue_entry_token = wq->wait_queue_entry_token;
                 mp->len = wq->name.len;
                 memcpy(mp->name, wq->name.name, wq->name.len);
                 mp->name[wq->name.len] = '\0';
@@ -133,7 +133,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,

                 pktsz = sizeof(*ep);

-                ep->wait_queue_token = wq->wait_queue_token;
+                ep->wait_queue_entry_token = wq->wait_queue_entry_token;
                 ep->len = wq->name.len;
                 memcpy(ep->name, wq->name.name, wq->name.len);
                 ep->name[wq->name.len] = '\0';
@@ -153,7 +153,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,

                 pktsz = sizeof(*packet);

-                packet->wait_queue_token = wq->wait_queue_token;
+                packet->wait_queue_entry_token = wq->wait_queue_entry_token;
                 packet->len = wq->name.len;
                 memcpy(packet->name, wq->name.name, wq->name.len);
                 packet->name[wq->name.len] = '\0';
@@ -428,7 +428,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
                         return -ENOMEM;
                 }

-                wq->wait_queue_token = autofs4_next_wait_queue;
+                wq->wait_queue_entry_token = autofs4_next_wait_queue;
                 if (++autofs4_next_wait_queue == 0)
                         autofs4_next_wait_queue = 1;
                 wq->next = sbi->queues;
@@ -461,7 +461,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
                 }

                 pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-                         (unsigned long) wq->wait_queue_token, wq->name.len,
+                         (unsigned long) wq->wait_queue_entry_token, wq->name.len,
                          wq->name.name, notify);

                 /*
@@ -471,7 +471,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
         } else {
                 wq->wait_ctr++;
                 pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-                         (unsigned long) wq->wait_queue_token, wq->name.len,
+                         (unsigned long) wq->wait_queue_entry_token, wq->name.len,
                          wq->name.name, notify);
                 mutex_unlock(&sbi->wq_mutex);
                 kfree(qstr.name);
@@ -550,13 +550,13 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 }


-int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status)
 {
         struct autofs_wait_queue *wq, **wql;

         mutex_lock(&sbi->wq_mutex);
         for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
-                if (wq->wait_queue_token == wait_queue_token)
+                if (wq->wait_queue_entry_token == wait_queue_entry_token)
                         break;
         }

diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 9bf90bcc56ac..54a4fcd679ed 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -97,7 +97,7 @@ struct cachefiles_cache {
  * backing file read tracking
  */
 struct cachefiles_one_read {
-        wait_queue_t monitor;           /* link into monitored waitqueue */
+        wait_queue_entry_t monitor;     /* link into monitored waitqueue */
         struct page *back_page;         /* backing file page we're waiting for */
         struct page *netfs_page;        /* netfs page we're going to fill */
         struct fscache_retrieval *op;   /* retrieval op covering this */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 41df8a27d7eb..3978b324cbca 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -204,7 +204,7 @@ wait_for_old_object:
                 wait_queue_head_t *wq;

                 signed long timeout = 60 * HZ;
-                wait_queue_t wait;
+                wait_queue_entry_t wait;
                 bool requeue;

                 /* if the object we're waiting for is queued for processing,
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index afbdc418966d..8be33b33b981 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -21,7 +21,7 @@
  * - we use this to detect read completion of backing pages
  * - the caller holds the waitqueue lock
  */
-static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
+static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
                                   int sync, void *_key)
 {
         struct cachefiles_one_read *monitor =
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -84,7 +84,7 @@ struct exceptional_entry_key {
 };

 struct wait_exceptional_entry_queue {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct exceptional_entry_key key;
 };

@@ -108,7 +108,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
         return wait_table + hash;
 }

-static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
                                        int sync, void *keyp)
 {
         struct exceptional_entry_key *key = keyp;
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 68b9fffcb2c8..9736df2ce89d 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -191,7 +191,7 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
  * This is used to atomically remove a wait queue entry from the eventfd wait
  * queue head, and read/reset the counter value.
  */
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                   __u64 *cnt)
 {
         unsigned long flags;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 5420767c9b68..5ac1cba5ef72 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -244,7 +244,7 @@ struct eppoll_entry { | |||
244 | * Wait queue item that will be linked to the target file wait | 244 | * Wait queue item that will be linked to the target file wait |
245 | * queue head. | 245 | * queue head. |
246 | */ | 246 | */ |
247 | wait_queue_t wait; | 247 | wait_queue_entry_t wait; |
248 | 248 | ||
249 | /* The wait queue head that linked the "wait" wait queue item */ | 249 | /* The wait queue head that linked the "wait" wait queue item */ |
250 | wait_queue_head_t *whead; | 250 | wait_queue_head_t *whead; |
@@ -347,13 +347,13 @@ static inline int ep_is_linked(struct list_head *p) | |||
347 | return !list_empty(p); | 347 | return !list_empty(p); |
348 | } | 348 | } |
349 | 349 | ||
350 | static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) | 350 | static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p) |
351 | { | 351 | { |
352 | return container_of(p, struct eppoll_entry, wait); | 352 | return container_of(p, struct eppoll_entry, wait); |
353 | } | 353 | } |
354 | 354 | ||
355 | /* Get the "struct epitem" from a wait queue pointer */ | 355 | /* Get the "struct epitem" from a wait queue pointer */ |
356 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) | 356 | static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p) |
357 | { | 357 | { |
358 | return container_of(p, struct eppoll_entry, wait)->base; | 358 | return container_of(p, struct eppoll_entry, wait)->base; |
359 | } | 359 | } |
@@ -1078,7 +1078,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) | |||
1078 | * mechanism. It is called by the stored file descriptors when they | 1078 | * mechanism. It is called by the stored file descriptors when they |
1079 | * have events to report. | 1079 | * have events to report. |
1080 | */ | 1080 | */ |
1081 | static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) | 1081 | static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
1082 | { | 1082 | { |
1083 | int pwake = 0; | 1083 | int pwake = 0; |
1084 | unsigned long flags; | 1084 | unsigned long flags; |
@@ -1699,7 +1699,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | |||
1699 | int res = 0, eavail, timed_out = 0; | 1699 | int res = 0, eavail, timed_out = 0; |
1700 | unsigned long flags; | 1700 | unsigned long flags; |
1701 | u64 slack = 0; | 1701 | u64 slack = 0; |
1702 | wait_queue_t wait; | 1702 | wait_queue_entry_t wait; |
1703 | ktime_t expires, *to = NULL; | 1703 | ktime_t expires, *to = NULL; |
1704 | 1704 | ||
1705 | if (timeout > 0) { | 1705 | if (timeout > 0) { |
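All of the epoll callbacks above change only in the spelled-out type of their first argument; the callback contract itself is untouched. A minimal sketch of that common shape, with invented my_* names (not part of this patch):

struct my_watcher {
        wait_queue_entry_t      wait;           /* was wait_queue_t */
        atomic_t                events;
};

static int my_wake_callback(wait_queue_entry_t *wait, unsigned mode,
                            int sync, void *key)
{
        /* Recover the containing object, exactly as ep_pwq_from_wait() does. */
        struct my_watcher *w = container_of(wait, struct my_watcher, wait);

        atomic_inc(&w->events);
        return 0;       /* return value only affects exclusive-wakeup accounting */
}

static void my_watch(struct my_watcher *w, wait_queue_head_t *head)
{
        /* Registration helpers keep their names; only the entry type is renamed. */
        init_waitqueue_func_entry(&w->wait, my_wake_callback);
        add_wait_queue(head, &w->wait);
}
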
diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 611b5408f6ec..7b447a245760 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c | |||
@@ -34,7 +34,7 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m) | |||
34 | 34 | ||
35 | void pin_kill(struct fs_pin *p) | 35 | void pin_kill(struct fs_pin *p) |
36 | { | 36 | { |
37 | wait_queue_t wait; | 37 | wait_queue_entry_t wait; |
38 | 38 | ||
39 | if (!p) { | 39 | if (!p) { |
40 | rcu_read_unlock(); | 40 | rcu_read_unlock(); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c08c46a3b8cd..be5a8f84e5bb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -6372,7 +6372,7 @@ struct nfs4_lock_waiter { | |||
6372 | }; | 6372 | }; |
6373 | 6373 | ||
6374 | static int | 6374 | static int |
6375 | nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key) | 6375 | nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) |
6376 | { | 6376 | { |
6377 | int ret; | 6377 | int ret; |
6378 | struct cb_notify_lock_args *cbnl = key; | 6378 | struct cb_notify_lock_args *cbnl = key; |
@@ -6415,7 +6415,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) | |||
6415 | .inode = state->inode, | 6415 | .inode = state->inode, |
6416 | .owner = &owner, | 6416 | .owner = &owner, |
6417 | .notified = false }; | 6417 | .notified = false }; |
6418 | wait_queue_t wait; | 6418 | wait_queue_entry_t wait; |
6419 | 6419 | ||
6420 | /* Don't bother with waitqueue if we don't expect a callback */ | 6420 | /* Don't bother with waitqueue if we don't expect a callback */ |
6421 | if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) | 6421 | if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index febed1217b3f..775304e7f96f 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -2161,7 +2161,7 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino) | |||
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | struct nilfs_segctor_wait_request { | 2163 | struct nilfs_segctor_wait_request { |
2164 | wait_queue_t wq; | 2164 | wait_queue_entry_t wq; |
2165 | __u32 seq; | 2165 | __u32 seq; |
2166 | int err; | 2166 | int err; |
2167 | atomic_t done; | 2167 | atomic_t done; |
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 83b506020718..9e37b7028ea4 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c | |||
@@ -47,7 +47,7 @@ static void run_down(struct slot_map *m) | |||
47 | if (m->c != -1) { | 47 | if (m->c != -1) { |
48 | for (;;) { | 48 | for (;;) { |
49 | if (likely(list_empty(&wait.task_list))) | 49 | if (likely(list_empty(&wait.task_list))) |
50 | __add_wait_queue_tail(&m->q, &wait); | 50 | __add_wait_queue_entry_tail(&m->q, &wait); |
51 | set_current_state(TASK_UNINTERRUPTIBLE); | 51 | set_current_state(TASK_UNINTERRUPTIBLE); |
52 | 52 | ||
53 | if (m->c == -1) | 53 | if (m->c == -1) |
@@ -85,7 +85,7 @@ static int wait_for_free(struct slot_map *m) | |||
85 | do { | 85 | do { |
86 | long n = left, t; | 86 | long n = left, t; |
87 | if (likely(list_empty(&wait.task_list))) | 87 | if (likely(list_empty(&wait.task_list))) |
88 | __add_wait_queue_tail_exclusive(&m->q, &wait); | 88 | __add_wait_queue_entry_tail_exclusive(&m->q, &wait); |
89 | set_current_state(TASK_INTERRUPTIBLE); | 89 | set_current_state(TASK_INTERRUPTIBLE); |
90 | 90 | ||
91 | if (m->c > 0) | 91 | if (m->c > 0) |
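The orangefs hunks are the open-coded form of an exclusive wait, so they also pick up the __add_wait_queue_tail*() to __add_wait_queue_entry_tail*() helper renames. A sketch of that pattern under the new names (the condition, the names, and the assumption that the waker updates the condition under q->lock are all illustrative):

static void my_wait_for(wait_queue_head_t *q, bool *condition)
{
        DEFINE_WAIT(wait);              /* a wait_queue_entry_t after this patch */
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        while (!*condition) {
                /* The __add_wait_queue_entry_tail*() helpers expect q->lock held. */
                if (list_empty(&wait.task_list))
                        __add_wait_queue_entry_tail_exclusive(q, &wait);
                set_current_state(TASK_UNINTERRUPTIBLE);

                spin_unlock_irqrestore(&q->lock, flags);
                schedule();
                spin_lock_irqsave(&q->lock, flags);
        }
        if (!list_empty(&wait.task_list))
                __remove_wait_queue(q, &wait);
        __set_current_state(TASK_RUNNING);
        spin_unlock_irqrestore(&q->lock, flags);
}
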
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 39bb1e838d8d..a11d773e5ff3 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -2956,7 +2956,7 @@ void reiserfs_wait_on_write_block(struct super_block *s) | |||
2956 | 2956 | ||
2957 | static void queue_log_writer(struct super_block *s) | 2957 | static void queue_log_writer(struct super_block *s) |
2958 | { | 2958 | { |
2959 | wait_queue_t wait; | 2959 | wait_queue_entry_t wait; |
2960 | struct reiserfs_journal *journal = SB_JOURNAL(s); | 2960 | struct reiserfs_journal *journal = SB_JOURNAL(s); |
2961 | set_bit(J_WRITERS_QUEUED, &journal->j_state); | 2961 | set_bit(J_WRITERS_QUEUED, &journal->j_state); |
2962 | 2962 | ||
diff --git a/fs/select.c b/fs/select.c index d6c652a31e99..5b524a977d91 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -180,7 +180,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p) | |||
180 | return table->entry++; | 180 | return table->entry++; |
181 | } | 181 | } |
182 | 182 | ||
183 | static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) | 183 | static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
184 | { | 184 | { |
185 | struct poll_wqueues *pwq = wait->private; | 185 | struct poll_wqueues *pwq = wait->private; |
186 | DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); | 186 | DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); |
@@ -206,7 +206,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) | |||
206 | return default_wake_function(&dummy_wait, mode, sync, key); | 206 | return default_wake_function(&dummy_wait, mode, sync, key); |
207 | } | 207 | } |
208 | 208 | ||
209 | static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) | 209 | static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
210 | { | 210 | { |
211 | struct poll_table_entry *entry; | 211 | struct poll_table_entry *entry; |
212 | 212 | ||
diff --git a/fs/signalfd.c b/fs/signalfd.c index 7e3d71109f51..593b022ac11b 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -43,7 +43,7 @@ void signalfd_cleanup(struct sighand_struct *sighand) | |||
43 | if (likely(!waitqueue_active(wqh))) | 43 | if (likely(!waitqueue_active(wqh))) |
44 | return; | 44 | return; |
45 | 45 | ||
46 | /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ | 46 | /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ |
47 | wake_up_poll(wqh, POLLHUP | POLLFREE); | 47 | wake_up_poll(wqh, POLLHUP | POLLFREE); |
48 | } | 48 | } |
49 | 49 | ||
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1d622f276e3a..bda64fcd8a0c 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -81,7 +81,7 @@ struct userfaultfd_unmap_ctx { | |||
81 | 81 | ||
82 | struct userfaultfd_wait_queue { | 82 | struct userfaultfd_wait_queue { |
83 | struct uffd_msg msg; | 83 | struct uffd_msg msg; |
84 | wait_queue_t wq; | 84 | wait_queue_entry_t wq; |
85 | struct userfaultfd_ctx *ctx; | 85 | struct userfaultfd_ctx *ctx; |
86 | bool waken; | 86 | bool waken; |
87 | }; | 87 | }; |
@@ -91,7 +91,7 @@ struct userfaultfd_wake_range { | |||
91 | unsigned long len; | 91 | unsigned long len; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, | 94 | static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, |
95 | int wake_flags, void *key) | 95 | int wake_flags, void *key) |
96 | { | 96 | { |
97 | struct userfaultfd_wake_range *range = key; | 97 | struct userfaultfd_wake_range *range = key; |
@@ -860,7 +860,7 @@ wakeup: | |||
860 | static inline struct userfaultfd_wait_queue *find_userfault_in( | 860 | static inline struct userfaultfd_wait_queue *find_userfault_in( |
861 | wait_queue_head_t *wqh) | 861 | wait_queue_head_t *wqh) |
862 | { | 862 | { |
863 | wait_queue_t *wq; | 863 | wait_queue_entry_t *wq; |
864 | struct userfaultfd_wait_queue *uwq; | 864 | struct userfaultfd_wait_queue *uwq; |
865 | 865 | ||
866 | VM_BUG_ON(!spin_is_locked(&wqh->lock)); | 866 | VM_BUG_ON(!spin_is_locked(&wqh->lock)); |
@@ -1747,7 +1747,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd, | |||
1747 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) | 1747 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) |
1748 | { | 1748 | { |
1749 | struct userfaultfd_ctx *ctx = f->private_data; | 1749 | struct userfaultfd_ctx *ctx = f->private_data; |
1750 | wait_queue_t *wq; | 1750 | wait_queue_entry_t *wq; |
1751 | struct userfaultfd_wait_queue *uwq; | 1751 | struct userfaultfd_wait_queue *uwq; |
1752 | unsigned long pending = 0, total = 0; | 1752 | unsigned long pending = 0, total = 0; |
1753 | 1753 | ||
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index fcd641032f8d..95ba83806c5d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -33,7 +33,7 @@ struct blk_mq_hw_ctx { | |||
33 | struct blk_mq_ctx **ctxs; | 33 | struct blk_mq_ctx **ctxs; |
34 | unsigned int nr_ctx; | 34 | unsigned int nr_ctx; |
35 | 35 | ||
36 | wait_queue_t dispatch_wait; | 36 | wait_queue_entry_t dispatch_wait; |
37 | atomic_t wait_index; | 37 | atomic_t wait_index; |
38 | 38 | ||
39 | struct blk_mq_tags *tags; | 39 | struct blk_mq_tags *tags; |
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index ff0b981f078e..9e4befd95bc7 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -37,7 +37,7 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd); | |||
37 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); | 37 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); |
38 | __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); | 38 | __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); |
39 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); | 39 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); |
40 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, | 40 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, |
41 | __u64 *cnt); | 41 | __u64 *cnt); |
42 | 42 | ||
43 | #else /* CONFIG_EVENTFD */ | 43 | #else /* CONFIG_EVENTFD */ |
@@ -73,7 +73,7 @@ static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, | |||
73 | } | 73 | } |
74 | 74 | ||
75 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, | 75 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, |
76 | wait_queue_t *wait, __u64 *cnt) | 76 | wait_queue_entry_t *wait, __u64 *cnt) |
77 | { | 77 | { |
78 | return -ENOSYS; | 78 | return -ENOSYS; |
79 | } | 79 | } |
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 0c1de05098c8..76c2fbc59f35 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h | |||
@@ -46,7 +46,7 @@ struct kvm_kernel_irqfd_resampler { | |||
46 | struct kvm_kernel_irqfd { | 46 | struct kvm_kernel_irqfd { |
47 | /* Used for MSI fast-path */ | 47 | /* Used for MSI fast-path */ |
48 | struct kvm *kvm; | 48 | struct kvm *kvm; |
49 | wait_queue_t wait; | 49 | wait_queue_entry_t wait; |
50 | /* Update side is protected by irqfds.lock */ | 50 | /* Update side is protected by irqfds.lock */ |
51 | struct kvm_kernel_irq_routing_entry irq_entry; | 51 | struct kvm_kernel_irq_routing_entry irq_entry; |
52 | seqcount_t irq_entry_sc; | 52 | seqcount_t irq_entry_sc; |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 316a19f6b635..e7bbd9d4dc6c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -524,7 +524,7 @@ void page_endio(struct page *page, bool is_write, int err); | |||
524 | /* | 524 | /* |
525 | * Add an arbitrary waiter to a page's wait queue | 525 | * Add an arbitrary waiter to a page's wait queue |
526 | */ | 526 | */ |
527 | extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); | 527 | extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); |
528 | 528 | ||
529 | /* | 529 | /* |
530 | * Fault everything in given userspace address range in. | 530 | * Fault everything in given userspace address range in. |
diff --git a/include/linux/poll.h b/include/linux/poll.h index 75ffc5729e4c..2889f09a1c60 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
@@ -75,7 +75,7 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | |||
75 | struct poll_table_entry { | 75 | struct poll_table_entry { |
76 | struct file *filp; | 76 | struct file *filp; |
77 | unsigned long key; | 77 | unsigned long key; |
78 | wait_queue_t wait; | 78 | wait_queue_entry_t wait; |
79 | wait_queue_head_t *wait_address; | 79 | wait_queue_head_t *wait_address; |
80 | }; | 80 | }; |
81 | 81 | ||
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index edf9b2cad277..f57076b958b7 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h | |||
@@ -183,7 +183,7 @@ struct virqfd { | |||
183 | void (*thread)(void *, void *); | 183 | void (*thread)(void *, void *); |
184 | void *data; | 184 | void *data; |
185 | struct work_struct inject; | 185 | struct work_struct inject; |
186 | wait_queue_t wait; | 186 | wait_queue_entry_t wait; |
187 | poll_table pt; | 187 | poll_table pt; |
188 | struct work_struct shutdown; | 188 | struct work_struct shutdown; |
189 | struct virqfd **pvirqfd; | 189 | struct virqfd **pvirqfd; |
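struct virqfd above, like KVM's irqfd earlier in this patch, pairs the renamed wait_queue_entry_t with a poll_table: the entry gets attached to whatever wait queue the target file's ->poll() hands back. A rough sketch of that hookup with invented my_* names (not from this patch; the direct ->poll() call mirrors how in-kernel users of this era attach):

struct my_virqfd {
        wait_queue_entry_t      wait;           /* was wait_queue_t */
        poll_table              pt;
};

static int my_virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode,
                            int sync, void *key)
{
        struct my_virqfd *v = container_of(wait, struct my_virqfd, wait);

        /* the real virqfd would schedule its inject work from here */
        return 0;
}

static void my_virqfd_ptable_queue_proc(struct file *file,
                                        wait_queue_head_t *wqh, poll_table *pt)
{
        struct my_virqfd *v = container_of(pt, struct my_virqfd, pt);

        add_wait_queue(wqh, &v->wait);
}

static void my_virqfd_attach(struct my_virqfd *v, struct file *file)
{
        init_waitqueue_func_entry(&v->wait, my_virqfd_wakeup);
        init_poll_funcptr(&v->pt, my_virqfd_ptable_queue_proc);
        /* The target's ->poll() calls poll_wait(), which invokes our queue proc. */
        file->f_op->poll(file, &v->pt);
}
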
diff --git a/include/linux/wait.h b/include/linux/wait.h index db076ca7f11d..5889f0c86ff7 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -10,15 +10,18 @@ | |||
10 | #include <asm/current.h> | 10 | #include <asm/current.h> |
11 | #include <uapi/linux/wait.h> | 11 | #include <uapi/linux/wait.h> |
12 | 12 | ||
13 | typedef struct __wait_queue wait_queue_t; | 13 | typedef struct wait_queue_entry wait_queue_entry_t; |
14 | typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); | 14 | typedef int (*wait_queue_func_t)(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); |
15 | int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); | 15 | int default_wake_function(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); |
16 | 16 | ||
17 | /* __wait_queue::flags */ | 17 | /* wait_queue_entry::flags */ |
18 | #define WQ_FLAG_EXCLUSIVE 0x01 | 18 | #define WQ_FLAG_EXCLUSIVE 0x01 |
19 | #define WQ_FLAG_WOKEN 0x02 | 19 | #define WQ_FLAG_WOKEN 0x02 |
20 | 20 | ||
21 | struct __wait_queue { | 21 | /* |
22 | * A single wait-queue entry structure: | ||
23 | */ | ||
24 | struct wait_queue_entry { | ||
22 | unsigned int flags; | 25 | unsigned int flags; |
23 | void *private; | 26 | void *private; |
24 | wait_queue_func_t func; | 27 | wait_queue_func_t func; |
@@ -34,7 +37,7 @@ struct wait_bit_key { | |||
34 | 37 | ||
35 | struct wait_bit_queue { | 38 | struct wait_bit_queue { |
36 | struct wait_bit_key key; | 39 | struct wait_bit_key key; |
37 | wait_queue_t wait; | 40 | wait_queue_entry_t wait; |
38 | }; | 41 | }; |
39 | 42 | ||
40 | struct __wait_queue_head { | 43 | struct __wait_queue_head { |
@@ -55,7 +58,7 @@ struct task_struct; | |||
55 | .task_list = { NULL, NULL } } | 58 | .task_list = { NULL, NULL } } |
56 | 59 | ||
57 | #define DECLARE_WAITQUEUE(name, tsk) \ | 60 | #define DECLARE_WAITQUEUE(name, tsk) \ |
58 | wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) | 61 | wait_queue_entry_t name = __WAITQUEUE_INITIALIZER(name, tsk) |
59 | 62 | ||
60 | #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ | 63 | #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ |
61 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ | 64 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
@@ -88,7 +91,7 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct | |||
88 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) | 91 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) |
89 | #endif | 92 | #endif |
90 | 93 | ||
91 | static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) | 94 | static inline void init_waitqueue_entry(wait_queue_entry_t *q, struct task_struct *p) |
92 | { | 95 | { |
93 | q->flags = 0; | 96 | q->flags = 0; |
94 | q->private = p; | 97 | q->private = p; |
@@ -96,7 +99,7 @@ static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) | |||
96 | } | 99 | } |
97 | 100 | ||
98 | static inline void | 101 | static inline void |
99 | init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) | 102 | init_waitqueue_func_entry(wait_queue_entry_t *q, wait_queue_func_t func) |
100 | { | 103 | { |
101 | q->flags = 0; | 104 | q->flags = 0; |
102 | q->private = NULL; | 105 | q->private = NULL; |
@@ -159,11 +162,11 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq) | |||
159 | return waitqueue_active(wq); | 162 | return waitqueue_active(wq); |
160 | } | 163 | } |
161 | 164 | ||
162 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); | 165 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); |
163 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); | 166 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait); |
164 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); | 167 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); |
165 | 168 | ||
166 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) | 169 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new) |
167 | { | 170 | { |
168 | list_add(&new->task_list, &head->task_list); | 171 | list_add(&new->task_list, &head->task_list); |
169 | } | 172 | } |
@@ -172,27 +175,27 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) | |||
172 | * Used for wake-one threads: | 175 | * Used for wake-one threads: |
173 | */ | 176 | */ |
174 | static inline void | 177 | static inline void |
175 | __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) | 178 | __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) |
176 | { | 179 | { |
177 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 180 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
178 | __add_wait_queue(q, wait); | 181 | __add_wait_queue(q, wait); |
179 | } | 182 | } |
180 | 183 | ||
181 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, | 184 | static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, |
182 | wait_queue_t *new) | 185 | wait_queue_entry_t *new) |
183 | { | 186 | { |
184 | list_add_tail(&new->task_list, &head->task_list); | 187 | list_add_tail(&new->task_list, &head->task_list); |
185 | } | 188 | } |
186 | 189 | ||
187 | static inline void | 190 | static inline void |
188 | __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait) | 191 | __add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) |
189 | { | 192 | { |
190 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 193 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
191 | __add_wait_queue_tail(q, wait); | 194 | __add_wait_queue_entry_tail(q, wait); |
192 | } | 195 | } |
193 | 196 | ||
194 | static inline void | 197 | static inline void |
195 | __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) | 198 | __remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old) |
196 | { | 199 | { |
197 | list_del(&old->task_list); | 200 | list_del(&old->task_list); |
198 | } | 201 | } |
@@ -249,7 +252,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); | |||
249 | (!__builtin_constant_p(state) || \ | 252 | (!__builtin_constant_p(state) || \ |
250 | state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ | 253 | state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ |
251 | 254 | ||
252 | extern void init_wait_entry(wait_queue_t *__wait, int flags); | 255 | extern void init_wait_entry(wait_queue_entry_t *__wait, int flags); |
253 | 256 | ||
254 | /* | 257 | /* |
255 | * The below macro ___wait_event() has an explicit shadow of the __ret | 258 | * The below macro ___wait_event() has an explicit shadow of the __ret |
@@ -266,7 +269,7 @@ extern void init_wait_entry(wait_queue_t *__wait, int flags); | |||
266 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ | 269 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ |
267 | ({ \ | 270 | ({ \ |
268 | __label__ __out; \ | 271 | __label__ __out; \ |
269 | wait_queue_t __wait; \ | 272 | wait_queue_entry_t __wait; \ |
270 | long __ret = ret; /* explicit shadow */ \ | 273 | long __ret = ret; /* explicit shadow */ \ |
271 | \ | 274 | \ |
272 | init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ | 275 | init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ |
@@ -620,8 +623,8 @@ do { \ | |||
620 | __ret; \ | 623 | __ret; \ |
621 | }) | 624 | }) |
622 | 625 | ||
623 | extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); | 626 | extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); |
624 | extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); | 627 | extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); |
625 | 628 | ||
626 | #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ | 629 | #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ |
627 | ({ \ | 630 | ({ \ |
@@ -967,17 +970,17 @@ do { \ | |||
967 | /* | 970 | /* |
968 | * Waitqueues which are removed from the waitqueue_head at wakeup time | 971 | * Waitqueues which are removed from the waitqueue_head at wakeup time |
969 | */ | 972 | */ |
970 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); | 973 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); |
971 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); | 974 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); |
972 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); | 975 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); |
973 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); | 976 | void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait); |
974 | long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); | 977 | long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout); |
975 | int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 978 | int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); |
976 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 979 | int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); |
977 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 980 | int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); |
978 | 981 | ||
979 | #define DEFINE_WAIT_FUNC(name, function) \ | 982 | #define DEFINE_WAIT_FUNC(name, function) \ |
980 | wait_queue_t name = { \ | 983 | wait_queue_entry_t name = { \ |
981 | .private = current, \ | 984 | .private = current, \ |
982 | .func = function, \ | 985 | .func = function, \ |
983 | .task_list = LIST_HEAD_INIT((name).task_list), \ | 986 | .task_list = LIST_HEAD_INIT((name).task_list), \ |
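The header hunks above are the core of the rename: struct __wait_queue becomes struct wait_queue_entry, the typedef and every prototype follow, and __add_wait_queue_tail*() gains the _entry_ infix. For code that declares its waiter explicitly, only the type name changes, as in this sketch (the my_* names and the bare-bool condition are illustrative):

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_condition;

static void my_sleeper(void)
{
        wait_queue_entry_t wait;                /* formerly wait_queue_t */

        init_waitqueue_entry(&wait, current);
        add_wait_queue(&my_wq, &wait);

        while (!my_condition) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (my_condition)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&my_wq, &wait);
}

static void my_waker(void)
{
        my_condition = true;
        wake_up(&my_wq);                        /* the wake-side API is untouched */
}
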
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index fd60eccb59a6..75e612a45824 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
@@ -62,7 +62,7 @@ struct unix_sock { | |||
62 | #define UNIX_GC_CANDIDATE 0 | 62 | #define UNIX_GC_CANDIDATE 0 |
63 | #define UNIX_GC_MAYBE_CYCLE 1 | 63 | #define UNIX_GC_MAYBE_CYCLE 1 |
64 | struct socket_wq peer_wq; | 64 | struct socket_wq peer_wq; |
65 | wait_queue_t peer_wake; | 65 | wait_queue_entry_t peer_wake; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static inline struct unix_sock *unix_sk(const struct sock *sk) | 68 | static inline struct unix_sock *unix_sk(const struct sock *sk) |
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index aa63451ef20a..1953f8d6063b 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION | 26 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed | 29 | * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed |
30 | * back to the kernel via ioctl from userspace. On architectures where 32- and | 30 | * back to the kernel via ioctl from userspace. On architectures where 32- and |
31 | * 64-bit userspace binaries can be executed it's important that the size of | 31 | * 64-bit userspace binaries can be executed it's important that the size of |
32 | * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we | 32 | * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we |
@@ -49,7 +49,7 @@ struct autofs_packet_hdr { | |||
49 | 49 | ||
50 | struct autofs_packet_missing { | 50 | struct autofs_packet_missing { |
51 | struct autofs_packet_hdr hdr; | 51 | struct autofs_packet_hdr hdr; |
52 | autofs_wqt_t wait_queue_token; | 52 | autofs_wqt_t wait_queue_entry_token; |
53 | int len; | 53 | int len; |
54 | char name[NAME_MAX+1]; | 54 | char name[NAME_MAX+1]; |
55 | }; | 55 | }; |
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index 7c6da423d54e..65b72d0222e7 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h | |||
@@ -108,7 +108,7 @@ enum autofs_notify { | |||
108 | /* v4 multi expire (via pipe) */ | 108 | /* v4 multi expire (via pipe) */ |
109 | struct autofs_packet_expire_multi { | 109 | struct autofs_packet_expire_multi { |
110 | struct autofs_packet_hdr hdr; | 110 | struct autofs_packet_hdr hdr; |
111 | autofs_wqt_t wait_queue_token; | 111 | autofs_wqt_t wait_queue_entry_token; |
112 | int len; | 112 | int len; |
113 | char name[NAME_MAX+1]; | 113 | char name[NAME_MAX+1]; |
114 | }; | 114 | }; |
@@ -123,7 +123,7 @@ union autofs_packet_union { | |||
123 | /* autofs v5 common packet struct */ | 123 | /* autofs v5 common packet struct */ |
124 | struct autofs_v5_packet { | 124 | struct autofs_v5_packet { |
125 | struct autofs_packet_hdr hdr; | 125 | struct autofs_packet_hdr hdr; |
126 | autofs_wqt_t wait_queue_token; | 126 | autofs_wqt_t wait_queue_entry_token; |
127 | __u32 dev; | 127 | __u32 dev; |
128 | __u64 ino; | 128 | __u64 ino; |
129 | __u32 uid; | 129 | __u32 uid; |
diff --git a/kernel/exit.c b/kernel/exit.c index 516acdb0e0ec..7d694437ab44 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1004,7 +1004,7 @@ struct wait_opts { | |||
1004 | int __user *wo_stat; | 1004 | int __user *wo_stat; |
1005 | struct rusage __user *wo_rusage; | 1005 | struct rusage __user *wo_rusage; |
1006 | 1006 | ||
1007 | wait_queue_t child_wait; | 1007 | wait_queue_entry_t child_wait; |
1008 | int notask_error; | 1008 | int notask_error; |
1009 | }; | 1009 | }; |
1010 | 1010 | ||
@@ -1541,7 +1541,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) | |||
1541 | return 0; | 1541 | return 0; |
1542 | } | 1542 | } |
1543 | 1543 | ||
1544 | static int child_wait_callback(wait_queue_t *wait, unsigned mode, | 1544 | static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, |
1545 | int sync, void *key) | 1545 | int sync, void *key) |
1546 | { | 1546 | { |
1547 | struct wait_opts *wo = container_of(wait, struct wait_opts, | 1547 | struct wait_opts *wo = container_of(wait, struct wait_opts, |
diff --git a/kernel/futex.c b/kernel/futex.c index 357348a6cf6b..d6cf71d08f21 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -225,7 +225,7 @@ struct futex_pi_state { | |||
225 | * @requeue_pi_key: the requeue_pi target futex key | 225 | * @requeue_pi_key: the requeue_pi target futex key |
226 | * @bitset: bitset for the optional bitmasked wakeup | 226 | * @bitset: bitset for the optional bitmasked wakeup |
227 | * | 227 | * |
228 | * We use this hashed waitqueue, instead of a normal wait_queue_t, so | 228 | * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so |
229 | * we can wake only the relevant ones (hashed queues may be shared). | 229 | * we can wake only the relevant ones (hashed queues may be shared). |
230 | * | 230 | * |
231 | * A futex_q has a woken state, just like tasks have TASK_RUNNING. | 231 | * A futex_q has a woken state, just like tasks have TASK_RUNNING. |
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 53f9558fa925..13fc5ae9bf2f 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c | |||
@@ -66,7 +66,7 @@ do_wait_for_common(struct completion *x, | |||
66 | if (!x->done) { | 66 | if (!x->done) { |
67 | DECLARE_WAITQUEUE(wait, current); | 67 | DECLARE_WAITQUEUE(wait, current); |
68 | 68 | ||
69 | __add_wait_queue_tail_exclusive(&x->wait, &wait); | 69 | __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); |
70 | do { | 70 | do { |
71 | if (signal_pending_state(state, current)) { | 71 | if (signal_pending_state(state, current)) { |
72 | timeout = -ERESTARTSYS; | 72 | timeout = -ERESTARTSYS; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 326d4f88e2b1..5b36644536ab 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3687,7 +3687,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) | |||
3687 | exception_exit(prev_state); | 3687 | exception_exit(prev_state); |
3688 | } | 3688 | } |
3689 | 3689 | ||
3690 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, | 3690 | int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, |
3691 | void *key) | 3691 | void *key) |
3692 | { | 3692 | { |
3693 | return try_to_wake_up(curr->private, mode, wake_flags); | 3693 | return try_to_wake_up(curr->private, mode, wake_flags); |
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index b8c84c6dee64..301ea02dede0 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
@@ -21,7 +21,7 @@ void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_c | |||
21 | 21 | ||
22 | EXPORT_SYMBOL(__init_waitqueue_head); | 22 | EXPORT_SYMBOL(__init_waitqueue_head); |
23 | 23 | ||
24 | void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) | 24 | void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) |
25 | { | 25 | { |
26 | unsigned long flags; | 26 | unsigned long flags; |
27 | 27 | ||
@@ -32,18 +32,18 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) | |||
32 | } | 32 | } |
33 | EXPORT_SYMBOL(add_wait_queue); | 33 | EXPORT_SYMBOL(add_wait_queue); |
34 | 34 | ||
35 | void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) | 35 | void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) |
36 | { | 36 | { |
37 | unsigned long flags; | 37 | unsigned long flags; |
38 | 38 | ||
39 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 39 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
40 | spin_lock_irqsave(&q->lock, flags); | 40 | spin_lock_irqsave(&q->lock, flags); |
41 | __add_wait_queue_tail(q, wait); | 41 | __add_wait_queue_entry_tail(q, wait); |
42 | spin_unlock_irqrestore(&q->lock, flags); | 42 | spin_unlock_irqrestore(&q->lock, flags); |
43 | } | 43 | } |
44 | EXPORT_SYMBOL(add_wait_queue_exclusive); | 44 | EXPORT_SYMBOL(add_wait_queue_exclusive); |
45 | 45 | ||
46 | void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) | 46 | void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) |
47 | { | 47 | { |
48 | unsigned long flags; | 48 | unsigned long flags; |
49 | 49 | ||
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(remove_wait_queue); | |||
66 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | 66 | static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
67 | int nr_exclusive, int wake_flags, void *key) | 67 | int nr_exclusive, int wake_flags, void *key) |
68 | { | 68 | { |
69 | wait_queue_t *curr, *next; | 69 | wait_queue_entry_t *curr, *next; |
70 | 70 | ||
71 | list_for_each_entry_safe(curr, next, &q->task_list, task_list) { | 71 | list_for_each_entry_safe(curr, next, &q->task_list, task_list) { |
72 | unsigned flags = curr->flags; | 72 | unsigned flags = curr->flags; |
@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ | |||
170 | * loads to move into the critical region). | 170 | * loads to move into the critical region). |
171 | */ | 171 | */ |
172 | void | 172 | void |
173 | prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) | 173 | prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) |
174 | { | 174 | { |
175 | unsigned long flags; | 175 | unsigned long flags; |
176 | 176 | ||
@@ -184,20 +184,20 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
184 | EXPORT_SYMBOL(prepare_to_wait); | 184 | EXPORT_SYMBOL(prepare_to_wait); |
185 | 185 | ||
186 | void | 186 | void |
187 | prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) | 187 | prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) |
188 | { | 188 | { |
189 | unsigned long flags; | 189 | unsigned long flags; |
190 | 190 | ||
191 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 191 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
192 | spin_lock_irqsave(&q->lock, flags); | 192 | spin_lock_irqsave(&q->lock, flags); |
193 | if (list_empty(&wait->task_list)) | 193 | if (list_empty(&wait->task_list)) |
194 | __add_wait_queue_tail(q, wait); | 194 | __add_wait_queue_entry_tail(q, wait); |
195 | set_current_state(state); | 195 | set_current_state(state); |
196 | spin_unlock_irqrestore(&q->lock, flags); | 196 | spin_unlock_irqrestore(&q->lock, flags); |
197 | } | 197 | } |
198 | EXPORT_SYMBOL(prepare_to_wait_exclusive); | 198 | EXPORT_SYMBOL(prepare_to_wait_exclusive); |
199 | 199 | ||
200 | void init_wait_entry(wait_queue_t *wait, int flags) | 200 | void init_wait_entry(wait_queue_entry_t *wait, int flags) |
201 | { | 201 | { |
202 | wait->flags = flags; | 202 | wait->flags = flags; |
203 | wait->private = current; | 203 | wait->private = current; |
@@ -206,7 +206,7 @@ void init_wait_entry(wait_queue_t *wait, int flags) | |||
206 | } | 206 | } |
207 | EXPORT_SYMBOL(init_wait_entry); | 207 | EXPORT_SYMBOL(init_wait_entry); |
208 | 208 | ||
209 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) | 209 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) |
210 | { | 210 | { |
211 | unsigned long flags; | 211 | unsigned long flags; |
212 | long ret = 0; | 212 | long ret = 0; |
@@ -230,7 +230,7 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
230 | } else { | 230 | } else { |
231 | if (list_empty(&wait->task_list)) { | 231 | if (list_empty(&wait->task_list)) { |
232 | if (wait->flags & WQ_FLAG_EXCLUSIVE) | 232 | if (wait->flags & WQ_FLAG_EXCLUSIVE) |
233 | __add_wait_queue_tail(q, wait); | 233 | __add_wait_queue_entry_tail(q, wait); |
234 | else | 234 | else |
235 | __add_wait_queue(q, wait); | 235 | __add_wait_queue(q, wait); |
236 | } | 236 | } |
@@ -249,10 +249,10 @@ EXPORT_SYMBOL(prepare_to_wait_event); | |||
249 | * condition in the caller before they add the wait | 249 | * condition in the caller before they add the wait |
250 | * entry to the wake queue. | 250 | * entry to the wake queue. |
251 | */ | 251 | */ |
252 | int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) | 252 | int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) |
253 | { | 253 | { |
254 | if (likely(list_empty(&wait->task_list))) | 254 | if (likely(list_empty(&wait->task_list))) |
255 | __add_wait_queue_tail(wq, wait); | 255 | __add_wait_queue_entry_tail(wq, wait); |
256 | 256 | ||
257 | set_current_state(TASK_INTERRUPTIBLE); | 257 | set_current_state(TASK_INTERRUPTIBLE); |
258 | if (signal_pending(current)) | 258 | if (signal_pending(current)) |
@@ -265,10 +265,10 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) | |||
265 | } | 265 | } |
266 | EXPORT_SYMBOL(do_wait_intr); | 266 | EXPORT_SYMBOL(do_wait_intr); |
267 | 267 | ||
268 | int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait) | 268 | int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) |
269 | { | 269 | { |
270 | if (likely(list_empty(&wait->task_list))) | 270 | if (likely(list_empty(&wait->task_list))) |
271 | __add_wait_queue_tail(wq, wait); | 271 | __add_wait_queue_entry_tail(wq, wait); |
272 | 272 | ||
273 | set_current_state(TASK_INTERRUPTIBLE); | 273 | set_current_state(TASK_INTERRUPTIBLE); |
274 | if (signal_pending(current)) | 274 | if (signal_pending(current)) |
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(do_wait_intr_irq); | |||
290 | * the wait descriptor from the given waitqueue if still | 290 | * the wait descriptor from the given waitqueue if still |
291 | * queued. | 291 | * queued. |
292 | */ | 292 | */ |
293 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) | 293 | void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait) |
294 | { | 294 | { |
295 | unsigned long flags; | 295 | unsigned long flags; |
296 | 296 | ||
@@ -316,7 +316,7 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) | |||
316 | } | 316 | } |
317 | EXPORT_SYMBOL(finish_wait); | 317 | EXPORT_SYMBOL(finish_wait); |
318 | 318 | ||
319 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | 319 | int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
320 | { | 320 | { |
321 | int ret = default_wake_function(wait, mode, sync, key); | 321 | int ret = default_wake_function(wait, mode, sync, key); |
322 | 322 | ||
@@ -351,7 +351,7 @@ static inline bool is_kthread_should_stop(void) | |||
351 | * remove_wait_queue(&wq, &wait); | 351 | * remove_wait_queue(&wq, &wait); |
352 | * | 352 | * |
353 | */ | 353 | */ |
354 | long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) | 354 | long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout) |
355 | { | 355 | { |
356 | set_current_state(mode); /* A */ | 356 | set_current_state(mode); /* A */ |
357 | /* | 357 | /* |
@@ -375,7 +375,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) | |||
375 | } | 375 | } |
376 | EXPORT_SYMBOL(wait_woken); | 376 | EXPORT_SYMBOL(wait_woken); |
377 | 377 | ||
378 | int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | 378 | int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
379 | { | 379 | { |
380 | /* | 380 | /* |
381 | * Although this function is called under waitqueue lock, LOCK | 381 | * Although this function is called under waitqueue lock, LOCK |
@@ -391,7 +391,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | |||
391 | } | 391 | } |
392 | EXPORT_SYMBOL(woken_wake_function); | 392 | EXPORT_SYMBOL(woken_wake_function); |
393 | 393 | ||
394 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) | 394 | int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) |
395 | { | 395 | { |
396 | struct wait_bit_key *key = arg; | 396 | struct wait_bit_key *key = arg; |
397 | struct wait_bit_queue *wait_bit | 397 | struct wait_bit_queue *wait_bit |
@@ -534,7 +534,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) | |||
534 | return bit_waitqueue(p, 0); | 534 | return bit_waitqueue(p, 0); |
535 | } | 535 | } |
536 | 536 | ||
537 | static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync, | 537 | static int wake_atomic_t_function(wait_queue_entry_t *wait, unsigned mode, int sync, |
538 | void *arg) | 538 | void *arg) |
539 | { | 539 | { |
540 | struct wait_bit_key *key = arg; | 540 | struct wait_bit_key *key = arg; |
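The wait_woken()/woken_wake_function() pair keeps the usage pattern quoted in the comment above it; only the entry type in the prototypes is renamed. A sketch of a caller under the new names (the have_data() condition and the my_* naming are illustrative):

static int my_wait_for_data(wait_queue_head_t *wq, bool (*have_data)(void *),
                            void *cookie, long timeout)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);    /* a wait_queue_entry_t */

        add_wait_queue(wq, &wait);
        while (!have_data(cookie)) {
                timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
                if (!timeout || signal_pending(current))
                        break;
        }
        remove_wait_queue(wq, &wait);

        return have_data(cookie) ? 0 : -EAGAIN;
}
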
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c74bf39ef764..a86688fabc55 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2864,11 +2864,11 @@ bool flush_work(struct work_struct *work) | |||
2864 | EXPORT_SYMBOL_GPL(flush_work); | 2864 | EXPORT_SYMBOL_GPL(flush_work); |
2865 | 2865 | ||
2866 | struct cwt_wait { | 2866 | struct cwt_wait { |
2867 | wait_queue_t wait; | 2867 | wait_queue_entry_t wait; |
2868 | struct work_struct *work; | 2868 | struct work_struct *work; |
2869 | }; | 2869 | }; |
2870 | 2870 | ||
2871 | static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) | 2871 | static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
2872 | { | 2872 | { |
2873 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); | 2873 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); |
2874 | 2874 | ||
diff --git a/mm/filemap.c b/mm/filemap.c index 6f1be573a5e6..80c19ee81e95 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -768,10 +768,10 @@ struct wait_page_key { | |||
768 | struct wait_page_queue { | 768 | struct wait_page_queue { |
769 | struct page *page; | 769 | struct page *page; |
770 | int bit_nr; | 770 | int bit_nr; |
771 | wait_queue_t wait; | 771 | wait_queue_entry_t wait; |
772 | }; | 772 | }; |
773 | 773 | ||
774 | static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) | 774 | static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) |
775 | { | 775 | { |
776 | struct wait_page_key *key = arg; | 776 | struct wait_page_key *key = arg; |
777 | struct wait_page_queue *wait_page | 777 | struct wait_page_queue *wait_page |
@@ -834,7 +834,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, | |||
834 | struct page *page, int bit_nr, int state, bool lock) | 834 | struct page *page, int bit_nr, int state, bool lock) |
835 | { | 835 | { |
836 | struct wait_page_queue wait_page; | 836 | struct wait_page_queue wait_page; |
837 | wait_queue_t *wait = &wait_page.wait; | 837 | wait_queue_entry_t *wait = &wait_page.wait; |
838 | int ret = 0; | 838 | int ret = 0; |
839 | 839 | ||
840 | init_wait(wait); | 840 | init_wait(wait); |
@@ -847,7 +847,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, | |||
847 | 847 | ||
848 | if (likely(list_empty(&wait->task_list))) { | 848 | if (likely(list_empty(&wait->task_list))) { |
849 | if (lock) | 849 | if (lock) |
850 | __add_wait_queue_tail_exclusive(q, wait); | 850 | __add_wait_queue_entry_tail_exclusive(q, wait); |
851 | else | 851 | else |
852 | __add_wait_queue(q, wait); | 852 | __add_wait_queue(q, wait); |
853 | SetPageWaiters(page); | 853 | SetPageWaiters(page); |
@@ -907,7 +907,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr) | |||
907 | * | 907 | * |
908 | * Add an arbitrary @waiter to the wait queue for the nominated @page. | 908 | * Add an arbitrary @waiter to the wait queue for the nominated @page. |
909 | */ | 909 | */ |
910 | void add_page_wait_queue(struct page *page, wait_queue_t *waiter) | 910 | void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) |
911 | { | 911 | { |
912 | wait_queue_head_t *q = page_waitqueue(page); | 912 | wait_queue_head_t *q = page_waitqueue(page); |
913 | unsigned long flags; | 913 | unsigned long flags; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 94172089f52f..9a90b096dc6b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -170,7 +170,7 @@ struct mem_cgroup_event { | |||
170 | */ | 170 | */ |
171 | poll_table pt; | 171 | poll_table pt; |
172 | wait_queue_head_t *wqh; | 172 | wait_queue_head_t *wqh; |
173 | wait_queue_t wait; | 173 | wait_queue_entry_t wait; |
174 | struct work_struct remove; | 174 | struct work_struct remove; |
175 | }; | 175 | }; |
176 | 176 | ||
@@ -1479,10 +1479,10 @@ static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); | |||
1479 | 1479 | ||
1480 | struct oom_wait_info { | 1480 | struct oom_wait_info { |
1481 | struct mem_cgroup *memcg; | 1481 | struct mem_cgroup *memcg; |
1482 | wait_queue_t wait; | 1482 | wait_queue_entry_t wait; |
1483 | }; | 1483 | }; |
1484 | 1484 | ||
1485 | static int memcg_oom_wake_function(wait_queue_t *wait, | 1485 | static int memcg_oom_wake_function(wait_queue_entry_t *wait, |
1486 | unsigned mode, int sync, void *arg) | 1486 | unsigned mode, int sync, void *arg) |
1487 | { | 1487 | { |
1488 | struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; | 1488 | struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; |
@@ -3725,7 +3725,7 @@ static void memcg_event_remove(struct work_struct *work) | |||
3725 | * | 3725 | * |
3726 | * Called with wqh->lock held and interrupts disabled. | 3726 | * Called with wqh->lock held and interrupts disabled. |
3727 | */ | 3727 | */ |
3728 | static int memcg_event_wake(wait_queue_t *wait, unsigned mode, | 3728 | static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, |
3729 | int sync, void *key) | 3729 | int sync, void *key) |
3730 | { | 3730 | { |
3731 | struct mem_cgroup_event *event = | 3731 | struct mem_cgroup_event *event = |
diff --git a/mm/mempool.c b/mm/mempool.c index 47a659dedd44..1c0294858527 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
@@ -312,7 +312,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) | |||
312 | { | 312 | { |
313 | void *element; | 313 | void *element; |
314 | unsigned long flags; | 314 | unsigned long flags; |
315 | wait_queue_t wait; | 315 | wait_queue_entry_t wait; |
316 | gfp_t gfp_temp; | 316 | gfp_t gfp_temp; |
317 | 317 | ||
318 | VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); | 318 | VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); |
diff --git a/mm/shmem.c b/mm/shmem.c index e67d6ba4e98e..a6c7dece4660 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1902,7 +1902,7 @@ unlock: | |||
1902 | * entry unconditionally - even if something else had already woken the | 1902 | * entry unconditionally - even if something else had already woken the |
1903 | * target. | 1903 | * target. |
1904 | */ | 1904 | */ |
1905 | static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | 1905 | static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
1906 | { | 1906 | { |
1907 | int ret = default_wake_function(wait, mode, sync, key); | 1907 | int ret = default_wake_function(wait, mode, sync, key); |
1908 | list_del_init(&wait->task_list); | 1908 | list_del_init(&wait->task_list); |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 7bc2208b6cc4..dca3cdd1a014 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -95,7 +95,7 @@ enum { | |||
95 | 95 | ||
96 | struct p9_poll_wait { | 96 | struct p9_poll_wait { |
97 | struct p9_conn *conn; | 97 | struct p9_conn *conn; |
98 | wait_queue_t wait; | 98 | wait_queue_entry_t wait; |
99 | wait_queue_head_t *wait_addr; | 99 | wait_queue_head_t *wait_addr; |
100 | }; | 100 | }; |
101 | 101 | ||
@@ -522,7 +522,7 @@ error: | |||
522 | clear_bit(Wworksched, &m->wsched); | 522 | clear_bit(Wworksched, &m->wsched); |
523 | } | 523 | } |
524 | 524 | ||
525 | static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) | 525 | static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) |
526 | { | 526 | { |
527 | struct p9_poll_wait *pwait = | 527 | struct p9_poll_wait *pwait = |
528 | container_of(wait, struct p9_poll_wait, wait); | 528 | container_of(wait, struct p9_poll_wait, wait); |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index fbf251fef70f..5c4808b3da2d 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -484,7 +484,7 @@ static int bnep_session(void *arg) | |||
484 | struct net_device *dev = s->dev; | 484 | struct net_device *dev = s->dev; |
485 | struct sock *sk = s->sock->sk; | 485 | struct sock *sk = s->sock->sk; |
486 | struct sk_buff *skb; | 486 | struct sk_buff *skb; |
487 | wait_queue_t wait; | 487 | wait_queue_entry_t wait; |
488 | 488 | ||
489 | BT_DBG(""); | 489 | BT_DBG(""); |
490 | 490 | ||
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 9e59b6654126..14f7c8135c31 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c | |||
@@ -280,7 +280,7 @@ static int cmtp_session(void *arg) | |||
280 | struct cmtp_session *session = arg; | 280 | struct cmtp_session *session = arg; |
281 | struct sock *sk = session->sock->sk; | 281 | struct sock *sk = session->sock->sk; |
282 | struct sk_buff *skb; | 282 | struct sk_buff *skb; |
283 | wait_queue_t wait; | 283 | wait_queue_entry_t wait; |
284 | 284 | ||
285 | BT_DBG("session %p", session); | 285 | BT_DBG("session %p", session); |
286 | 286 | ||
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 0bec4588c3c8..fc31161e98f2 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -1244,7 +1244,7 @@ static void hidp_session_run(struct hidp_session *session) | |||
1244 | static int hidp_session_thread(void *arg) | 1244 | static int hidp_session_thread(void *arg) |
1245 | { | 1245 | { |
1246 | struct hidp_session *session = arg; | 1246 | struct hidp_session *session = arg; |
1247 | wait_queue_t ctrl_wait, intr_wait; | 1247 | wait_queue_entry_t ctrl_wait, intr_wait; |
1248 | 1248 | ||
1249 | BT_DBG("session %p", session); | 1249 | BT_DBG("session %p", session); |
1250 | 1250 | ||
diff --git a/net/core/datagram.c b/net/core/datagram.c index db1866f2ffcf..34678828e2bb 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -68,7 +68,7 @@ static inline int connection_based(struct sock *sk) | |||
68 | return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; | 68 | return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; |
69 | } | 69 | } |
70 | 70 | ||
71 | static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync, | 71 | static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync, |
72 | void *key) | 72 | void *key) |
73 | { | 73 | { |
74 | unsigned long bits = (unsigned long)key; | 74 | unsigned long bits = (unsigned long)key; |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1a0c961f4ffe..c77ced0109b7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -343,7 +343,7 @@ found: | |||
343 | * are still connected to it and there's no way to inform "a polling | 343 | * are still connected to it and there's no way to inform "a polling |
344 | * implementation" that it should let go of a certain wait queue | 344 | * implementation" that it should let go of a certain wait queue |
345 | * | 345 | * |
346 | * In order to propagate a wake up, a wait_queue_t of the client | 346 | * In order to propagate a wake up, a wait_queue_entry_t of the client |
347 | * socket is enqueued on the peer_wait queue of the server socket | 347 | * socket is enqueued on the peer_wait queue of the server socket |
348 | * whose wake function does a wake_up on the ordinary client socket | 348 | * whose wake function does a wake_up on the ordinary client socket |
349 | * wait queue. This connection is established whenever a write (or | 349 | * wait queue. This connection is established whenever a write (or |
@@ -352,7 +352,7 @@ found: | |||
352 | * was relayed. | 352 | * was relayed. |
353 | */ | 353 | */ |
354 | 354 | ||
355 | static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, | 355 | static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, |
356 | void *key) | 356 | void *key) |
357 | { | 357 | { |
358 | struct unix_sock *u; | 358 | struct unix_sock *u; |
diff --git a/sound/core/control.c b/sound/core/control.c index c109b82eef4b..6362da17ac3f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -1577,7 +1577,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, | |||
1577 | struct snd_ctl_event ev; | 1577 | struct snd_ctl_event ev; |
1578 | struct snd_kctl_event *kev; | 1578 | struct snd_kctl_event *kev; |
1579 | while (list_empty(&ctl->events)) { | 1579 | while (list_empty(&ctl->events)) { |
1580 | wait_queue_t wait; | 1580 | wait_queue_entry_t wait; |
1581 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { | 1581 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { |
1582 | err = -EAGAIN; | 1582 | err = -EAGAIN; |
1583 | goto __end_lock; | 1583 | goto __end_lock; |
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index 9602a7e38d8a..a73baa1242be 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c | |||
@@ -85,7 +85,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file) | |||
85 | int major = imajor(inode); | 85 | int major = imajor(inode); |
86 | struct snd_hwdep *hw; | 86 | struct snd_hwdep *hw; |
87 | int err; | 87 | int err; |
88 | wait_queue_t wait; | 88 | wait_queue_entry_t wait; |
89 | 89 | ||
90 | if (major == snd_major) { | 90 | if (major == snd_major) { |
91 | hw = snd_lookup_minor_data(iminor(inode), | 91 | hw = snd_lookup_minor_data(iminor(inode), |
diff --git a/sound/core/init.c b/sound/core/init.c index 6bda8436d765..d61d2b3cd521 100644 --- a/sound/core/init.c +++ b/sound/core/init.c | |||
@@ -989,7 +989,7 @@ EXPORT_SYMBOL(snd_card_file_remove); | |||
989 | */ | 989 | */ |
990 | int snd_power_wait(struct snd_card *card, unsigned int power_state) | 990 | int snd_power_wait(struct snd_card *card, unsigned int power_state) |
991 | { | 991 | { |
992 | wait_queue_t wait; | 992 | wait_queue_entry_t wait; |
993 | int result = 0; | 993 | int result = 0; |
994 | 994 | ||
995 | /* fastpath */ | 995 | /* fastpath */ |
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 36baf962f9b0..cd8b7bef8d06 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
@@ -1554,7 +1554,7 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size) | |||
1554 | ssize_t result = 0; | 1554 | ssize_t result = 0; |
1555 | snd_pcm_state_t state; | 1555 | snd_pcm_state_t state; |
1556 | long res; | 1556 | long res; |
1557 | wait_queue_t wait; | 1557 | wait_queue_entry_t wait; |
1558 | 1558 | ||
1559 | runtime = substream->runtime; | 1559 | runtime = substream->runtime; |
1560 | init_waitqueue_entry(&wait, current); | 1560 | init_waitqueue_entry(&wait, current); |
@@ -2387,7 +2387,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file) | |||
2387 | struct snd_pcm_oss_file *pcm_oss_file; | 2387 | struct snd_pcm_oss_file *pcm_oss_file; |
2388 | struct snd_pcm_oss_setup setup[2]; | 2388 | struct snd_pcm_oss_setup setup[2]; |
2389 | int nonblock; | 2389 | int nonblock; |
2390 | wait_queue_t wait; | 2390 | wait_queue_entry_t wait; |
2391 | 2391 | ||
2392 | err = nonseekable_open(inode, file); | 2392 | err = nonseekable_open(inode, file); |
2393 | if (err < 0) | 2393 | if (err < 0) |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5088d4b8db22..dd5254077ef7 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -1904,7 +1904,7 @@ static int wait_for_avail(struct snd_pcm_substream *substream, | |||
1904 | { | 1904 | { |
1905 | struct snd_pcm_runtime *runtime = substream->runtime; | 1905 | struct snd_pcm_runtime *runtime = substream->runtime; |
1906 | int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; | 1906 | int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; |
1907 | wait_queue_t wait; | 1907 | wait_queue_entry_t wait; |
1908 | int err = 0; | 1908 | int err = 0; |
1909 | snd_pcm_uframes_t avail = 0; | 1909 | snd_pcm_uframes_t avail = 0; |
1910 | long wait_time, tout; | 1910 | long wait_time, tout; |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 13dec5ec93f2..faa2e2be6f2e 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -1652,7 +1652,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, | |||
1652 | struct snd_card *card; | 1652 | struct snd_card *card; |
1653 | struct snd_pcm_runtime *runtime; | 1653 | struct snd_pcm_runtime *runtime; |
1654 | struct snd_pcm_substream *s; | 1654 | struct snd_pcm_substream *s; |
1655 | wait_queue_t wait; | 1655 | wait_queue_entry_t wait; |
1656 | int result = 0; | 1656 | int result = 0; |
1657 | int nonblock = 0; | 1657 | int nonblock = 0; |
1658 | 1658 | ||
@@ -2353,7 +2353,7 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file) | |||
2353 | static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) | 2353 | static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) |
2354 | { | 2354 | { |
2355 | int err; | 2355 | int err; |
2356 | wait_queue_t wait; | 2356 | wait_queue_entry_t wait; |
2357 | 2357 | ||
2358 | if (pcm == NULL) { | 2358 | if (pcm == NULL) { |
2359 | err = -ENODEV; | 2359 | err = -ENODEV; |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index ab890336175f..32588ad05653 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
@@ -368,7 +368,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) | |||
368 | int err; | 368 | int err; |
369 | struct snd_rawmidi *rmidi; | 369 | struct snd_rawmidi *rmidi; |
370 | struct snd_rawmidi_file *rawmidi_file = NULL; | 370 | struct snd_rawmidi_file *rawmidi_file = NULL; |
371 | wait_queue_t wait; | 371 | wait_queue_entry_t wait; |
372 | 372 | ||
373 | if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK)) | 373 | if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK)) |
374 | return -EINVAL; /* invalid combination */ | 374 | return -EINVAL; /* invalid combination */ |
@@ -1002,7 +1002,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun | |||
1002 | while (count > 0) { | 1002 | while (count > 0) { |
1003 | spin_lock_irq(&runtime->lock); | 1003 | spin_lock_irq(&runtime->lock); |
1004 | while (!snd_rawmidi_ready(substream)) { | 1004 | while (!snd_rawmidi_ready(substream)) { |
1005 | wait_queue_t wait; | 1005 | wait_queue_entry_t wait; |
1006 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { | 1006 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { |
1007 | spin_unlock_irq(&runtime->lock); | 1007 | spin_unlock_irq(&runtime->lock); |
1008 | return result > 0 ? result : -EAGAIN; | 1008 | return result > 0 ? result : -EAGAIN; |
@@ -1306,7 +1306,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, | |||
1306 | while (count > 0) { | 1306 | while (count > 0) { |
1307 | spin_lock_irq(&runtime->lock); | 1307 | spin_lock_irq(&runtime->lock); |
1308 | while (!snd_rawmidi_ready_append(substream, count)) { | 1308 | while (!snd_rawmidi_ready_append(substream, count)) { |
1309 | wait_queue_t wait; | 1309 | wait_queue_entry_t wait; |
1310 | if (file->f_flags & O_NONBLOCK) { | 1310 | if (file->f_flags & O_NONBLOCK) { |
1311 | spin_unlock_irq(&runtime->lock); | 1311 | spin_unlock_irq(&runtime->lock); |
1312 | return result > 0 ? result : -EAGAIN; | 1312 | return result > 0 ? result : -EAGAIN; |
@@ -1338,7 +1338,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf, | |||
1338 | if (file->f_flags & O_DSYNC) { | 1338 | if (file->f_flags & O_DSYNC) { |
1339 | spin_lock_irq(&runtime->lock); | 1339 | spin_lock_irq(&runtime->lock); |
1340 | while (runtime->avail != runtime->buffer_size) { | 1340 | while (runtime->avail != runtime->buffer_size) { |
1341 | wait_queue_t wait; | 1341 | wait_queue_entry_t wait; |
1342 | unsigned int last_avail = runtime->avail; | 1342 | unsigned int last_avail = runtime->avail; |
1343 | init_waitqueue_entry(&wait, current); | 1343 | init_waitqueue_entry(&wait, current); |
1344 | add_wait_queue(&runtime->sleep, &wait); | 1344 | add_wait_queue(&runtime->sleep, &wait); |
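
The rawmidi hunks above all sleep while temporarily dropping a spinlock around schedule_timeout(). A sketch of that variant follows; my_lock, my_sleep and my_ready() are hypothetical stand-ins for the driver's own state, not symbols from the patch.

    static DEFINE_SPINLOCK(my_lock);                   /* hypothetical, protects the my_ready() state */
    static DECLARE_WAIT_QUEUE_HEAD(my_sleep);          /* hypothetical sleep queue */
    static bool my_ready(void);                        /* hypothetical "data available" predicate */

    static int wait_until_ready(void)
    {
            wait_queue_entry_t wait;
            long tout;

            spin_lock_irq(&my_lock);
            while (!my_ready()) {
                    init_waitqueue_entry(&wait, current);
                    add_wait_queue(&my_sleep, &wait);
                    set_current_state(TASK_INTERRUPTIBLE);
                    spin_unlock_irq(&my_lock);         /* never call schedule() with a spinlock held */
                    tout = schedule_timeout(msecs_to_jiffies(100));
                    spin_lock_irq(&my_lock);
                    remove_wait_queue(&my_sleep, &wait);
                    if (signal_pending(current)) {
                            spin_unlock_irq(&my_lock);
                            return -ERESTARTSYS;
                    }
                    if (!tout && !my_ready()) {
                            spin_unlock_irq(&my_lock);
                            return -EIO;               /* timed out without becoming ready */
                    }
            }
            spin_unlock_irq(&my_lock);
            return 0;
    }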
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 01c4cfe30c9f..a8c2822e0198 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c | |||
@@ -179,7 +179,7 @@ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f, | |||
179 | { | 179 | { |
180 | struct snd_seq_event_cell *cell; | 180 | struct snd_seq_event_cell *cell; |
181 | unsigned long flags; | 181 | unsigned long flags; |
182 | wait_queue_t wait; | 182 | wait_queue_entry_t wait; |
183 | 183 | ||
184 | if (snd_BUG_ON(!f)) | 184 | if (snd_BUG_ON(!f)) |
185 | return -EINVAL; | 185 | return -EINVAL; |
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index d4c61ec9be13..d6e9aacdc36b 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c | |||
@@ -227,7 +227,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, | |||
227 | struct snd_seq_event_cell *cell; | 227 | struct snd_seq_event_cell *cell; |
228 | unsigned long flags; | 228 | unsigned long flags; |
229 | int err = -EAGAIN; | 229 | int err = -EAGAIN; |
230 | wait_queue_t wait; | 230 | wait_queue_entry_t wait; |
231 | 231 | ||
232 | if (pool == NULL) | 232 | if (pool == NULL) |
233 | return -EINVAL; | 233 | return -EINVAL; |
diff --git a/sound/core/timer.c b/sound/core/timer.c index cd67d1c12cf1..884c3066b028 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c | |||
@@ -1964,7 +1964,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, | |||
1964 | spin_lock_irq(&tu->qlock); | 1964 | spin_lock_irq(&tu->qlock); |
1965 | while ((long)count - result >= unit) { | 1965 | while ((long)count - result >= unit) { |
1966 | while (!tu->qused) { | 1966 | while (!tu->qused) { |
1967 | wait_queue_t wait; | 1967 | wait_queue_entry_t wait; |
1968 | 1968 | ||
1969 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { | 1969 | if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { |
1970 | err = -EAGAIN; | 1970 | err = -EAGAIN; |
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 4dae9ff9ef5a..0b1e4b34b299 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c | |||
@@ -1782,7 +1782,7 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev, | |||
1782 | int val, int port, unsigned long timeout) | 1782 | int val, int port, unsigned long timeout) |
1783 | 1783 | ||
1784 | { | 1784 | { |
1785 | wait_queue_t wait; | 1785 | wait_queue_entry_t wait; |
1786 | 1786 | ||
1787 | init_waitqueue_entry(&wait, current); | 1787 | init_waitqueue_entry(&wait, current); |
1788 | spin_lock_irq(&dev->irq_lock); | 1788 | spin_lock_irq(&dev->irq_lock); |
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c index dccf3db48fe0..8bf2ce32d4a8 100644 --- a/sound/pci/mixart/mixart_core.c +++ b/sound/pci/mixart/mixart_core.c | |||
@@ -239,7 +239,7 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int | |||
239 | struct mixart_msg resp; | 239 | struct mixart_msg resp; |
240 | u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */ | 240 | u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */ |
241 | int err; | 241 | int err; |
242 | wait_queue_t wait; | 242 | wait_queue_entry_t wait; |
243 | long timeout; | 243 | long timeout; |
244 | 244 | ||
245 | init_waitqueue_entry(&wait, current); | 245 | init_waitqueue_entry(&wait, current); |
@@ -284,7 +284,7 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr, | |||
284 | struct mixart_msg *request, u32 notif_event) | 284 | struct mixart_msg *request, u32 notif_event) |
285 | { | 285 | { |
286 | int err; | 286 | int err; |
287 | wait_queue_t wait; | 287 | wait_queue_entry_t wait; |
288 | long timeout; | 288 | long timeout; |
289 | 289 | ||
290 | if (snd_BUG_ON(!notif_event)) | 290 | if (snd_BUG_ON(!notif_event)) |
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c index fe4ba463b57c..1114166c685c 100644 --- a/sound/pci/ymfpci/ymfpci_main.c +++ b/sound/pci/ymfpci/ymfpci_main.c | |||
@@ -781,7 +781,7 @@ static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *su | |||
781 | 781 | ||
782 | static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) | 782 | static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) |
783 | { | 783 | { |
784 | wait_queue_t wait; | 784 | wait_queue_entry_t wait; |
785 | int loops = 4; | 785 | int loops = 4; |
786 | 786 | ||
787 | while (loops-- > 0) { | 787 | while (loops-- > 0) { |
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index a8d540398bbd..9120edf3c94b 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -184,7 +184,7 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic( | |||
184 | * Called with wqh->lock held and interrupts disabled | 184 | * Called with wqh->lock held and interrupts disabled |
185 | */ | 185 | */ |
186 | static int | 186 | static int |
187 | irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) | 187 | irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) |
188 | { | 188 | { |
189 | struct kvm_kernel_irqfd *irqfd = | 189 | struct kvm_kernel_irqfd *irqfd = |
190 | container_of(wait, struct kvm_kernel_irqfd, wait); | 190 | container_of(wait, struct kvm_kernel_irqfd, wait); |
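
The KVM irqfd hunk is the one place in this section where the rename shows up in a function signature rather than a local variable: here the queue entry does not point at a task at all, but carries a wake callback that wake_up() on the head invokes directly, and container_of() recovers the enclosing object. A sketch of that callback style under assumed names (struct my_watcher, my_wakeup() and my_watch() are invented for illustration):

    struct my_watcher {
            wait_queue_entry_t wait;                   /* embedded entry */
            int fired;
    };

    static int my_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
    {
            struct my_watcher *w = container_of(wait, struct my_watcher, wait);

            w->fired = 1;                              /* runs in the waker's context, head lock held */
            return 0;
    }

    static void my_watch(struct my_watcher *w, wait_queue_head_t *wqh)
    {
            init_waitqueue_func_entry(&w->wait, my_wakeup);    /* callback instead of a task */
            add_wait_queue(wqh, &w->wait);
    }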