aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorDavide Libenzi <davidel@xmailserver.org>2007-05-18 15:02:33 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-18 16:09:34 -0400
commitd48eb2331595224ffe89665e79721d44b40bb047 (patch)
treeb4e398ec71e0775a441329b60cb0771c43e92c54 /fs
parent347b4599dd6ffef27e18c227532d1ec66556000b (diff)
eventfd use waitqueue lock ...
The eventfd was using the unlocked waitqueue operations, but it was
using a different lock, so poll_wait() would race with it. This makes
eventfd directly use the waitqueue lock.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--  fs/eventfd.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 480e2b3c4166..2ce19c000d2a 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -17,7 +17,6 @@
 #include <linux/eventfd.h>
 
 struct eventfd_ctx {
-	spinlock_t lock;
 	wait_queue_head_t wqh;
 	/*
 	 * Every time that a write(2) is performed on an eventfd, the
@@ -45,13 +44,13 @@ int eventfd_signal(struct file *file, int n)
 
 	if (n < 0)
 		return -EINVAL;
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	if (ULLONG_MAX - ctx->count < n)
 		n = (int) (ULLONG_MAX - ctx->count);
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked(&ctx->wqh);
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
@@ -70,14 +69,14 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &ctx->wqh, wait);
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	if (ctx->count > 0)
 		events |= POLLIN;
 	if (ctx->count == ULLONG_MAX)
 		events |= POLLERR;
 	if (ULLONG_MAX - 1 > ctx->count)
 		events |= POLLOUT;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return events;
 }
@@ -92,7 +91,7 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 
 	if (count < sizeof(ucnt))
 		return -EINVAL;
-	spin_lock_irq(&ctx->lock);
+	spin_lock_irq(&ctx->wqh.lock);
 	res = -EAGAIN;
 	ucnt = ctx->count;
 	if (ucnt > 0)
@@ -110,9 +109,9 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 			res = -ERESTARTSYS;
 			break;
 		}
-		spin_unlock_irq(&ctx->lock);
+		spin_unlock_irq(&ctx->wqh.lock);
 		schedule();
-		spin_lock_irq(&ctx->lock);
+		spin_lock_irq(&ctx->wqh.lock);
 	}
 	__remove_wait_queue(&ctx->wqh, &wait);
 	__set_current_state(TASK_RUNNING);
@@ -122,7 +121,7 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked(&ctx->wqh);
 	}
-	spin_unlock_irq(&ctx->lock);
+	spin_unlock_irq(&ctx->wqh.lock);
 	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
 		return -EFAULT;
 
@@ -143,7 +142,7 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 		return -EFAULT;
 	if (ucnt == ULLONG_MAX)
 		return -EINVAL;
-	spin_lock_irq(&ctx->lock);
+	spin_lock_irq(&ctx->wqh.lock);
 	res = -EAGAIN;
 	if (ULLONG_MAX - ctx->count > ucnt)
 		res = sizeof(ucnt);
@@ -159,9 +158,9 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 			res = -ERESTARTSYS;
 			break;
 		}
-		spin_unlock_irq(&ctx->lock);
+		spin_unlock_irq(&ctx->wqh.lock);
 		schedule();
-		spin_lock_irq(&ctx->lock);
+		spin_lock_irq(&ctx->wqh.lock);
 	}
 	__remove_wait_queue(&ctx->wqh, &wait);
 	__set_current_state(TASK_RUNNING);
@@ -171,7 +170,7 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked(&ctx->wqh);
 	}
-	spin_unlock_irq(&ctx->lock);
+	spin_unlock_irq(&ctx->wqh.lock);
 
 	return res;
 }
@@ -210,7 +209,6 @@ asmlinkage long sys_eventfd(unsigned int count)
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
-	spin_lock_init(&ctx->lock);
	ctx->count = count;
 
	/*