author		Davide Libenzi <davidel@xmailserver.org>	2007-05-11 01:23:19 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-11 11:29:36 -0400
commit		e1ad7468c77ddb94b0615d5f50fa255525fde0f0 (patch)
tree		856be1a028fece7e1fa10b7b585096839913fe2e /fs
parent		83f5d1266926c75890f1bc4678e49d79483cb573 (diff)
signal/timer/event: eventfd core
This is a very simple and light file descriptor that can be used as an event wait/dispatch mechanism by userspace (both wait and dispatch) and by the kernel (dispatch only). It can be used instead of pipe(2) in all cases where a pipe would simply be used to signal events. Its kernel overhead is much lower than a pipe's, and it does not consume two fds. When used in the kernel, it can offer an fd-bridge to enable, for example, functionalities like KAIO or syslets/threadlets to signal the completion of certain operations to an fd. More generally, an eventfd can be used by the kernel to signal readiness, in a POSIX poll/select way, of interfaces that would otherwise be incompatible with it.

The API is:

int eventfd(unsigned int count);

The eventfd API accepts an initial "count" parameter and returns an eventfd fd. It supports poll(2) (POLLIN, POLLOUT, POLLERR), read(2) and write(2).

The POLLIN flag is raised when the internal counter is greater than zero. The POLLOUT flag is raised when at least a value of "1" can be written to the internal counter. The POLLERR flag is raised when an overflow in the counter value is detected.

The write(2) operation can never overflow the counter, since it blocks (unless O_NONBLOCK is set, in which case -EAGAIN is returned). But the eventfd_signal() function can, since it is supposed to not sleep during its operation.

The read(2) function reads the __u64 counter value and resets the internal value to zero. If the value read is equal to (__u64) -1, an overflow happened on the internal counter (due to 2^64 eventfd_signal() posts that have never been retired - unlikely, but possible). The write(2) call writes an __u64 count value and adds it to the current counter. The eventfd fd also supports O_NONBLOCK.

On the kernel side, we have:

struct file *eventfd_fget(int fd);
int eventfd_signal(struct file *file, unsigned int n);

The eventfd_fget() function should be called to get a struct file * from an eventfd fd (this is an fget() plus a check that f_op is the eventfd fops pointer). The kernel can then call eventfd_signal() every time it wants to post an event to userspace. The eventfd_signal() function can be called from any context.

A simple eventfd() test and benchmark is available here:

http://www.xmailserver.org/eventfd-bench.c

This is the eventfd-based version of pipetest-4 (pipe(2) based):

http://www.xmailserver.org/pipetest-4.c

Not that performance matters much in the eventfd case, but eventfd-bench shows almost double the performance of pipetest-4.

[akpm@linux-foundation.org: fix i386 build]
[akpm@linux-foundation.org: add sys_eventfd to sys_ni.c]
Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
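To make the read/write semantics above concrete, here is a minimal userspace sketch (not part of this patch). It assumes the glibc eventfd(2) wrapper from <sys/eventfd.h>, which postdates this commit and takes an extra flags argument; against a kernel of this vintage one would call the raw syscall instead, e.g. syscall(__NR_eventfd, 0).

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
        uint64_t cnt;
        int efd = eventfd(0, 0);        /* internal counter starts at 0 */

        if (efd < 0) {
                perror("eventfd");
                return 1;
        }
        cnt = 3;
        write(efd, &cnt, sizeof(cnt));  /* counter: 0 + 3 = 3 */
        cnt = 4;
        write(efd, &cnt, sizeof(cnt));  /* counter: 3 + 4 = 7 */
        read(efd, &cnt, sizeof(cnt));   /* returns 7, resets counter to 0 */
        printf("read back %llu\n", (unsigned long long) cnt);
        close(efd);
        return 0;
}

With the counter back at zero, a further read(2) would block (or fail with -EAGAIN under O_NONBLOCK), which is exactly the pipe-like signaling behavior described above.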
Diffstat (limited to 'fs')
-rw-r--r--	fs/Makefile	1
-rw-r--r--	fs/eventfd.c	228
2 files changed, 229 insertions(+), 0 deletions(-)
diff --git a/fs/Makefile b/fs/Makefile
index 39625da9e2d6..720c29d57a62 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_EPOLL) += eventpoll.o
 obj-$(CONFIG_ANON_INODES)	+= anon_inodes.o
 obj-$(CONFIG_SIGNALFD)		+= signalfd.o
 obj-$(CONFIG_TIMERFD)		+= timerfd.o
+obj-$(CONFIG_EVENTFD)		+= eventfd.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
 
 nfsd-$(CONFIG_NFSD)		:= nfsctl.o
diff --git a/fs/eventfd.c b/fs/eventfd.c
new file mode 100644
index 000000000000..480e2b3c4166
--- /dev/null
+++ b/fs/eventfd.c
@@ -0,0 +1,228 @@
+/*
+ * fs/eventfd.c
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/anon_inodes.h>
+#include <linux/eventfd.h>
+
+struct eventfd_ctx {
+        spinlock_t lock;
+        wait_queue_head_t wqh;
+        /*
+         * Every time that a write(2) is performed on an eventfd, the
+         * value of the __u64 being written is added to "count" and a
+         * wakeup is performed on "wqh". A read(2) will return the "count"
+         * value to userspace, and will reset "count" to zero. The kernel-
+         * side eventfd_signal() also adds to the "count" counter and
+         * issues a wakeup.
+         */
+        __u64 count;
+};
+
+/*
+ * Adds "n" to the eventfd counter "count". Returns "n" in case of
+ * success, or a value lower than "n" in case of counter overflow.
+ * This function is supposed to be called by the kernel in paths
+ * that do not allow sleeping. In this function we allow the counter
+ * to reach the ULLONG_MAX value, and we signal this as an overflow
+ * condition by returning a POLLERR to poll(2).
+ */
+int eventfd_signal(struct file *file, int n)
+{
+        struct eventfd_ctx *ctx = file->private_data;
+        unsigned long flags;
+
+        if (n < 0)
+                return -EINVAL;
+        spin_lock_irqsave(&ctx->lock, flags);
+        if (ULLONG_MAX - ctx->count < n)
+                n = (int) (ULLONG_MAX - ctx->count);
+        ctx->count += n;
+        if (waitqueue_active(&ctx->wqh))
+                wake_up_locked(&ctx->wqh);
+        spin_unlock_irqrestore(&ctx->lock, flags);
+
+        return n;
+}
+
+static int eventfd_release(struct inode *inode, struct file *file)
+{
+        kfree(file->private_data);
+        return 0;
+}
+
+static unsigned int eventfd_poll(struct file *file, poll_table *wait)
+{
+        struct eventfd_ctx *ctx = file->private_data;
+        unsigned int events = 0;
+        unsigned long flags;
+
+        poll_wait(file, &ctx->wqh, wait);
+
+        spin_lock_irqsave(&ctx->lock, flags);
+        if (ctx->count > 0)
+                events |= POLLIN;
+        if (ctx->count == ULLONG_MAX)
+                events |= POLLERR;
+        if (ULLONG_MAX - 1 > ctx->count)
+                events |= POLLOUT;
+        spin_unlock_irqrestore(&ctx->lock, flags);
+
+        return events;
+}
+
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+                            loff_t *ppos)
+{
+        struct eventfd_ctx *ctx = file->private_data;
+        ssize_t res;
+        __u64 ucnt;
+        DECLARE_WAITQUEUE(wait, current);
+
+        if (count < sizeof(ucnt))
+                return -EINVAL;
+        spin_lock_irq(&ctx->lock);
+        res = -EAGAIN;
+        ucnt = ctx->count;
+        if (ucnt > 0)
+                res = sizeof(ucnt);
+        else if (!(file->f_flags & O_NONBLOCK)) {
+                __add_wait_queue(&ctx->wqh, &wait);
+                for (res = 0;;) {
+                        set_current_state(TASK_INTERRUPTIBLE);
+                        if (ctx->count > 0) {
+                                ucnt = ctx->count;
+                                res = sizeof(ucnt);
+                                break;
+                        }
+                        if (signal_pending(current)) {
+                                res = -ERESTARTSYS;
+                                break;
+                        }
+                        spin_unlock_irq(&ctx->lock);
+                        schedule();
+                        spin_lock_irq(&ctx->lock);
+                }
+                __remove_wait_queue(&ctx->wqh, &wait);
+                __set_current_state(TASK_RUNNING);
+        }
+        if (res > 0) {
+                ctx->count = 0;
+                if (waitqueue_active(&ctx->wqh))
+                        wake_up_locked(&ctx->wqh);
+        }
+        spin_unlock_irq(&ctx->lock);
+        if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
+                return -EFAULT;
+
+        return res;
+}
+
+static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
+                             loff_t *ppos)
+{
+        struct eventfd_ctx *ctx = file->private_data;
+        ssize_t res;
+        __u64 ucnt;
+        DECLARE_WAITQUEUE(wait, current);
+
+        if (count < sizeof(ucnt))
+                return -EINVAL;
+        if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
+                return -EFAULT;
+        if (ucnt == ULLONG_MAX)
+                return -EINVAL;
+        spin_lock_irq(&ctx->lock);
+        res = -EAGAIN;
+        if (ULLONG_MAX - ctx->count > ucnt)
+                res = sizeof(ucnt);
+        else if (!(file->f_flags & O_NONBLOCK)) {
+                __add_wait_queue(&ctx->wqh, &wait);
+                for (res = 0;;) {
+                        set_current_state(TASK_INTERRUPTIBLE);
+                        if (ULLONG_MAX - ctx->count > ucnt) {
+                                res = sizeof(ucnt);
+                                break;
+                        }
+                        if (signal_pending(current)) {
+                                res = -ERESTARTSYS;
+                                break;
+                        }
+                        spin_unlock_irq(&ctx->lock);
+                        schedule();
+                        spin_lock_irq(&ctx->lock);
+                }
+                __remove_wait_queue(&ctx->wqh, &wait);
+                __set_current_state(TASK_RUNNING);
+        }
+        if (res > 0) {
+                ctx->count += ucnt;
+                if (waitqueue_active(&ctx->wqh))
+                        wake_up_locked(&ctx->wqh);
+        }
+        spin_unlock_irq(&ctx->lock);
+
+        return res;
+}
+
+static const struct file_operations eventfd_fops = {
+        .release = eventfd_release,
+        .poll    = eventfd_poll,
+        .read    = eventfd_read,
+        .write   = eventfd_write,
+};
+
+struct file *eventfd_fget(int fd)
+{
+        struct file *file;
+
+        file = fget(fd);
+        if (!file)
+                return ERR_PTR(-EBADF);
+        if (file->f_op != &eventfd_fops) {
+                fput(file);
+                return ERR_PTR(-EINVAL);
+        }
+
+        return file;
+}
+
+asmlinkage long sys_eventfd(unsigned int count)
+{
+        int error, fd;
+        struct eventfd_ctx *ctx;
+        struct file *file;
+        struct inode *inode;
+
+        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+        if (!ctx)
+                return -ENOMEM;
+
+        init_waitqueue_head(&ctx->wqh);
+        spin_lock_init(&ctx->lock);
+        ctx->count = count;
+
+        /*
+         * When we call this, the initialization must be complete, since
+         * anon_inode_getfd() will install the fd.
+         */
+        error = anon_inode_getfd(&fd, &inode, &file, "[eventfd]",
+                                 &eventfd_fops, ctx);
+        if (!error)
+                return fd;
+
+        kfree(ctx);
+        return error;
+}
+
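As a purely illustrative aside, the kernel-side pattern enabled by eventfd_fget() and eventfd_signal() might look like the hypothetical sketch below; the example_* names and the notify_file pointer are assumptions for illustration, not part of this patch.

#include <linux/err.h>
#include <linux/file.h>
#include <linux/eventfd.h>

static struct file *notify_file;        /* hypothetical per-device state */

/* Userspace hands in an eventfd fd; resolve and pin it once. */
static int example_set_eventfd(int fd)
{
        struct file *file = eventfd_fget(fd);

        if (IS_ERR(file))
                return PTR_ERR(file);
        notify_file = file;
        return 0;
}

/* Post a completion; safe from any context, including IRQ. */
static void example_complete(void)
{
        if (notify_file)
                eventfd_signal(notify_file, 1);
}

/* Drop the reference taken by eventfd_fget() when done. */
static void example_clear_eventfd(void)
{
        if (notify_file) {
                fput(notify_file);
                notify_file = NULL;
        }
}

Userspace would then poll(2) or read(2) its end of the eventfd to observe the posted completions, exactly as in the API description above.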