author    Chris Mason <clm@fb.com>  2015-02-17 16:46:07 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-17 17:34:52 -0500
commit    e22553e2a25ed3f2a9c874088e0f20cdcd97c7b0 (patch)
tree      134114516dc3fa8418a6b8a1829fae8279d4d754 /fs
parent    7647f14fe4cd98151f8e90656c01fe61044de714 (diff)
eventfd: don't take the spinlock in eventfd_poll
The spinlock in eventfd_poll is trying to protect the count of events so it can decide whether it should return POLLIN, POLLERR, or POLLOUT. But because of the way the lock is dropped after calling poll_wait (which takes and releases it internally) and dropped again before returning, we have the same pile of races with the lock as we do with a single read of ctx->count.

This replaces the lock with a read barrier and a single read. eventfd_write does a single bump of ctx->count, so this should not add new races with adding events. eventfd_read is similar: it does a single decrement with the lock held, so we are only making the race with concurrent readers slightly larger.

This spinlock is the top CPU user in kernel code during one of our workloads. Removing it gives us a ~2% boost.

[arnd@arndb.de: avoid unused variable warning]
[dan.carpenter@oracle.com: fix type bug in eventfd_poll()]

Signed-off-by: Chris Mason <clm@fb.com>
Cc: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
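For context, here is a minimal userspace sketch (not part of this patch; it only uses the standard eventfd(2) and poll(2) APIs) of the semantics eventfd_poll() computes: POLLIN once the counter is nonzero, POLLOUT while the counter still has room to grow, and POLLERR only if the counter reaches ULLONG_MAX.

/*
 * Hypothetical demo, assuming Linux with <sys/eventfd.h>.
 * A fresh eventfd has count == 0, so only POLLOUT is reported;
 * after writing 1, both POLLIN and POLLOUT are reported.
 */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, EFD_NONBLOCK);
	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	struct pollfd pfd = { .fd = efd, .events = POLLIN | POLLOUT };

	poll(&pfd, 1, 0);	/* count == 0: expect POLLOUT only */
	printf("before write: POLLIN=%d POLLOUT=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLOUT));

	uint64_t v = 1;		/* bump the counter, as eventfd_write does */
	if (write(efd, &v, sizeof(v)) != sizeof(v))
		perror("write");

	poll(&pfd, 1, 0);	/* count == 1: expect POLLIN | POLLOUT */
	printf("after write:  POLLIN=%d POLLOUT=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLOUT));

	close(efd);
	return 0;
}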
Diffstat (limited to 'fs')
fs/eventfd.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 4b0a226024fa..8d0c0df01854 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -118,18 +118,18 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
 {
 	struct eventfd_ctx *ctx = file->private_data;
 	unsigned int events = 0;
-	unsigned long flags;
+	u64 count;
 
 	poll_wait(file, &ctx->wqh, wait);
+	smp_rmb();
+	count = ctx->count;
 
-	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	if (ctx->count > 0)
+	if (count > 0)
 		events |= POLLIN;
-	if (ctx->count == ULLONG_MAX)
+	if (count == ULLONG_MAX)
 		events |= POLLERR;
-	if (ULLONG_MAX - 1 > ctx->count)
+	if (ULLONG_MAX - 1 > count)
 		events |= POLLOUT;
-	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return events;
 }