author		Davide Libenzi <davidel@xmailserver.org>	2009-03-31 18:24:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-01 11:59:20 -0400
commit		37e5540b3c9d838eb20f2ca8ea2eb8072271e403 (patch)
tree		f1788aef6d73d2334e739da7ca87f2900285c262 /net
parent		c0da37753695e010776ccf2200a5731e0f88a9f3 (diff)
epoll keyed wakeups: make sockets use keyed wakeups
Add support for event-aware wakeups to the sockets code.  Events are
delivered to the wakeup target, so that epoll can avoid spurious wakeups
for non-interesting events.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Acked-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Miller <davem@davemloft.net>
Cc: William Lee Irwin III <wli@movementarian.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
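For reference, the call sites changed below rely on the *_poll() wakeup
macros introduced earlier in this series.  In approximate form (a sketch
of include/linux/wait.h from this era, not a verbatim quote), they pass
the poll event mask through as the wait-queue key, which is what lets a
waiter's wake function see which events actually fired:

/*
 * Approximate definitions of the keyed wakeup macros used below
 * (sketch, not verbatim): the event mask "m" is handed to each
 * waiter's wake function as the wait-queue key.
 */
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)			\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))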
Diffstat (limited to 'net')
 net/core/sock.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 0620046e4eba..7dbf3ffb35cc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,7 +1677,7 @@ static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1686,7 +1686,8 @@ static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_sync(sk->sk_sleep);
+		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1700,7 +1701,8 @@ static void sock_def_write_space(struct sock *sk)
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible_sync(sk->sk_sleep);
+			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+						POLLWRNORM | POLLWRBAND);

 		/* Should agree with poll, otherwise some programs break */
 		if (sock_writeable(sk))
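On the waiter side, the key lets a wake function skip waiters that never
asked for the signalled events.  Below is a minimal, self-contained
userspace model of that filtering (illustration only: struct waiter,
wake_up_keyed() and the two example waiters are hypothetical names, not
the kernel API), analogous in spirit to how epoll's wakeup callback can
compare the key against the events registered for a file:

/*
 * Userspace model of keyed wakeups (illustration only, not kernel code).
 * Each waiter records an interest mask; the waker passes the event mask
 * as a "key", and waiters whose interest does not intersect the key are
 * skipped, avoiding spurious wakeups for non-interesting events.
 */
#define _XOPEN_SOURCE 700	/* for POLLRDNORM/POLLWRBAND on some libcs */
#include <poll.h>
#include <stdio.h>

struct waiter {
	const char *name;
	unsigned long interest;		/* events this waiter sleeps on */
	struct waiter *next;
};

/* Walk the "wait queue" and wake only waiters interested in @key. */
static void wake_up_keyed(struct waiter *queue, unsigned long key)
{
	struct waiter *w;

	for (w = queue; w; w = w->next) {
		if (key && !(key & w->interest))
			continue;	/* non-interesting event: skip */
		printf("waking %s for key 0x%lx\n", w->name, key);
	}
}

int main(void)
{
	struct waiter writer = { "writer", POLLOUT | POLLWRNORM, NULL };
	struct waiter reader = { "reader", POLLIN | POLLRDNORM, &writer };

	/* Data arrived: only "reader" is woken (cf. sock_def_readable). */
	wake_up_keyed(&reader, POLLIN | POLLRDNORM | POLLRDBAND);

	/* Send buffer drained: only "writer" is woken (cf. sock_def_write_space). */
	wake_up_keyed(&reader, POLLOUT | POLLWRNORM | POLLWRBAND);
	return 0;
}

Compiled and run, only the reader is woken for the POLLIN key and only
the writer for the POLLOUT key, which is the spurious-wakeup avoidance
the commit message describes.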