diff options
author | Amos Kong <akong@redhat.com> | 2011-06-09 03:27:10 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-06-09 03:27:10 -0400 |
commit | 61a5ff15ebdab87887861a6b128b108404e4706d (patch) | |
tree | e439d62aa7299ad6644b37b3e569ca3de8c0f60b /drivers/net/tun.c | |
parent | 6f7c156c08d5eaa9fff2bd062f0a2b9d09a1e7a9 (diff) |
tun: do not put self in waitq if doing a nonblock read
Perf shows a relatively high rate (about 8%) race in
spin_lock_irqsave() when doing netperf between external host and
guest. It's mainly because of the lock contention between
tun_do_read() and tun_xmit_skb(), so this patch does not put self into
the waitqueue, to reduce this kind of race. After this patch, it drops to
4%.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Amos Kong <akong@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r-- | drivers/net/tun.c | 6 |
1 files changed, 4 insertions, 2 deletions
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2829badbae38..ef68e13c042d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -817,7 +817,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, | |||
817 | 817 | ||
818 | tun_debug(KERN_INFO, tun, "tun_chr_read\n"); | 818 | tun_debug(KERN_INFO, tun, "tun_chr_read\n"); |
819 | 819 | ||
820 | add_wait_queue(&tun->wq.wait, &wait); | 820 | if (unlikely(!noblock)) |
821 | add_wait_queue(&tun->wq.wait, &wait); | ||
821 | while (len) { | 822 | while (len) { |
822 | current->state = TASK_INTERRUPTIBLE; | 823 | current->state = TASK_INTERRUPTIBLE; |
823 | 824 | ||
@@ -848,7 +849,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, | |||
848 | } | 849 | } |
849 | 850 | ||
850 | current->state = TASK_RUNNING; | 851 | current->state = TASK_RUNNING; |
851 | remove_wait_queue(&tun->wq.wait, &wait); | 852 | if (unlikely(!noblock)) |
853 | remove_wait_queue(&tun->wq.wait, &wait); | ||
852 | 854 | ||
853 | return ret; | 855 | return ret; |
854 | } | 856 | } |