about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorEliezer Tamir <eliezer.tamir@linux.intel.com>2013-07-08 09:20:34 -0400
committerDavid S. Miller <davem@davemloft.net>2013-07-08 22:25:45 -0400
commitcbf55001b2ddb814329735641be5d29b08c82b08 (patch)
tree110c1191f4b6699bef04ebdf45e4677c623a7ceb /net
parentc7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1 (diff)
net: rename low latency sockets functions to busy poll
Rename functions in include/net/ll_poll.h to busy wait. Clarify documentation about the expected power use increase. Rename POLL_LL to POLL_BUSY_LOOP. Add need_resched() testing to poll/select busy loops. Note that in select and poll, can_busy_poll is dynamic and is updated continuously to reflect the existence of supported sockets with valid queue information. Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/socket.c12
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 9cbaba98ce4c..6e9ab31e457e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -208,7 +208,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
208 } 208 }
209 spin_unlock_irqrestore(&queue->lock, cpu_flags); 209 spin_unlock_irqrestore(&queue->lock, cpu_flags);
210 210
211 if (sk_valid_ll(sk) && sk_poll_ll(sk, flags & MSG_DONTWAIT)) 211 if (sk_can_busy_loop(sk) &&
212 sk_busy_loop(sk, flags & MSG_DONTWAIT))
212 continue; 213 continue;
213 214
214 /* User doesn't want to wait */ 215 /* User doesn't want to wait */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46ed9afd1f5e..15cbfa94bd8e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1554,9 +1554,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1554 struct sk_buff *skb; 1554 struct sk_buff *skb;
1555 u32 urg_hole = 0; 1555 u32 urg_hole = 0;
1556 1556
1557 if (sk_valid_ll(sk) && skb_queue_empty(&sk->sk_receive_queue) 1557 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1558 && (sk->sk_state == TCP_ESTABLISHED)) 1558 (sk->sk_state == TCP_ESTABLISHED))
1559 sk_poll_ll(sk, nonblock); 1559 sk_busy_loop(sk, nonblock);
1560 1560
1561 lock_sock(sk); 1561 lock_sock(sk);
1562 1562
diff --git a/net/socket.c b/net/socket.c
index 4da14cbd49b6..45afa648364a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1148,7 +1148,7 @@ EXPORT_SYMBOL(sock_create_lite);
1148/* No kernel lock held - perfect */ 1148/* No kernel lock held - perfect */
1149static unsigned int sock_poll(struct file *file, poll_table *wait) 1149static unsigned int sock_poll(struct file *file, poll_table *wait)
1150{ 1150{
1151 unsigned int ll_flag = 0; 1151 unsigned int busy_flag = 0;
1152 struct socket *sock; 1152 struct socket *sock;
1153 1153
1154 /* 1154 /*
@@ -1156,16 +1156,16 @@ static unsigned int sock_poll(struct file *file, poll_table *wait)
1156 */ 1156 */
1157 sock = file->private_data; 1157 sock = file->private_data;
1158 1158
1159 if (sk_valid_ll(sock->sk)) { 1159 if (sk_can_busy_loop(sock->sk)) {
1160 /* this socket can poll_ll so tell the system call */ 1160 /* this socket can poll_ll so tell the system call */
1161 ll_flag = POLL_LL; 1161 busy_flag = POLL_BUSY_LOOP;
1162 1162
1163 /* once, only if requested by syscall */ 1163 /* once, only if requested by syscall */
1164 if (wait && (wait->_key & POLL_LL)) 1164 if (wait && (wait->_key & POLL_BUSY_LOOP))
1165 sk_poll_ll(sock->sk, 1); 1165 sk_busy_loop(sock->sk, 1);
1166 } 1166 }
1167 1167
1168 return ll_flag | sock->ops->poll(file, sock, wait); 1168 return busy_flag | sock->ops->poll(file, sock, wait);
1169} 1169}
1170 1170
1171static int sock_mmap(struct file *file, struct vm_area_struct *vma) 1171static int sock_mmap(struct file *file, struct vm_area_struct *vma)