author    Eliezer Tamir <eliezer.tamir@linux.intel.com>  2013-07-08 09:20:34 -0400
committer David S. Miller <davem@davemloft.net>          2013-07-08 22:25:45 -0400
commit    cbf55001b2ddb814329735641be5d29b08c82b08 (patch)
tree      110c1191f4b6699bef04ebdf45e4677c623a7ceb /include/net/ll_poll.h
parent    c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1 (diff)
net: rename low latency sockets functions to busy poll
Rename functions in include/net/ll_poll.h to busy wait.
Clarify documentation about expected power use increase.
Rename POLL_LL to POLL_BUSY_LOOP.
Add need_resched() testing to poll/select busy loops.

Note that in select and poll, can_busy_poll is dynamic and is updated
continuously to reflect the existence of supported sockets with valid
queue information.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
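For readers unfamiliar with the feature: busy polling is transparent to
applications. A minimal userspace sketch follows (illustration only, not part
of this patch; the port number is arbitrary, and the knob backing
sysctl_net_ll_poll was exposed as a net.core sysctl at the time, later renamed
net.core.busy_poll):

    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct sockaddr_in addr;

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_addr.s_addr = htonl(INADDR_ANY);
            addr.sin_port = htons(12345);        /* arbitrary example port */
            bind(fd, (struct sockaddr *)&addr, sizeof(addr));

            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            /* With the busy-poll sysctl nonzero, the kernel may spin on
             * the NIC receive queue here before sleeping.
             */
            int n = poll(&pfd, 1, 1000);

            printf("poll returned %d\n", n);
            return 0;
    }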
Diffstat (limited to 'include/net/ll_poll.h')
-rw-r--r--  include/net/ll_poll.h  46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
index 0d620ba19bc5..f14dd88dafc8 100644
--- a/include/net/ll_poll.h
+++ b/include/net/ll_poll.h
@@ -37,9 +37,9 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
 #define LL_FLUSH_FAILED		-1
 #define LL_FLUSH_BUSY		-2
 
-static inline unsigned int ll_get_flag(void)
+static inline bool net_busy_loop_on(void)
 {
-	return sysctl_net_ll_poll ? POLL_LL : 0;
+	return sysctl_net_ll_poll;
 }
 
 /* a wrapper to make debug_smp_processor_id() happy
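A hedged sketch of how a poll/select caller can consume the new boolean
together with the renamed flag from the commit message (the actual fs/select.c
wiring is outside this diff):

    /* Illustration only: turn the global on/off knob into the
     * POLL_BUSY_LOOP flag that poll/select pass down.
     */
    unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;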
@@ -47,7 +47,7 @@ static inline unsigned int ll_get_flag(void)
  * we only care that the average is bounded
  */
 #ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 ll_sched_clock(void)
+static inline u64 busy_loop_sched_clock(void)
 {
 	u64 rc;
 
@@ -58,7 +58,7 @@ static inline u64 ll_sched_clock(void)
 	return rc;
 }
 #else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 ll_sched_clock(void)
+static inline u64 busy_loop_sched_clock(void)
 {
 	return sched_clock();
 }
@@ -67,7 +67,7 @@ static inline u64 ll_sched_clock(void)
 /* we don't mind a ~2.5% imprecision so <<10 instead of *1000
  * sk->sk_ll_usec is a u_int so this can't overflow
  */
-static inline u64 ll_sk_run_time(struct sock *sk)
+static inline u64 sk_busy_loop_end_time(struct sock *sk)
 {
 	return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10;
 }
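The "~2.5%" in the comment above comes from scaling microseconds with a shift
instead of a multiply: (1024 - 1000) / 1000 = 2.4%, always an overestimate. A
standalone sketch of the arithmetic (illustration only, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t usec = 50;                /* e.g. sk->sk_ll_usec = 50 */
            uint64_t approx = usec << 10;      /* 51200: shift-based "nanoseconds" */
            uint64_t exact  = usec * 1000;     /* 50000: exact nanoseconds */

            /* prints error=2.4%, hence the comment's "~2.5%" */
            printf("approx=%llu exact=%llu error=%.1f%%\n",
                   (unsigned long long)approx, (unsigned long long)exact,
                   100.0 * (approx - exact) / exact);
            return 0;
    }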
@@ -75,27 +75,29 @@ static inline u64 ll_sk_run_time(struct sock *sk)
 /* in poll/select we use the global sysctl_net_ll_poll value
  * only call sched_clock() if enabled
  */
-static inline u64 ll_run_time(void)
+static inline u64 busy_loop_end_time(void)
 {
 	return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10;
 }
 
-/* if flag is not set we don't need to know the time */
-static inline u64 ll_start_time(unsigned int flag)
+/* if flag is not set we don't need to know the time
+ * so we want to avoid a potentially expensive sched_clock()
+ */
+static inline u64 busy_loop_start_time(unsigned int flag)
 {
-	return flag ? ll_sched_clock() : 0;
+	return flag ? busy_loop_sched_clock() : 0;
 }
 
-static inline bool sk_valid_ll(struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
 {
 	return sk->sk_ll_usec && sk->sk_napi_id &&
 	       !need_resched() && !signal_pending(current);
 }
 
 /* careful! time_in_range64 will evaluate now twice */
-static inline bool can_poll_ll(u64 start_time, u64 run_time)
+static inline bool busy_loop_range(u64 start_time, u64 run_time)
 {
-	u64 now = ll_sched_clock();
+	u64 now = busy_loop_sched_clock();
 
 	return time_in_range64(now, start_time, start_time + run_time);
 }
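The "careful!" comment exists because time_in_range64() is a macro and expands
its first argument textually; the local "now" above keeps the clock from being
read twice per check. A simplified stand-in demonstrates the hazard
(illustration only; the real kernel macro also handles u64 wraparound):

    #include <stdint.h>

    /* simplified stand-in for the kernel macro */
    #define time_in_range64(a, b, c)  ((a) >= (b) && (a) <= (c))

    static uint64_t reads;                  /* counts clock invocations */

    static uint64_t fake_clock(void)
    {
            return ++reads;
    }

    int main(void)
    {
            /* passing the clock call directly evaluates it twice ... */
            (void)time_in_range64(fake_clock(), 0, 100);
            /* ... so reads == 2 here; capturing the value in a local
             * first, as busy_loop_range() does, reads the clock once.
             */
            return reads == 2 ? 0 : 1;
    }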
@@ -103,10 +105,10 @@ static inline bool can_poll_ll(u64 start_time, u64 run_time)
 /* when used in sock_poll() nonblock is known at compile time to be true
  * so the loop and end_time will be optimized out
  */
-static inline bool sk_poll_ll(struct sock *sk, int nonblock)
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 {
-	u64 start_time = ll_start_time(!nonblock);
-	u64 run_time = ll_sk_run_time(sk);
+	u64 start_time = busy_loop_start_time(!nonblock);
+	u64 end_time = sk_busy_loop_end_time(sk);
 	const struct net_device_ops *ops;
 	struct napi_struct *napi;
 	int rc = false;
@@ -137,7 +139,7 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock)
 			LINUX_MIB_LOWLATENCYRXPACKETS, rc);
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
-		 can_poll_ll(start_time, run_time));
+		 busy_loop_range(start_time, end_time));
 
 	rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
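For context, this is how the predicate and the loop pair up at a receive-path
call site (a hedged sketch modeled loosely on net/core/datagram.c; not part of
this diff):

    /* Illustration only: spin briefly on an empty receive queue
     * before falling back to the normal sleeping wait.
     */
    if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
            sk_busy_loop(sk, flags & MSG_DONTWAIT);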
@@ -158,27 +160,27 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 }
 
 #else /* CONFIG_NET_LL_RX_POLL */
-static inline unsigned long ll_get_flag(void)
+static inline unsigned long net_busy_loop_on(void)
 {
 	return 0;
 }
 
-static inline u64 ll_start_time(unsigned int flag)
+static inline u64 busy_loop_start_time(unsigned int flag)
 {
 	return 0;
 }
 
-static inline u64 ll_run_time(void)
+static inline u64 busy_loop_end_time(void)
 {
 	return 0;
 }
 
-static inline bool sk_valid_ll(struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
 {
 	return false;
 }
 
-static inline bool sk_poll_ll(struct sock *sk, int nonblock)
+static inline bool sk_busy_poll(struct sock *sk, int nonblock)
 {
 	return false;
 }
@@ -191,7 +193,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }
 
-static inline bool can_poll_ll(u64 start_time, u64 run_time)
+static inline bool busy_loop_range(u64 start_time, u64 run_time)
 {
 	return false;
 }