author    Govindarajulu Varadarajan <_govind@gmx.com>  2014-06-23 06:38:04 -0400
committer David S. Miller <davem@davemloft.net>        2014-06-23 17:32:19 -0400
commit    14747cd977195a8aae13d0b1ad021e33c8786afe
tree      25f8225f9c5f9cf8d9abd29f98417b193d44a94e /drivers/net/ethernet/cisco
parent    8e091340cfcd6f96ca0dddb078ce28c407a6d44c
enic: add low latency socket busy_poll support
This patch adds support for low-latency busy_poll:

* Introduce the driver's ndo_busy_poll function, enic_busy_poll, which is
  called by a socket waiting for data.

* Introduce locking between napi_poll and busy_poll.

* enic_busy_poll cleans up all the rx packets it can. While in busy_poll,
  the rq holds the state ENIC_POLL_STATE_POLL; while in napi_poll, the rq
  holds the state ENIC_POLL_STATE_NAPI.

* In napi_poll we return immediately if busy_poll is in progress. In case
  of INTx & MSI-X, we just service the wq and return if busy_poll is going
  on.

Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
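For context (not part of this patch): an application opts in to the busy-poll
path, and hence the driver's ndo_busy_poll handler, through the
net.core.busy_read/busy_poll sysctls or the SO_BUSY_POLL socket option. A
minimal userspace sketch follows, assuming a CONFIG_NET_RX_BUSY_POLL kernel
and libc headers that expose SO_BUSY_POLL; the port number and timeout are
arbitrary example values.

/* Example only: UDP receiver that requests up to 50us of busy polling
 * per blocking read via SO_BUSY_POLL (a core socket option, not part
 * of this patch). Raising the value may require CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int busy_usecs = 50;		/* example busy-poll budget */
	struct sockaddr_in addr;
	char buf[2048];

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
		       &busy_usecs, sizeof(busy_usecs)) < 0)
		perror("setsockopt(SO_BUSY_POLL)");

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(12345);	/* arbitrary example port */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return 1;
	}

	/* recv() now spins in the driver's busy-poll handler (e.g.
	 * enic_busy_poll) for up to busy_usecs before sleeping. */
	recv(fd, buf, sizeof(buf), 0);
	close(fd);
	return 0;
}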
Diffstat (limited to 'drivers/net/ethernet/cisco')
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  |  85
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h    | 122
2 files changed, 195 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5448df2d78c2..d4918eef5050 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -42,6 +42,9 @@
 #ifdef CONFIG_RFS_ACCEL
 #include <linux/cpu_rmap.h>
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -1053,10 +1056,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		if (vlan_stripped)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 
-		if (netdev->features & NETIF_F_GRO)
-			napi_gro_receive(&enic->napi[q_number], skb);
-		else
+		skb_mark_napi_id(skb, &enic->napi[rq->index]);
+		if (enic_poll_busy_polling(rq) ||
+		    !(netdev->features & NETIF_F_GRO))
 			netif_receive_skb(skb);
+		else
+			napi_gro_receive(&enic->napi[q_number], skb);
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
 			enic_intr_update_pkt_size(&cq->pkt_size_counter,
 						  bytes_written);
@@ -1093,16 +1098,22 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int work_done, rq_work_done = 0, wq_work_done;
 	int err;
 
-	/* Service RQ (first) and WQ
-	 */
+	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+				       enic_wq_service, NULL);
+
+	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+		if (wq_work_done > 0)
+			vnic_intr_return_credits(&enic->intr[intr],
+						 wq_work_done,
+						 0 /* dont unmask intr */,
+						 0 /* dont reset intr timer */);
+		return rq_work_done;
+	}
 
 	if (budget > 0)
 		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
 			rq_work_to_do, enic_rq_service, NULL);
 
-	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
-		wq_work_to_do, enic_wq_service, NULL);
-
 	/* Accumulate intr event credits for this polling
 	 * cycle. An intr event is the completion of a
 	 * a WQ or RQ packet.
@@ -1134,6 +1145,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		vnic_intr_unmask(&enic->intr[intr]);
 	}
+	enic_poll_unlock_napi(&enic->rq[cq_rq]);
 
 	return rq_work_done;
 }
@@ -1234,6 +1246,34 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
 
 #endif /* CONFIG_RFS_ACCEL */
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int enic_busy_poll(struct napi_struct *napi)
+{
+	struct net_device *netdev = napi->dev;
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int rq = (napi - &enic->napi[0]);
+	unsigned int cq = enic_cq_rq(enic, rq);
+	unsigned int intr = enic_msix_rq_intr(enic, rq);
+	unsigned int work_to_do = -1; /* clean all pkts possible */
+	unsigned int work_done;
+
+	if (!enic_poll_lock_poll(&enic->rq[rq]))
+		return LL_FLUSH_BUSY;
+	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
+				    enic_rq_service, NULL);
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[intr],
+					 work_done, 0, 0);
+	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+		enic_calc_int_moderation(enic, &enic->rq[rq]);
+	enic_poll_unlock_poll(&enic->rq[rq]);
+
+	return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 static int enic_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct net_device *netdev = napi->dev;
@@ -1245,6 +1285,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	unsigned int work_done = 0;
 	int err;
 
+	if (!enic_poll_lock_napi(&enic->rq[rq]))
+		return work_done;
 	/* Service RQ
 	 */
 
@@ -1290,6 +1332,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 		enic_set_int_moderation(enic, &enic->rq[rq]);
 		vnic_intr_unmask(&enic->intr[intr]);
 	}
+	enic_poll_unlock_napi(&enic->rq[rq]);
 
 	return work_done;
 }
@@ -1538,8 +1581,10 @@ static int enic_open(struct net_device *netdev)
 
 	netif_tx_wake_all_queues(netdev);
 
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		enic_busy_poll_init_lock(&enic->rq[i]);
 		napi_enable(&enic->napi[i]);
+	}
 
 	enic_dev_enable(enic);
 
@@ -1578,8 +1623,13 @@ static int enic_stop(struct net_device *netdev)
 
 	enic_dev_disable(enic);
 
-	for (i = 0; i < enic->rq_count; i++)
+	local_bh_disable();
+	for (i = 0; i < enic->rq_count; i++) {
 		napi_disable(&enic->napi[i]);
+		while (!enic_poll_lock_napi(&enic->rq[i]))
+			mdelay(1);
+	}
+	local_bh_enable();
 
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
@@ -2070,6 +2120,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= enic_busy_poll,
+#endif
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2093,14 +2146,19 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= enic_busy_poll,
+#endif
 };
 
 static void enic_dev_deinit(struct enic *enic)
 {
 	unsigned int i;
 
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		napi_hash_del(&enic->napi[i]);
 		netif_napi_del(&enic->napi[i]);
+	}
 
 	enic_free_vnic_resources(enic);
 	enic_clear_intr_mode(enic);
@@ -2166,11 +2224,14 @@ static int enic_dev_init(struct enic *enic)
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
 		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+		napi_hash_add(&enic->napi[0]);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
-		for (i = 0; i < enic->rq_count; i++)
+		for (i = 0; i < enic->rq_count; i++) {
 			netif_napi_add(netdev, &enic->napi[i],
 				       enic_poll_msix, 64);
+			napi_hash_add(&enic->napi[i]);
+		}
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index ee7bc95af278..8111d5202df2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -85,6 +85,21 @@ struct vnic_rq {
 	struct vnic_rq_buf *to_clean;
 	void *os_buf_head;
 	unsigned int pkts_outstanding;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define ENIC_POLL_STATE_IDLE		0
+#define ENIC_POLL_STATE_NAPI		(1 << 0) /* NAPI owns this poll */
+#define ENIC_POLL_STATE_POLL		(1 << 1) /* poll owns this poll */
+#define ENIC_POLL_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this poll */
+#define ENIC_POLL_STATE_POLL_YIELD	(1 << 3) /* poll yielded this poll */
+#define ENIC_POLL_YIELD			(ENIC_POLL_STATE_NAPI_YIELD |	\
+					 ENIC_POLL_STATE_POLL_YIELD)
+#define ENIC_POLL_LOCKED		(ENIC_POLL_STATE_NAPI |		\
+					 ENIC_POLL_STATE_POLL)
+#define ENIC_POLL_USER_PEND		(ENIC_POLL_STATE_POLL |		\
+					 ENIC_POLL_STATE_POLL_YIELD)
+	unsigned int bpoll_state;
+	spinlock_t bpoll_lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -197,6 +212,113 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
 	return 0;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+	spin_lock_init(&rq->bpoll_lock);
+	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+	bool rc = true;
+
+	spin_lock(&rq->bpoll_lock);
+	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+		WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+		rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
+		rc = false;
+	} else {
+		rq->bpoll_state = ENIC_POLL_STATE_NAPI;
+	}
+	spin_unlock(&rq->bpoll_lock);
+
+	return rc;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+	bool rc = false;
+
+	spin_lock(&rq->bpoll_lock);
+	WARN_ON(rq->bpoll_state &
+		(ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
+	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+		rc = true;
+	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+	spin_unlock(&rq->bpoll_lock);
+
+	return rc;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+	bool rc = true;
+
+	spin_lock_bh(&rq->bpoll_lock);
+	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+		rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
+		rc = false;
+	} else {
+		rq->bpoll_state |= ENIC_POLL_STATE_POLL;
+	}
+	spin_unlock_bh(&rq->bpoll_lock);
+
+	return rc;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+	bool rc = false;
+
+	spin_lock_bh(&rq->bpoll_lock);
+	WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+		rc = true;
+	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+	spin_unlock_bh(&rq->bpoll_lock);
+
+	return rc;
+}
+
+static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
+{
+	WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
+	return rq->bpoll_state & ENIC_POLL_USER_PEND;
+}
+
+#else
+
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+	return true;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+	return false;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+	return false;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+	return false;
+}
+
+static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
+{
+	return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 void vnic_rq_free(struct vnic_rq *rq);
 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
 	unsigned int desc_count, unsigned int desc_size);
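As a closing illustration of the yield protocol described in the commit
message, here is a hedged userspace model of the same state machine: a
pthread mutex stands in for bpoll_lock, the WARN_ON checks are dropped, and
the names mirror the patch. This is illustrative code only, not part of the
kernel source; compile with cc -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors ENIC_POLL_STATE_* from the patch (illustrative copy). */
#define POLL_STATE_IDLE		0
#define POLL_STATE_NAPI		(1 << 0)
#define POLL_STATE_POLL		(1 << 1)
#define POLL_STATE_NAPI_YIELD	(1 << 2)
#define POLL_STATE_POLL_YIELD	(1 << 3)
#define POLL_LOCKED		(POLL_STATE_NAPI | POLL_STATE_POLL)

struct model_rq {
	pthread_mutex_t lock;	/* stands in for bpoll_lock */
	unsigned int state;	/* stands in for bpoll_state */
};

/* NAPI side: take ownership, or record that NAPI had to yield. */
static bool model_lock_napi(struct model_rq *rq)
{
	bool rc = true;

	pthread_mutex_lock(&rq->lock);
	if (rq->state & POLL_LOCKED) {
		rq->state |= POLL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		rq->state = POLL_STATE_NAPI;
	}
	pthread_mutex_unlock(&rq->lock);
	return rc;
}

/* busy-poll side: the same protocol using the POLL flags. */
static bool model_lock_poll(struct model_rq *rq)
{
	bool rc = true;

	pthread_mutex_lock(&rq->lock);
	if (rq->state & POLL_LOCKED) {
		rq->state |= POLL_STATE_POLL_YIELD;
		rc = false;
	} else {
		rq->state |= POLL_STATE_POLL;
	}
	pthread_mutex_unlock(&rq->lock);
	return rc;
}

static void model_unlock(struct model_rq *rq)
{
	pthread_mutex_lock(&rq->lock);
	rq->state = POLL_STATE_IDLE;	/* yield flags cleared on unlock */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct model_rq rq = { PTHREAD_MUTEX_INITIALIZER, POLL_STATE_IDLE };

	printf("napi locks: %d\n", model_lock_napi(&rq));	/* 1: rq idle */
	printf("poll locks: %d\n", model_lock_poll(&rq));	/* 0: must yield */
	model_unlock(&rq);
	printf("poll locks: %d\n", model_lock_poll(&rq));	/* 1: rq idle */
	model_unlock(&rq);
	return 0;
}

Whichever of enic_poll/enic_busy_poll takes the state first owns the rq;
the loser records a yield flag and backs off, exactly as in the patch.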