Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 129
1 file changed, 120 insertions(+), 9 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7c64c74711e8..ec98d43916a8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+	if (vif->gso_mask)
+		return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+	else
+		return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
+	int needed;
+
+	needed = xenvif_rx_ring_slots_needed(queue->vif);
 
 	do {
 		prod = queue->rx.sring->req_prod;
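A quick sanity check of the arithmetic in xenvif_rx_ring_slots_needed() above, written as a standalone user-space sketch. The 4096-byte PAGE_SIZE and 65536-byte gso_max_size values are assumptions used only for illustration, and the "+ 1" presumably accounts for the extra GSO slot:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors xenvif_rx_ring_slots_needed(): slots for one worst-case packet. */
static unsigned int slots_needed(int gso_enabled, unsigned int gso_max_size,
				 unsigned int mtu)
{
	if (gso_enabled)
		return DIV_ROUND_UP(gso_max_size, PAGE_SIZE) + 1;
	return DIV_ROUND_UP(mtu, PAGE_SIZE);
}

int main(void)
{
	printf("GSO, 64 KiB: %u slots\n", slots_needed(1, 65536, 1500));  /* 17 */
	printf("no GSO, MTU 1500: %u slot\n", slots_needed(0, 65536, 1500)); /* 1 */
	return 0;
}

Because the per-packet requirement is now computed dynamically, this presumably also motivates relaxing the stall checks later in the patch from XEN_NETBK_RX_SLOTS_MAX to a single slot.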
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	skb_queue_head_init(&rxq);
 
-	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+	while (xenvif_rx_ring_slots_available(queue)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 		queue->last_rx_time = jiffies;
 
@@ -1157,6 +1168,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				 int budget,
 				 unsigned *copy_ops,
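The RCU comment above implies xenvif_mcast_match() runs lockless on the transmit path. A minimal caller sketch, assuming a multicast_control flag on struct xenvif and the ethhdr/is_multicast_ether_addr() helpers from <linux/etherdevice.h>; the function name and its exact placement are assumptions for illustration, not part of this patch:

/* Sketch only: drop multicast frames the frontend has not subscribed to.
 * Everything except xenvif_mcast_match() itself is assumed.
 */
static bool xenvif_mcast_should_drop(struct xenvif *vif,
				     const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	if (!vif->multicast_control)		/* feature not negotiated */
		return false;
	if (!is_multicast_ether_addr(eth->h_dest))
		return false;

	/* No extra locking needed: the match walks an RCU-protected list. */
	return !xenvif_mcast_match(vif, eth->h_dest);
}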
@@ -1215,6 +1300,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
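For context, the TX handling above keys off a xen_netif_extra_info slot supplied by the frontend. A hedged frontend-side sketch of what such a request could look like, assuming the canonical layout from xen/interface/io/netif.h; the ring plumbing is elided and the helper name is made up:

/* Illustrative only: fill an extra-info slot asking the backend to add a
 * multicast address. Per the backend code above, the reply is
 * XEN_NETIF_RSP_OKAY on success, or XEN_NETIF_RSP_ERROR once
 * XEN_NETBK_MCAST_MAX addresses are already registered.
 */
static void fill_mcast_add_extra(struct xen_netif_extra_info *extra,
				 const u8 addr[ETH_ALEN])
{
	memset(extra, 0, sizeof(*extra));
	extra->type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
	extra->flags = 0;	/* set XEN_NETIF_EXTRA_FLAG_MORE if chained */
	memcpy(extra->u.mcast.addr, addr, ETH_ALEN);
}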
@@ -1839,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return !queue->stalled
-	       && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+	return !queue->stalled && prod - cons < 1
 	       && time_after(jiffies,
 			     queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1852,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return queue->stalled
-	       && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+	return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue)
-		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+		&& xenvif_rx_ring_slots_available(queue))
 		|| (queue->vif->stall_timeout &&
 		    (xenvif_rx_queue_stalled(queue)
 		     || xenvif_rx_queue_ready(queue)))
@@ -2006,8 +2114,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	/* Allow as many queues as there are CPUs, by default */
-	xenvif_max_queues = num_online_cpus();
+	/* Allow as many queues as there are CPUs if user has not
+	 * specified a value.
+	 */
+	if (xenvif_max_queues == 0)
+		xenvif_max_queues = num_online_cpus();
 
 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
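The netback_init() change above only applies the CPU-count default when the value was left at zero. As a hedged reminder of how xenvif_max_queues is typically wired up as a module parameter (this declaration is not part of the hunk shown and its exact form is assumed):

/* Assumed parameter wiring for xenvif_max_queues elsewhere in the driver. */
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

With that in place, loading the module with an explicit max_queues=N keeps N, and only an unset (zero) value falls back to num_online_cpus().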