author	Paul Durrant <Paul.Durrant@citrix.com>	2015-09-02 12:58:36 -0400
committer	David S. Miller <davem@davemloft.net>	2015-09-02 14:45:00 -0400
commit	210c34dcd8d912dcc740f1f17625a7293af5cb56 (patch)
tree	33b7944163879cad30a6e631490ad6d2d3297164 /drivers/net/xen-netback/netback.c
parent	4db78d31deff77f227de56316ee865d65eaa7f01 (diff)
xen-netback: add support for multicast control
Xen's PV network protocol includes messages to add/remove ethernet multicast addresses to/from a filter list in the backend. This allows the frontend to request that the backend only forward multicast packets which are of interest, thus preventing unnecessary noise on the shared ring.

The canonical netif header in git://xenbits.xen.org/xen.git specifies the message format (two more XEN_NETIF_EXTRA_TYPEs), so the minimal necessary changes have been pulled into include/xen/interface/io/netif.h.

To prevent the frontend from extending the multicast filter list arbitrarily, a limit (XEN_NETBK_MCAST_MAX) has been set to 64 entries. This limit is not specified by the protocol and so may change in future. If the limit is reached then the next XEN_NETIF_EXTRA_TYPE_MCAST_ADD sent by the frontend will be failed with NETIF_RSP_ERROR.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
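For reference, both new message types travel in the existing extra-info slot that can follow a transmit request on the shared ring; the multicast address rides in a new member of the union. A condensed sketch of the relevant parts of include/xen/interface/io/netif.h (abbreviated here; the gso and padding members are elided):

/* Condensed from include/xen/interface/io/netif.h; gso/pad members elided. */
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD	(2)	/* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL	(3)	/* u.mcast */

struct xen_netif_extra_info {
	uint8_t type;	/* XEN_NETIF_EXTRA_TYPE_* */
	uint8_t flags;	/* XEN_NETIF_EXTRA_FLAG_* */
	union {
		struct {
			uint8_t addr[6];	/* address to add/remove */
		} mcast;
		/* ... gso and pad members elided ... */
	} u;
};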
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	99	
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f44b522b831..42569b994ea8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1157,6 +1157,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				 int budget,
 				 unsigned *copy_ops,
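As the comment at the top of the hunk notes, xenvif_mcast_add/del() only ever run from NAPI poll, while xenvif_mcast_match() is called asynchronously from start_xmit; the RCU list lets the match walk the list without taking a lock. A minimal sketch of how a caller on the guest-receive path could consume the helper (hypothetical: the flag name and hook point below are illustrative, and the actual hook-up lives outside this diffstat):

/* Sketch only: drop multicast frames the frontend has not subscribed to.
 * vif->multicast_control is an assumed flag negotiated via xenstore; the
 * helper name and placement are illustrative, not part of this patch.
 */
static bool xenvif_mcast_wanted(struct xenvif *vif, struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	if (!vif->multicast_control || skb->pkt_type != PACKET_MULTICAST)
		return true;	/* filtering disabled, or not multicast */

	/* No caller-side locking needed: xenvif_mcast_match() takes
	 * rcu_read_lock() around its walk of vif->fe_mcast_addr.
	 */
	return xenvif_mcast_match(vif, eth->h_dest);
}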
@@ -1215,6 +1289,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
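On the frontend side (not part of this patch), requesting or dropping a group amounts to queuing a tx request with XEN_NETTXF_extra_info set, followed by an extra-info segment of the appropriate type. A hypothetical sketch of filling that segment, assuming the condensed header layout shown above:

/* Sketch only: fill the extra-info slot a frontend would place after a
 * tx request flagged with XEN_NETTXF_extra_info. The helper is
 * hypothetical; no frontend changes are made by this patch.
 */
static void fill_mcast_extra(struct xen_netif_extra_info *extra,
			     const uint8_t *addr, bool add)
{
	memset(extra, 0, sizeof(*extra));
	extra->type = add ? XEN_NETIF_EXTRA_TYPE_MCAST_ADD
			  : XEN_NETIF_EXTRA_TYPE_MCAST_DEL;
	memcpy(extra->u.mcast.addr, addr, 6);	/* ETH_ALEN */
}

Note that once XEN_NETBK_MCAST_MAX (64) entries are in the list, the backend answers further MCAST_ADD requests with XEN_NETIF_RSP_ERROR, so a frontend must be prepared to see an add fail.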