Diffstat (limited to 'drivers/net/xen-netback/netback.c')
 drivers/net/xen-netback/netback.c | 269 +++++++++++++++++++++----------------
 1 file changed, 153 insertions(+), 116 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64f0e0d18b81..27bbe58dcbe7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -452,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	/* Set up a GSO prefix descriptor, if necessary */
-	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
+	if ((1 << gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
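The one-line fix above matters because skb_shinfo(skb)->gso_type holds kernel SKB_GSO_* flag bits, while vif->gso_prefix_mask is expressed in the ring protocol's XEN_NETIF_GSO_TYPE_* values, so shifting the raw kernel bits tested the wrong namespace. A hedged sketch of the mapping the earlier part of xenvif_gop_skb() presumably performs (simplified; the constants are from the Xen netif headers):

	/* Sketch only: translate kernel GSO flag bits into the ring's
	 * GSO type values before testing against gso_prefix_mask.
	 */
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}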
@@ -1149,75 +1149,95 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	return 0;
 }
 
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+				  unsigned int max)
 {
-	if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
-		/* If we need to pullup then pullup to the max, so we
-		 * won't need to do it again.
-		 */
-		int target = min_t(int, skb->len, MAX_TCP_HEADER);
-		__pskb_pull_tail(skb, target - skb_headlen(skb));
-	}
+	if (skb_headlen(skb) >= len)
+		return 0;
+
+	/* If we need to pullup then pullup to the max, so we
+	 * won't need to do it again.
+	 */
+	if (max > skb->len)
+		max = skb->len;
+
+	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+		return -ENOMEM;
+
+	if (skb_headlen(skb) < len)
+		return -EPROTO;
+
+	return 0;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 			     int recalculate_partial_csum)
 {
-	struct iphdr *iph = (void *)skb->data;
-	unsigned int header_size;
 	unsigned int off;
-	int err = -EPROTO;
+	bool fragment;
+	int err;
+
+	fragment = false;
 
-	off = sizeof(struct iphdr);
+	err = maybe_pull_tail(skb,
+			      sizeof(struct iphdr),
+			      MAX_IP_HDR_LEN);
+	if (err < 0)
+		goto out;
+
+	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+		fragment = true;
 
-	header_size = skb->network_header + off + MAX_IPOPTLEN;
-	maybe_pull_tail(skb, header_size);
+	off = ip_hdrlen(skb);
 
-	off = iph->ihl * 4;
+	err = -EPROTO;
+
+	if (fragment)
+		goto out;
 
-	switch (iph->protocol) {
+	switch (ip_hdr(skb)->protocol) {
 	case IPPROTO_TCP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct tcphdr),
+				      MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct tcphdr);
-			maybe_pull_tail(skb, header_size);
-
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - off,
-							 IPPROTO_TCP, 0);
-		}
+		if (recalculate_partial_csum)
+			tcp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_TCP, 0);
 		break;
 	case IPPROTO_UDP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct udphdr),
+				      MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct udphdr);
-			maybe_pull_tail(skb, header_size);
-
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - off,
-							 IPPROTO_UDP, 0);
-		}
+		if (recalculate_partial_csum)
+			udp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_UDP, 0);
 		break;
 	default:
-		if (net_ratelimit())
-			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, "
-				   "dropping a protocol %d packet\n",
-				   iph->protocol);
 		goto out;
 	}
 
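The reworked maybe_pull_tail() distinguishes three outcomes where the old void helper could fail silently and let callers parse unpulled data. A hedged usage sketch of the new calling convention (the drop label is hypothetical, added only for illustration):

	err = maybe_pull_tail(skb, off + sizeof(struct tcphdr), MAX_IP_HDR_LEN);
	if (err == -ENOMEM)
		goto drop;	/* pull-up allocation failed */
	if (err == -EPROTO)
		goto drop;	/* packet is shorter than the headers it claims */
	/* err == 0: at least off + sizeof(struct tcphdr) bytes are now
	 * linear, so tcp_hdr(skb) may be dereferenced safely.
	 */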
@@ -1227,121 +1247,138 @@ out:
 	return err;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			       int recalculate_partial_csum)
 {
-	int err = -EPROTO;
-	struct ipv6hdr *ipv6h = (void *)skb->data;
+	int err;
 	u8 nexthdr;
-	unsigned int header_size;
 	unsigned int off;
+	unsigned int len;
 	bool fragment;
 	bool done;
 
+	fragment = false;
 	done = false;
 
 	off = sizeof(struct ipv6hdr);
 
-	header_size = skb->network_header + off;
-	maybe_pull_tail(skb, header_size);
+	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+	if (err < 0)
+		goto out;
 
-	nexthdr = ipv6h->nexthdr;
+	nexthdr = ipv6_hdr(skb)->nexthdr;
 
-	while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
-	       !done) {
+	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+	while (off <= len && !done) {
 		switch (nexthdr) {
 		case IPPROTO_DSTOPTS:
 		case IPPROTO_HOPOPTS:
 		case IPPROTO_ROUTING: {
-			struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+			struct ipv6_opt_hdr *hp;
 
-			header_size = skb->network_header +
-				off +
-				sizeof(struct ipv6_opt_hdr);
-			maybe_pull_tail(skb, header_size);
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct ipv6_opt_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
 
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
 			nexthdr = hp->nexthdr;
 			off += ipv6_optlen(hp);
 			break;
 		}
 		case IPPROTO_AH: {
-			struct ip_auth_hdr *hp = (void *)(skb->data + off);
+			struct ip_auth_hdr *hp;
+
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct ip_auth_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
 
-			header_size = skb->network_header +
-				off +
-				sizeof(struct ip_auth_hdr);
-			maybe_pull_tail(skb, header_size);
+			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_authlen(hp);
+			break;
+		}
+		case IPPROTO_FRAGMENT: {
+			struct frag_hdr *hp;
+
+			err = maybe_pull_tail(skb,
+					      off +
+					      sizeof(struct frag_hdr),
+					      MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct frag_hdr, skb, off);
+
+			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+				fragment = true;
 
 			nexthdr = hp->nexthdr;
-			off += (hp->hdrlen+2)<<2;
+			off += sizeof(struct frag_hdr);
 			break;
 		}
-		case IPPROTO_FRAGMENT:
-			fragment = true;
-			/* fall through */
 		default:
 			done = true;
 			break;
 		}
 	}
 
-	if (!done) {
-		if (net_ratelimit())
-			netdev_err(vif->dev, "Failed to parse packet header\n");
-		goto out;
-	}
+	err = -EPROTO;
 
-	if (fragment) {
-		if (net_ratelimit())
-			netdev_err(vif->dev, "Packet is a fragment!\n");
+	if (!done || fragment)
 		goto out;
-	}
 
 	switch (nexthdr) {
 	case IPPROTO_TCP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct tcphdr),
+				      MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct tcphdr);
-			maybe_pull_tail(skb, header_size);
-
-			tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-						       &ipv6h->daddr,
-						       skb->len - off,
-						       IPPROTO_TCP, 0);
-		}
+		if (recalculate_partial_csum)
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_TCP, 0);
 		break;
 	case IPPROTO_UDP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct udphdr),
+				      MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-
-			header_size = skb->network_header +
-				off +
-				sizeof(struct udphdr);
-			maybe_pull_tail(skb, header_size);
-
-			udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-						       &ipv6h->daddr,
-						       skb->len - off,
-						       IPPROTO_UDP, 0);
-		}
+		if (recalculate_partial_csum)
+			udp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_UDP, 0);
 		break;
 	default:
-		if (net_ratelimit())
-			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, "
-				   "dropping a protocol %d packet\n",
-				   nexthdr);
 		goto out;
 	}
 
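The switch from ipv6_optlen() to ipv6_authlen() for the AH case is load-bearing: generic IPv6 extension headers and IPsec AH encode their lengths in different units, which is what the open-coded (hp->hdrlen+2)<<2 was approximating. To the best of my knowledge the kernel macros reduce to the following, shown only to illustrate the difference:

	/* ipv6_opt_hdr: hdrlen counts 8-octet units, excluding the first. */
	#define ipv6_optlen(p)  (((p)->hdrlen + 1) << 3)
	/* ip_auth_hdr: hdrlen counts 4-octet units, excluding the first two. */
	#define ipv6_authlen(p) (((p)->hdrlen + 2) << 2)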
@@ -1411,14 +1448,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 	return false;
 }
 
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
 	struct sk_buff *skb;
 	int ret;
 
 	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS)) {
+		< MAX_PENDING_REQS) &&
+	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
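Passing the NAPI budget down into xenvif_tx_build_gops() caps the number of skbs queued per poll at the source. A hedged sketch of the poll routine that supplies that budget (simplified from what the driver's interface.c presumably does; details may differ):

	static int xenvif_poll(struct napi_struct *napi, int budget)
	{
		struct xenvif *vif = container_of(napi, struct xenvif, napi);
		int work_done;

		work_done = xenvif_tx_action(vif, budget);

		if (work_done < budget)
			napi_complete(napi);	/* all pending work consumed */

		return work_done;
	}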
@@ -1440,7 +1478,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 			continue;
 		}
 
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
 		if (!work_to_do)
 			break;
 
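RING_FINAL_CHECK_FOR_REQUESTS() both tests for work and re-arms the frontend's event index, which is only wanted once, when the consumer is about to stop; inside the build loop the plain test suffices. A hedged paraphrase of the distinction (the authoritative macros live in include/xen/interface/io/ring.h):

	/* Plain check (roughly): just compare producer and consumer. */
	work_to_do = (vif->tx.sring->req_prod - vif->tx.req_cons) != 0;

	/* FINAL check (roughly): the same test, but on finding no work
	 * it also sets sring->req_event = req_cons + 1 and re-tests, so
	 * a request arriving in that window still raises an event.
	 */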
@@ -1580,14 +1618,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 }
 
 
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops;
 	struct sk_buff *skb;
 	int work_done = 0;
 
-	while (work_done < budget &&
-	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
 		unsigned data_len;
@@ -1662,14 +1699,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
 	if (unlikely(!tx_work_todo(vif)))
 		return 0;
 
-	nr_gops = xenvif_tx_build_gops(vif);
+	nr_gops = xenvif_tx_build_gops(vif, budget);
 
 	if (nr_gops == 0)
 		return 0;
 
 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-	work_done = xenvif_tx_submit(vif, nr_gops);
+	work_done = xenvif_tx_submit(vif);
 
 	return work_done;
 }
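With the cap applied at build time, the budget argument to xenvif_tx_submit() became redundant: the queue can never hold more than budget skbs, so draining it unconditionally still respects NAPI accounting. The old pairing was also subtly off in that it passed nr_gops, a count of grant-copy operations rather than packets (a multi-frag skb needs several gops). A hedged sketch of the invariant the new code relies on, as a hypothetical check after xenvif_tx_build_gops() returns:

	/* Illustration only, not in the patch: at most "budget" skbs are
	 * queued, so xenvif_tx_submit(vif) can return at most budget.
	 */
	WARN_ON(skb_queue_len(&vif->tx_queue) > budget);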