author	David S. Miller <davem@davemloft.net>	2013-12-18 16:42:06 -0500
committer	David S. Miller <davem@davemloft.net>	2013-12-18 16:42:06 -0500
commit	143c9054949436cb05e468439dc5e46231f33d09 (patch)
tree	c2e972d8188fb1b36368e9acb5b6b59466c9d903	/drivers/net/xen-netback/netback.c
parent	0b6807034791160d5e584138943d2daea765436d (diff)
parent	35eecf052250f663f07a4cded7d3503fd1b50729 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/intel/i40e/i40e_main.c
	drivers/net/macvtap.c

Both minor merge hassles, simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	81
1 file changed, 40 insertions(+), 41 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 43341b82649c..773b731e3e52 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -355,7 +355,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	/* Set up a GSO prefix descriptor, if necessary */
-	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
+	if ((1 << gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
 		meta->gso_type = gso_type;
@@ -1099,44 +1099,45 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 
 	err = -EPROTO;
 
+	if (fragment)
+		goto out;
+
 	switch (ip_hdr(skb)->protocol) {
 	case IPPROTO_TCP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct tcphdr),
+				      MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			err = maybe_pull_tail(skb,
-					      off + sizeof(struct tcphdr),
-					      MAX_IP_HDR_LEN);
-			if (err < 0)
-				goto out;
-
+		if (recalculate_partial_csum)
 			tcp_hdr(skb)->check =
 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 						   ip_hdr(skb)->daddr,
 						   skb->len - off,
 						   IPPROTO_TCP, 0);
-		}
 		break;
 	case IPPROTO_UDP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct udphdr),
+				      MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			err = maybe_pull_tail(skb,
-					      off + sizeof(struct udphdr),
-					      MAX_IP_HDR_LEN);
-			if (err < 0)
-				goto out;
-
+		if (recalculate_partial_csum)
 			udp_hdr(skb)->check =
 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 						   ip_hdr(skb)->daddr,
 						   skb->len - off,
 						   IPPROTO_UDP, 0);
-		}
 		break;
 	default:
 		goto out;
@@ -1244,42 +1245,40 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 
 	switch (nexthdr) {
 	case IPPROTO_TCP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct tcphdr),
+				      MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			err = maybe_pull_tail(skb,
-					      off + sizeof(struct tcphdr),
-					      MAX_IPV6_HDR_LEN);
-			if (err < 0)
-				goto out;
-
+		if (recalculate_partial_csum)
 			tcp_hdr(skb)->check =
 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						 &ipv6_hdr(skb)->daddr,
 						 skb->len - off,
 						 IPPROTO_TCP, 0);
-		}
 		break;
 	case IPPROTO_UDP:
+		err = maybe_pull_tail(skb,
+				      off + sizeof(struct udphdr),
+				      MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
 		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
-		if (recalculate_partial_csum) {
-			err = maybe_pull_tail(skb,
-					      off + sizeof(struct udphdr),
-					      MAX_IPV6_HDR_LEN);
-			if (err < 0)
-				goto out;
-
+		if (recalculate_partial_csum)
 			udp_hdr(skb)->check =
 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						 &ipv6_hdr(skb)->daddr,
 						 skb->len - off,
 						 IPPROTO_UDP, 0);
-		}
 		break;
 	default:
 		goto out;
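
Both checksum hunks apply the same pattern: maybe_pull_tail() now runs unconditionally, before skb_partial_csum_set(), so the transport header is in the linear area whether or not the checksum is recomputed, and only the pseudo-header seeding stays behind recalculate_partial_csum. As a rough illustration of what that seeding computes, here is a self-contained userspace sketch of the IPv4 pseudo-header sum that the ~csum_tcpudp_magic(...) lines leave in the checksum field; pseudo_hdr_csum() is local to this example, not a kernel helper, and the kernel performs the same arithmetic directly on big-endian words.

#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* One's-complement sum of the IPv4 pseudo-header (RFC 793/768):
 * source address, destination address, protocol, transport length.
 * Storing this un-negated value in the TCP/UDP check field is what the
 * ~csum_tcpudp_magic(...) lines above do for CHECKSUM_PARTIAL, so the
 * hardware (or skb_checksum_help) only has to sum the transport header
 * and payload and invert the result. */
static uint16_t pseudo_hdr_csum(uint32_t saddr_be, uint32_t daddr_be,
				uint16_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (ntohl(saddr_be) >> 16) + (ntohl(saddr_be) & 0xffff);
	sum += (ntohl(daddr_be) >> 16) + (ntohl(daddr_be) & 0xffff);
	sum += proto;
	sum += len;

	/* Fold carries back into 16 bits (one's-complement addition). */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

int main(void)
{
	uint32_t saddr = inet_addr("192.0.2.1");
	uint32_t daddr = inet_addr("192.0.2.2");

	/* e.g. a 1480-byte TCP segment (20-byte header + 1460 payload) */
	printf("pseudo-header sum: 0x%04x\n",
	       pseudo_hdr_csum(saddr, daddr, 1480, IPPROTO_TCP));
	return 0;
}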
@@ -1351,14 +1350,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 	return false;
 }
 
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
 	struct sk_buff *skb;
 	int ret;
 
 	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS)) {
+		< MAX_PENDING_REQS) &&
+	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
@@ -1380,7 +1380,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 			continue;
 		}
 
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
 		if (!work_to_do)
 			break;
 
@@ -1520,14 +1520,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 }
 
 
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
 {
 	struct gnttab_copy *gop = vif->tx_copy_ops;
 	struct sk_buff *skb;
 	int work_done = 0;
 
-	while (work_done < budget &&
-	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
 		unsigned data_len;
@@ -1602,14 +1601,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
 	if (unlikely(!tx_work_todo(vif)))
 		return 0;
 
-	nr_gops = xenvif_tx_build_gops(vif);
+	nr_gops = xenvif_tx_build_gops(vif, budget);
 
 	if (nr_gops == 0)
 		return 0;
 
 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-	work_done = xenvif_tx_submit(vif, nr_gops);
+	work_done = xenvif_tx_submit(vif);
 
 	return work_done;
 }
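
Taken together, the TX hunks move the NAPI budget from the submit side to the build side. For readability, this is roughly how xenvif_tx_action() reads after the merge, reconstructed from the hunks above; the local declarations are not shown in the diff and are assumed here, so treat it as a sketch rather than a verbatim copy of the file.

int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_gops;	/* declarations reconstructed, not in the hunk */
	int work_done;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	/* Stop queueing skbs (and their grant copies) once 'budget'
	 * packets are on vif->tx_queue. */
	nr_gops = xenvif_tx_build_gops(vif, budget);
	if (nr_gops == 0)
		return 0;

	/* Issue all queued grant copies in one batched hypercall. */
	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	/* Drain everything that was queued; the budget was already
	 * enforced in xenvif_tx_build_gops(). */
	work_done = xenvif_tx_submit(vif);

	return work_done;
}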