path: root/drivers/net/xen-netback/netback.c
author		Michal Marek <mmarek@suse.cz>	2014-01-02 08:02:06 -0500
committer	Michal Marek <mmarek@suse.cz>	2014-01-02 08:02:06 -0500
commit		37e2c2a775fc887acd1432908478dfd532f7f00f (patch)
tree		e51ebc699d8e262fd47e0913be6a711cb1a7b565 /drivers/net/xen-netback/netback.c
parent		1c8ddae09f4c102b97c9086cc70347e89468a547 (diff)
parent		6ce4eac1f600b34f2f7f58f9cd8f0503d79e42ae (diff)
Merge commit v3.13-rc1 into kbuild/misc
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	398
1 file changed, 311 insertions(+), 87 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c70036..919b6509455c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a miniumum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
  */
-#define PKT_PROT_LEN (ETH_HLEN + \
-		      VLAN_HLEN + \
-		      sizeof(struct iphdr) + MAX_IPOPTLEN + \
-		      sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso || vif->gso_prefix)
+	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
 	return max;
@@ -212,6 +209,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	return false;
 }
 
+struct xenvif_count_slot_state {
+	unsigned long copy_off;
+	bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+				     unsigned long offset, unsigned long size,
+				     struct xenvif_count_slot_state *state)
+{
+	unsigned count = 0;
+
+	offset &= ~PAGE_MASK;
+
+	while (size > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+			count++;
+			state->copy_off = 0;
+		}
+
+		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+			bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+		state->copy_off += bytes;
+
+		offset += bytes;
+		size -= bytes;
+
+		if (offset == PAGE_SIZE)
+			offset = 0;
+
+		state->head = false;
+	}
+
+	return count;
+}
+
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -219,48 +259,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
  */
 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
+	struct xenvif_count_slot_state state;
 	unsigned int count;
-	int i, copy_off;
+	unsigned char *data;
+	unsigned i;
 
-	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+	state.head = true;
+	state.copy_off = 0;
 
-	copy_off = skb_headlen(skb) % PAGE_SIZE;
+	/* Slot for the first (partial) page of data. */
+	count = 1;
 
+	/* Need a slot for the GSO prefix for GSO extra data? */
 	if (skb_shinfo(skb)->gso_size)
 		count++;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-		unsigned long bytes;
-
-		offset &= ~PAGE_MASK;
-
-		while (size > 0) {
-			BUG_ON(offset >= PAGE_SIZE);
-			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
-			bytes = PAGE_SIZE - offset;
-
-			if (bytes > size)
-				bytes = size;
+	data = skb->data;
+	while (data < skb_tail_pointer(skb)) {
+		unsigned long offset = offset_in_page(data);
+		unsigned long size = PAGE_SIZE - offset;
 
-			if (start_new_rx_buffer(copy_off, bytes, 0)) {
-				count++;
-				copy_off = 0;
-			}
+		if (data + size > skb_tail_pointer(skb))
+			size = skb_tail_pointer(skb) - data;
 
-			if (copy_off + bytes > MAX_BUFFER_OFFSET)
-				bytes = MAX_BUFFER_OFFSET - copy_off;
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 
-			copy_off += bytes;
+		data += size;
+	}
 
-			offset += bytes;
-			size -= bytes;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
 
-			if (offset == PAGE_SIZE)
-				offset = 0;
-		}
-	}
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 	}
 	return count;
 }
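
Taken together, the two hunks above replace the old open-coded frag loop with a stateful walk: the linear area is chunked page by page and fed through the same helper as the frags, so copy_off and head carry across the whole packet. Below is a standalone userspace model of that flow (not kernel code): PAGE_SIZE, MAX_BUFFER_OFFSET and the packet sizes are illustrative assumptions, and start_new_rx_buffer() is reduced to its simplest case.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_BUFFER_OFFSET	PAGE_SIZE	/* assumption */

struct count_slot_state {
	unsigned long copy_off;
	bool head;
};

/* Simplified: only the "current buffer is completely full" case of the
 * real start_new_rx_buffer(); the real helper has an extra heuristic. */
static bool start_new_rx_buffer(unsigned long copy_off)
{
	return copy_off == MAX_BUFFER_OFFSET;
}

/* Same loop shape as the new xenvif_count_frag_slots(). */
static unsigned int count_frag_slots(unsigned long offset, unsigned long size,
				     struct count_slot_state *state)
{
	unsigned int count = 0;

	offset &= PAGE_SIZE - 1;	/* offset &= ~PAGE_MASK */

	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(state->copy_off)) {
			count++;
			state->copy_off = 0;
		}

		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - state->copy_off;

		state->copy_off += bytes;
		offset += bytes;
		size -= bytes;

		if (offset == PAGE_SIZE)
			offset = 0;

		state->head = false;
	}

	return count;
}

int main(void)
{
	struct count_slot_state state = { .copy_off = 0, .head = true };
	unsigned long linear_start = 100;	/* page offset of skb->data */
	unsigned long linear_len = 6000;	/* skb_headlen(), made up */
	unsigned long frag_len = 3000;		/* one frag, made up */
	unsigned long data = linear_start;
	unsigned int count = 1;			/* first (partial) page */

	/* Chunk the linear area page by page, as the new caller does. */
	while (data < linear_start + linear_len) {
		unsigned long offset = data & (PAGE_SIZE - 1);
		unsigned long size = PAGE_SIZE - offset;

		if (data + size > linear_start + linear_len)
			size = linear_start + linear_len - data;

		count += count_frag_slots(offset, size, &state);
		data += size;
	}

	count += count_frag_slots(0, frag_len, &state);

	printf("ring slots needed: %u\n", count);
	return 0;
}

With these made-up sizes the model reports 3 slots; the real driver may count more, because its start_new_rx_buffer() also opens a fresh buffer when a chunk would fit in a new buffer but not in the remainder of the current one.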
@@ -283,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
 	meta = npo->meta + npo->meta_prod++;
+	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
 	meta->id = req->id;
@@ -305,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
+	int gso_type;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -363,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 
 	/* Leave a gap for the GSO descriptor. */
-	if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	else
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+	if (*head && ((1 << gso_type) & vif->gso_mask))
 		vif->rx.req_cons++;
 
 	*head = 0; /* There must be something in this buffer now. */
@@ -394,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	unsigned char *data;
 	int head = 1;
 	int old_meta_prod;
+	int gso_type;
+	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else {
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+		gso_size = 0;
+	}
+
 	/* Set up a GSO prefix descriptor, if necessary */
-	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -409,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 	meta = npo->meta + npo->meta_prod++;
 
-	if (!vif->gso_prefix)
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-	else
+	if ((1 << gso_type) & vif->gso_mask) {
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
+	} else {
+		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
+	}
 
 	meta->size = 0;
 	meta->id = req->id;
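
The gso_mask / gso_prefix_mask checks above replace the old boolean vif->gso and vif->gso_prefix flags: each negotiated XEN_NETIF_GSO_TYPE_* gets one bit in the mask, so adding TCPv6 support is just another bit. A small standalone model of the convention follows; the XEN_NETIF_GSO_TYPE_* values mirror the Xen netif ABI, the SKB_GSO_* stand-ins and the mask contents are made up for illustration, and how the masks are actually populated (feature negotiation elsewhere in the driver) is not shown here.

#include <stdio.h>

#define XEN_NETIF_GSO_TYPE_NONE		0
#define XEN_NETIF_GSO_TYPE_TCPV4	1
#define XEN_NETIF_GSO_TYPE_TCPV6	2

/* Stand-ins for the kernel's SKB_GSO_TCPV4 / SKB_GSO_TCPV6 flag bits. */
#define MODEL_GSO_TCPV4	(1 << 0)
#define MODEL_GSO_TCPV6	(1 << 4)

int main(void)
{
	/* Pretend the frontend negotiated both TCPv4 and TCPv6 GSO. */
	unsigned int gso_mask = (1 << XEN_NETIF_GSO_TYPE_TCPV4) |
				(1 << XEN_NETIF_GSO_TYPE_TCPV6);
	unsigned int skb_gso_type = MODEL_GSO_TCPV6;	/* from skb_shinfo() */
	int gso_type;

	/* Translate the skb's GSO flag into a ring GSO type. */
	if (skb_gso_type & MODEL_GSO_TCPV4)
		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
	else if (skb_gso_type & MODEL_GSO_TCPV6)
		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	else
		gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Same test as the patch: one bit per negotiated type. */
	if ((1 << gso_type) & gso_mask)
		printf("emit a GSO extra_info slot for type %d\n", gso_type);
	else
		printf("no GSO metadata needed\n");
	return 0;
}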
@@ -558,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
 		vif = netdev_priv(skb->dev);
 
-		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
 						 vif->rx.rsp_prod_pvt++);
 
@@ -595,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
 					vif->meta[npo.meta_cons].size,
 					flags);
 
-		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_mask) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&vif->rx,
@@ -603,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
+			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
 
@@ -1067,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	switch (gso->u.gso.type) {
+	case XEN_NETIF_GSO_TYPE_TCPV4:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+		break;
+	case XEN_NETIF_GSO_TYPE_TCPV6:
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+		break;
+	default:
 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 		xenvif_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1084,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	return 0;
 }
 
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+	if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+		/* If we need to pullup then pullup to the max, so we
+		 * won't need to do it again.
+		 */
+		int target = min_t(int, skb->len, MAX_TCP_HEADER);
+		__pskb_pull_tail(skb, target - skb_headlen(skb));
+	}
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+			     int recalculate_partial_csum)
 {
-	struct iphdr *iph;
+	struct iphdr *iph = (void *)skb->data;
+	unsigned int header_size;
+	unsigned int off;
 	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
 
-	/*
-	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
-	 * peers can fail to set NETRXF_csum_blank when sending a GSO
-	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
-	 * recalculate the partial checksum.
-	 */
-	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
-		vif->rx_gso_checksum_fixup++;
-		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
-	}
+	off = sizeof(struct iphdr);
 
-	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return 0;
+	header_size = skb->network_header + off + MAX_IPOPTLEN;
+	maybe_pull_tail(skb, header_size);
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
+	off = iph->ihl * 4;
 
-	iph = (void *)skb->data;
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct tcphdr, check)))
 			goto out;
 
 		if (recalculate_partial_csum) {
 			struct tcphdr *tcph = tcp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct tcphdr);
+			maybe_pull_tail(skb, header_size);
+
 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
+							 skb->len - off,
 							 IPPROTO_TCP, 0);
 		}
 		break;
 	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+		if (!skb_partial_csum_set(skb, off,
 					  offsetof(struct udphdr, check)))
 			goto out;
 
 		if (recalculate_partial_csum) {
 			struct udphdr *udph = udp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct udphdr);
+			maybe_pull_tail(skb, header_size);
+
 			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
+							 skb->len - off,
 							 IPPROTO_UDP, 0);
 		}
 		break;
 	default:
 		if (net_ratelimit())
 			netdev_err(vif->dev,
-				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+				   "Attempting to checksum a non-TCP/UDP packet, "
+				   "dropping a protocol %d packet\n",
 				   iph->protocol);
 		goto out;
 	}
@@ -1149,11 +1226,162 @@ out:
 	return err;
 }
 
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+			       int recalculate_partial_csum)
+{
+	int err = -EPROTO;
+	struct ipv6hdr *ipv6h = (void *)skb->data;
+	u8 nexthdr;
+	unsigned int header_size;
+	unsigned int off;
+	bool fragment;
+	bool done;
+
+	done = false;
+
+	off = sizeof(struct ipv6hdr);
+
+	header_size = skb->network_header + off;
+	maybe_pull_tail(skb, header_size);
+
+	nexthdr = ipv6h->nexthdr;
+
+	while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+	       !done) {
+		switch (nexthdr) {
+		case IPPROTO_DSTOPTS:
+		case IPPROTO_HOPOPTS:
+		case IPPROTO_ROUTING: {
+			struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct ipv6_opt_hdr);
+			maybe_pull_tail(skb, header_size);
+
+			nexthdr = hp->nexthdr;
+			off += ipv6_optlen(hp);
+			break;
+		}
+		case IPPROTO_AH: {
+			struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct ip_auth_hdr);
+			maybe_pull_tail(skb, header_size);
+
+			nexthdr = hp->nexthdr;
+			off += (hp->hdrlen+2)<<2;
+			break;
+		}
+		case IPPROTO_FRAGMENT:
+			fragment = true;
+			/* fall through */
+		default:
+			done = true;
+			break;
+		}
+	}
+
+	if (!done) {
+		if (net_ratelimit())
+			netdev_err(vif->dev, "Failed to parse packet header\n");
+		goto out;
+	}
+
+	if (fragment) {
+		if (net_ratelimit())
+			netdev_err(vif->dev, "Packet is a fragment!\n");
+		goto out;
+	}
+
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = tcp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct tcphdr);
+			maybe_pull_tail(skb, header_size);
+
+			tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+						       &ipv6h->daddr,
+						       skb->len - off,
+						       IPPROTO_TCP, 0);
+		}
+		break;
+	case IPPROTO_UDP:
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check)))
+			goto out;
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = udp_hdr(skb);
+
+			header_size = skb->network_header +
+				off +
+				sizeof(struct udphdr);
+			maybe_pull_tail(skb, header_size);
+
+			udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+						       &ipv6h->daddr,
+						       skb->len - off,
+						       IPPROTO_UDP, 0);
+		}
+		break;
+	default:
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Attempting to checksum a non-TCP/UDP packet, "
+				   "dropping a protocol %d packet\n",
+				   nexthdr);
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		vif->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+	return err;
+}
+
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
-	unsigned long now = jiffies;
-	unsigned long next_credit =
-		vif->credit_timeout.expires +
-		msecs_to_jiffies(vif->credit_usec / 1000);
+	u64 now = get_jiffies_64();
+	u64 next_credit = vif->credit_window_start +
+		msecs_to_jiffies(vif->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
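
The new checksum_setup_ipv6() has to find the transport header before it can set the checksum offset, which means stepping over any IPv6 extension headers using each header's own length field. A standalone model of just that walk (not kernel code): the header layout is simplified, the offsets are relative to the end of the fixed 40-byte IPv6 header rather than to skb->data, and the packet contents are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's struct ipv6_opt_hdr. */
struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;		/* length in 8-octet units, minus 1 */
};

#define NEXTHDR_HOP	0
#define NEXTHDR_TCP	6
#define NEXTHDR_DEST	60

int main(void)
{
	/* Bytes right after the fixed IPv6 header: a 16-byte hop-by-hop
	 * options header, an 8-byte destination options header, then TCP. */
	uint8_t pkt[64] = {0};
	struct opt_hdr hop = { .nexthdr = NEXTHDR_DEST, .hdrlen = 1 };
	struct opt_hdr dst = { .nexthdr = NEXTHDR_TCP, .hdrlen = 0 };
	unsigned int off = 0;
	uint8_t nexthdr = NEXTHDR_HOP;	/* from the fixed IPv6 header */

	memcpy(pkt, &hop, sizeof(hop));
	memcpy(pkt + 16, &dst, sizeof(dst));

	while (nexthdr == NEXTHDR_HOP || nexthdr == NEXTHDR_DEST) {
		struct opt_hdr hp;

		memcpy(&hp, pkt + off, sizeof(hp));
		nexthdr = hp.nexthdr;
		off += (hp.hdrlen + 1) * 8;	/* ipv6_optlen() */
	}

	printf("transport protocol %u starts %u bytes past the IPv6 header\n",
	       nexthdr, off);
	return 0;
}

The driver's version additionally pulls each header into the linear area before reading it, bails out on fragments, and handles AH headers, whose length field is in 4-octet units.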
@@ -1161,8 +1389,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 		return true;
 
 	/* Passed the point where we can replenish credit? */
-	if (time_after_eq(now, next_credit)) {
-		vif->credit_timeout.expires = now;
+	if (time_after_eq64(now, next_credit)) {
+		vif->credit_window_start = now;
 		tx_add_credit(vif);
 	}
 
@@ -1174,6 +1402,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 			tx_credit_callback;
 		mod_timer(&vif->credit_timeout,
 			  next_credit);
+		vif->credit_window_start = next_credit;
 
 		return true;
 	}
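
The credit hunks above move the TX credit window from the timer's 32-bit expires field to a dedicated u64 credit_window_start compared against get_jiffies_64(), so the replenish test is immune to jiffies wraparound. A standalone model of the comparison (illustrative numbers; msecs_to_jiffies() is treated as identity, i.e. HZ == 1000 is an assumption):

#include <stdint.h>
#include <stdio.h>

/* time_after_eq64() boils down to a signed comparison of the difference,
 * which stays correct even across counter wraparound. */
static int time_after_eq64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

int main(void)
{
	uint64_t now = 1000000;			/* get_jiffies_64() */
	uint64_t credit_window_start = 999000;
	uint64_t credit_usec = 500000;		/* 500 ms replenish period */
	uint64_t next_credit = credit_window_start + credit_usec / 1000;

	if (time_after_eq64(now, next_credit))
		printf("replenish credit, window restarts at %llu\n",
		       (unsigned long long)now);
	else
		printf("credit exhausted until jiffy %llu\n",
		       (unsigned long long)next_credit);
	return 0;
}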
@@ -1394,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
 
 		xenvif_fill_frags(vif, skb);
 
-		/*
-		 * If the initial fragment was < PKT_PROT_LEN then
-		 * pull through some bytes from the other fragments to
-		 * increase the linear region to PKT_PROT_LEN bytes.
-		 */
-		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
 			int target = min_t(int, skb->len, PKT_PROT_LEN);
 			__pskb_pull_tail(skb, target - skb_headlen(skb));
 		}
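
The simplified pull-through at the end keeps only enough of the packet in the linear area for header processing, now bounded by the fixed PKT_PROT_LEN instead of a sum of worst-case header sizes. A trivial standalone model of the arithmetic (made-up lengths):

#include <stdio.h>

#define PKT_PROT_LEN 128

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int skb_len = 1500;	/* total packet length */
	int headlen = 66;	/* bytes currently in the linear area */
	int target = min_int(skb_len, PKT_PROT_LEN);

	if (headlen < target)
		printf("pull %d more bytes into the linear area\n",
		       target - headlen);
	return 0;
}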