author     Takashi Iwai <tiwai@suse.de>    2013-12-19 06:22:11 -0500
committer  Takashi Iwai <tiwai@suse.de>    2013-12-19 06:22:11 -0500
commit     356f402da0f989b16e4b6849e88dba5df0e25944 (patch)
tree       d1d41d07abf30bdd7fe1498f6eb239eaced6d9b3 /drivers/net/xen-netback
parent     3a6c5d8ad0a9253aafb76df3577edcb68c09b939 (diff)
parent     96b7fe0119b932ad25451d2b6357e727bbe6a309 (diff)
Merge tag 'asoc-v3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.13
The fixes here are all driver-specific ones, none of which particularly
stands out, but all of which are useful to users of those drivers.
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--   drivers/net/xen-netback/interface.c |  20
-rw-r--r--   drivers/net/xen-netback/netback.c   | 266
2 files changed, 162 insertions(+), 124 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2329cccf1fa6..870f1fa58370 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -368,11 +368,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                    unsigned long rx_ring_ref, unsigned int tx_evtchn,
                    unsigned int rx_evtchn)
 {
+        struct task_struct *task;
         int err = -ENOMEM;
 
-        /* Already connected through? */
-        if (vif->tx_irq)
-                return 0;
+        BUG_ON(vif->tx_irq);
+        BUG_ON(vif->task);
 
         err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
         if (err < 0)
@@ -411,14 +411,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
         }
 
         init_waitqueue_head(&vif->wq);
-        vif->task = kthread_create(xenvif_kthread,
-                                   (void *)vif, "%s", vif->dev->name);
-        if (IS_ERR(vif->task)) {
+        task = kthread_create(xenvif_kthread,
+                              (void *)vif, "%s", vif->dev->name);
+        if (IS_ERR(task)) {
                 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
-                err = PTR_ERR(vif->task);
+                err = PTR_ERR(task);
                 goto err_rx_unbind;
         }
 
+        vif->task = task;
+
         rtnl_lock();
         if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                 dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -461,8 +463,10 @@ void xenvif_disconnect(struct xenvif *vif)
         if (netif_carrier_ok(vif->dev))
                 xenvif_carrier_off(vif);
 
-        if (vif->task)
+        if (vif->task) {
                 kthread_stop(vif->task);
+                vif->task = NULL;
+        }
 
         if (vif->tx_irq) {
                 if (vif->tx_irq == vif->rx_irq)
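The interface.c hunks above reorder kthread setup so that vif->task is only published once kthread_create() has succeeded, and xenvif_disconnect() now clears it; that is what lets the BUG_ON(vif->task) in xenvif_connect() hold across a disconnect/reconnect cycle. A minimal sketch of this create-then-publish pattern, with illustrative names (worker_ctx, worker_connect and worker_disconnect are not the driver's own):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct worker_ctx {
        struct task_struct *task;       /* NULL whenever disconnected */
};

static int worker_connect(struct worker_ctx *ctx,
                          int (*fn)(void *), const char *name)
{
        struct task_struct *task;

        task = kthread_create(fn, ctx, "%s", name);
        if (IS_ERR(task))
                return PTR_ERR(task);   /* ctx->task stays NULL on failure */

        ctx->task = task;               /* publish only after success */
        wake_up_process(task);
        return 0;
}

static void worker_disconnect(struct worker_ctx *ctx)
{
        if (ctx->task) {
                kthread_stop(ctx->task);
                ctx->task = NULL;       /* a later reconnect starts clean */
        }
}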
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64f0e0d18b81..e884ee1fe7ed 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -452,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
         }
 
         /* Set up a GSO prefix descriptor, if necessary */
-        if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
+        if ((1 << gso_type) & vif->gso_prefix_mask) {
                 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
                 meta = npo->meta + npo->meta_prod++;
                 meta->gso_type = gso_type;
@@ -1149,75 +1149,92 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
         return 0;
 }
 
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+                                  unsigned int max)
 {
-        if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
-                /* If we need to pullup then pullup to the max, so we
-                 * won't need to do it again.
-                 */
-                int target = min_t(int, skb->len, MAX_TCP_HEADER);
-                __pskb_pull_tail(skb, target - skb_headlen(skb));
-        }
+        if (skb_headlen(skb) >= len)
+                return 0;
+
+        /* If we need to pullup then pullup to the max, so we
+         * won't need to do it again.
+         */
+        if (max > skb->len)
+                max = skb->len;
+
+        if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+                return -ENOMEM;
+
+        if (skb_headlen(skb) < len)
+                return -EPROTO;
+
+        return 0;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                              int recalculate_partial_csum)
 {
-        struct iphdr *iph = (void *)skb->data;
-        unsigned int header_size;
         unsigned int off;
-        int err = -EPROTO;
+        bool fragment;
+        int err;
 
-        off = sizeof(struct iphdr);
+        fragment = false;
 
-        header_size = skb->network_header + off + MAX_IPOPTLEN;
-        maybe_pull_tail(skb, header_size);
+        err = maybe_pull_tail(skb,
+                              sizeof(struct iphdr),
+                              MAX_IP_HDR_LEN);
+        if (err < 0)
+                goto out;
 
-        off = iph->ihl * 4;
+        if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+                fragment = true;
 
-        switch (iph->protocol) {
+        off = ip_hdrlen(skb);
+
+        err = -EPROTO;
+
+        switch (ip_hdr(skb)->protocol) {
         case IPPROTO_TCP:
+                err = maybe_pull_tail(skb,
+                                      off + sizeof(struct tcphdr),
+                                      MAX_IP_HDR_LEN);
+                if (err < 0)
+                        goto out;
+
                 if (!skb_partial_csum_set(skb, off,
                                           offsetof(struct tcphdr, check)))
                         goto out;
 
-                if (recalculate_partial_csum) {
-                        struct tcphdr *tcph = tcp_hdr(skb);
-
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct tcphdr);
-                        maybe_pull_tail(skb, header_size);
-
-                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                         skb->len - off,
-                                                         IPPROTO_TCP, 0);
-                }
+                if (recalculate_partial_csum)
+                        tcp_hdr(skb)->check =
+                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                                   ip_hdr(skb)->daddr,
+                                                   skb->len - off,
+                                                   IPPROTO_TCP, 0);
                 break;
         case IPPROTO_UDP:
+                err = maybe_pull_tail(skb,
+                                      off + sizeof(struct udphdr),
+                                      MAX_IP_HDR_LEN);
+                if (err < 0)
+                        goto out;
+
                 if (!skb_partial_csum_set(skb, off,
                                           offsetof(struct udphdr, check)))
                         goto out;
 
-                if (recalculate_partial_csum) {
-                        struct udphdr *udph = udp_hdr(skb);
-
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct udphdr);
-                        maybe_pull_tail(skb, header_size);
-
-                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                         skb->len - off,
-                                                         IPPROTO_UDP, 0);
-                }
+                if (recalculate_partial_csum)
+                        udp_hdr(skb)->check =
+                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                                   ip_hdr(skb)->daddr,
+                                                   skb->len - off,
+                                                   IPPROTO_UDP, 0);
                 break;
         default:
-                if (net_ratelimit())
-                        netdev_err(vif->dev,
-                                   "Attempting to checksum a non-TCP/UDP packet, "
-                                   "dropping a protocol %d packet\n",
-                                   iph->protocol);
                 goto out;
         }
 
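In the rework above, maybe_pull_tail() reports failure (-ENOMEM when the pull itself fails, -EPROTO when the linear area still cannot cover the requested header) instead of silently doing nothing, and each header is pulled before it is dereferenced, bounded by MAX_IP_HDR_LEN. A sketch of that calling convention; parse_l4() is a hypothetical caller written against the helper defined in the hunk above, not driver code:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

static int parse_l4(struct sk_buff *skb)
{
        unsigned int off;
        int err;

        /* Linearize at least the IP header before reading it. */
        err = maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN);
        if (err < 0)
                return err;             /* no longer a silent no-op */

        off = ip_hdrlen(skb);           /* safe: header is in the linear area */

        /* Pull the transport header too before touching tcp_hdr(skb). */
        err = maybe_pull_tail(skb, off + sizeof(struct tcphdr),
                              MAX_IP_HDR_LEN);
        if (err < 0)
                return err;

        return 0;
}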
@@ -1227,121 +1244,138 @@ out:
         return err;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+        (type *)(skb_network_header(skb) + (off))
+
 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                                int recalculate_partial_csum)
 {
-        int err = -EPROTO;
-        struct ipv6hdr *ipv6h = (void *)skb->data;
+        int err;
         u8 nexthdr;
-        unsigned int header_size;
         unsigned int off;
+        unsigned int len;
         bool fragment;
         bool done;
 
+        fragment = false;
         done = false;
 
         off = sizeof(struct ipv6hdr);
 
-        header_size = skb->network_header + off;
-        maybe_pull_tail(skb, header_size);
+        err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+        if (err < 0)
+                goto out;
 
-        nexthdr = ipv6h->nexthdr;
+        nexthdr = ipv6_hdr(skb)->nexthdr;
 
-        while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
-               !done) {
+        len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+        while (off <= len && !done) {
                 switch (nexthdr) {
                 case IPPROTO_DSTOPTS:
                 case IPPROTO_HOPOPTS:
                 case IPPROTO_ROUTING: {
-                        struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+                        struct ipv6_opt_hdr *hp;
 
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct ipv6_opt_hdr);
-                        maybe_pull_tail(skb, header_size);
+                        err = maybe_pull_tail(skb,
+                                              off +
+                                              sizeof(struct ipv6_opt_hdr),
+                                              MAX_IPV6_HDR_LEN);
+                        if (err < 0)
+                                goto out;
 
+                        hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
                         nexthdr = hp->nexthdr;
                         off += ipv6_optlen(hp);
                         break;
                 }
                 case IPPROTO_AH: {
-                        struct ip_auth_hdr *hp = (void *)(skb->data + off);
+                        struct ip_auth_hdr *hp;
 
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct ip_auth_hdr);
-                        maybe_pull_tail(skb, header_size);
+                        err = maybe_pull_tail(skb,
+                                              off +
+                                              sizeof(struct ip_auth_hdr),
+                                              MAX_IPV6_HDR_LEN);
+                        if (err < 0)
+                                goto out;
 
+                        hp = OPT_HDR(struct ip_auth_hdr, skb, off);
                         nexthdr = hp->nexthdr;
-                        off += (hp->hdrlen+2)<<2;
+                        off += ipv6_authlen(hp);
+                        break;
+                }
+                case IPPROTO_FRAGMENT: {
+                        struct frag_hdr *hp;
+
+                        err = maybe_pull_tail(skb,
+                                              off +
+                                              sizeof(struct frag_hdr),
+                                              MAX_IPV6_HDR_LEN);
+                        if (err < 0)
+                                goto out;
+
+                        hp = OPT_HDR(struct frag_hdr, skb, off);
+
+                        if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+                                fragment = true;
+
+                        nexthdr = hp->nexthdr;
+                        off += sizeof(struct frag_hdr);
                         break;
                 }
-                case IPPROTO_FRAGMENT:
-                        fragment = true;
-                        /* fall through */
                 default:
                         done = true;
                         break;
                 }
         }
 
-        if (!done) {
-                if (net_ratelimit())
-                        netdev_err(vif->dev, "Failed to parse packet header\n");
-                goto out;
-        }
+        err = -EPROTO;
 
-        if (fragment) {
-                if (net_ratelimit())
-                        netdev_err(vif->dev, "Packet is a fragment!\n");
+        if (!done || fragment)
                 goto out;
-        }
 
         switch (nexthdr) {
         case IPPROTO_TCP:
+                err = maybe_pull_tail(skb,
+                                      off + sizeof(struct tcphdr),
+                                      MAX_IPV6_HDR_LEN);
+                if (err < 0)
+                        goto out;
+
                 if (!skb_partial_csum_set(skb, off,
                                           offsetof(struct tcphdr, check)))
                         goto out;
 
-                if (recalculate_partial_csum) {
-                        struct tcphdr *tcph = tcp_hdr(skb);
-
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct tcphdr);
-                        maybe_pull_tail(skb, header_size);
-
-                        tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                                                       &ipv6h->daddr,
-                                                       skb->len - off,
-                                                       IPPROTO_TCP, 0);
-                }
+                if (recalculate_partial_csum)
+                        tcp_hdr(skb)->check =
+                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                 &ipv6_hdr(skb)->daddr,
+                                                 skb->len - off,
+                                                 IPPROTO_TCP, 0);
                 break;
         case IPPROTO_UDP:
+                err = maybe_pull_tail(skb,
+                                      off + sizeof(struct udphdr),
+                                      MAX_IPV6_HDR_LEN);
+                if (err < 0)
+                        goto out;
+
                 if (!skb_partial_csum_set(skb, off,
                                           offsetof(struct udphdr, check)))
                         goto out;
 
-                if (recalculate_partial_csum) {
-                        struct udphdr *udph = udp_hdr(skb);
-
-                        header_size = skb->network_header +
-                                off +
-                                sizeof(struct udphdr);
-                        maybe_pull_tail(skb, header_size);
-
-                        udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                                                       &ipv6h->daddr,
-                                                       skb->len - off,
-                                                       IPPROTO_UDP, 0);
-                }
+                if (recalculate_partial_csum)
+                        udp_hdr(skb)->check =
+                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                 &ipv6_hdr(skb)->daddr,
+                                                 skb->len - off,
+                                                 IPPROTO_UDP, 0);
                 break;
         default:
-                if (net_ratelimit())
-                        netdev_err(vif->dev,
-                                   "Attempting to checksum a non-TCP/UDP packet, "
-                                   "dropping a protocol %d packet\n",
-                                   nexthdr);
                 goto out;
         }
 
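The IPv6 rework reads each extension header through OPT_HDR(), i.e. relative to the network header rather than skb->data, and IPPROTO_FRAGMENT is now parsed instead of aborting the walk: only a fragment header with a non-zero offset or the more-fragments bit set marks the packet as an actual fragment. The test restated standalone (ipv6_is_fragment() is a hypothetical helper, not a kernel API):

#include <linux/types.h>
#include <net/ipv6.h>

static bool ipv6_is_fragment(const struct frag_hdr *hp)
{
        /* IP6_OFFSET masks the 13-bit fragment offset; IP6_MF is the
         * more-fragments bit. Both clear means an atomic fragment,
         * which checksum setup can still handle. */
        return (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) != 0;
}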
@@ -1411,14 +1445,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
         return false;
 }
 
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
         struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
         struct sk_buff *skb;
         int ret;
 
         while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-                < MAX_PENDING_REQS)) {
+                < MAX_PENDING_REQS) &&
+               (skb_queue_len(&vif->tx_queue) < budget)) {
                 struct xen_netif_tx_request txreq;
                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
                 struct page *page;
@@ -1440,7 +1475,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
                         continue;
                 }
 
-                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+                work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
                 if (!work_to_do)
                         break;
 
@@ -1580,14 +1615,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
 }
 
 
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
 {
         struct gnttab_copy *gop = vif->tx_copy_ops;
         struct sk_buff *skb;
         int work_done = 0;
 
-        while (work_done < budget &&
-               (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+        while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
                 struct xen_netif_tx_request *txp;
                 u16 pending_idx;
                 unsigned data_len;
@@ -1662,14 +1696,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
         if (unlikely(!tx_work_todo(vif)))
                 return 0;
 
-        nr_gops = xenvif_tx_build_gops(vif);
+        nr_gops = xenvif_tx_build_gops(vif, budget);
 
         if (nr_gops == 0)
                 return 0;
 
         gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
 
-        work_done = xenvif_tx_submit(vif, nr_gops);
+        work_done = xenvif_tx_submit(vif);
 
         return work_done;
 }
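Passing budget into xenvif_tx_build_gops() enforces the limit while packets are queued, before any grant copies are issued, rather than letting xenvif_tx_submit() stop early after the whole batch was already copied. A reduced sketch of the NAPI poll path this serves, assuming the vif embeds a napi_struct; the real xenvif_poll() also re-checks the ring before completing:

#include <linux/netdevice.h>

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        /* At most 'budget' packets are queued by xenvif_tx_build_gops(),
         * so the grant-copy batch and xenvif_tx_submit() are bounded too. */
        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget)
                napi_complete(napi);    /* ring drained within budget */

        return work_done;
}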