aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJulian Wiedmann <jwi@linux.ibm.com>2018-09-05 10:55:11 -0400
committerDavid S. Miller <davem@davemloft.net>2018-09-06 01:32:22 -0400
commitb2f543949acd1ba64313fdad9e672ef47550d773 (patch)
treed74f5146a3c826668397c59eca92b492a34e13c9
parent222440996d6daf635bed6cb35041be22ede3e8a0 (diff)
net/af_iucv: fix skb handling on HiperTransport xmit error
When sending an skb, afiucv_hs_send() bails out on various error conditions.
But currently the caller has no way of telling whether the skb was freed or
not — resulting in potentially either a) leaked skbs from iucv_send_ctrl(),
or b) double-frees from iucv_sock_sendmsg().

As dev_queue_xmit() will always consume the skb (even on error), be
consistent and also free the skb from all other error paths. This way
callers no longer need to care about managing the skb.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/iucv/af_iucv.c	34
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 01000c14417f..e2f16a0173a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
 	skb->dev = iucv->hs_dev;
-	if (!skb->dev)
-		return -ENODEV;
-	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
-		return -ENETDOWN;
+	if (!skb->dev) {
+		err = -ENODEV;
+		goto err_free;
+	}
+	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+		err = -ENETDOWN;
+		goto err_free;
+	}
 	if (skb->len > skb->dev->mtu) {
-		if (sock->sk_type == SOCK_SEQPACKET)
-			return -EMSGSIZE;
-		else
-			skb_trim(skb, skb->dev->mtu);
+		if (sock->sk_type == SOCK_SEQPACKET) {
+			err = -EMSGSIZE;
+			goto err_free;
+		}
+		skb_trim(skb, skb->dev->mtu);
 	}
 	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
 	nskb = skb_clone(skb, GFP_ATOMIC);
-	if (!nskb)
-		return -ENOMEM;
+	if (!nskb) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
 	skb_queue_tail(&iucv->send_skb_q, nskb);
 	err = dev_queue_xmit(skb);
 	if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
 	}
 	return net_xmit_eval(err);
+
+err_free:
+	kfree_skb(skb);
+	return err;
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			err = afiucv_hs_send(&txmsg, sk, skb, 0);
 			if (err) {
 				atomic_dec(&iucv->msg_sent);
-				goto fail;
+				goto out;
 			}
 		} else {	/* Classic VM IUCV transport */
 			skb_queue_tail(&iucv->send_skb_q, skb);