author		Ursula Braun <ursula.braun@de.ibm.com>		2011-12-19 17:56:29 -0500
committer	David S. Miller <davem@davemloft.net>		2011-12-20 14:05:03 -0500
commit		816abbadf981e64b2342e1a875592623619560a4 (patch)
tree		c930e4633966e25cb129ff9d86c21281780d4550 /net/iucv/af_iucv.c
parent		42bd48e0145567acf7b3d2ae48bea765315bdd89 (diff)
af_iucv: release reference to HS device
For the HiperSockets transport, sent skbs are bound to one of the
available HiperSockets devices. Add the missing release of the device
reference before freeing an skb.
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
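
The pattern the fix enforces: afiucv_hs_send() pins the bound device with
dev_get_by_index() and parks a clone of the outgoing skb on send_skb_q, so
every path that later frees a queued skb must drop that device reference
first. A minimal sketch of the pairing under a simplified send path
(hs_xmit_sketch and the bare send_q parameter are illustrative, not
af_iucv code):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch only: bind an skb to its transmitting device and
 * keep the device pinned for as long as a queued skb holds the pointer.
 */
static int hs_xmit_sketch(struct net *net, struct sk_buff_head *send_q,
			  struct sk_buff *skb, int ifindex)
{
	struct sk_buff *nskb;
	int err;

	skb->dev = dev_get_by_index(net, ifindex);	/* refcount +1 */
	if (!skb->dev)
		return -ENODEV;

	nskb = skb_clone(skb, GFP_ATOMIC);	/* clone shares skb->dev */
	if (!nskb) {
		dev_put(skb->dev);
		return -ENOMEM;
	}
	skb_queue_tail(send_q, nskb);	/* reference travels with nskb */

	err = dev_queue_xmit(skb);	/* consumes skb on all paths */
	if (err) {
		skb_unlink(nskb, send_q);
		dev_put(nskb->dev);	/* the fix: balance the get */
		kfree_skb(nskb);
	}
	/* on success the reference is dropped when the tx notification
	 * or a queue purge frees nskb, as in the hunks below */
	return err;
}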
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--	net/iucv/af_iucv.c | 37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 32a5010b2940..ad90cf29c96e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -130,6 +130,17 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
 	memcpy(&dst[8], src, 8);
 }
 
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		if (skb->dev)
+			dev_put(skb->dev);
+		kfree_skb(skb);
+	}
+}
+
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -164,7 +175,7 @@ static int afiucv_pm_freeze(struct device *dev)
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
 		case IUCV_SEVERED:
@@ -366,9 +377,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	if (imsg)
 		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-	rcu_read_lock();
-	skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
-	rcu_read_unlock();
+	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
 	if (!skb->dev)
 		return -ENODEV;
 	if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +397,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	err = dev_queue_xmit(skb);
 	if (err) {
 		skb_unlink(nskb, &iucv->send_skb_q);
+		dev_put(nskb->dev);
 		kfree_skb(nskb);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -481,16 +491,14 @@ static void iucv_sock_close(struct sock *sk)
 			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
 			skb = sock_alloc_send_skb(sk, blen, 1, &err);
 			if (skb) {
-				skb_reserve(skb,
-					sizeof(struct af_iucv_trans_hdr) +
-					ETH_HLEN);
+				skb_reserve(skb, blen);
 				err = afiucv_hs_send(NULL, sk, skb,
 						     AF_IUCV_FLAG_FIN);
 			}
 			sk->sk_state = IUCV_DISCONN;
 			sk->sk_state_change(sk);
 		}
-	case IUCV_DISCONN:
+	case IUCV_DISCONN:	/* fall through */
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -520,7 +528,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_err = ECONNRESET;
 		sk->sk_state_change(sk);
 
-		skb_queue_purge(&iucv->send_skb_q);
+		iucv_skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		break;
 
@@ -739,7 +747,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 		if (!memcmp(dev->perm_addr, uid, 8)) {
 			memcpy(iucv->src_name, sa->siucv_name, 8);
 			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
-			sock->sk->sk_bound_dev_if = dev->ifindex;
+			sk->sk_bound_dev_if = dev->ifindex;
 			sk->sk_state = IUCV_BOUND;
 			iucv->transport = AF_IUCV_TRANS_HIPER;
 			if (!iucv->msglimit)
@@ -1225,6 +1233,8 @@ release:
 	return len;
 
 fail:
+	if (skb->dev)
+		dev_put(skb->dev);
 	kfree_skb(skb);
 out:
 	release_sock(sk);
@@ -1441,9 +1451,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 					ETH_HLEN;
 			sskb = sock_alloc_send_skb(sk, blen, 1, &err);
 			if (sskb) {
-				skb_reserve(sskb,
-					sizeof(struct af_iucv_trans_hdr)
-					+ ETH_HLEN);
+				skb_reserve(sskb, blen);
 				err = afiucv_hs_send(NULL, sk, sskb,
 						     AF_IUCV_FLAG_WIN);
 			}
@@ -2261,6 +2269,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 		case TX_NOTIFY_OK:
 			__skb_unlink(this, list);
 			iucv_sock_wake_msglim(sk);
+			dev_put(this->dev);
 			kfree_skb(this);
 			break;
 		case TX_NOTIFY_PENDING:
@@ -2271,6 +2280,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			atomic_dec(&iucv->pendings);
 			if (atomic_read(&iucv->pendings) <= 0)
 				iucv_sock_wake_msglim(sk);
+			dev_put(this->dev);
 			kfree_skb(this);
 			break;
 		case TX_NOTIFY_UNREACHABLE:
@@ -2279,6 +2289,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 		case TX_NOTIFY_GENERALERROR:
 		case TX_NOTIFY_DELAYED_GENERALERROR:
 			__skb_unlink(this, list);
+			dev_put(this->dev);
 			kfree_skb(this);
 			if (!list_empty(&iucv->accept_q))
 				sk->sk_state = IUCV_SEVERED;