author    | Ursula Braun <ursula.braun@de.ibm.com>  | 2012-02-07 19:19:47 -0500
committer | David S. Miller <davem@davemloft.net>   | 2012-02-08 18:50:19 -0500
commit    | 800c5eb7b5eba6cb2a32738d763fd59f0fbcdde4
tree      | 870a32bb186b395c7e70da55a1d1b17e37342336 | /net/iucv
parent    | 7f1b0ea42a800713a3d56e1e8ca1a845e0461ca2
af_iucv: change net_device handling for HS transport
This patch saves the net_device in the iucv_sock structure during
bind in order to speed up skb sending.
In addition, some other small improvements are made for the HS transport:
- error checking when sending skbs
- locking changes in afiucv_hs_callback_txnotify
- skb freeing in afiucv_hs_callback_txnotify
Finally, it cleans up the code by removing iucv_skb_queue_purge().
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/iucv')
-rw-r--r-- | net/iucv/af_iucv.c | 119
1 file changed, 62 insertions, 57 deletions
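To illustrate the core change: instead of resolving sk_bound_dev_if with dev_get_by_index() for every outgoing skb, the socket now holds one reference to its HiperSockets net_device from bind until close. Below is a minimal, hypothetical sketch of that pattern; the names example_sock, example_bind, example_send_check and example_close are invented for illustration and are not part of af_iucv.

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical sketch only; simplified names, not the af_iucv code itself. */
struct example_sock {
	struct net_device *hs_dev;	/* cached HiperSockets device */
};

/* bind: look the device up once; dev_get_by_index() already takes a reference */
static int example_bind(struct example_sock *es, struct net *net, int ifindex)
{
	struct net_device *dev = dev_get_by_index(net, ifindex);

	if (!dev)
		return -ENODEV;
	es->hs_dev = dev;	/* keep the reference for the socket's lifetime */
	return 0;
}

/* send path: no per-packet device lookup, just check the cached device */
static int example_send_check(struct example_sock *es)
{
	if (!es->hs_dev)
		return -ENODEV;
	if (!(es->hs_dev->flags & IFF_UP) || !netif_carrier_ok(es->hs_dev))
		return -ENETDOWN;
	return 0;
}

/* close: drop the single reference taken at bind time */
static void example_close(struct example_sock *es)
{
	if (es->hs_dev) {
		dev_put(es->hs_dev);
		es->hs_dev = NULL;
	}
}

Holding the reference once per socket removes a per-packet lookup and a per-packet dev_put(), which is where the sending speed-up described above comes from.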
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index ef6ab71921fc..fbce4a3126de 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -131,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
 	memcpy(&dst[8], src, 8);
 }
 
-static void iucv_skb_queue_purge(struct sk_buff_head *list)
-{
-	struct sk_buff *skb;
-
-	while ((skb = skb_dequeue(list)) != NULL) {
-		if (skb->dev)
-			dev_put(skb->dev);
-		kfree_skb(skb);
-	}
-}
-
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -176,7 +165,7 @@ static int afiucv_pm_freeze(struct device *dev)
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
 		iucv = iucv_sk(sk);
-		iucv_skb_queue_purge(&iucv->send_skb_q);
+		skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 		switch (sk->sk_state) {
 		case IUCV_DISCONN:
@@ -337,7 +326,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 		   struct sk_buff *skb, u8 flags)
 {
-	struct net *net = sock_net(sock);
 	struct iucv_sock *iucv = iucv_sk(sock);
 	struct af_iucv_trans_hdr *phs_hdr;
 	struct sk_buff *nskb;
@@ -374,10 +362,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	if (imsg)
 		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
+	skb->dev = iucv->hs_dev;
 	if (!skb->dev)
 		return -ENODEV;
-	if (!(skb->dev->flags & IFF_UP))
+	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
 		return -ENETDOWN;
 	if (skb->len > skb->dev->mtu) {
 		if (sock->sk_type == SOCK_SEQPACKET)
@@ -392,15 +380,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 		return -ENOMEM;
 	skb_queue_tail(&iucv->send_skb_q, nskb);
 	err = dev_queue_xmit(skb);
-	if (err) {
+	if (net_xmit_eval(err)) {
 		skb_unlink(nskb, &iucv->send_skb_q);
-		dev_put(nskb->dev);
 		kfree_skb(nskb);
 	} else {
 		atomic_sub(confirm_recv, &iucv->msg_recv);
 		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
 	}
-	return err;
+	return net_xmit_eval(err);
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
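One detail worth calling out in the afiucv_hs_send() change above: dev_queue_xmit() may return NET_XMIT_CN (congestion notification), which callers conventionally treat as success rather than a hard failure, so treating every non-zero return as an error would wrongly drop the copy kept on send_skb_q. net_xmit_eval() filters that case out; in kernels of this era it is defined in include/linux/netdevice.h roughly as:

#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))

With that filter, the queued copy is only unlinked and freed on hard errors, and the same filtered value is returned to the caller.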
@@ -471,7 +458,8 @@ static void iucv_sock_close(struct sock *sk)
 {
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned long timeo;
-	int err, blen;
+	int err = 0;
+	int blen;
 	struct sk_buff *skb;
 
 	lock_sock(sk);
@@ -498,7 +486,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
-		if (!skb_queue_empty(&iucv->send_skb_q)) {
+		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
 			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 				timeo = sk->sk_lingertime;
 			else
@@ -515,13 +503,19 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_err = ECONNRESET;
 		sk->sk_state_change(sk);
 
-		iucv_skb_queue_purge(&iucv->send_skb_q);
+		skb_queue_purge(&iucv->send_skb_q);
 		skb_queue_purge(&iucv->backlog_skb_q);
 
 	default:   /* fall through */
 		iucv_sever_path(sk, 1);
 	}
 
+	if (iucv->hs_dev) {
+		dev_put(iucv->hs_dev);
+		iucv->hs_dev = NULL;
+		sk->sk_bound_dev_if = 0;
+	}
+
 	/* mark socket for deletion by iucv_sock_kill() */
 	sock_set_flag(sk, SOCK_ZAPPED);
 
@@ -713,7 +707,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 		goto done_unlock;
 
 	/* Bind the socket */
-
 	if (pr_iucv)
 		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
 			goto vm_bind; /* VM IUCV transport */
@@ -727,6 +720,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 			memcpy(iucv->src_name, sa->siucv_name, 8);
 			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
 			sk->sk_bound_dev_if = dev->ifindex;
+			iucv->hs_dev = dev;
+			dev_hold(dev);
 			sk->sk_state = IUCV_BOUND;
 			iucv->transport = AF_IUCV_TRANS_HIPER;
 			if (!iucv->msglimit)
@@ -1128,8 +1123,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 					   noblock, &err);
 	else
 		skb = sock_alloc_send_skb(sk, len, noblock, &err);
-	if (!skb)
+	if (!skb) {
+		err = -ENOMEM;
 		goto out;
+	}
 	if (iucv->transport == AF_IUCV_TRANS_HIPER)
 		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1152,6 +1149,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
 	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		atomic_inc(&iucv->msg_sent);
 		err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1206,8 +1204,6 @@ release:
 	return len;
 
 fail:
-	if (skb->dev)
-		dev_put(skb->dev);
 	kfree_skb(skb);
 out:
 	release_sock(sk);
@@ -1400,7 +1396,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		kfree_skb(skb);
-		atomic_inc(&iucv->msg_recv);
+		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+			atomic_inc(&iucv->msg_recv);
+			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
+				WARN_ON(1);
+				iucv_sock_close(sk);
+				return -EFAULT;
+			}
+		}
 
 		/* Queue backlog skbs */
 		spin_lock_bh(&iucv->message_q.lock);
@@ -1957,6 +1960,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
 	memcpy(niucv->src_name, iucv->src_name, 8);
 	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
 	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+	niucv->hs_dev = iucv->hs_dev;
+	dev_hold(niucv->hs_dev);
 	afiucv_swap_src_dest(skb);
 	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
 	trans_hdr->window = niucv->msglimit;
@@ -2025,12 +2030,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
 	struct iucv_sock *iucv = iucv_sk(sk);
 
 	/* other end of connection closed */
-	if (iucv) {
-		bh_lock_sock(sk);
+	if (!iucv)
+		goto out;
+	bh_lock_sock(sk);
+	if (sk->sk_state == IUCV_CONNECTED) {
 		sk->sk_state = IUCV_DISCONN;
 		sk->sk_state_change(sk);
-		bh_unlock_sock(sk);
 	}
+	bh_unlock_sock(sk);
+out:
 	kfree_skb(skb);
 	return NET_RX_SUCCESS;
 }
@@ -2175,11 +2183,11 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 		break;
 	case (AF_IUCV_FLAG_WIN):
 		err = afiucv_hs_callback_win(sk, skb);
-		if (skb->len > sizeof(struct af_iucv_trans_hdr))
-			err = afiucv_hs_callback_rx(sk, skb);
-		else
-			kfree(skb);
-		break;
+		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
+			kfree_skb(skb);
+			break;
+		}
+		/* fall through */
 	case 0:
 		/* plain data frame */
 		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2205,65 +2213,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 	struct iucv_sock *iucv = NULL;
 	struct sk_buff_head *list;
 	struct sk_buff *list_skb;
-	struct sk_buff *this = NULL;
+	struct sk_buff *nskb;
 	unsigned long flags;
 	struct hlist_node *node;
 
-	read_lock(&iucv_sk_list.lock);
+	read_lock_irqsave(&iucv_sk_list.lock, flags);
 	sk_for_each(sk, node, &iucv_sk_list.head)
 		if (sk == isk) {
 			iucv = iucv_sk(sk);
 			break;
 		}
-	read_unlock(&iucv_sk_list.lock);
+	read_unlock_irqrestore(&iucv_sk_list.lock, flags);
 
-	if (!iucv)
+	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
 		return;
 
-	bh_lock_sock(sk);
 	list = &iucv->send_skb_q;
-	list_skb = list->next;
+	spin_lock_irqsave(&list->lock, flags);
 	if (skb_queue_empty(list))
 		goto out_unlock;
-
-	spin_lock_irqsave(&list->lock, flags);
+	list_skb = list->next;
+	nskb = list_skb->next;
 	while (list_skb != (struct sk_buff *)list) {
 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
-			this = list_skb;
 			switch (n) {
 			case TX_NOTIFY_OK:
-				__skb_unlink(this, list);
+				__skb_unlink(list_skb, list);
+				kfree_skb(list_skb);
 				iucv_sock_wake_msglim(sk);
-				dev_put(this->dev);
-				kfree_skb(this);
 				break;
 			case TX_NOTIFY_PENDING:
 				atomic_inc(&iucv->pendings);
 				break;
 			case TX_NOTIFY_DELAYED_OK:
-				__skb_unlink(this, list);
+				__skb_unlink(list_skb, list);
 				atomic_dec(&iucv->pendings);
 				if (atomic_read(&iucv->pendings) <= 0)
 					iucv_sock_wake_msglim(sk);
-				dev_put(this->dev);
-				kfree_skb(this);
+				kfree_skb(list_skb);
 				break;
 			case TX_NOTIFY_UNREACHABLE:
 			case TX_NOTIFY_DELAYED_UNREACHABLE:
 			case TX_NOTIFY_TPQFULL: /* not yet used */
 			case TX_NOTIFY_GENERALERROR:
 			case TX_NOTIFY_DELAYED_GENERALERROR:
-				__skb_unlink(this, list);
-				dev_put(this->dev);
-				kfree_skb(this);
-				sk->sk_state = IUCV_DISCONN;
-				sk->sk_state_change(sk);
+				__skb_unlink(list_skb, list);
+				kfree_skb(list_skb);
+				if (sk->sk_state == IUCV_CONNECTED) {
+					sk->sk_state = IUCV_DISCONN;
+					sk->sk_state_change(sk);
+				}
 				break;
 			}
 			break;
 		}
-		list_skb = list_skb->next;
+		list_skb = nskb;
+		nskb = nskb->next;
 	}
+out_unlock:
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
@@ -2273,8 +2280,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 		}
 	}
 
-out_unlock:
-	bh_unlock_sock(sk);
 }
 static const struct proto_ops iucv_sock_ops = {
 	.family		= PF_IUCV,
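A note on the afiucv_hs_callback_txnotify() rework above: the send queue is now scanned under the queue's own lock taken with spin_lock_irqsave() rather than under bh_lock_sock(), the socket-list walk uses read_lock_irqsave(), presumably because the notification callback may run in interrupt context, and the loop saves the next queue element before the current one may be unlinked and freed. A stripped-down, hypothetical sketch of that iteration pattern follows; the helper name reap_first_match() is invented here and is not code from the patch.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical helper; sketch of the "save the next pointer before unlinking"
 * pattern, not code from the patch.
 */
static void reap_first_match(struct sk_buff_head *list, struct sk_buff *target)
{
	struct sk_buff *list_skb, *nskb;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;		/* saved before list_skb may be unlinked */
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(target)) {
			__skb_unlink(list_skb, list);
			kfree_skb(list_skb);
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);
}

Saving nskb up front keeps the walk valid after __skb_unlink(), and calling kfree_skb() under the queue lock is fine because freeing an skb does not sleep.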