author     David S. Miller <davem@davemloft.net>  2013-04-22 20:32:51 -0400
committer  David S. Miller <davem@davemloft.net>  2013-04-22 20:32:51 -0400
commit     6e0895c2ea326cc4bb11e8fa2f654628d5754c31
tree       7089303ac11a12edc43a8c4fa1b23974e10937ea  /net/iucv
parent     55fbbe46e9eb3cbe6c335503f5550855a1128dce
parent     60d509fa6a9c4653a86ad830e4c4b30360b23f0e
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
include/net/scm.h
net/batman-adv/routing.c
net/ipv4/tcp_input.c
The e{uid,gid} --> {uid,gid} credentials fix conflicted with the
cleanup in net-next that now passes cred structs around.
The be2net driver had a bug fix in 'net' that overlapped with the VLAN
interface changes by Patrick McHardy in net-next.
An IGB conflict existed because in 'net' the build_skb() support was
reverted, and in 'net-next' there was a comment style fix within that
code.
Several batman-adv conflicts were resolved by making sure that all
calls to batadv_is_my_mac() are changed to have a new bat_priv first
argument.
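
For reference, the shape of the batadv_is_my_mac() change (prototypes
paraphrased for illustration only, not copied from the batman-adv tree):

    /* before: a single global check across all mesh interfaces */
    int batadv_is_my_mac(const uint8_t *addr);

    /* after: the check is scoped to one mesh via its private data */
    int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);

    /* so a caller in routing.c changes along the lines of
     *     if (batadv_is_my_mac(ethhdr->h_dest))
     * becoming
     *     if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
     */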
Eric Dumazet's TS ECR fix in TCP in 'net' conflicted with the F-RTO
rewrite in 'net-next', mostly overlapping changes.
Thanks to Stephen Rothwell and Antonio Quartulli for help with several
of these merge resolutions.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/iucv')
-rw-r--r--  net/iucv/af_iucv.c  34
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e165e8dc962e..ae691651b721 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
 
 #define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
 
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
-#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
-
 #define __iucv_sock_wait(sk, condition, timeo, ret)			\
 do {									\
 	DEFINE_WAIT(__wait);						\
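
The CB_* macros removed above addressed skb->cb through hand-computed byte
offsets; the IUCV_SKB_CB() accessor used throughout the hunks below treats
the control buffer as a small struct instead. A minimal sketch of that
pattern, with the field names implied by the accesses in this diff (the
exact definition lives in the AF_IUCV header and may differ in detail):

    /* Sketch only: control-buffer overlay implied by the accesses below
     * (->tag, ->class, ->offset); not copied verbatim from af_iucv.h. */
    struct iucv_skb_cb {
            u32 class;      /* target class of the IUCV message */
            u32 tag;        /* tag used to match tx completions */
            u32 offset;     /* bytes of this skb already delivered to user space */
    };

    #define IUCV_SKB_CB(__skb)  ((struct iucv_skb_cb *)&((__skb)->cb[0]))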
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
-	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	IUCV_SKB_CB(skb)->tag = txmsg.tag;
 
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 			return -ENOMEM;
 
 		/* copy target class to control buffer of new skb */
-		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
 
 		/* copy data fragment */
 		memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
 	/* store msg target class in the second 4 bytes of skb ctrl buffer */
 	/* Note: the first 4 bytes are reserved for msg tag */
-	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+	IUCV_SKB_CB(skb)->class = msg->class;
 
 	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
 	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 		}
 	}
 
+	IUCV_SKB_CB(skb)->offset = 0;
 	if (sock_queue_rcv_skb(sk, skb))
 		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
 }
@@ -1327,6 +1322,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned int copied, rlen;
 	struct sk_buff *skb, *rskb, *cskb;
 	int err = 0;
+	u32 offset;
 
 	msg->msg_namelen = 0;
 
@@ -1348,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	rlen  = skb->len;		/* real length of skb */
+	offset = IUCV_SKB_CB(skb)->offset;
+	rlen   = skb->len - offset;	/* real length of skb */
 	copied = min_t(unsigned int, rlen, len);
 	if (!rlen)
 		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
 	cskb = skb;
-	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
@@ -1372,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * get the trgcls from the control buffer of the skb due to
 	 * fragmentation of original iucv message. */
 	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
-		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
+		       sizeof(IUCV_SKB_CB(skb)->class),
+		       (void *)&IUCV_SKB_CB(skb)->class);
 	if (err) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
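
The target class reaches user space as ancillary data on recvmsg(). A
minimal sketch of how a receiver could pick it up, assuming an already
connected AF_IUCV socket; SOL_IUCV and SCM_IUCV_TRGCLS come from the kernel
headers, and the fallback values below are illustrative only:

    #include <stdint.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    #ifndef SOL_IUCV
    #define SOL_IUCV        277     /* fallback, see linux/socket.h */
    #endif
    #ifndef SCM_IUCV_TRGCLS
    #define SCM_IUCV_TRGCLS 0x0001  /* fallback, see the AF_IUCV header */
    #endif

    /* Receive one chunk from a connected AF_IUCV socket and, if present,
     * store the IUCV target class delivered via the SCM_IUCV_TRGCLS cmsg. */
    static ssize_t recv_with_trgcls(int fd, void *buf, size_t len, uint32_t *trgcls)
    {
            char cbuf[CMSG_SPACE(sizeof(uint32_t))];
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov        = &iov,
                    .msg_iovlen     = 1,
                    .msg_control    = cbuf,
                    .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg;
            ssize_t n = recvmsg(fd, &msg, 0);

            if (n < 0)
                    return n;
            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                    if (cmsg->cmsg_level == SOL_IUCV &&
                        cmsg->cmsg_type == SCM_IUCV_TRGCLS)
                            memcpy(trgcls, CMSG_DATA(cmsg), sizeof(*trgcls));
            return n;
    }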
@@ -1384,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* SOCK_STREAM: re-queue skb if it contains unreceived data */
 	if (sk->sk_type == SOCK_STREAM) {
-		skb_pull(skb, copied);
-		if (skb->len) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
+		if (copied < rlen) {
+			IUCV_SKB_CB(skb)->offset = offset + copied;
 			goto done;
 		}
 	}
@@ -1405,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		spin_lock_bh(&iucv->message_q.lock);
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
+			IUCV_SKB_CB(rskb)->offset = 0;
 			if (sock_queue_rcv_skb(sk, rskb)) {
 				skb_queue_head(&iucv->backlog_skb_q,
 						rskb);
@@ -1833,7 +1831,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+		if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
 			this = list_skb;
 			break;
 		}
@@ -2094,6 +2092,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
+	IUCV_SKB_CB(skb)->offset = 0;
 	spin_lock(&iucv->message_q.lock);
 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
 		if (sock_queue_rcv_skb(sk, skb)) {
@@ -2198,8 +2197,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* fall through and receive zero length data */
 	case 0:
 		/* plain data frame */
-		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
-		       CB_TRGCLS_LEN);
+		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
 		err = afiucv_hs_callback_rx(sk, skb);
 		break;
 	default: