author     Ursula Braun <ursula.braun@de.ibm.com>     2013-04-07 18:19:26 -0400
committer  David S. Miller <davem@davemloft.net>      2013-04-08 17:16:57 -0400
commit     f9c41a62bba3f3f7ef3541b2a025e3371bcbba97 (patch)
tree       39aa5a3b8a584cff0d1974e60d22015a26d04e41
parent     88c5b5ce5cb57af6ca2a7cf4d5715fa320448ff9 (diff)
af_iucv: fix recvmsg by replacing skb_pull() function
When receiving data messages, the "BUG_ON(skb->len < skb->data_len)" in
the skb_pull() function triggers a kernel panic. Replace the skb_pull
logic by a per skb offset as advised by Eric Dumazet.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <blaschka@linux.vnet.ibm.com>
Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/iucv/af_iucv.h |  8
-rw-r--r--  net/iucv/af_iucv.c         | 34
2 files changed, 24 insertions(+), 18 deletions(-)
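The gist of the change: iucv_sock_recvmsg() used to trim already-delivered bytes off the head of the queued skb with skb_pull(), which can panic on the BUG_ON(skb->len < skb->data_len) check; instead, the receive path now records how far the reader has progressed in a small per-skb iucv_skb_cb overlaid on skb->cb[] and leaves the skb itself untouched. Below is a minimal user-space sketch of that pattern, not kernel code; fake_skb, fake_skb_cb, FAKE_SKB_CB() and fake_recv() are illustrative names and not part of af_iucv.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct sk_buff: a payload plus a small
 * control buffer (cb) that a protocol may overlay with its own state. */
struct fake_skb {
	const char *data;
	unsigned int len;
	char cb[48];
};

/* Per-skb receive state kept in cb[], mirroring struct iucv_skb_cb. */
struct fake_skb_cb {
	unsigned int offset;	/* bytes already handed out to the reader */
};
#define FAKE_SKB_CB(skb)	((struct fake_skb_cb *)&((skb)->cb[0]))

/* Copy up to buflen unread bytes; on a partial read advance the offset
 * instead of pulling data off the skb, so the skb is never modified. */
static unsigned int fake_recv(struct fake_skb *skb, char *buf, unsigned int buflen)
{
	unsigned int off = FAKE_SKB_CB(skb)->offset;
	unsigned int rlen = skb->len - off;	/* unread bytes left */
	unsigned int copied = rlen < buflen ? rlen : buflen;

	memcpy(buf, skb->data + off, copied);
	if (copied < rlen)
		FAKE_SKB_CB(skb)->offset = off + copied;
	return copied;
}

int main(void)
{
	struct fake_skb skb = { .data = "hello, iucv", .len = 11 };
	char buf[6];

	/* mirrors the patch initializing IUCV_SKB_CB(skb)->offset = 0 on enqueue */
	memset(skb.cb, 0, sizeof(skb.cb));
	printf("%u bytes\n", fake_recv(&skb, buf, 5));	/* first call returns "hello" */
	printf("%u bytes\n", fake_recv(&skb, buf, 6));	/* second call returns ", iucv" */
	return 0;
}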
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index cc7c19732389..714cc9a54a4c 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -130,6 +130,14 @@ struct iucv_sock {
 			    enum iucv_tx_notify n);
 };
 
+struct iucv_skb_cb {
+	u32	class;		/* target class of message */
+	u32	tag;		/* tag associated with message */
+	u32	offset;		/* offset for skb receival */
+};
+
+#define IUCV_SKB_CB(__skb)	((struct iucv_skb_cb *)&((__skb)->cb[0]))
+
 /* iucv socket options (SOL_IUCV) */
 #define SO_IPRMDATA_MSG		0x0080		/* send/recv IPRM_DATA msgs */
 #define SO_MSGLIMIT		0x1000		/* get/set IUCV MSGLIMIT */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index bf6935820001..206ce6db2c36 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
 
 #define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
 
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
-#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
-
 #define __iucv_sock_wait(sk, condition, timeo, ret)			\
 do {									\
 	DEFINE_WAIT(__wait);						\
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
-	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	IUCV_SKB_CB(skb)->tag = txmsg.tag;
 
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 			return -ENOMEM;
 
 		/* copy target class to control buffer of new skb */
-		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
 
 		/* copy data fragment */
 		memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
 	/* store msg target class in the second 4 bytes of skb ctrl buffer */
 	/* Note: the first 4 bytes are reserved for msg tag */
-	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+	IUCV_SKB_CB(skb)->class = msg->class;
 
 	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
 	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 		}
 	}
 
+	IUCV_SKB_CB(skb)->offset = 0;
 	if (sock_queue_rcv_skb(sk, skb))
 		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
 }
@@ -1327,6 +1322,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned int copied, rlen;
 	struct sk_buff *skb, *rskb, *cskb;
 	int err = 0;
+	u32 offset;
 
 	msg->msg_namelen = 0;
 
@@ -1348,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	rlen   = skb->len;		/* real length of skb */
+	offset = IUCV_SKB_CB(skb)->offset;
+	rlen   = skb->len - offset;	/* real length of skb */
 	copied = min_t(unsigned int, rlen, len);
 	if (!rlen)
 		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
 	cskb = skb;
-	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
@@ -1372,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * get the trgcls from the control buffer of the skb due to
 	 * fragmentation of original iucv message. */
 	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
-		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
+		       sizeof(IUCV_SKB_CB(skb)->class),
+		       (void *)&IUCV_SKB_CB(skb)->class);
 	if (err) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1384,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* SOCK_STREAM: re-queue skb if it contains unreceived data */
 	if (sk->sk_type == SOCK_STREAM) {
-		skb_pull(skb, copied);
-		if (skb->len) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
+		if (copied < rlen) {
+			IUCV_SKB_CB(skb)->offset = offset + copied;
 			goto done;
 		}
 	}
@@ -1405,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		spin_lock_bh(&iucv->message_q.lock);
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
+			IUCV_SKB_CB(rskb)->offset = 0;
 			if (sock_queue_rcv_skb(sk, rskb)) {
 				skb_queue_head(&iucv->backlog_skb_q,
 						rskb);
@@ -1832,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+		if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
 			this = list_skb;
 			break;
 		}
@@ -2093,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
+	IUCV_SKB_CB(skb)->offset = 0;
 	spin_lock(&iucv->message_q.lock);
 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
 		if (sock_queue_rcv_skb(sk, skb)) {
@@ -2197,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* fall through and receive zero length data */
 	case 0:
 		/* plain data frame */
-		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
-		       CB_TRGCLS_LEN);
+		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
 		err = afiucv_hs_callback_rx(sk, skb);
 		break;
 	default: