aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEugene Crosser <Eugene.Crosser@ru.ibm.com>2016-06-13 12:46:15 -0400
committerDavid S. Miller <davem@davemloft.net>2016-06-15 15:21:04 -0400
commit291759a57532b7940b6e52c54ceebd6b8d9e113e (patch)
tree8047d7db0a82fd2abd1b50e6a77fcb6788817497
parente53743994e21d2458f0129d07b253d66f96f5742 (diff)
af_iucv: remove fragment_skb() to use paged SKBs
Before introducing paged skbs in the receive path, get rid of the function `iucv_fragment_skb()` that replaces one large linear skb with several smaller linear skbs. Signed-off-by: Eugene Crosser <Eugene.Crosser@ru.ibm.com> Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/iucv/af_iucv.c59
1 file changed, 3 insertions, 56 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 38448d17c006..9ed2adf9e057 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1231,44 +1231,6 @@ out:
1231 return err; 1231 return err;
1232} 1232}
1233 1233
1234/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
1235 *
1236 * Locking: must be called with message_q.lock held
1237 */
1238static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
1239{
1240 int dataleft, size, copied = 0;
1241 struct sk_buff *nskb;
1242
1243 dataleft = len;
1244 while (dataleft) {
1245 if (dataleft >= sk->sk_rcvbuf / 4)
1246 size = sk->sk_rcvbuf / 4;
1247 else
1248 size = dataleft;
1249
1250 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
1251 if (!nskb)
1252 return -ENOMEM;
1253
1254 /* copy target class to control buffer of new skb */
1255 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
1256
1257 /* copy data fragment */
1258 memcpy(nskb->data, skb->data + copied, size);
1259 copied += size;
1260 dataleft -= size;
1261
1262 skb_reset_transport_header(nskb);
1263 skb_reset_network_header(nskb);
1264 nskb->len = size;
1265
1266 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
1267 }
1268
1269 return 0;
1270}
1271
1272/* iucv_process_message() - Receive a single outstanding IUCV message 1234/* iucv_process_message() - Receive a single outstanding IUCV message
1273 * 1235 *
1274 * Locking: must be called with message_q.lock held 1236 * Locking: must be called with message_q.lock held
@@ -1300,24 +1262,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1300 kfree_skb(skb); 1262 kfree_skb(skb);
1301 return; 1263 return;
1302 } 1264 }
1303 /* we need to fragment iucv messages for SOCK_STREAM only; 1265 skb_reset_transport_header(skb);
1304 * for SOCK_SEQPACKET, it is only relevant if we support 1266 skb_reset_network_header(skb);
1305 * record segmentation using MSG_EOR (see also recvmsg()) */ 1267 skb->len = len;
1306 if (sk->sk_type == SOCK_STREAM &&
1307 skb->truesize >= sk->sk_rcvbuf / 4) {
1308 rc = iucv_fragment_skb(sk, skb, len);
1309 kfree_skb(skb);
1310 skb = NULL;
1311 if (rc) {
1312 pr_iucv->path_sever(path, NULL);
1313 return;
1314 }
1315 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
1316 } else {
1317 skb_reset_transport_header(skb);
1318 skb_reset_network_header(skb);
1319 skb->len = len;
1320 }
1321 } 1268 }
1322 1269
1323 IUCV_SKB_CB(skb)->offset = 0; 1270 IUCV_SKB_CB(skb)->offset = 0;