author		Ursula Braun <braunu@de.ibm.com>	2007-10-08 05:03:31 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:54:51 -0400
commit		f0703c80e5156406ad947cb67fe277725b48080f (patch)
tree		428f16c8e188763cfa2cadc26708bf785a8958b8 /net/iucv/af_iucv.c
parent		57f20448032158ad00b1e74f479515c689998be9 (diff)
[AF_IUCV]: postpone receipt of iucv packets
AF_IUCV socket programs may waste Linux storage because af_iucv allocates an skb and receives the message immediately whenever the receive callback routine is posted. Receiving a message is now postponed if data from previous callbacks has not yet been transferred to the receiving socket program; instead, a message handle is saved in a message queue as a reminder. Once pending data has been handed over to the receiving socket program, the message queue is checked for saved entries, and, where applicable, skbs are allocated and the corresponding messages received.

Signed-off-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
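Editor's note: the bookkeeping types this change relies on (the per-socket message queue and its entries) are part of the companion header change and do not appear in the diff below, since the diffstat is limited to af_iucv.c. The following is a minimal sketch inferred from how the new code uses them (iucv->message_q.list, iucv->message_q.lock, save_msg->path, save_msg->msg, save_msg->list), not the authoritative definition; the real declarations presumably live in include/net/iucv/af_iucv.h.

/*
 * Sketch only: field names and types inferred from the usage in the
 * diff below; the actual definitions are in the companion header change.
 */
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/iucv/iucv.h>	/* struct iucv_path, struct iucv_message */

struct sock_msg_q {
	struct iucv_path	*path;	/* path the postponed message arrived on */
	struct iucv_message	msg;	/* saved message handle, acts as the reminder */
	struct list_head	list;	/* entry in iucv_sk(sk)->message_q.list */
	spinlock_t		lock;	/* used on the per-socket anchor below */
};

struct iucv_sock {
	/* ... existing fields (send_skb_q, backlog_skb_q, send_tag, ...) ... */
	struct sock_msg_q	message_q;	/* anchor for postponed IUCV messages */
};

With this layout the receive callback only has to remember (path, msg) when it cannot accept the data right away; the actual iucv_message_receive() call and skb allocation are deferred to iucv_process_message_q(), which iucv_sock_recvmsg() drives once the backlog queue has drained.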
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--	net/iucv/af_iucv.c	211
1 file changed, 125 insertions(+), 86 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 65358722c397..43e01c8d382b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -224,6 +224,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
 	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
 	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
+	spin_lock_init(&iucv_sk(sk)->message_q.lock);
 	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
 	iucv_sk(sk)->send_tag = 0;
 
@@ -673,6 +675,90 @@ out:
 	return err;
 }
 
+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+{
+	int dataleft, size, copied = 0;
+	struct sk_buff *nskb;
+
+	dataleft = len;
+	while (dataleft) {
+		if (dataleft >= sk->sk_rcvbuf / 4)
+			size = sk->sk_rcvbuf / 4;
+		else
+			size = dataleft;
+
+		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+		if (!nskb)
+			return -ENOMEM;
+
+		memcpy(nskb->data, skb->data + copied, size);
+		copied += size;
+		dataleft -= size;
+
+		skb_reset_transport_header(nskb);
+		skb_reset_network_header(nskb);
+		nskb->len = size;
+
+		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+	}
+
+	return 0;
+}
+
+static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
+				 struct iucv_path *path,
+				 struct iucv_message *msg)
+{
+	int rc;
+
+	if (msg->flags & IPRMDATA) {
+		skb->data = NULL;
+		skb->len = 0;
+	} else {
+		rc = iucv_message_receive(path, msg, 0, skb->data,
+					  msg->length, NULL);
+		if (rc) {
+			kfree_skb(skb);
+			return;
+		}
+		if (skb->truesize >= sk->sk_rcvbuf / 4) {
+			rc = iucv_fragment_skb(sk, skb, msg->length);
+			kfree_skb(skb);
+			skb = NULL;
+			if (rc) {
+				iucv_path_sever(path, NULL);
+				return;
+			}
+			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+		} else {
+			skb_reset_transport_header(skb);
+			skb_reset_network_header(skb);
+			skb->len = msg->length;
+		}
+	}
+
+	if (sock_queue_rcv_skb(sk, skb))
+		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+}
+
+static void iucv_process_message_q(struct sock *sk)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+	struct sk_buff *skb;
+	struct sock_msg_q *p, *n;
+
+	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
+		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
+		if (!skb)
+			break;
+		iucv_process_message(sk, skb, p->path, &p->msg);
+		list_del(&p->list);
+		kfree(p);
+		if (!skb_queue_empty(&iucv->backlog_skb_q))
+			break;
+	}
+}
+
 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 			     struct msghdr *msg, size_t len, int flags)
 {
@@ -684,8 +770,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int err = 0;
 
 	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
 	    skb_queue_empty(&iucv->backlog_skb_q) &&
-	    skb_queue_empty(&sk->sk_receive_queue))
+	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    list_empty(&iucv->message_q.list))
 		return 0;
 
 	if (flags & (MSG_OOB))
@@ -724,16 +811,23 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		kfree_skb(skb);
 
 		/* Queue backlog skbs */
-		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
 			if (sock_queue_rcv_skb(sk, rskb)) {
-				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
+				skb_queue_head(&iucv->backlog_skb_q,
 					       rskb);
 				break;
 			} else {
-				rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+				rskb = skb_dequeue(&iucv->backlog_skb_q);
 			}
 		}
+		if (skb_queue_empty(&iucv->backlog_skb_q)) {
+			spin_lock_bh(&iucv->message_q.lock);
+			if (!list_empty(&iucv->message_q.list))
+				iucv_process_message_q(sk);
+			spin_unlock_bh(&iucv->message_q.lock);
+		}
+
 	} else
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
@@ -975,99 +1069,44 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 	sk->sk_state_change(sk);
 }
 
-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
-			     struct sk_buff_head *fragmented_skb_q)
-{
-	int dataleft, size, copied = 0;
-	struct sk_buff *nskb;
-
-	dataleft = len;
-	while (dataleft) {
-		if (dataleft >= sk->sk_rcvbuf / 4)
-			size = sk->sk_rcvbuf / 4;
-		else
-			size = dataleft;
-
-		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-		if (!nskb)
-			return -ENOMEM;
-
-		memcpy(nskb->data, skb->data + copied, size);
-		copied += size;
-		dataleft -= size;
-
-		skb_reset_transport_header(nskb);
-		skb_reset_network_header(nskb);
-		nskb->len = size;
-
-		skb_queue_tail(fragmented_skb_q, nskb);
-	}
-
-	return 0;
-}
-
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
 	struct sock *sk = path->private;
 	struct iucv_sock *iucv = iucv_sk(sk);
-	struct sk_buff *skb, *fskb;
-	struct sk_buff_head fragmented_skb_q;
-	int rc;
-
-	skb_queue_head_init(&fragmented_skb_q);
+	struct sk_buff *skb;
+	struct sock_msg_q *save_msg;
+	int len;
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		return;
 
+	if (!list_empty(&iucv->message_q.list) ||
+	    !skb_queue_empty(&iucv->backlog_skb_q))
+		goto save_message;
+
+	len = atomic_read(&sk->sk_rmem_alloc);
+	len += msg->length + sizeof(struct sk_buff);
+	if (len > sk->sk_rcvbuf)
+		goto save_message;
+
 	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
-	if (!skb) {
-		iucv_path_sever(path, NULL);
-		return;
-	}
+	if (!skb)
+		goto save_message;
 
-	if (msg->flags & IPRMDATA) {
-		skb->data = NULL;
-		skb->len = 0;
-	} else {
-		rc = iucv_message_receive(path, msg, 0, skb->data,
-					  msg->length, NULL);
-		if (rc) {
-			kfree_skb(skb);
-			return;
-		}
-		if (skb->truesize >= sk->sk_rcvbuf / 4) {
-			rc = iucv_fragment_skb(sk, skb, msg->length,
-					       &fragmented_skb_q);
-			kfree_skb(skb);
-			skb = NULL;
-			if (rc) {
-				iucv_path_sever(path, NULL);
-				return;
-			}
-		} else {
-			skb_reset_transport_header(skb);
-			skb_reset_network_header(skb);
-			skb->len = msg->length;
-		}
-	}
-	/* Queue the fragmented skb */
-	fskb = skb_dequeue(&fragmented_skb_q);
-	while (fskb) {
-		if (!skb_queue_empty(&iucv->backlog_skb_q))
-			skb_queue_tail(&iucv->backlog_skb_q, fskb);
-		else if (sock_queue_rcv_skb(sk, fskb))
-			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
-		fskb = skb_dequeue(&fragmented_skb_q);
-	}
+	spin_lock(&iucv->message_q.lock);
+	iucv_process_message(sk, skb, path, msg);
+	spin_unlock(&iucv->message_q.lock);
 
-	/* Queue the original skb if it exists (was not fragmented) */
-	if (skb) {
-		if (!skb_queue_empty(&iucv->backlog_skb_q))
-			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-		else if (sock_queue_rcv_skb(sk, skb))
-			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-	}
+	return;
+
+save_message:
+	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
+	save_msg->path = path;
+	save_msg->msg = *msg;
 
+	spin_lock(&iucv->message_q.lock);
+	list_add_tail(&save_msg->list, &iucv->message_q.list);
+	spin_unlock(&iucv->message_q.lock);
 }
 
 static void iucv_callback_txdone(struct iucv_path *path,