author:    Hendrik Brueckner <brueckner@linux.vnet.ibm.com>  2009-04-21 19:26:24 -0400
committer: David S. Miller <davem@davemloft.net>  2009-04-23 07:04:35 -0400
commit:    44b1e6b5f9a93cc2ba024e09cf137d5f1b5f8426 (patch)
tree:      1daabfda256f7b5d7e6ccc0e7200ce7888f0a2b9 /net/iucv/af_iucv.c
parent:    b8942e3b6c4b35dda5e8ca75aec5e2f027fe39a9 (diff)
af_iucv: Modify iucv msg target class using control msghdr
Allow 'classification' of socket data that is sent or received over an af_iucv
socket. For the classification of data, the target class of a (native) IUCV
message is used.

This patch provides the cmsg interface for iucv_sock_recvmsg() and
iucv_sock_sendmsg(). Applications can use the msg_control field of struct
msghdr to set or get the target class as a "socket control message"
(SCM/CMSG).

Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
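As a usage illustration, here is a minimal user-space sketch of the send side
described above: the target class is passed to sendmsg() as ancillary data.
The fallback values for SOL_IUCV and SCM_IUCV_TRGCLS are assumptions made for
this sketch only; an application should rely on the definitions shipped with
its installed kernel headers.

/* sketch: tag outgoing data on a connected AF_IUCV socket with a target class */
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_IUCV
#define SOL_IUCV	277		/* assumed; see include/linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS	0x0001		/* assumed; take from kernel headers */
#endif

static ssize_t iucv_send_trgcls(int fd, const void *buf, size_t len,
				uint32_t trgcls)
{
	struct iovec iov = { .iov_base = (void *) buf, .iov_len = len };
	char control[CMSG_SPACE(sizeof(trgcls))];
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	/* exactly one SCM_IUCV_TRGCLS header; duplicates get -EINVAL */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_IUCV;
	cmsg->cmsg_type  = SCM_IUCV_TRGCLS;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(trgcls));
	memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));

	return sendmsg(fd, &msg, 0);
}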
Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--   net/iucv/af_iucv.c   79
1 file changed, 75 insertions(+), 4 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 5fc077ee5831..47c5c8d3703f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -45,6 +45,15 @@ static struct proto iucv_proto = {
 static const u8 iprm_shutdown[8] =
 	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
 
+#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
+
+/* macros to set/get socket control buffer at correct offset */
+#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
+#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
+#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
+#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
+
+
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
 
@@ -698,6 +707,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct iucv_sock *iucv = iucv_sk(sk);
 	struct sk_buff *skb;
 	struct iucv_message txmsg;
+	struct cmsghdr *cmsg;
+	int cmsg_done;
 	char user_id[9];
 	char appl_id[9];
 	int err;
@@ -717,6 +728,48 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	if (sk->sk_state == IUCV_CONNECTED) {
+		/* initialize defaults */
+		cmsg_done   = 0;	/* check for duplicate headers */
+		txmsg.class = 0;
+
+		/* iterate over control messages */
+		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+			cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+			if (!CMSG_OK(msg, cmsg)) {
+				err = -EINVAL;
+				goto out;
+			}
+
+			if (cmsg->cmsg_level != SOL_IUCV)
+				continue;
+
+			if (cmsg->cmsg_type & cmsg_done) {
+				err = -EINVAL;
+				goto out;
+			}
+			cmsg_done |= cmsg->cmsg_type;
+
+			switch (cmsg->cmsg_type) {
+			case SCM_IUCV_TRGCLS:
+				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+					err = -EINVAL;
+					goto out;
+				}
+
+				/* set iucv message target class */
+				memcpy(&txmsg.class,
+					(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+				break;
+
+			default:
+				err = -EINVAL;
+				goto out;
+				break;
+			}
+		}
+
 		if (!(skb = sock_alloc_send_skb(sk, len,
 					msg->msg_flags & MSG_DONTWAIT,
 					&err)))
@@ -727,10 +780,9 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 			goto fail;
 		}
 
-		txmsg.class = 0;
-		memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
+		/* increment and save iucv message tag for msg_completion cbk */
 		txmsg.tag = iucv->send_tag++;
-		memcpy(skb->cb, &txmsg.tag, 4);
+		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
 		skb_queue_tail(&iucv->send_skb_q, skb);
 
 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -801,6 +853,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 		if (!nskb)
 			return -ENOMEM;
 
+		/* copy target class to control buffer of new skb */
+		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+
+		/* copy data fragment */
 		memcpy(nskb->data, skb->data + copied, size);
 		copied += size;
 		dataleft -= size;
@@ -824,6 +880,10 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
 	len = iucv_msg_length(msg);
 
+	/* store msg target class in the second 4 bytes of skb ctrl buffer */
+	/* Note: the first 4 bytes are reserved for msg tag */
+	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+
 	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
 	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
 		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
@@ -915,6 +975,17 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	len -= copied;
 
+	/* create control message to store iucv msg target class:
+	 * get the trgcls from the control buffer of the skb due to
+	 * fragmentation of original iucv message. */
+	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
+			CB_TRGCLS_LEN, CB_TRGCLS(skb));
+	if (err) {
+		if (!(flags & MSG_PEEK))
+			skb_queue_head(&sk->sk_receive_queue, skb);
+		return err;
+	}
+
 	/* Mark read part of skb as used */
 	if (!(flags & MSG_PEEK)) {
 		skb_pull(skb, copied);
@@ -1316,7 +1387,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (!memcmp(&msg->tag, list_skb->cb, 4)) {
+		if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
 			this = list_skb;
 			break;
 		}
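For completeness, a matching receive-side sketch: the put_cmsg() call added to
iucv_sock_recvmsg() above delivers the stored target class as the same
SCM_IUCV_TRGCLS ancillary message. This sketch reuses the includes and the
assumed SOL_IUCV/SCM_IUCV_TRGCLS fallback definitions from the send-side
sketch after the commit message.

/* sketch: read data plus its IUCV target class from a connected socket */
static ssize_t iucv_recv_trgcls(int fd, void *buf, size_t len,
				uint32_t *trgcls)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	char control[CMSG_SPACE(sizeof(*trgcls))];
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return n;

	*trgcls = 0;	/* default when no ancillary data was delivered */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_IUCV &&
		    cmsg->cmsg_type == SCM_IUCV_TRGCLS) {
			memcpy(trgcls, CMSG_DATA(cmsg), sizeof(*trgcls));
			break;
		}

	return n;
}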