diff options
author | Jon Paul Maloy <jon.maloy@ericsson.com> | 2015-07-30 18:24:24 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-07-30 20:25:14 -0400 |
commit | 23d8335d786472021b5c733f228c7074208dcfa0 (patch) | |
tree | c7bcdc08b5567835c5a38f0d93cc46c7e8a55fea /net/tipc/msg.h | |
parent | 598411d70f85dcf5b5c6c2369cc48637c251b656 (diff) |
tipc: remove implicit message delivery in node_unlock()
After the most recent changes, all access calls to a link which
may entail addition of messages to the link's input queue are
followed by an explicit call to tipc_sk_rcv(), using a reference
to the correct queue.
This means that the potentially hazardous implicit delivery, using
tipc_node_unlock() in combination with a binary flag and a cached
queue pointer, now has become redundant.
This commit removes this implicit delivery mechanism both for regular
data messages and for binding table update messages.
Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/msg.h')
-rw-r--r-- | net/tipc/msg.h | 22 |
1 file changed, 0 insertions, 22 deletions
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 53d98ef78650..a82c5848d4bc 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -862,28 +862,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list, | |||
862 | return skb; | 862 | return skb; |
863 | } | 863 | } |
864 | 864 | ||
865 | /* tipc_skb_queue_tail(): add buffer to tail of list; | ||
866 | * @list: list to be appended to | ||
867 | * @skb: buffer to append. Always appended | ||
868 | * @dport: the destination port of the buffer | ||
869 | * returns true if dport differs from previous destination | ||
870 | */ | ||
871 | static inline bool tipc_skb_queue_tail(struct sk_buff_head *list, | ||
872 | struct sk_buff *skb, u32 dport) | ||
873 | { | ||
874 | struct sk_buff *_skb = NULL; | ||
875 | bool rv = false; | ||
876 | |||
877 | spin_lock_bh(&list->lock); | ||
878 | _skb = skb_peek_tail(list); | ||
879 | if (!_skb || (msg_destport(buf_msg(_skb)) != dport) || | ||
880 | (skb_queue_len(list) > 32)) | ||
881 | rv = true; | ||
882 | __skb_queue_tail(list, skb); | ||
883 | spin_unlock_bh(&list->lock); | ||
884 | return rv; | ||
885 | } | ||
886 | |||
887 | /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number | 865 | /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number |
888 | * @list: list to be appended to | 866 | * @list: list to be appended to |
889 | * @skb: buffer to add | 867 | * @skb: buffer to add |