author     Jon Paul Maloy <jon.maloy@ericsson.com>    2014-07-25 14:48:09 -0400
committer  David S. Miller <davem@davemloft.net>      2014-07-28 21:34:01 -0400
commit     13e9b9972fa0f34059e737ae215a26e43966b46f (patch)
tree       c028b77384eec9c472b2a11bd41c3fa70d65e93a /net/tipc/msg.c
parent     3fd0202a0dfe07d255c5462d7d0e27673ca10430 (diff)
tipc: make tipc_buf_append() more robust
As per a comment from David Miller, we try to make the buffer reassembly
function more resilient to user errors than it is today.
- We check that the "*buf" parameter is always set, since this is
  mandatory input.
- We ensure that (*buf)->next is always set to NULL before linking in
  the buffer, instead of relying on the caller to have done this.
- We ensure that the "tail" pointer in the head buffer's control
  block is initialized to NULL when the first fragment arrives.
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
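
For context, a minimal caller-side sketch of the contract this patch hardens. It is not the actual TIPC receive path: the wrapper name and the deliver_msg() hook are hypothetical, and it assumes that a nonzero return from tipc_buf_append() means the last fragment arrived and the complete message is handed back through the fragment pointer, while 0 means the fragment was absorbed into the reassembly head or dropped on error.

#include <linux/skbuff.h>
#include "msg.h"			/* tipc_buf_append() */

/*
 * Hypothetical caller sketch -- not the real TIPC call site.
 * "reasm_head" persists across calls (e.g. per link); "skb" is the
 * newly received fragment.
 */
static void example_frag_rcv(struct sk_buff **reasm_head, struct sk_buff *skb)
{
	struct sk_buff *frag = skb;

	if (tipc_buf_append(reasm_head, &frag)) {
		/* Last fragment seen: frag now holds the full message. */
		deliver_msg(frag);	/* hypothetical delivery hook */
		return;
	}
	/*
	 * The fragment was linked into *reasm_head, or both buffers were
	 * freed on error; either way there is nothing left to free here.
	 */
}

With the checks added below, passing a NULL fragment or a misplaced first fragment ends up in the common error path rather than dereferencing a bad pointer or corrupting the fragment list.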
Diffstat (limited to 'net/tipc/msg.c')
-rw-r--r--	net/tipc/msg.c	29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6f45d029933..9680be6d388a 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,27 +72,38 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	struct sk_buff *head = *headbuf;
 	struct sk_buff *frag = *buf;
 	struct sk_buff *tail;
-	struct tipc_msg *msg = buf_msg(frag);
-	u32 fragid = msg_type(msg);
-	bool headstolen;
+	struct tipc_msg *msg;
+	u32 fragid;
 	int delta;
+	bool headstolen;
 
+	if (!frag)
+		goto err;
+
+	msg = buf_msg(frag);
+	fragid = msg_type(msg);
+	frag->next = NULL;
 	skb_pull(frag, msg_hdr_sz(msg));
 
 	if (fragid == FIRST_FRAGMENT) {
-		if (head || skb_unclone(frag, GFP_ATOMIC))
-			goto out_free;
+		if (unlikely(head))
+			goto err;
+		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+			goto err;
 		head = *headbuf = frag;
 		skb_frag_list_init(head);
+		TIPC_SKB_CB(head)->tail = NULL;
 		*buf = NULL;
 		return 0;
 	}
+
 	if (!head)
-		goto out_free;
-	tail = TIPC_SKB_CB(head)->tail;
+		goto err;
+
 	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
 		kfree_skb_partial(frag, headstolen);
 	} else {
+		tail = TIPC_SKB_CB(head)->tail;
 		if (!skb_has_frag_list(head))
 			skb_shinfo(head)->frag_list = frag;
 		else
@@ -102,6 +113,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 		head->len += frag->len;
 		TIPC_SKB_CB(head)->tail = frag;
 	}
+
 	if (fragid == LAST_FRAGMENT) {
 		*buf = head;
 		TIPC_SKB_CB(head)->tail = NULL;
@@ -110,7 +122,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	}
 	*buf = NULL;
 	return 0;
-out_free:
+
+err:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
 	kfree_skb(*headbuf);
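
A side note on the bookkeeping in the else-branch above: the cached "tail" pointer in the head buffer's control block is now read only when the fragment is actually linked onto the frag_list, which keeps the append O(1) without walking the list, and it is initialized to NULL when the first fragment arrives. A stripped-down illustration of that pattern follows; the helper name is made up and the accounting lines mirror the unchanged context around the hunks, so this is a sketch, not a drop-in replacement for the patched code.

#include <linux/skbuff.h>
#include "core.h"			/* TIPC_SKB_CB() */

/* Illustration only -- mirrors the frag_list bookkeeping in the hunk above. */
static void example_frag_list_append(struct sk_buff *head, struct sk_buff *frag)
{
	struct sk_buff *tail = TIPC_SKB_CB(head)->tail;

	frag->next = NULL;			/* defensive, as the patch now guarantees */
	if (!skb_has_frag_list(head))
		skb_shinfo(head)->frag_list = frag;	/* first fragment after the head */
	else
		tail->next = frag;		/* O(1) append via the cached tail */

	head->truesize += frag->truesize;	/* keep skb accounting consistent */
	head->data_len += frag->len;
	head->len += frag->len;
	TIPC_SKB_CB(head)->tail = frag;		/* remember the new tail */
}

The single err: label that replaces out_free: is reached both for malformed input (a NULL fragment, a misplaced or duplicate first fragment) and for skb_unclone() failure; in every case both *buf and *headbuf are freed, so callers do not have to clean up partially built reassembly state themselves.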