Diffstat (limited to 'net/tipc/msg.c')

 -rw-r--r--  net/tipc/msg.c | 381
 1 file changed, 327 insertions, 54 deletions
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 0a37a472c29f..9680be6d388a 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -36,21 +36,16 @@
 
 #include "core.h"
 #include "msg.h"
+#include "addr.h"
+#include "name_table.h"
 
-u32 tipc_msg_tot_importance(struct tipc_msg *m)
+#define MAX_FORWARD_SIZE 1024
+
+static unsigned int align(unsigned int i)
 {
-	if (likely(msg_isdata(m))) {
-		if (likely(msg_orignode(m) == tipc_own_addr))
-			return msg_importance(m);
-		return msg_importance(m) + 4;
-	}
-	if ((msg_user(m) == MSG_FRAGMENTER) &&
-	    (msg_type(m) == FIRST_FRAGMENT))
-		return msg_importance(msg_get_wrapped(m));
-	return msg_importance(m);
+	return (i + 3) & ~3u;
 }
 
-
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
 		   u32 destnode)
 {
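The align() helper added above rounds a length up to the next multiple of four; the message bundling code introduced further down relies on it to place messages on 4-byte boundaries inside a bundle. A minimal standalone check of the same expression (plain userspace C, not part of the patch):

#include <assert.h>

/* Same computation as the patch's align(): round up to a multiple of 4. */
static unsigned int align_up4(unsigned int i)
{
	return (i + 3) & ~3u;
}

int main(void)
{
	assert(align_up4(0) == 0);
	assert(align_up4(21) == 24);	/* 21-byte message padded to 24 */
	assert(align_up4(24) == 24);	/* already aligned, no padding  */
	return 0;
}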
@@ -65,41 +60,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
 	msg_set_destnode(m, destnode);
 }
 
-/**
- * tipc_msg_build - create message using specified header and data
- *
- * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
- *
- * Returns message data size or errno
- */
-int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-		   unsigned int len, int max_size, struct sk_buff **buf)
-{
-	int dsz, sz, hsz;
-	unsigned char *to;
-
-	dsz = len;
-	hsz = msg_hdr_sz(hdr);
-	sz = hsz + dsz;
-	msg_set_size(hdr, sz);
-	if (unlikely(sz > max_size)) {
-		*buf = NULL;
-		return dsz;
-	}
-
-	*buf = tipc_buf_acquire(sz);
-	if (!(*buf))
-		return -ENOMEM;
-	skb_copy_to_linear_data(*buf, hdr, hsz);
-	to = (*buf)->data + hsz;
-	if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
-		kfree_skb(*buf);
-		*buf = NULL;
-		return -EFAULT;
-	}
-	return dsz;
-}
-
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
  * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
  * out: set when successful non-complete reassembly, otherwise NULL
@@ -112,27 +72,38 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	struct sk_buff *head = *headbuf;
 	struct sk_buff *frag = *buf;
 	struct sk_buff *tail;
-	struct tipc_msg *msg = buf_msg(frag);
-	u32 fragid = msg_type(msg);
-	bool headstolen;
+	struct tipc_msg *msg;
+	u32 fragid;
 	int delta;
+	bool headstolen;
 
+	if (!frag)
+		goto err;
+
+	msg = buf_msg(frag);
+	fragid = msg_type(msg);
+	frag->next = NULL;
 	skb_pull(frag, msg_hdr_sz(msg));
 
 	if (fragid == FIRST_FRAGMENT) {
-		if (head || skb_unclone(frag, GFP_ATOMIC))
-			goto out_free;
+		if (unlikely(head))
+			goto err;
+		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+			goto err;
 		head = *headbuf = frag;
 		skb_frag_list_init(head);
+		TIPC_SKB_CB(head)->tail = NULL;
 		*buf = NULL;
 		return 0;
 	}
+
 	if (!head)
-		goto out_free;
-	tail = TIPC_SKB_CB(head)->tail;
+		goto err;
+
 	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
 		kfree_skb_partial(frag, headstolen);
 	} else {
+		tail = TIPC_SKB_CB(head)->tail;
 		if (!skb_has_frag_list(head))
 			skb_shinfo(head)->frag_list = frag;
 		else
@@ -142,6 +113,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 		head->len += frag->len;
 		TIPC_SKB_CB(head)->tail = frag;
 	}
+
 	if (fragid == LAST_FRAGMENT) {
 		*buf = head;
 		TIPC_SKB_CB(head)->tail = NULL;
@@ -150,10 +122,311 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	}
 	*buf = NULL;
 	return 0;
-out_free:
+
+err:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
 	kfree_skb(*headbuf);
 	*buf = *headbuf = NULL;
 	return 0;
 }
+
+
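With the reworked error handling, tipc_buf_append() keeps a simple contract: *headbuf is NULL before the first fragment, holds the partially rebuilt message while reassembly is in progress, and on the last fragment the completed message is handed back through *buf (the reassembly loop in tipc_msg_reassemble() below relies on a nonzero return at that point); on any error both pointers are freed and reset to NULL. A toy model of that calling pattern (ordinary C with ints standing in for sk_buffs, purely illustrative):

#include <stdio.h>

enum { FIRST_FRAGMENT, FRAGMENT, LAST_FRAGMENT };

struct toy_frag { int type; int len; };

/* Toy stand-in for tipc_buf_append(): accumulate fragment lengths in
 * *head and report completion by returning nonzero, as the real
 * function does when it sees LAST_FRAGMENT.
 */
static int toy_append(int *head, int *out, struct toy_frag f)
{
	if (f.type == FIRST_FRAGMENT)
		*head = 0;
	*head += f.len;
	if (f.type != LAST_FRAGMENT)
		return 0;	/* fragment absorbed, message not complete */
	*out = *head;
	return 1;		/* *out now holds the reassembled length */
}

int main(void)
{
	struct toy_frag frags[] = {
		{ FIRST_FRAGMENT, 1436 }, { FRAGMENT, 1460 }, { LAST_FRAGMENT, 104 },
	};
	int head = 0, done = 0;

	for (int i = 0; i < 3; i++)
		if (toy_append(&head, &done, frags[i]))
			printf("reassembled %d data bytes\n", done);
	return 0;
}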
+/**
+ * tipc_msg_build - create buffer chain containing specified header and data
+ * @mhdr: Message header, to be prepended to data
+ * @iov: User data
+ * @offset: Posision in iov to start copying from
+ * @dsz: Total length of user data
+ * @pktmax: Max packet size that can be used
+ * @chain: Buffer or chain of buffers to be returned to caller
+ * Returns message data size or errno: -ENOMEM, -EFAULT
+ */
+int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
+		   int offset, int dsz, int pktmax , struct sk_buff **chain)
+{
+	int mhsz = msg_hdr_sz(mhdr);
+	int msz = mhsz + dsz;
+	int pktno = 1;
+	int pktsz;
+	int pktrem = pktmax;
+	int drem = dsz;
+	struct tipc_msg pkthdr;
+	struct sk_buff *buf, *prev;
+	char *pktpos;
+	int rc;
+
+	msg_set_size(mhdr, msz);
+
+	/* No fragmentation needed? */
+	if (likely(msz <= pktmax)) {
+		buf = tipc_buf_acquire(msz);
+		*chain = buf;
+		if (unlikely(!buf))
+			return -ENOMEM;
+		skb_copy_to_linear_data(buf, mhdr, mhsz);
+		pktpos = buf->data + mhsz;
+		if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
+			return dsz;
+		rc = -EFAULT;
+		goto error;
+	}
+
+	/* Prepare reusable fragment header */
+	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+		      INT_H_SIZE, msg_destnode(mhdr));
+	msg_set_size(&pkthdr, pktmax);
+	msg_set_fragm_no(&pkthdr, pktno);
+
+	/* Prepare first fragment */
+	*chain = buf = tipc_buf_acquire(pktmax);
+	if (!buf)
+		return -ENOMEM;
+	pktpos = buf->data;
+	skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+	pktpos += INT_H_SIZE;
+	pktrem -= INT_H_SIZE;
+	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+	pktpos += mhsz;
+	pktrem -= mhsz;
+
+	do {
+		if (drem < pktrem)
+			pktrem = drem;
+
+		if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
+			rc = -EFAULT;
+			goto error;
+		}
+		drem -= pktrem;
+		offset += pktrem;
+
+		if (!drem)
+			break;
+
+		/* Prepare new fragment: */
+		if (drem < (pktmax - INT_H_SIZE))
+			pktsz = drem + INT_H_SIZE;
+		else
+			pktsz = pktmax;
+		prev = buf;
+		buf = tipc_buf_acquire(pktsz);
+		if (!buf) {
+			rc = -ENOMEM;
+			goto error;
+		}
+		prev->next = buf;
+		msg_set_type(&pkthdr, FRAGMENT);
+		msg_set_size(&pkthdr, pktsz);
+		msg_set_fragm_no(&pkthdr, ++pktno);
+		skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+		pktpos = buf->data + INT_H_SIZE;
+		pktrem = pktsz - INT_H_SIZE;
+
+	} while (1);
+
+	msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+	return dsz;
+error:
+	kfree_skb_list(*chain);
+	*chain = NULL;
+	return rc;
+}
+
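A note on the sizing logic above: each fragment buffer is pktmax bytes except possibly the last, every fragment starts with an INT_H_SIZE fragment header, and the first fragment additionally carries the original message header, so the first packet holds pktmax - INT_H_SIZE - mhsz bytes of user data and each later packet pktmax - INT_H_SIZE. A standalone helper that predicts the resulting chain length under those rules (illustrative only; the 40-byte value for INT_H_SIZE is an assumption taken from msg.h):

#include <stdio.h>

#define INT_H_SIZE 40	/* fragment header size, assumed from msg.h */

/* Predict how many packets tipc_msg_build() will emit for a message
 * with header size mhsz and user-data size dsz, given link MTU pktmax.
 * Mirrors the loop in the patch: the first fragment also carries the
 * original message header, later fragments only the fragment header.
 */
static int predicted_fragments(int mhsz, int dsz, int pktmax)
{
	int first_cap = pktmax - INT_H_SIZE - mhsz;
	int later_cap = pktmax - INT_H_SIZE;
	int n = 1;

	if (mhsz + dsz <= pktmax)
		return 1;		/* no fragmentation needed */
	dsz -= first_cap;
	while (dsz > 0) {
		n++;
		dsz -= later_cap;
	}
	return n;
}

int main(void)
{
	/* e.g. a 66000-byte payload with a 24-byte header over a 1500-byte MTU */
	printf("%d fragments\n", predicted_fragments(24, 66000, 1500));
	return 0;
}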
+/**
+ * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
+ * @bbuf: the existing buffer ("bundle")
+ * @buf: buffer to be appended
+ * @mtu: max allowable size for the bundle buffer
+ * Consumes buffer if successful
+ * Returns true if bundling could be performed, otherwise false
+ */
+bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
+{
+	struct tipc_msg *bmsg = buf_msg(bbuf);
+	struct tipc_msg *msg = buf_msg(buf);
+	unsigned int bsz = msg_size(bmsg);
+	unsigned int msz = msg_size(msg);
+	u32 start = align(bsz);
+	u32 max = mtu - INT_H_SIZE;
+	u32 pad = start - bsz;
+
+	if (likely(msg_user(msg) == MSG_FRAGMENTER))
+		return false;
+	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+		return false;
+	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+		return false;
+	if (likely(msg_user(bmsg) != MSG_BUNDLER))
+		return false;
+	if (likely(msg_type(bmsg) != BUNDLE_OPEN))
+		return false;
+	if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
+		return false;
+	if (unlikely(max < (start + msz)))
+		return false;
+
+	skb_put(bbuf, pad + msz);
+	skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
+	msg_set_size(bmsg, start + msz);
+	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+	bbuf->next = buf->next;
+	kfree_skb(buf);
+	return true;
+}
+
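The fit test in tipc_msg_bundle() combines two conditions: the appended message must start at a 4-byte aligned offset (start = align(bsz), giving pad bytes of padding) and must end within the usable bundle space, mtu - INT_H_SIZE; the skb_tailroom() check then confirms the buffer really has that much room left. A standalone version of just the size arithmetic (illustrative; the 40-byte INT_H_SIZE is again an assumption from msg.h):

#include <stdbool.h>
#include <stdio.h>

#define INT_H_SIZE 40	/* bundle header size, assumed from msg.h */

/* Would a msz-byte message still fit into a bundle that currently
 * holds bsz bytes, on a link with the given MTU?  Mirrors the
 * start/pad/max arithmetic in tipc_msg_bundle().
 */
static bool bundle_fits(unsigned int bsz, unsigned int msz, unsigned int mtu)
{
	unsigned int start = (bsz + 3) & ~3u;	/* align(bsz) */
	unsigned int max = mtu - INT_H_SIZE;

	return start + msz <= max;
}

int main(void)
{
	printf("%d\n", bundle_fits(1001, 300, 1500));	/* 1: 1004 + 300 <= 1460 */
	printf("%d\n", bundle_fits(1200, 300, 1500));	/* 0: 1200 + 300 >  1460 */
	return 0;
}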
+/**
+ * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
+ * @buf: buffer to be appended and replaced
+ * @mtu: max allowable size for the bundle buffer, inclusive header
+ * @dnode: destination node for message. (Not always present in header)
+ * Replaces buffer if successful
+ * Returns true if sucess, otherwise false
+ */
+bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
+{
+	struct sk_buff *bbuf;
+	struct tipc_msg *bmsg;
+	struct tipc_msg *msg = buf_msg(*buf);
+	u32 msz = msg_size(msg);
+	u32 max = mtu - INT_H_SIZE;
+
+	if (msg_user(msg) == MSG_FRAGMENTER)
+		return false;
+	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+		return false;
+	if (msg_user(msg) == BCAST_PROTOCOL)
+		return false;
+	if (msz > (max / 2))
+		return false;
+
+	bbuf = tipc_buf_acquire(max);
+	if (!bbuf)
+		return false;
+
+	skb_trim(bbuf, INT_H_SIZE);
+	bmsg = buf_msg(bbuf);
+	tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
+	msg_set_seqno(bmsg, msg_seqno(msg));
+	msg_set_ack(bmsg, msg_ack(msg));
+	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+	bbuf->next = (*buf)->next;
+	tipc_msg_bundle(bbuf, *buf, mtu);
+	*buf = bbuf;
+	return true;
+}
+
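The msz > (max / 2) test above is the heuristic that decides whether opening a bundle is worthwhile at all: only messages that fit in half of the usable bundle space are considered, larger ones are sent unbundled. A quick worked example (a 1500-byte MTU and the 40-byte INT_H_SIZE are assumed values here):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, int_h_size = 40;	/* assumed values */
	unsigned int max = mtu - int_h_size;

	/* tipc_msg_make_bundle() refuses anything above max / 2 */
	printf("bundling cut-off at %u bytes\n", max / 2);	/* 730 */
	return 0;
}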
+/**
+ * tipc_msg_reverse(): swap source and destination addresses and add error code
+ * @buf: buffer containing message to be reversed
+ * @dnode: return value: node where to send message after reversal
+ * @err: error code to be set in message
+ * Consumes buffer if failure
+ * Returns true if success, otherwise false
+ */
+bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	uint imp = msg_importance(msg);
+	struct tipc_msg ohdr;
+	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+
+	if (skb_linearize(buf))
+		goto exit;
+	if (msg_dest_droppable(msg))
+		goto exit;
+	if (msg_errcode(msg))
+		goto exit;
+
+	memcpy(&ohdr, msg, msg_hdr_sz(msg));
+	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
+	if (msg_isdata(msg))
+		msg_set_importance(msg, imp);
+	msg_set_errcode(msg, err);
+	msg_set_origport(msg, msg_destport(&ohdr));
+	msg_set_destport(msg, msg_origport(&ohdr));
+	msg_set_prevnode(msg, tipc_own_addr);
+	if (!msg_short(msg)) {
+		msg_set_orignode(msg, msg_destnode(&ohdr));
+		msg_set_destnode(msg, msg_orignode(&ohdr));
+	}
+	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
+	skb_trim(buf, msg_size(msg));
+	skb_orphan(buf);
+	*dnode = msg_orignode(&ohdr);
+	return true;
+exit:
+	kfree_skb(buf);
+	return false;
+}
+
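tipc_msg_reverse() turns a message that cannot be delivered into its own rejection: originator and destination fields are swapped in place, the importance of data messages is raised one level (capped at TIPC_CRITICAL_IMPORTANCE), and the echoed payload is clamped to MAX_FORWARD_SIZE. A simplified standalone model of that header rewrite (the field names and the importance cap value are stand-ins, not the real struct tipc_msg layout):

#include <stdio.h>

#define MAX_FORWARD_SIZE 1024		/* same cap as the patch */
#define CRITICAL_IMPORTANCE 3		/* TIPC_CRITICAL_IMPORTANCE, assumed value */

struct toy_hdr {
	unsigned int origport, destport;
	unsigned int importance;
	unsigned int data_sz;
};

/* Model of the rewrite done by tipc_msg_reverse(): swap the port
 * fields, raise importance one level (capped), clamp echoed data.
 */
static void toy_reverse(struct toy_hdr *h)
{
	unsigned int tmp = h->origport;

	h->origport = h->destport;
	h->destport = tmp;
	if (h->importance < CRITICAL_IMPORTANCE)
		h->importance++;
	if (h->data_sz > MAX_FORWARD_SIZE)
		h->data_sz = MAX_FORWARD_SIZE;
}

int main(void)
{
	struct toy_hdr h = { .origport = 1234, .destport = 5678,
			     .importance = 0, .data_sz = 4000 };

	toy_reverse(&h);
	printf("%u -> %u, imp %u, %u bytes echoed\n",
	       h.origport, h.destport, h.importance, h.data_sz);
	return 0;
}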
+/**
+ * tipc_msg_eval: determine fate of message that found no destination
+ * @buf: the buffer containing the message.
+ * @dnode: return value: next-hop node, if message to be forwarded
+ * @err: error code to use, if message to be rejected
+ *
+ * Does not consume buffer
+ * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
+ * code if message to be rejected
+ */
+int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 dport;
+
+	if (msg_type(msg) != TIPC_NAMED_MSG)
+		return -TIPC_ERR_NO_PORT;
+	if (skb_linearize(buf))
+		return -TIPC_ERR_NO_NAME;
+	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
+		return -TIPC_ERR_NO_NAME;
+	if (msg_reroute_cnt(msg) > 0)
+		return -TIPC_ERR_NO_NAME;
+
+	*dnode = addr_domain(msg_lookup_scope(msg));
+	dport = tipc_nametbl_translate(msg_nametype(msg),
+				       msg_nameinst(msg),
+				       dnode);
+	if (!dport)
+		return -TIPC_ERR_NO_NAME;
+	msg_incr_reroute_cnt(msg);
+	msg_set_destnode(msg, *dnode);
+	msg_set_destport(msg, dport);
+	return TIPC_OK;
+}
+
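tipc_msg_eval() gives a named message whose original lookup failed one more chance: it repeats the name-table translation, and the reroute counter guarantees a message is re-routed at most once before being rejected. Reduced to its inputs, the decision chain looks like this (standalone sketch, with MAX_FORWARD_SIZE written out as 1024):

#include <stdio.h>

enum fate { OK_RETRY, REJECT_NO_PORT, REJECT_NO_NAME };

/* Decision chain of tipc_msg_eval(), reduced to its inputs: only named
 * messages that are small enough, not yet rerouted, and that resolve in
 * the name table get a second delivery attempt.
 */
static enum fate eval(int is_named, int data_sz, int reroute_cnt, int resolves)
{
	if (!is_named)
		return REJECT_NO_PORT;
	if (data_sz > 1024 || reroute_cnt > 0 || !resolves)
		return REJECT_NO_NAME;
	return OK_RETRY;	/* reroute counter is bumped on this path */
}

int main(void)
{
	printf("%d\n", eval(1, 200, 0, 1));	/* 0: retried once */
	printf("%d\n", eval(1, 200, 1, 1));	/* 2: already rerouted, reject */
	return 0;
}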
+/* tipc_msg_reassemble() - clone a buffer chain of fragments and
+ *                         reassemble the clones into one message
+ */
+struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+{
+	struct sk_buff *buf = chain;
+	struct sk_buff *frag = buf;
+	struct sk_buff *head = NULL;
+	int hdr_sz;
+
+	/* Copy header if single buffer */
+	if (!buf->next) {
+		hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
+		return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+	}
+
+	/* Clone all fragments and reassemble */
+	while (buf) {
+		frag = skb_clone(buf, GFP_ATOMIC);
+		if (!frag)
+			goto error;
+		frag->next = NULL;
+		if (tipc_buf_append(&head, &frag))
+			break;
+		if (!head)
+			goto error;
+		buf = buf->next;
+	}
+	return frag;
+error:
+	pr_warn("Failed do clone local mcast rcv buffer\n");
+	kfree_skb(head);
+	return NULL;
+}
