Diffstat (limited to 'net/tipc/msg.c')
-rw-r--r--   net/tipc/msg.c   130
1 file changed, 92 insertions, 38 deletions
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6eb90cd3ef7..c3e96e815418 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.c: TIPC message header routines
  *
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -165,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	}
 
 	if (fragid == LAST_FRAGMENT) {
+		TIPC_SKB_CB(head)->validated = false;
+		if (unlikely(!tipc_msg_validate(head)))
+			goto err;
 		*buf = head;
 		TIPC_SKB_CB(head)->tail = NULL;
 		*headbuf = NULL;
@@ -172,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 	}
 	*buf = NULL;
 	return 0;
-
 err:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
@@ -181,6 +183,48 @@ err:
 	return 0;
 }
 
+/* tipc_msg_validate - validate basic format of received message
+ *
+ * This routine ensures a TIPC message has an acceptable header, and at least
+ * as much data as the header indicates it should. The routine also ensures
+ * that the entire message header is stored in the main fragment of the message
+ * buffer, to simplify future access to message header fields.
+ *
+ * Note: Having extra info present in the message header or data areas is OK.
+ * TIPC will ignore the excess, under the assumption that it is optional info
+ * introduced by a later release of the protocol.
+ */
+bool tipc_msg_validate(struct sk_buff *skb)
+{
+	struct tipc_msg *msg;
+	int msz, hsz;
+
+	if (unlikely(TIPC_SKB_CB(skb)->validated))
+		return true;
+	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
+		return false;
+
+	hsz = msg_hdr_sz(buf_msg(skb));
+	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
+		return false;
+	if (unlikely(!pskb_may_pull(skb, hsz)))
+		return false;
+
+	msg = buf_msg(skb);
+	if (unlikely(msg_version(msg) != TIPC_VERSION))
+		return false;
+
+	msz = msg_size(msg);
+	if (unlikely(msz < hsz))
+		return false;
+	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
+		return false;
+	if (unlikely(skb->len < msz))
+		return false;
+
+	TIPC_SKB_CB(skb)->validated = true;
+	return true;
+}
 
 /**
  * tipc_msg_build - create buffer chain containing specified header and data
@@ -228,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
 	msg_set_size(&pkthdr, pktmax);
 	msg_set_fragm_no(&pkthdr, pktno);
+	msg_set_importance(&pkthdr, msg_importance(mhdr));
 
 	/* Prepare first fragment */
 	skb = tipc_buf_acquire(pktmax);
@@ -286,33 +331,36 @@ error:
 
 /**
  * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @list: the buffer chain of the existing buffer ("bundle")
+ * @bskb: the buffer to append to ("bundle")
  * @skb: buffer to be appended
  * @mtu: max allowable size for the bundle buffer
  * Consumes buffer if successful
  * Returns true if bundling could be performed, otherwise false
  */
-bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
 {
-	struct sk_buff *bskb = skb_peek_tail(list);
-	struct tipc_msg *bmsg = buf_msg(bskb);
+	struct tipc_msg *bmsg;
 	struct tipc_msg *msg = buf_msg(skb);
-	unsigned int bsz = msg_size(bmsg);
+	unsigned int bsz;
 	unsigned int msz = msg_size(msg);
-	u32 start = align(bsz);
+	u32 start, pad;
 	u32 max = mtu - INT_H_SIZE;
-	u32 pad = start - bsz;
 
 	if (likely(msg_user(msg) == MSG_FRAGMENTER))
 		return false;
-	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+	if (!bskb)
+		return false;
+	bmsg = buf_msg(bskb);
+	bsz = msg_size(bmsg);
+	start = align(bsz);
+	pad = start - bsz;
+
+	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
 		return false;
 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
 		return false;
 	if (likely(msg_user(bmsg) != MSG_BUNDLER))
 		return false;
-	if (likely(!TIPC_SKB_CB(bskb)->bundling))
-		return false;
 	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
 		return false;
 	if (unlikely(max < (start + msz)))
@@ -328,34 +376,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
 
 /**
  * tipc_msg_extract(): extract bundled inner packet from buffer
- * @skb: linear outer buffer, to be extracted from.
+ * @skb: buffer to be extracted from.
  * @iskb: extracted inner buffer, to be returned
- * @pos: position of msg to be extracted. Returns with pointer of next msg
+ * @pos: position in outer message of msg to be extracted.
+ *       Returns position of next msg
  * Consumes outer buffer when last packet extracted
  * Returns true when when there is an extracted buffer, otherwise false
  */
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
 {
-	struct tipc_msg *msg = buf_msg(skb);
-	int imsz;
-	struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
+	struct tipc_msg *msg;
+	int imsz, offset;
 
-	/* Is there space left for shortest possible message? */
-	if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
+	*iskb = NULL;
+	if (unlikely(skb_linearize(skb)))
+		goto none;
+
+	msg = buf_msg(skb);
+	offset = msg_hdr_sz(msg) + *pos;
+	if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
 		goto none;
-	imsz = msg_size(imsg);
 
-	/* Is there space left for current message ? */
-	if ((*pos + imsz) > msg_data_sz(msg))
+	*iskb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!*iskb))
 		goto none;
-	*iskb = tipc_buf_acquire(imsz);
-	if (!*iskb)
+	skb_pull(*iskb, offset);
+	imsz = msg_size(buf_msg(*iskb));
+	skb_trim(*iskb, imsz);
+	if (unlikely(!tipc_msg_validate(*iskb)))
 		goto none;
-	skb_copy_to_linear_data(*iskb, imsg, imsz);
 	*pos += align(imsz);
 	return true;
 none:
 	kfree_skb(skb);
+	kfree_skb(*iskb);
 	*iskb = NULL;
 	return false;
 }
@@ -369,18 +423,17 @@ none:
  * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff_head *list,
-			  struct sk_buff *skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
 {
 	struct sk_buff *bskb;
 	struct tipc_msg *bmsg;
-	struct tipc_msg *msg = buf_msg(skb);
+	struct tipc_msg *msg = buf_msg(*skb);
 	u32 msz = msg_size(msg);
 	u32 max = mtu - INT_H_SIZE;
 
 	if (msg_user(msg) == MSG_FRAGMENTER)
 		return false;
-	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+	if (msg_user(msg) == TUNNEL_PROTOCOL)
 		return false;
 	if (msg_user(msg) == BCAST_PROTOCOL)
 		return false;
@@ -398,9 +451,9 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list,
 	msg_set_seqno(bmsg, msg_seqno(msg));
 	msg_set_ack(bmsg, msg_ack(msg));
 	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-	TIPC_SKB_CB(bskb)->bundling = true;
-	__skb_queue_tail(list, bskb);
-	return tipc_msg_bundle(list, skb, mtu);
+	tipc_msg_bundle(bskb, *skb, mtu);
+	*skb = bskb;
+	return true;
 }
 
 /**
@@ -415,21 +468,17 @@ bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
 		      int err)
 {
 	struct tipc_msg *msg = buf_msg(buf);
-	uint imp = msg_importance(msg);
 	struct tipc_msg ohdr;
 	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
 
 	if (skb_linearize(buf))
 		goto exit;
+	msg = buf_msg(buf);
 	if (msg_dest_droppable(msg))
 		goto exit;
 	if (msg_errcode(msg))
 		goto exit;
-
 	memcpy(&ohdr, msg, msg_hdr_sz(msg));
-	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
-	if (msg_isdata(msg))
-		msg_set_importance(msg, imp);
 	msg_set_errcode(msg, err);
 	msg_set_origport(msg, msg_destport(&ohdr));
 	msg_set_destport(msg, msg_origport(&ohdr));
@@ -462,15 +511,18 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
 {
 	struct tipc_msg *msg = buf_msg(skb);
 	u32 dport;
+	u32 own_addr = tipc_own_addr(net);
 
 	if (!msg_isdata(msg))
 		return false;
 	if (!msg_named(msg))
 		return false;
+	if (msg_errcode(msg))
+		return false;
 	*err = -TIPC_ERR_NO_NAME;
 	if (skb_linearize(skb))
 		return false;
-	if (msg_reroute_cnt(msg) > 0)
+	if (msg_reroute_cnt(msg))
 		return false;
 	*dnode = addr_domain(net, msg_lookup_scope(msg));
 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
@@ -478,6 +530,8 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
 	if (!dport)
 		return false;
 	msg_incr_reroute_cnt(msg);
+	if (*dnode != own_addr)
+		msg_set_prevnode(msg, own_addr);
 	msg_set_destnode(msg, *dnode);
 	msg_set_destport(msg, dport);
 	*err = TIPC_OK;
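
The new tipc_msg_validate() introduced above is meant to be called once on every arriving buffer before any header field is trusted: it pulls the full header into the linear area, checks version and size consistency, and caches the result in TIPC_SKB_CB(skb)->validated so repeated calls stay cheap, which is why tipc_buf_append() and tipc_msg_extract() can both invoke it unconditionally. A minimal sketch of the intended call pattern follows; the example_rcv() caller is hypothetical and not part of this patch, only tipc_msg_validate(), buf_msg() and the msg_*() accessors come from the TIPC headers.

/* Illustration only: a hypothetical receive-path caller.
 * Assumes <linux/skbuff.h> and net/tipc/msg.h, which declare
 * tipc_msg_validate(), buf_msg() and the msg_*() accessors used below.
 */
static void example_rcv(struct sk_buff *skb)
{
	struct tipc_msg *hdr;

	if (unlikely(!tipc_msg_validate(skb))) {
		kfree_skb(skb);		/* malformed or truncated header: drop */
		return;
	}
	hdr = buf_msg(skb);		/* safe: header is linear and self-consistent */
	pr_debug("TIPC msg: user %u, size %u\n", msg_user(hdr), msg_size(hdr));
	kfree_skb(skb);			/* example only; a real caller would pass the
					 * buffer on instead of freeing it here */
}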
