author	Stefano Brivio <sbrivio@redhat.com>	2018-12-06 13:30:37 -0500
committer	David S. Miller <davem@davemloft.net>	2018-12-07 19:24:40 -0500
commit	e6ac64d4c4d095085d7dd71cbd05704ac99829b2 (patch)
tree	9b93e099315cb036f69df1d45cac0e98de166976
parent	66033f47ca60294a95fc85ec3a3cc909dab7b765 (diff)
neighbour: Avoid writing before skb->head in neigh_hh_output()
While skb_push() makes the kernel panic if the skb headroom is less than
the unaligned hardware header size, it will proceed normally in case we
copy more than that because of alignment, and we'll silently corrupt
adjacent slabs.

In the case fixed by the previous patch, "ipv6: Check available headroom
in ip6_xmit() even without options", we end up in neigh_hh_output() with
14 bytes headroom, 14 bytes hardware header and write 16 bytes, starting
2 bytes before the allocated buffer.

Always check we're not writing before skb->head and, if the headroom is
not enough, warn and drop the packet.

v2:
 - instead of panicking with BUG_ON(), WARN_ON_ONCE() and drop the packet
   (Eric Dumazet)
 - if we avoid the panic, though, we need to explicitly check the headroom
   before the memcpy(), otherwise we'll have corrupted slabs on a running
   kernel, after we warn
 - use __skb_push() instead of skb_push(), as the headroom check is
   already implemented here explicitly (Eric Dumazet)

Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
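As an illustration of the arithmetic described in the message, here is a minimal
userspace sketch (not kernel code): the HH_DATA_MOD and HH_DATA_ALIGN() definitions
are copied from include/linux/netdevice.h, while the hh_len and headroom values of
14 bytes are the Ethernet scenario from the commit message, assumed here for the
example.

#include <stdio.h>

#define HH_DATA_MOD	16
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))

int main(void)
{
	unsigned int hh_len   = 14;	/* Ethernet hardware header (ETH_HLEN) */
	unsigned int headroom = 14;	/* headroom in the case fixed by the previous patch */
	unsigned int hh_alen  = HH_DATA_ALIGN(hh_len);	/* 14 rounded up to 16 */

	/* skb_push(skb, 14) would not panic: 14 bytes of headroom cover the
	 * unaligned push.  The aligned copy of hh_alen bytes, however, starts
	 * hh_alen - headroom bytes before skb->head.
	 */
	printf("hh_len=%u hh_alen=%u headroom=%u -> copy starts %u byte(s) before skb->head\n",
	       hh_len, hh_alen, headroom, hh_alen - headroom);
	return 0;
}

With 14 bytes of headroom, the aligned 16-byte copy starts 2 bytes before
skb->head, which is exactly the slab corruption this patch guards against.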
-rw-r--r--	include/net/neighbour.h	| 28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f58b384aa6c9..665990c7dec8 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
+	unsigned int hh_alen = 0;
 	unsigned int seq;
 	unsigned int hh_len;
 
@@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
 		seq = read_seqbegin(&hh->hh_lock);
 		hh_len = hh->hh_len;
 		if (likely(hh_len <= HH_DATA_MOD)) {
-			/* this is inlined by gcc */
-			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+			hh_alen = HH_DATA_MOD;
+
+			/* skb_push() would proceed silently if we have room for
+			 * the unaligned size but not for the aligned size:
+			 * check headroom explicitly.
+			 */
+			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+				/* this is inlined by gcc */
+				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+				       HH_DATA_MOD);
+			}
 		} else {
-			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+			hh_alen = HH_DATA_ALIGN(hh_len);
 
-			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+			if (likely(skb_headroom(skb) >= hh_alen)) {
+				memcpy(skb->data - hh_alen, hh->hh_data,
+				       hh_alen);
+			}
 		}
 	} while (read_seqretry(&hh->hh_lock, seq));
 
-	skb_push(skb, hh_len);
+	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	__skb_push(skb, hh_len);
 	return dev_queue_xmit(skb);
 }
 