author		Eric Dumazet <edumazet@google.com>	2017-12-03 12:32:59 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-03 12:39:14 -0500
commit		eeea10b83a139451130df1594f26710c8fa390c8 (patch)
tree		518abdf4864af7d3957b9c386f5dc3a2a5a06b5a
parent		bcd1d601e5cc760bf5743a59e4716603490e281c (diff)
tcp: add tcp_v4_fill_cb()/tcp_v4_restore_cb()
James Morris reported a kernel stack corruption bug [1] while running the
SELinux testsuite, and bisected it to a recent commit, bffa72cf7f9d
("net: sk_buff rbnode reorg").

We believe this commit is fine, but it exposes an older bug.

SELinux code runs from tcp_filter() and might send an ICMP, expecting IP
options to be found in skb->cb[] using the regular IPCB placement.

We need to defer TCP mangling of skb->cb[] until after tcp_filter() is called.

This patch adds tcp_v4_fill_cb()/tcp_v4_restore_cb() in a very similar way
to what was done for IPv6.

[1]
[ 339.806024] SELinux: failure in selinux_parse_skb(), unable to parse packet
[ 339.822505] Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: ffffffff81745af5
[ 339.822505]
[ 339.852250] CPU: 4 PID: 3642 Comm: client Not tainted 4.15.0-rc1-test #15
[ 339.868498] Hardware name: LENOVO 10FGS0VA1L/30BC, BIOS FWKT68A 01/19/2017
[ 339.885060] Call Trace:
[ 339.896875]  <IRQ>
[ 339.908103]  dump_stack+0x63/0x87
[ 339.920645]  panic+0xe8/0x248
[ 339.932668]  ? ip_push_pending_frames+0x33/0x40
[ 339.946328]  ? icmp_send+0x525/0x530
[ 339.958861]  ? kfree_skbmem+0x60/0x70
[ 339.971431]  __stack_chk_fail+0x1b/0x20
[ 339.984049]  icmp_send+0x525/0x530
[ 339.996205]  ? netlbl_skbuff_err+0x36/0x40
[ 340.008997]  ? selinux_netlbl_err+0x11/0x20
[ 340.021816]  ? selinux_socket_sock_rcv_skb+0x211/0x230
[ 340.035529]  ? security_sock_rcv_skb+0x3b/0x50
[ 340.048471]  ? sk_filter_trim_cap+0x44/0x1c0
[ 340.061246]  ? tcp_v4_inbound_md5_hash+0x69/0x1b0
[ 340.074562]  ? tcp_filter+0x2c/0x40
[ 340.086400]  ? tcp_v4_rcv+0x820/0xa20
[ 340.098329]  ? ip_local_deliver_finish+0x71/0x1a0
[ 340.111279]  ? ip_local_deliver+0x6f/0xe0
[ 340.123535]  ? ip_rcv_finish+0x3a0/0x3a0
[ 340.135523]  ? ip_rcv_finish+0xdb/0x3a0
[ 340.147442]  ? ip_rcv+0x27c/0x3c0
[ 340.158668]  ? inet_del_offload+0x40/0x40
[ 340.170580]  ? __netif_receive_skb_core+0x4ac/0x900
[ 340.183285]  ? rcu_accelerate_cbs+0x5b/0x80
[ 340.195282]  ? __netif_receive_skb+0x18/0x60
[ 340.207288]  ? process_backlog+0x95/0x140
[ 340.218948]  ? net_rx_action+0x26c/0x3b0
[ 340.230416]  ? __do_softirq+0xc9/0x26a
[ 340.241625]  ? do_softirq_own_stack+0x2a/0x40
[ 340.253368]  </IRQ>
[ 340.262673]  ? do_softirq+0x50/0x60
[ 340.273450]  ? __local_bh_enable_ip+0x57/0x60
[ 340.285045]  ? ip_finish_output2+0x175/0x350
[ 340.296403]  ? ip_finish_output+0x127/0x1d0
[ 340.307665]  ? nf_hook_slow+0x3c/0xb0
[ 340.318230]  ? ip_output+0x72/0xe0
[ 340.328524]  ? ip_fragment.constprop.54+0x80/0x80
[ 340.340070]  ? ip_local_out+0x35/0x40
[ 340.350497]  ? ip_queue_xmit+0x15c/0x3f0
[ 340.361060]  ? __kmalloc_reserve.isra.40+0x31/0x90
[ 340.372484]  ? __skb_clone+0x2e/0x130
[ 340.382633]  ? tcp_transmit_skb+0x558/0xa10
[ 340.393262]  ? tcp_connect+0x938/0xad0
[ 340.403370]  ? ktime_get_with_offset+0x4c/0xb0
[ 340.414206]  ? tcp_v4_connect+0x457/0x4e0
[ 340.424471]  ? __inet_stream_connect+0xb3/0x300
[ 340.435195]  ? inet_stream_connect+0x3b/0x60
[ 340.445607]  ? SYSC_connect+0xd9/0x110
[ 340.455455]  ? __audit_syscall_entry+0xaf/0x100
[ 340.466112]  ? syscall_trace_enter+0x1d0/0x2b0
[ 340.476636]  ? __audit_syscall_exit+0x209/0x290
[ 340.487151]  ? SyS_connect+0xe/0x10
[ 340.496453]  ? do_syscall_64+0x67/0x1b0
[ 340.506078]  ? entry_SYSCALL64_slow_path+0x25/0x25

Fixes: 971f10eca186 ("tcp: better TCP_SKB_CB layout to reduce cache line misses")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: James Morris <james.l.morris@oracle.com>
Tested-by: James Morris <james.l.morris@oracle.com>
Tested-by: Casey Schaufler <casey@schaufler-ca.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
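Background for the fill/restore pair: IPCB(skb) and TCP_SKB_CB(skb) are two overlapping views of the same 48-byte skb->cb[] scratch area. tcp_v4_fill_cb() parks the IP layer's inet_skb_parm inside TCP_SKB_CB(skb)->header.h4 before TCP reuses the rest of cb[], and tcp_v4_restore_cb() puts the IP view back whenever code such as icmp_send() needs to find IP options there again. The standalone C sketch below only illustrates that overlay; fake_skb, fake_inet_skb_parm, fake_tcp_skb_cb and the fill_cb()/restore_cb() helpers are deliberately simplified stand-ins, not the kernel's definitions.

/*
 * Hypothetical userspace sketch of the skb->cb[] overlay that
 * tcp_v4_fill_cb()/tcp_v4_restore_cb() manage.  Struct layouts, sizes
 * and field names are simplified stand-ins, not the real kernel ones.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_SIZE 48				/* skb->cb[] is 48 bytes */

struct fake_skb {
	char cb[CB_SIZE] __attribute__((aligned(8)));	/* per-layer scratch */
};

struct fake_inet_skb_parm {			/* stand-in for inet_skb_parm */
	uint8_t opt_len;			/* pretend IP options live here */
	uint8_t opts[15];
};

struct fake_tcp_skb_cb {			/* stand-in for tcp_skb_cb */
	uint32_t seq;
	uint32_t end_seq;
	uint32_t ack_seq;
	union {
		struct fake_inet_skb_parm h4;	/* saved IP view */
	} header;				/* kept at the end, as in the kernel */
};

/* Both macros alias cb[0]: this is the aliasing the patch has to juggle */
#define IPCB(skb)	((struct fake_inet_skb_parm *)((skb)->cb))
#define TCP_SKB_CB(skb)	((struct fake_tcp_skb_cb *)((skb)->cb))

/* Mirrors tcp_v4_fill_cb(): save the IP view, then write TCP fields over it */
static void fill_cb(struct fake_skb *skb, uint32_t seq)
{
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct fake_inet_skb_parm));	/* regions may overlap */
	TCP_SKB_CB(skb)->seq = seq;
}

/* Mirrors tcp_v4_restore_cb(): put the IP view back so ICMP code finds it */
static void restore_cb(struct fake_skb *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct fake_inet_skb_parm));
}

int main(void)
{
	struct fake_skb skb = { { 0 } };

	IPCB(&skb)->opt_len = 4;	/* "IP layer" parked options in cb[] */

	/* tcp_filter()/SELinux may still read IPCB() here, before fill_cb() */
	fill_cb(&skb, 12345);
	printf("saved opt_len=%u, seq=%u\n",
	       (unsigned)TCP_SKB_CB(&skb)->header.h4.opt_len,
	       (unsigned)TCP_SKB_CB(&skb)->seq);

	restore_cb(&skb);
	printf("restored opt_len=%u\n", (unsigned)IPCB(&skb)->opt_len);
	return 0;
}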
-rw-r--r--	net/ipv4/tcp_ipv4.c	59
-rw-r--r--	net/ipv6/tcp_ipv6.c	10
2 files changed, 46 insertions, 23 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c6bc0c4d19c6..77ea45da0fe9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1591,6 +1591,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+		sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+			   const struct tcphdr *th)
+{
+	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+	 * barrier() makes sure compiler wont play fool^Waliasing games.
+	 */
+	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+		sizeof(struct inet_skb_parm));
+	barrier();
+
+	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+				    skb->len - th->doff * 4);
+	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+	TCP_SKB_CB(skb)->sacked = 0;
+	TCP_SKB_CB(skb)->has_rxtstamp =
+			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  * From tcp_input.c
  */
@@ -1631,24 +1659,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	th = (const struct tcphdr *)skb->data;
 	iph = ip_hdr(skb);
-	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-	 * barrier() makes sure compiler wont play fool^Waliasing games.
-	 */
-	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-		sizeof(struct inet_skb_parm));
-	barrier();
-
-	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-				    skb->len - th->doff * 4);
-	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-	TCP_SKB_CB(skb)->sacked = 0;
-	TCP_SKB_CB(skb)->has_rxtstamp =
-			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
 			       th->dest, sdif, &refcounted);
@@ -1679,14 +1689,19 @@ process:
 		sock_hold(sk);
 		refcounted = true;
 		nsk = NULL;
-		if (!tcp_filter(sk, skb))
+		if (!tcp_filter(sk, skb)) {
+			th = (const struct tcphdr *)skb->data;
+			iph = ip_hdr(skb);
+			tcp_v4_fill_cb(skb, iph, th);
 			nsk = tcp_check_req(sk, skb, req, false);
+		}
 		if (!nsk) {
 			reqsk_put(req);
 			goto discard_and_relse;
 		}
 		if (nsk == sk) {
 			reqsk_put(req);
+			tcp_v4_restore_cb(skb);
 		} else if (tcp_child_process(sk, nsk, skb)) {
 			tcp_v4_send_reset(nsk, skb);
 			goto discard_and_relse;
@@ -1712,6 +1727,7 @@ process:
 		goto discard_and_relse;
 	th = (const struct tcphdr *)skb->data;
 	iph = ip_hdr(skb);
+	tcp_v4_fill_cb(skb, iph, th);
 
 	skb->dev = NULL;
 
@@ -1742,6 +1758,8 @@ no_tcp_socket:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 
+	tcp_v4_fill_cb(skb, iph, th);
+
 	if (tcp_checksum_complete(skb)) {
 csum_error:
 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1768,6 +1786,8 @@ do_time_wait:
 		goto discard_it;
 	}
 
+	tcp_v4_fill_cb(skb, iph, th);
+
 	if (tcp_checksum_complete(skb)) {
 		inet_twsk_put(inet_twsk(sk));
 		goto csum_error;
@@ -1784,6 +1804,7 @@ do_time_wait:
 		if (sk2) {
 			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
+			tcp_v4_restore_cb(skb);
 			refcounted = false;
 			goto process;
 		}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index be11dc13aa70..1f04ec0e4a7a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1454,7 +1454,6 @@ process:
 		struct sock *nsk;
 
 		sk = req->rsk_listener;
-		tcp_v6_fill_cb(skb, hdr, th);
 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
 			sk_drops_add(sk, skb);
 			reqsk_put(req);
@@ -1467,8 +1466,12 @@ process:
 		sock_hold(sk);
 		refcounted = true;
 		nsk = NULL;
-		if (!tcp_filter(sk, skb))
+		if (!tcp_filter(sk, skb)) {
+			th = (const struct tcphdr *)skb->data;
+			hdr = ipv6_hdr(skb);
+			tcp_v6_fill_cb(skb, hdr, th);
 			nsk = tcp_check_req(sk, skb, req, false);
+		}
 		if (!nsk) {
 			reqsk_put(req);
 			goto discard_and_relse;
@@ -1492,8 +1495,6 @@ process:
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-	tcp_v6_fill_cb(skb, hdr, th);
-
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
 
@@ -1501,6 +1502,7 @@ process:
 		goto discard_and_relse;
 	th = (const struct tcphdr *)skb->data;
 	hdr = ipv6_hdr(skb);
+	tcp_v6_fill_cb(skb, hdr, th);
 
 	skb->dev = NULL;
 