author		Daniel Borkmann <daniel@iogearbox.net>	2017-01-24 20:28:18 -0500
committer	David S. Miller <davem@davemloft.net>	2017-01-25 13:17:47 -0500
commit		a67edbf4fb6deadcfe57a04a134abed4a5ba3bb5 (patch)
tree		110dfd4447ba997d36b80e2e534ae234ecd18eef
parent		0fe05591790e953f3ef9cf4f1bce08b6dd7b771f (diff)
bpf: add initial bpf tracepoints
This work adds a number of tracepoints to paths that are either
considered slow-path or exception-like states, where monitoring or
inspecting them would be desirable.

For the bpf(2) syscall, tracepoints have been placed for the main
commands when they succeed. In the XDP case, the tracepoint is for
exceptions, that is, f.e. on abnormal BPF program exit such as an
unknown or XDP_ABORTED return code, or when an error occurs during
the XDP_TX action and the packet could not be forwarded.

Both have been split into separate event headers, and can be further
extended. Worst case, if they unexpectedly should get into our way in
future, they can also be removed [1]. Of course, these tracepoints
(like any other) can be analyzed by eBPF itself, etc. Example output:

  # ./perf record -a -e bpf:* sleep 10
  # ./perf script
  sock_example  6197 [005]  283.980322: bpf:bpf_map_create: map type=ARRAY ufd=4 key=4 val=8 max=256 flags=0
  sock_example  6197 [005]  283.980721: bpf:bpf_prog_load: prog=a5ea8fa30ea6849c type=SOCKET_FILTER ufd=5
  sock_example  6197 [005]  283.988423: bpf:bpf_prog_get_type: prog=a5ea8fa30ea6849c type=SOCKET_FILTER
  sock_example  6197 [005]  283.988443: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[06 00 00 00] val=[00 00 00 00 00 00 00 00]
  [...]
  sock_example  6197 [005]  288.990868: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[01 00 00 00] val=[14 00 00 00 00 00 00 00]
       swapper     0 [005]  289.338243: bpf:bpf_prog_put_rcu: prog=a5ea8fa30ea6849c type=SOCKET_FILTER

  [1] https://lwn.net/Articles/705270/

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
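Usage note (not from the commit message itself): since the new events live under
the bpf and xdp trace groups, they can also be consumed without perf through the
regular tracefs interface. A minimal sketch, assuming debugfs/tracefs is mounted
at the usual /sys/kernel/debug location:

  # echo 1 > /sys/kernel/debug/tracing/events/xdp/xdp_exception/enable
  # echo 1 > /sys/kernel/debug/tracing/events/bpf/enable
  # cat /sys/kernel/debug/tracing/trace_pipe

Writing 0 to the same enable files turns the events off again.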
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_rx.c		3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c		12
-rw-r--r--	drivers/net/ethernet/netronome/nfp/nfp_net_common.c	15
-rw-r--r--	drivers/net/ethernet/qlogic/qede/qede_fp.c		4
-rw-r--r--	drivers/net/virtio_net.c				12
-rw-r--r--	include/linux/bpf_trace.h				7
-rw-r--r--	include/trace/events/bpf.h				347
-rw-r--r--	include/trace/events/xdp.h				53
-rw-r--r--	kernel/bpf/core.c					9
-rw-r--r--	kernel/bpf/inode.c					17
-rw-r--r--	kernel/bpf/syscall.c					19
11 files changed, 483 insertions, 15 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e362f99334d0..f15ddba3659a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -33,6 +33,7 @@
 
 #include <net/busy_poll.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/mlx4/cq.h>
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
@@ -926,10 +927,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 							length, cq->ring,
 							&doorbell_pending)))
 				goto consumed;
+			trace_xdp_exception(dev, xdp_prog, act);
 			goto xdp_drop_no_cnt; /* Drop on xmit failure */
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
+			trace_xdp_exception(dev, xdp_prog, act);
 		case XDP_DROP:
 			ring->xdp_drop++;
 xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ba50583ea3ed..3d2e1a1886a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -33,6 +33,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
 #include "en.h"
 #include "en_tc.h"
@@ -640,7 +641,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
 	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
 }
 
-static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 					struct mlx5e_dma_info *di,
 					const struct xdp_buff *xdp)
 {
@@ -662,7 +663,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 		     MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
 		rq->stats.xdp_drop++;
 		mlx5e_page_release(rq, di, true);
-		return;
+		return false;
 	}
 
 	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
@@ -673,7 +674,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 		}
 		rq->stats.xdp_tx_full++;
 		mlx5e_page_release(rq, di, true);
-		return;
+		return false;
 	}
 
 	dma_len -= MLX5E_XDP_MIN_INLINE;
@@ -703,6 +704,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 
 	sq->db.xdp.doorbell = true;
 	rq->stats.xdp_tx++;
+	return true;
 }
 
 /* returns true if packet was consumed by xdp */
@@ -728,11 +730,13 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 		*len = xdp.data_end - xdp.data;
 		return false;
 	case XDP_TX:
-		mlx5e_xmit_xdp_frame(rq, di, &xdp);
+		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
+			trace_xdp_exception(rq->netdev, prog, act);
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
+		trace_xdp_exception(rq->netdev, prog, act);
 	case XDP_DROP:
 		rq->stats.xdp_drop++;
 		mlx5e_page_release(rq, di, true);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 67afd95ffb93..6ac43abf561b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -42,6 +42,7 @@
  */
 
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -1459,7 +1460,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
 	dev_kfree_skb_any(skb);
 }
 
-static void
+static bool
 nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 		   struct nfp_net_tx_ring *tx_ring,
 		   struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
@@ -1473,13 +1474,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
 	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
 		nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
-		return;
+		return false;
 	}
 
 	new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
 	if (unlikely(!new_frag)) {
 		nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
-		return;
+		return false;
 	}
 	nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
 
@@ -1509,6 +1510,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
 	tx_ring->wr_p++;
 	tx_ring->wr_ptr_add++;
+	return true;
 }
 
 static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
@@ -1613,12 +1615,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		case XDP_PASS:
 			break;
 		case XDP_TX:
-			nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
-					   pkt_off, pkt_len);
+			if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+							 tx_ring, rxbuf,
+							 pkt_off, pkt_len)))
+				trace_xdp_exception(nn->netdev, xdp_prog, act);
 			continue;
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
+			trace_xdp_exception(nn->netdev, xdp_prog, act);
 		case XDP_DROP:
 			nfp_net_rx_give_one(rx_ring, rxbuf->frag,
 					    rxbuf->dma_addr);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a6ca4884fad..445d4d2492c3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
 #include <net/udp_tunnel.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
@@ -1016,6 +1017,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 		/* We need the replacement buffer before transmit. */
 		if (qede_alloc_rx_buffer(rxq, true)) {
 			qede_recycle_rx_bd_ring(rxq, 1);
+			trace_xdp_exception(edev->ndev, prog, act);
 			return false;
 		}
 
@@ -1026,6 +1028,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 			dma_unmap_page(rxq->dev, bd->mapping,
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 			__free_page(bd->data);
+			trace_xdp_exception(edev->ndev, prog, act);
 		}
 
 		/* Regardless, we've consumed an Rx BD */
@@ -1035,6 +1038,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
+		trace_xdp_exception(edev->ndev, prog, act);
 	case XDP_DROP:
 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
 	}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 37db91d1a0a3..f9bf94887ff1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -23,6 +23,7 @@
 #include <linux/virtio.h>
 #include <linux/virtio_net.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/scatterlist.h>
 #include <linux/if_vlan.h>
 #include <linux/slab.h>
@@ -330,7 +331,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
-static void virtnet_xdp_xmit(struct virtnet_info *vi,
+static bool virtnet_xdp_xmit(struct virtnet_info *vi,
 			     struct receive_queue *rq,
 			     struct send_queue *sq,
 			     struct xdp_buff *xdp,
@@ -382,10 +383,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 			put_page(page);
 		} else /* small buffer */
 			kfree_skb(data);
-		return; // On error abort to avoid unnecessary kick
+		/* On error abort to avoid unnecessary kick */
+		return false;
 	}
 
 	virtqueue_kick(sq->vq);
+	return true;
 }
 
 static u32 do_xdp_prog(struct virtnet_info *vi,
@@ -421,11 +424,14 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
 			vi->xdp_queue_pairs +
 			smp_processor_id();
 		xdp.data = buf;
-		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
+		if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp,
+					       data)))
+			trace_xdp_exception(vi->dev, xdp_prog, act);
 		return XDP_TX;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
+		trace_xdp_exception(vi->dev, xdp_prog, act);
 	case XDP_DROP:
 		return XDP_DROP;
 	}
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 000000000000..b22efbdd2eb4
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/bpf.h>
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
new file mode 100644
index 000000000000..c3a53fd47ff1
--- /dev/null
+++ b/include/trace/events/bpf.h
@@ -0,0 +1,347 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf
+
+#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BPF_H
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+
+#define __PROG_TYPE_MAP(FN) \
+	FN(SOCKET_FILTER) \
+	FN(KPROBE) \
+	FN(SCHED_CLS) \
+	FN(SCHED_ACT) \
+	FN(TRACEPOINT) \
+	FN(XDP) \
+	FN(PERF_EVENT) \
+	FN(CGROUP_SKB) \
+	FN(CGROUP_SOCK) \
+	FN(LWT_IN) \
+	FN(LWT_OUT) \
+	FN(LWT_XMIT)
+
+#define __MAP_TYPE_MAP(FN) \
+	FN(HASH) \
+	FN(ARRAY) \
+	FN(PROG_ARRAY) \
+	FN(PERF_EVENT_ARRAY) \
+	FN(PERCPU_HASH) \
+	FN(PERCPU_ARRAY) \
+	FN(STACK_TRACE) \
+	FN(CGROUP_ARRAY) \
+	FN(LRU_HASH) \
+	FN(LRU_PERCPU_HASH) \
+	FN(LPM_TRIE)
+
+#define __PROG_TYPE_TP_FN(x) \
+	TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
+#define __PROG_TYPE_SYM_FN(x) \
+	{ BPF_PROG_TYPE_##x, #x },
+#define __PROG_TYPE_SYM_TAB \
+	__PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
+__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
+
+#define __MAP_TYPE_TP_FN(x) \
+	TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
+#define __MAP_TYPE_SYM_FN(x) \
+	{ BPF_MAP_TYPE_##x, #x },
+#define __MAP_TYPE_SYM_TAB \
+	__MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
+__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
+
+DECLARE_EVENT_CLASS(bpf_prog_event,
+
+	TP_PROTO(const struct bpf_prog *prg),
+
+	TP_ARGS(prg),
+
+	TP_STRUCT__entry(
+		__array(u8, prog_tag, 8)
+		__field(u32, type)
+	),
+
+	TP_fast_assign(
+		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+		memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+		__entry->type = prg->type;
+	),
+
+	TP_printk("prog=%s type=%s",
+		  __print_hex_str(__entry->prog_tag, 8),
+		  __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
+
+	TP_PROTO(const struct bpf_prog *prg),
+
+	TP_ARGS(prg)
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
+
+	TP_PROTO(const struct bpf_prog *prg),
+
+	TP_ARGS(prg)
+);
+
+TRACE_EVENT(bpf_prog_load,
+
+	TP_PROTO(const struct bpf_prog *prg, int ufd),
+
+	TP_ARGS(prg, ufd),
+
+	TP_STRUCT__entry(
+		__array(u8, prog_tag, 8)
+		__field(u32, type)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+		memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+		__entry->type = prg->type;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("prog=%s type=%s ufd=%d",
+		  __print_hex_str(__entry->prog_tag, 8),
+		  __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
+		  __entry->ufd)
+);
+
+TRACE_EVENT(bpf_map_create,
+
+	TP_PROTO(const struct bpf_map *map, int ufd),
+
+	TP_ARGS(map, ufd),
+
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(u32, size_key)
+		__field(u32, size_value)
+		__field(u32, max_entries)
+		__field(u32, flags)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		__entry->type = map->map_type;
+		__entry->size_key = map->key_size;
+		__entry->size_value = map->value_size;
+		__entry->max_entries = map->max_entries;
+		__entry->flags = map->map_flags;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
+		  __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+		  __entry->ufd, __entry->size_key, __entry->size_value,
+		  __entry->max_entries, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_prog,
+
+	TP_PROTO(const struct bpf_prog *prg, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(prg, ufd, pname),
+
+	TP_STRUCT__entry(
+		__array(u8, prog_tag, 8)
+		__field(int, ufd)
+		__string(path, pname->name)
+	),
+
+	TP_fast_assign(
+		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+		memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+		__assign_str(path, pname->name);
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("prog=%s path=%s ufd=%d",
+		  __print_hex_str(__entry->prog_tag, 8),
+		  __get_str(path), __entry->ufd)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
+
+	TP_PROTO(const struct bpf_prog *prg, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(prg, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
+
+	TP_PROTO(const struct bpf_prog *prg, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(prg, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_map,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(map, ufd, pname),
+
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(int, ufd)
+		__string(path, pname->name)
+	),
+
+	TP_fast_assign(
+		__assign_str(path, pname->name);
+		__entry->type = map->map_type;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("map type=%s ufd=%d path=%s",
+		  __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+		  __entry->ufd, __get_str(path))
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(map, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const struct filename *pname),
+
+	TP_ARGS(map, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_map_keyval,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const void *key, const void *val),
+
+	TP_ARGS(map, ufd, key, val),
+
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(u32, key_len)
+		__dynamic_array(u8, key, map->key_size)
+		__field(bool, key_trunc)
+		__field(u32, val_len)
+		__dynamic_array(u8, val, map->value_size)
+		__field(bool, val_trunc)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__get_dynamic_array(key), key, map->key_size);
+		memcpy(__get_dynamic_array(val), val, map->value_size);
+		__entry->type = map->map_type;
+		__entry->key_len = min(map->key_size, 16U);
+		__entry->key_trunc = map->key_size != __entry->key_len;
+		__entry->val_len = min(map->value_size, 16U);
+		__entry->val_trunc = map->value_size != __entry->val_len;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
+		  __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+		  __entry->ufd,
+		  __print_hex(__get_dynamic_array(key), __entry->key_len),
+		  __entry->key_trunc ? " ..." : "",
+		  __print_hex(__get_dynamic_array(val), __entry->val_len),
+		  __entry->val_trunc ? " ..." : "")
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const void *key, const void *val),
+
+	TP_ARGS(map, ufd, key, val)
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const void *key, const void *val),
+
+	TP_ARGS(map, ufd, key, val)
+);
+
+TRACE_EVENT(bpf_map_delete_elem,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const void *key),
+
+	TP_ARGS(map, ufd, key),
+
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(u32, key_len)
+		__dynamic_array(u8, key, map->key_size)
+		__field(bool, key_trunc)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__get_dynamic_array(key), key, map->key_size);
+		__entry->type = map->map_type;
+		__entry->key_len = min(map->key_size, 16U);
+		__entry->key_trunc = map->key_size != __entry->key_len;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("map type=%s ufd=%d key=[%s%s]",
+		  __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+		  __entry->ufd,
+		  __print_hex(__get_dynamic_array(key), __entry->key_len),
+		  __entry->key_trunc ? " ..." : "")
+);
+
+TRACE_EVENT(bpf_map_next_key,
+
+	TP_PROTO(const struct bpf_map *map, int ufd,
+		 const void *key, const void *key_next),
+
+	TP_ARGS(map, ufd, key, key_next),
+
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(u32, key_len)
+		__dynamic_array(u8, key, map->key_size)
+		__dynamic_array(u8, nxt, map->key_size)
+		__field(bool, key_trunc)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__get_dynamic_array(key), key, map->key_size);
+		memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
+		__entry->type = map->map_type;
+		__entry->key_len = min(map->key_size, 16U);
+		__entry->key_trunc = map->key_size != __entry->key_len;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
+		  __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+		  __entry->ufd,
+		  __print_hex(__get_dynamic_array(key), __entry->key_len),
+		  __entry->key_trunc ? " ..." : "",
+		  __print_hex(__get_dynamic_array(nxt), __entry->key_len),
+		  __entry->key_trunc ? " ..." : "")
+);
+
+#endif /* _TRACE_BPF_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
new file mode 100644
index 000000000000..1b61357d3f57
--- /dev/null
+++ b/include/trace/events/xdp.h
@@ -0,0 +1,53 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xdp
+
+#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XDP_H
+
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/tracepoint.h>
+
+#define __XDP_ACT_MAP(FN) \
+	FN(ABORTED) \
+	FN(DROP) \
+	FN(PASS) \
+	FN(TX)
+
+#define __XDP_ACT_TP_FN(x) \
+	TRACE_DEFINE_ENUM(XDP_##x);
+#define __XDP_ACT_SYM_FN(x) \
+	{ XDP_##x, #x },
+#define __XDP_ACT_SYM_TAB \
+	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+__XDP_ACT_MAP(__XDP_ACT_TP_FN)
+
+TRACE_EVENT(xdp_exception,
+
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp, u32 act),
+
+	TP_ARGS(dev, xdp, act),
+
+	TP_STRUCT__entry(
+		__string(name, dev->name)
+		__array(u8, prog_tag, 8)
+		__field(u32, act)
+	),
+
+	TP_fast_assign(
+		BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
+		memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
+		__assign_str(name, dev->name);
+		__entry->act = act;
+	),
+
+	TP_printk("prog=%s device=%s action=%s",
+		  __print_hex_str(__entry->prog_tag, 8),
+		  __get_str(name),
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
+);
+
+#endif /* _TRACE_XDP_H */
+
+#include <trace/define_trace.h>
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 503d4211988a..fddd76b1b627 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1173,3 +1173,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 {
 	return -EFAULT;
 }
+
+/* All definitions of tracepoints related to BPF. */
+#define CREATE_TRACE_POINTS
+#include <linux/bpf_trace.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 0b030c9126d3..fddcae801724 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -21,6 +21,7 @@
 #include <linux/parser.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 enum bpf_type {
 	BPF_TYPE_UNSPEC	= 0,
@@ -281,6 +282,13 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
 	ret = bpf_obj_do_pin(pname, raw, type);
 	if (ret != 0)
 		bpf_any_put(raw, type);
+	if ((trace_bpf_obj_pin_prog_enabled() ||
+	     trace_bpf_obj_pin_map_enabled()) && !ret) {
+		if (type == BPF_TYPE_PROG)
+			trace_bpf_obj_pin_prog(raw, ufd, pname);
+		if (type == BPF_TYPE_MAP)
+			trace_bpf_obj_pin_map(raw, ufd, pname);
+	}
 out:
 	putname(pname);
 	return ret;
@@ -342,8 +350,15 @@ int bpf_obj_get_user(const char __user *pathname)
 	else
 		goto out;
 
-	if (ret < 0)
+	if (ret < 0) {
 		bpf_any_put(raw, type);
+	} else if (trace_bpf_obj_get_prog_enabled() ||
+		   trace_bpf_obj_get_map_enabled()) {
+		if (type == BPF_TYPE_PROG)
+			trace_bpf_obj_get_prog(raw, ret, pname);
+		if (type == BPF_TYPE_MAP)
+			trace_bpf_obj_get_map(raw, ret, pname);
+	}
 out:
 	putname(pname);
 	return ret;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1d6b29e4e2c3..05ad086ab71d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -10,6 +10,7 @@
  * General Public License for more details.
  */
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/anon_inodes.h>
@@ -215,6 +216,7 @@ static int map_create(union bpf_attr *attr)
 		/* failed to allocate fd */
 		goto free_map;
 
+	trace_bpf_map_create(map, err);
 	return err;
 
 free_map:
@@ -339,6 +341,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (copy_to_user(uvalue, value, value_size) != 0)
 		goto free_value;
 
+	trace_bpf_map_lookup_elem(map, ufd, key, value);
 	err = 0;
 
 free_value:
@@ -421,6 +424,8 @@ static int map_update_elem(union bpf_attr *attr)
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
 
+	if (!err)
+		trace_bpf_map_update_elem(map, ufd, key, value);
 free_value:
 	kfree(value);
 free_key:
@@ -466,6 +471,8 @@ static int map_delete_elem(union bpf_attr *attr)
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
 
+	if (!err)
+		trace_bpf_map_delete_elem(map, ufd, key);
 free_key:
 	kfree(key);
 err_put:
@@ -518,6 +525,7 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
 		goto free_next_key;
 
+	trace_bpf_map_next_key(map, ufd, key, next_key);
 	err = 0;
 
 free_next_key:
@@ -671,8 +679,10 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 
 void bpf_prog_put(struct bpf_prog *prog)
 {
-	if (atomic_dec_and_test(&prog->aux->refcnt))
+	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		trace_bpf_prog_put_rcu(prog);
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+	}
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
 
@@ -781,7 +791,11 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
 
 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
 {
-	return __bpf_prog_get(ufd, &type);
+	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
+
+	if (!IS_ERR(prog))
+		trace_bpf_prog_get_type(prog);
+	return prog;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 
@@ -863,6 +877,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 		/* failed to allocate fd */
 		goto free_used_maps;
 
+	trace_bpf_prog_load(prog, err);
 	return err;
 
 free_used_maps: