author	Tuong Lien <tuong.t.lien@dektech.com.au>	2018-12-18 21:17:58 -0500
committer	David S. Miller <davem@davemloft.net>	2018-12-19 14:49:24 -0500
commit	01e661ebfbad40e6280fb8ec25f2861d39ba4387 (patch)
tree	be328a8884d99966a4152c6a23f7306d1c266f03 /net/tipc
parent	26574db0c17fb29fac8b57f94ed1dfd46cc89887 (diff)
tipc: add trace_events for tipc socket
The commit adds the new trace_events for the TIPC socket object:

trace_tipc_sk_create()
trace_tipc_sk_poll()
trace_tipc_sk_sendmsg()
trace_tipc_sk_sendmcast()
trace_tipc_sk_sendstream()
trace_tipc_sk_filter_rcv()
trace_tipc_sk_advance_rx()
trace_tipc_sk_rej_msg()
trace_tipc_sk_drop_msg()
trace_tipc_sk_release()
trace_tipc_sk_shutdown()
trace_tipc_sk_overlimit1()
trace_tipc_sk_overlimit2()

It also enables the traces for the following cases:

- When a user creates a TIPC socket;
- When a user calls poll() on a TIPC socket;
- When a user sends a dgram/mcast/stream message;
- When a message is put into the socket 'sk_receive_queue';
- When a message is released from the socket 'sk_receive_queue';
- When a message is rejected (e.g. due to no port, invalid message, etc.);
- When a message is dropped (e.g. due to wrong message type);
- When a socket is released;
- When a socket is shut down;
- When the socket rcvq's allocation exceeds the watermark (> 90%);
- When the socket rcvq + bklq's allocation exceeds the watermark (> 90%);
- When the 'TIPC_ERR_OVERLOAD/2' issue happens.

Note:

a) All the socket traces can be narrowed down to a specific socket,
either by using the generic 'event filtering' feature on a known
socket 'portid' value, or via the sysctl file:

	/proc/sys/net/tipc/sk_filter

The file holds a 'tuple' determining which sockets should be traced:

	(portid, sock type, name type, name lower, name upper)

where:
+ 'portid' is the socket portid generated at socket creation; it can
  be found in the trace outputs or the 'tipc socket list' command
  printouts;
+ 'sock type' is the socket type (1 = SOCK_STREAM, ...);
+ 'name type', 'name lower' and 'name upper' are the service name
  being connected to or published by the socket.

A field value of '0' means 'any'. The default tuple is (0, 0, 0, 0, 0),
i.e. the traces fire for every socket with no filtering.

b) The 'tipc_sk_overlimit1/2' events are conditional trace_events that
fire when the socket receive queue (and backlog queue) is about to be
overloaded, i.e. when its allocation exceeds 90%. With the trace
enabled, the last skbs leading to a 'TIPC_ERR_OVERLOAD/2' issue can be
traced. The events are designed as an 'upper watermark' notification,
so that the other traces (e.g. 'tipc_sk_advance_rx' vs
'tipc_sk_filter_rcv') or other actions can be triggered in the
meantime to see what is going on with the socket queue.

In addition, 'trace_tipc_sk_dump()' is placed at the
'TIPC_ERR_OVERLOAD/2' cases, so the socket and the last skb can be
dumped for post-analysis.

Acked-by: Ying Xue <ying.xue@windriver.com>
Tested-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Tuong Lien <tuong.t.lien@dektech.com.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
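For illustration, a minimal userspace sketch of driving the new knobs
(not part of this patch): it writes a filter tuple to the sysctl file
and enables one of the new trace_events via tracefs. The tracefs mount
point and the example portid value are assumptions; adjust them to the
local setup. The same enable step applies to the conditional
'tipc_sk_overlimit1/2' events.

	#include <stdio.h>
	#include <stdlib.h>

	/* Write a string to a proc/tracefs file, bailing out on error. */
	static void write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f || fputs(val, f) == EOF) {
			perror(path);
			exit(1);
		}
		fclose(f);
	}

	int main(void)
	{
		/* Tuple: (portid, sock type, name type, name lower, name
		 * upper); 0 means 'any'. 3974011821 is a made-up portid as
		 * it would appear in a 'tipc socket list' printout.
		 */
		write_str("/proc/sys/net/tipc/sk_filter", "3974011821 0 0 0 0");

		/* Enable one of the new events (root required; the path
		 * assumes tracefs is mounted at /sys/kernel/debug/tracing).
		 */
		write_str("/sys/kernel/debug/tracing/events/tipc/tipc_sk_sendmsg/enable",
			  "1");
		return 0;
	}

The resulting per-socket events can then be read back from the 'trace'
file in the same tracing directory.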
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/socket.c	137
-rw-r--r--	net/tipc/socket.h	2
-rw-r--r--	net/tipc/sysctl.c	8
-rw-r--r--	net/tipc/trace.c	6
-rw-r--r--	net/tipc/trace.h	32
5 files changed, 176 insertions(+), 9 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b6b2a94eb54e..291d6bbe85f4 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -234,6 +234,7 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
  */
 static void tsk_advance_rx_queue(struct sock *sk)
 {
+	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
@@ -248,6 +249,7 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
 	if (!tipc_msg_reverse(onode, &skb, err))
 		return;
 
+	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
 	dnode = msg_destnode(buf_msg(skb));
 	selector = msg_origport(buf_msg(skb));
 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
@@ -483,6 +485,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 			tsk_set_unreliable(tsk, true);
 	}
 
+	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
 	return 0;
 }
 
@@ -572,6 +575,7 @@ static int tipc_release(struct socket *sock)
 	tsk = tipc_sk(sk);
 	lock_sock(sk);
 
+	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
 	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 	tipc_sk_leave(tsk);
@@ -719,6 +723,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
 	__poll_t revents = 0;
 
 	sock_poll_wait(file, sock, wait);
+	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
 
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -805,9 +810,12 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
 	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
 
 	/* Send message if build was successful */
-	if (unlikely(rc == dlen))
+	if (unlikely(rc == dlen)) {
+		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
+					TIPC_DUMP_SK_SNDQ, " ");
 		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
 				     &tsk->cong_link_cnt);
+	}
 
 	tipc_nlist_purge(&dsts);
 
@@ -1209,8 +1217,10 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
 	bool conn_cong;
 
 	/* Ignore if connection cannot be validated: */
-	if (!tsk_peer_msg(tsk, hdr))
+	if (!tsk_peer_msg(tsk, hdr)) {
+		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
 		goto exit;
+	}
 
 	if (unlikely(msg_errcode(hdr))) {
 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
@@ -1378,6 +1388,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
 		return -ENOMEM;
 
+	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
 	if (unlikely(rc == -ELINKCONG)) {
 		tipc_dest_push(clinks, dnode, 0);
@@ -1455,6 +1466,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 		if (unlikely(rc != send))
 			break;
 
+		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
+					 TIPC_DUMP_SK_SNDQ, " ");
 		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
 		if (unlikely(rc == -ELINKCONG)) {
 			tsk->cong_link_cnt = 1;
@@ -2129,6 +2142,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 	struct sk_buff_head inputq;
 	int limit, err = TIPC_OK;
 
+	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
 	TIPC_SKB_CB(skb)->bytes_read = 0;
 	__skb_queue_head_init(&inputq);
 	__skb_queue_tail(&inputq, skb);
@@ -2148,17 +2162,25 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 		    (!grp && msg_in_group(hdr)))
 			err = TIPC_ERR_NO_PORT;
 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
+					   "err_overload2!");
 			atomic_inc(&sk->sk_drops);
 			err = TIPC_ERR_OVERLOAD;
 		}
 
 		if (unlikely(err)) {
-			tipc_skb_reject(net, err, skb, xmitq);
+			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
+				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
+						      "@filter_rcv!");
+				__skb_queue_tail(xmitq, skb);
+			}
 			err = TIPC_OK;
 			continue;
 		}
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		skb_set_owner_r(skb, sk);
+		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
+					 "rcvq >90% allocated!");
 		sk->sk_data_ready(sk);
 	}
 }
@@ -2224,14 +2246,21 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 		if (!sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
-		if (likely(!sk_add_backlog(sk, skb, lim)))
+		if (likely(!sk_add_backlog(sk, skb, lim))) {
+			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
+						 "bklg & rcvq >90% allocated!");
 			continue;
+		}
 
+		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
 		/* Overload => reject message back to sender */
 		onode = tipc_own_addr(sock_net(sk));
 		atomic_inc(&sk->sk_drops);
-		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
+			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
+					      "@sk_enqueue!");
 			__skb_queue_tail(xmitq, skb);
+		}
 		break;
 	}
 }
@@ -2280,6 +2309,8 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 		/* Prepare for message rejection */
 		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
 			continue;
+
+		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
 xmit:
 		dnode = msg_destnode(buf_msg(skb));
 		tipc_node_xmit_skb(net, skb, dnode, dport);
@@ -2553,6 +2584,7 @@ static int tipc_shutdown(struct socket *sock, int how)
 
 	lock_sock(sk);
 
+	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
 	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
 	sk->sk_shutdown = SEND_SHUTDOWN;
 
@@ -3566,12 +3598,107 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
+/**
+ * tipc_sk_filtering - check if a socket should be traced
+ * @sk: the socket to be examined
+ * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
+ *                           (portid, sock type, name type, name lower, name upper)
+ *
+ * Returns true if the socket meets the socket tuple data
+ * (value 0 = 'any') or when there is no tuple set (all = 0),
+ * otherwise false
+ */
+bool tipc_sk_filtering(struct sock *sk)
+{
+	struct tipc_sock *tsk;
+	struct publication *p;
+	u32 _port, _sktype, _type, _lower, _upper;
+	u32 type = 0, lower = 0, upper = 0;
+
+	if (!sk)
+		return true;
+
+	tsk = tipc_sk(sk);
+
+	_port = sysctl_tipc_sk_filter[0];
+	_sktype = sysctl_tipc_sk_filter[1];
+	_type = sysctl_tipc_sk_filter[2];
+	_lower = sysctl_tipc_sk_filter[3];
+	_upper = sysctl_tipc_sk_filter[4];
+
+	if (!_port && !_sktype && !_type && !_lower && !_upper)
+		return true;
+
+	if (_port)
+		return (_port == tsk->portid);
+
+	if (_sktype && _sktype != sk->sk_type)
+		return false;
+
+	if (tsk->published) {
+		p = list_first_entry_or_null(&tsk->publications,
+					     struct publication, binding_sock);
+		if (p) {
+			type = p->type;
+			lower = p->lower;
+			upper = p->upper;
+		}
+	}
+
+	if (!tipc_sk_type_connectionless(sk)) {
+		type = tsk->conn_type;
+		lower = tsk->conn_instance;
+		upper = tsk->conn_instance;
+	}
+
+	if ((_type && _type != type) || (_lower && _lower != lower) ||
+	    (_upper && _upper != upper))
+		return false;
+
+	return true;
+}
+
 u32 tipc_sock_get_portid(struct sock *sk)
 {
 	return (sk) ? (tipc_sk(sk))->portid : 0;
 }
 
 /**
+ * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
+ *                      both the rcv and backlog queues are considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
+{
+	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
+
+	return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
+ *                      only the rcv queue is considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
+{
+	unsigned int lim = rcvbuf_limit(sk, skb);
+	unsigned int qsize = sk_rmem_alloc_get(sk);
+
+	return (qsize > lim * 90 / 100);
+}
+
+/**
  * tipc_sk_dump - dump TIPC socket
  * @sk: tipc sk to be dumped
  * @dqueues: bitmask to decide if any socket queue to be dumped?
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 07e36545b696..235b9679acee 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -72,5 +72,7 @@ int tipc_dump_start(struct netlink_callback *cb);
 int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
 int tipc_dump_done(struct netlink_callback *cb);
 u32 tipc_sock_get_portid(struct sock *sk);
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
 
 #endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 1a779b1e8510..3481e4906bd6 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -34,6 +34,7 @@
  */
 
 #include "core.h"
+#include "trace.h"
 
 #include <linux/sysctl.h>
 
@@ -54,6 +55,13 @@ static struct ctl_table tipc_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sk_filter",
+		.data		= &sysctl_tipc_sk_filter,
+		.maxlen		= sizeof(sysctl_tipc_sk_filter),
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
 	{}
 };
 
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
index 846196f0e810..964823841efe 100644
--- a/net/tipc/trace.c
+++ b/net/tipc/trace.c
@@ -37,6 +37,12 @@
 #include "trace.h"
 
 /**
+ * socket tuples for filtering in socket traces:
+ * (portid, sock type, name type, name lower, name upper)
+ */
+unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
+
+/**
  * tipc_skb_dump - dump TIPC skb data
  * @skb: skb to be dumped
  * @more: dump more?
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
index 535c8958651f..ebbfcd14627e 100644
--- a/net/tipc/trace.h
+++ b/net/tipc/trace.h
@@ -113,11 +113,14 @@ enum {
 	{(0xcbe), "SYNCH_BEGIN_EVT" },\
 	{(0xcee), "SYNCH_END_EVT"   })
 
+extern unsigned long sysctl_tipc_sk_filter[5] __read_mostly;
+
 int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
 int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf);
 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf);
 int tipc_node_dump(struct tipc_node *n, bool more, char *buf);
+bool tipc_sk_filtering(struct sock *sk);
 
 DECLARE_EVENT_CLASS(tipc_skb_class,
 
@@ -199,12 +202,33 @@ DECLARE_EVENT_CLASS(tipc_sk_class,
 		  __get_str(skb_buf), __get_str(buf))
 );
 
-#define DEFINE_SK_EVENT(name) \
-DEFINE_EVENT(tipc_sk_class, name, \
+#define DEFINE_SK_EVENT_FILTER(name) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+		 const char *header), \
+	TP_ARGS(sk, skb, dqueues, header), \
+	TP_CONDITION(tipc_sk_filtering(sk)))
+DEFINE_SK_EVENT_FILTER(tipc_sk_dump);
+DEFINE_SK_EVENT_FILTER(tipc_sk_create);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmcast);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmsg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendstream);
+DEFINE_SK_EVENT_FILTER(tipc_sk_poll);
+DEFINE_SK_EVENT_FILTER(tipc_sk_filter_rcv);
+DEFINE_SK_EVENT_FILTER(tipc_sk_advance_rx);
+DEFINE_SK_EVENT_FILTER(tipc_sk_rej_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_drop_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_release);
+DEFINE_SK_EVENT_FILTER(tipc_sk_shutdown);
+
+#define DEFINE_SK_EVENT_FILTER_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
 		 const char *header), \
-	TP_ARGS(sk, skb, dqueues, header))
-DEFINE_SK_EVENT(tipc_sk_dump);
+	TP_ARGS(sk, skb, dqueues, header), \
+	TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
 
 DECLARE_EVENT_CLASS(tipc_link_class,
 