-rw-r--r--  include/net/cls_cgroup.h | 29
-rw-r--r--  net/sched/cls_cgroup.c   | 23
2 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d39456e14..ccd6d8bffa4d 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disables bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif /* _NET_CLS_CGROUP_H */
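
For reference, the softirq check in the new helper keys on the preempt count. The lines below are abbreviated from include/linux/preempt.h (kernel-internal definitions, not part of this patch) and show why a task that has merely disabled bottom halves is not mistaken for softirq context:

/* Abbreviated from include/linux/preempt.h, shown for reference only:
 * local_bh_disable() raises the softirq count by SOFTIRQ_DISABLE_OFFSET
 * (two SOFTIRQ_OFFSET units), whereas actually running a softirq adds a
 * single SOFTIRQ_OFFSET, so in_serving_softirq() is true only while a
 * softirq handler is executing.
 */
#define softirq_count()		(preempt_count() & SOFTIRQ_MASK)
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
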
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b216412..4c85bd3a750c 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
 
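
As a rough illustration of how the new helper is meant to be consumed, here is a minimal sketch of a hypothetical caller. The function example_classify() and its return convention are invented for illustration and are not part of this patch; it assumes the caller sits on the transmit path with bottom halves disabled, as the comment in task_get_classid() requires.

#include <linux/skbuff.h>
#include <net/cls_cgroup.h>

/* Hypothetical example only: map an outgoing skb to the sending task's
 * net_cls classid; task_get_classid() itself falls back to the socket's
 * sk_classid (or 0) when invoked from softirq context.
 */
static int example_classify(struct sk_buff *skb)
{
	u32 classid = task_get_classid(skb);

	if (!classid)		/* task is not in a classful net_cls cgroup */
		return -1;

	/* ... look up a class or queue keyed by classid ... */
	return 0;
}

In-tree, cls_cgroup_classify() above is exactly such a caller; moving the lookup into the shared header makes the same softirq-safe classid retrieval available to other users of <net/cls_cgroup.h> instead of having each open-code it.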