about | summary | refs | log | tree | commit | diff | stats
path: root/net/sched/sch_sfq.c
diff options
context:
space:
mode:
authorJohn Fastabend <john.fastabend@gmail.com>2014-09-12 23:05:27 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-13 12:30:25 -0400
commit25d8c0d55f241ce2d360df1bea48e23a55836ee6 (patch)
treec0aca67607e7ce560a4a2cebef5fb6d55adf4112 /net/sched/sch_sfq.c
parent46e5da40aec256155cfedee96dd21a75da941f2c (diff)
net: rcu-ify tcf_proto
rcu'ify tcf_proto this allows calling tc_classify() without holding any locks. Updaters are protected by RTNL. This patch prepares the core net_sched infrastructure for running the classifier/action chains without holding the qdisc lock however it does nothing to ensure cls_xxx and act_xxx types also work without locking. Additional patches are required to address the fall out. Signed-off-by: John Fastabend <john.r.fastabend@intel.com> Acked-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_sfq.c')
-rw-r--r--net/sched/sch_sfq.c11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 211db9017c35..80c36bd54abc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -125,7 +125,7 @@ struct sfq_sched_data {
125 u8 cur_depth; /* depth of longest slot */ 125 u8 cur_depth; /* depth of longest slot */
126 u8 flags; 126 u8 flags;
127 unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ 127 unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
128 struct tcf_proto *filter_list; 128 struct tcf_proto __rcu *filter_list;
129 sfq_index *ht; /* Hash table ('divisor' slots) */ 129 sfq_index *ht; /* Hash table ('divisor' slots) */
130 struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ 130 struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
131 131
@@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
187{ 187{
188 struct sfq_sched_data *q = qdisc_priv(sch); 188 struct sfq_sched_data *q = qdisc_priv(sch);
189 struct tcf_result res; 189 struct tcf_result res;
190 struct tcf_proto *fl;
190 int result; 191 int result;
191 192
192 if (TC_H_MAJ(skb->priority) == sch->handle && 193 if (TC_H_MAJ(skb->priority) == sch->handle &&
@@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
194 TC_H_MIN(skb->priority) <= q->divisor) 195 TC_H_MIN(skb->priority) <= q->divisor)
195 return TC_H_MIN(skb->priority); 196 return TC_H_MIN(skb->priority);
196 197
197 if (!q->filter_list) { 198 fl = rcu_dereference_bh(q->filter_list);
199 if (!fl) {
198 skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); 200 skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
199 return sfq_hash(q, skb) + 1; 201 return sfq_hash(q, skb) + 1;
200 } 202 }
201 203
202 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 204 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
203 result = tc_classify(skb, q->filter_list, &res); 205 result = tc_classify(skb, fl, &res);
204 if (result >= 0) { 206 if (result >= 0) {
205#ifdef CONFIG_NET_CLS_ACT 207#ifdef CONFIG_NET_CLS_ACT
206 switch (result) { 208 switch (result) {
@@ -836,7 +838,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl)
836{ 838{
837} 839}
838 840
839static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) 841static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
842 unsigned long cl)
840{ 843{
841 struct sfq_sched_data *q = qdisc_priv(sch); 844 struct sfq_sched_data *q = qdisc_priv(sch);
842 845