Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/cls_api.c      23
-rw-r--r--   net/sched/cls_cgroup.c   22
-rw-r--r--   net/sched/cls_flow.c      8
-rw-r--r--   net/sched/cls_route.c     2
-rw-r--r--   net/sched/em_meta.c       8
-rw-r--r--   net/sched/sch_hfsc.c      8
-rw-r--r--   net/sched/sch_sfq.c       2
-rw-r--r--   net/sched/sch_teql.c      6
8 files changed, 46 insertions(+), 33 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0759f32e9dca..09cdcdfe7e91 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -135,6 +135,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 	unsigned long cl;
 	unsigned long fh;
 	int err;
+	int tp_created = 0;
 
 	if (net != &init_net)
 		return -EINVAL;
@@ -266,10 +267,7 @@ replay:
 			goto errout;
 		}
 
-		spin_lock_bh(root_lock);
-		tp->next = *back;
-		*back = tp;
-		spin_unlock_bh(root_lock);
+		tp_created = 1;
 
 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
 		goto errout;
@@ -296,8 +294,11 @@ replay:
 	switch (n->nlmsg_type) {
 	case RTM_NEWTFILTER:
 		err = -EEXIST;
-		if (n->nlmsg_flags & NLM_F_EXCL)
+		if (n->nlmsg_flags & NLM_F_EXCL) {
+			if (tp_created)
+				tcf_destroy(tp);
 			goto errout;
+		}
 		break;
 	case RTM_DELTFILTER:
 		err = tp->ops->delete(tp, fh);
@@ -314,8 +315,18 @@ replay:
 	}
 
 	err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
-	if (err == 0)
+	if (err == 0) {
+		if (tp_created) {
+			spin_lock_bh(root_lock);
+			tp->next = *back;
+			*back = tp;
+			spin_unlock_bh(root_lock);
+		}
 		tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
+	} else {
+		if (tp_created)
+			tcf_destroy(tp);
+	}
 
 errout:
 	if (cl)
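
The cls_api.c hunks defer linking a newly allocated tcf_proto into the filter chain until tp->ops->change() has succeeded; on every failure path the half-configured object is destroyed instead of being left reachable. Below is a standalone userspace sketch of that create-configure-then-link shape; the names (filt, filt_change, filt_add) are invented for illustration and are not kernel APIs.

#include <stdlib.h>

/* Toy stand-in for struct tcf_proto and its singly linked chain. */
struct filt {
	struct filt *next;
	int handle;
};

/* Pretend configuration step; plays the role of tp->ops->change(). */
static int filt_change(struct filt *f, int handle)
{
	if (handle <= 0)
		return -1;	/* configuration failed */
	f->handle = handle;
	return 0;
}

/*
 * Mirrors the fixed control flow: the new object is linked into the
 * chain only after configuration succeeds; otherwise it is freed,
 * which is what tcf_destroy() does for the real tcf_proto.
 */
static int filt_add(struct filt **chain, int handle)
{
	struct filt *f = calloc(1, sizeof(*f));
	int created = 1;	/* corresponds to tp_created in the patch */
	int err;

	if (!f)
		return -1;

	err = filt_change(f, handle);
	if (err == 0) {
		if (created) {	/* link into the chain only now */
			f->next = *chain;
			*chain = f;
		}
	} else {
		if (created)
			free(f);
	}
	return err;
}

int main(void)
{
	struct filt *chain = NULL;

	filt_add(&chain, 1);	/* succeeds: linked into the chain */
	filt_add(&chain, 0);	/* change fails: freed, chain untouched */
	return 0;
}
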
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 1ab4542e61e0..0f815cc6a3db 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -98,8 +98,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = tp->root;
-	struct cgroup_cls_state *cs;
-	int ret = 0;
+	u32 classid;
 
 	/*
 	 * Due to the nature of the classifier it is required to ignore all
@@ -115,17 +114,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		return -1;
 
 	rcu_read_lock();
-	cs = task_cls_state(current);
-	if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
-		res->classid = cs->classid;
-		res->class = 0;
-		ret = tcf_exts_exec(skb, &head->exts, res);
-	} else
-		ret = -1;
-
+	classid = task_cls_state(current)->classid;
 	rcu_read_unlock();
 
-	return ret;
+	if (!classid)
+		return -1;
+
+	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
+		return -1;
+
+	res->classid = classid;
+	res->class = 0;
+	return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
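
The rewritten cls_cgroup_classify() copies the classid while the RCU read-side lock is held and then does all matching and early returns on the local copy, so no path can leave the function with the lock still taken. A minimal userspace sketch of that shape follows, with a pthread rwlock standing in for RCU; the names here are invented for illustration.

#include <pthread.h>

/* Toy shared state; plays the role of task_cls_state(current)->classid. */
static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int shared_classid;

/*
 * Hold the read lock only long enough to snapshot the classid,
 * then decide on the snapshot with the lock already dropped.
 */
static int classify(unsigned int *out)
{
	unsigned int classid;

	pthread_rwlock_rdlock(&state_lock);
	classid = shared_classid;
	pthread_rwlock_unlock(&state_lock);

	if (!classid)
		return -1;	/* nothing configured */

	*out = classid;
	return 0;
}

int main(void)
{
	unsigned int id;

	shared_classid = 42;
	return (classify(&id) == 0 && id == 42) ? 0 : 1;
}
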
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 0ef4e3065bcd..9402a7fd3785 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -84,7 +84,7 @@ static u32 flow_get_dst(const struct sk_buff *skb)
 	case htons(ETH_P_IPV6):
 		return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
 	default:
-		return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
+		return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 	}
 }
 
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
 		break;
 	}
 	default:
-		res = addr_fold(skb->dst) ^ (__force u16)skb->protocol;
+		res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 	}
 
 	return res;
@@ -251,8 +251,8 @@ fallback:
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ROUTE
-	if (skb->dst)
-		return skb->dst->tclassid;
+	if (skb_dst(skb))
+		return skb_dst(skb)->tclassid;
 #endif
 	return 0;
 }
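
This file, and cls_route.c, em_meta.c, sch_sfq.c and sch_teql.c below, replace direct skb->dst (and skb->rtable) member accesses with the skb_dst() and skb_rtable() helpers, so every user reaches the cached route through one accessor and the underlying field can later be repacked without touching the callers. A toy sketch of that accessor pattern follows; the struct and field names are made up and do not reflect the real sk_buff layout.

/* Invented stand-ins for dst_entry and sk_buff. */
struct dst_like {
	unsigned int tclassid;
};

struct skb_like {
	struct dst_like *dst_private;	/* hypothetical storage field */
};

/* Accessor in the spirit of skb_dst(): callers never touch the member. */
static inline struct dst_like *skb_dst_like(const struct skb_like *skb)
{
	return skb->dst_private;
}

/* Mirrors flow_get_rtclassid(): NULL check and dereference via the helper. */
static unsigned int get_rtclassid(const struct skb_like *skb)
{
	if (skb_dst_like(skb))
		return skb_dst_like(skb)->tclassid;
	return 0;
}

int main(void)
{
	struct dst_like d = { .tclassid = 7 };
	struct skb_like skb = { .dst_private = &d };

	return get_rtclassid(&skb) == 7 ? 0 : 1;
}
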
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index bdf1f4172eef..dd872d5383ef 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -137,7 +137,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	u32 id, h;
 	int iif, dont_cache = 0;
 
-	if ((dst = skb->dst) == NULL)
+	if ((dst = skb_dst(skb)) == NULL)
 		goto failure;
 
 	id = dst->tclassid;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index fad596bf32d7..266151ae85a3 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -246,11 +246,11 @@ META_COLLECTOR(int_tcindex)
 
 META_COLLECTOR(int_rtclassid)
 {
-	if (unlikely(skb->dst == NULL))
+	if (unlikely(skb_dst(skb) == NULL))
 		*err = -1;
 	else
 #ifdef CONFIG_NET_CLS_ROUTE
-		dst->value = skb->dst->tclassid;
+		dst->value = skb_dst(skb)->tclassid;
 #else
 		dst->value = 0;
 #endif
@@ -258,10 +258,10 @@ META_COLLECTOR(int_rtclassid)
 
 META_COLLECTOR(int_rtiif)
 {
-	if (unlikely(skb->rtable == NULL))
+	if (unlikely(skb_rtable(skb) == NULL))
 		*err = -1;
 	else
-		dst->value = skb->rtable->fl.iif;
+		dst->value = skb_rtable(skb)->fl.iif;
 }
 
 /**************************************************************************
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5022f9c1f34b..362c2811b2df 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -372,7 +372,7 @@ cftree_update(struct hfsc_class *cl)
  *	ism: (psched_us/byte) << ISM_SHIFT
  *	dx: psched_us
  *
- * The clock source resolution with ktime is 1.024us.
+ * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
  *
  * sm and ism are scaled in order to keep effective digits.
  * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
@@ -383,9 +383,11 @@ cftree_update(struct hfsc_class *cl)
  *  bytes/1.024us   12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
  *
  *  1.024us/byte    78.125     7.8125     0.78125    0.078125   0.0078125
+ *
+ * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
  */
-#define	SM_SHIFT	20
-#define	ISM_SHIFT	18
+#define	SM_SHIFT	(30 - PSCHED_SHIFT)
+#define	ISM_SHIFT	(8 + PSCHED_SHIFT)
 
 #define	SM_MASK		((1ULL << SM_SHIFT) - 1)
 #define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
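
The new definitions keep the old scaling as long as PSCHED_SHIFT is 10, exactly as the added comment says. A quick standalone check of that arithmetic (PSCHED_SHIFT is hard-coded here purely for illustration; in the kernel it comes from the packet scheduler headers):

#include <stdio.h>

#define PSCHED_SHIFT	10	/* assumed value; gives the 1.024us resolution noted above */

#define SM_SHIFT	(30 - PSCHED_SHIFT)
#define ISM_SHIFT	(8 + PSCHED_SHIFT)

int main(void)
{
	/* With PSCHED_SHIFT == 10 the derived shifts match the old constants. */
	printf("SM_SHIFT  = %d (old constant: 20)\n", SM_SHIFT);	/* 30 - 10 = 20 */
	printf("ISM_SHIFT = %d (old constant: 18)\n", ISM_SHIFT);	/* 8 + 10 = 18 */
	return 0;
}
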
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 33133d27b539..8706920a6d45 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -149,7 +149,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		break;
 	}
 	default:
-		h = (unsigned long)skb->dst ^ skb->protocol;
+		h = (unsigned long)skb_dst(skb) ^ skb->protocol;
 		h2 = (unsigned long)skb->sk;
 	}
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a886496bdc3a..cb1cb1e76b9a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -222,7 +222,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 {
 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
 	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-	struct neighbour *mn = skb->dst->neighbour;
+	struct neighbour *mn = skb_dst(skb)->neighbour;
 	struct neighbour *n = q->ncache;
 
 	if (mn->tbl == NULL)
@@ -262,8 +262,8 @@ static inline int teql_resolve(struct sk_buff *skb,
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
-	    skb->dst == NULL ||
-	    skb->dst->neighbour == NULL)
+	    skb_dst(skb) == NULL ||
+	    skb_dst(skb)->neighbour == NULL)
 		return 0;
 	return __teql_resolve(skb, skb_res, dev);
 }