Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c         2
-rw-r--r--  net/core/rtnetlink.c   3
-rw-r--r--  net/ipv4/inet_diag.c   5
-rw-r--r--  net/ipv6/ip6_gre.c     8
-rw-r--r--  net/ipv6/ndisc.c       3
-rw-r--r--  net/sched/sch_qfq.c  109
-rw-r--r--  net/tipc/handler.c     1
7 files changed, 91 insertions(+), 40 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 09cb3f6dc40c..bda6d004f9f0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1666,7 +1666,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 
 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 {
-        if (ptype->af_packet_priv == NULL)
+        if (!ptype->af_packet_priv || !skb->sk)
                 return false;
 
         if (ptype->id_match)
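The added !skb->sk test matters because both paths below dereference the skb's socket: id_match() receives skb->sk, and the fallback compares af_packet_priv against it, so a socket-less skb previously risked a NULL dereference. A stubbed, userspace-compilable model of the fixed helper (these struct definitions are illustration-only stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Illustration-only stand-ins for the kernel structures. */
struct sock { int id; };
struct sk_buff { struct sock *sk; };
struct packet_type {
        void *af_packet_priv;
        bool (*id_match)(struct packet_type *ptype, struct sock *sk);
};

static bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
        /* Bail out early: everything below needs a valid skb->sk. */
        if (!ptype->af_packet_priv || !skb->sk)
                return false;

        if (ptype->id_match)
                return ptype->id_match(ptype, skb->sk);

        return ptype->af_packet_priv == skb->sk;
}

int main(void)
{
        struct packet_type pt = { .af_packet_priv = &pt };
        struct sk_buff orphan = { .sk = NULL }; /* e.g. kernel-generated skb */

        /* Prints 0 (does not loop back), with no NULL dereference: */
        printf("%d\n", skb_loop_sk(&pt, &orphan));
        return 0;
}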
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 76d4c2c3c89b..fad649ae4dec 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2192,7 +2192,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
                         goto skip;
 
                 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
-                                              portid, seq, 0, NTF_SELF);
+                                              portid, seq,
+                                              RTM_NEWNEIGH, NTF_SELF);
                 if (err < 0)
                         return err;
 skip:
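The sixth argument is the netlink message type stamped on each dumped FDB entry; passing 0 left the records typeless, so RTM_NEWNEIGH-filtering consumers would skip them. A hypothetical userspace receive loop (handle_fdb_dump is an invented name; the NLMSG_* macros and RTM_NEWNEIGH are the standard uapi ones) showing where a zero-typed entry gets dropped:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void handle_fdb_dump(struct nlmsghdr *nlh, int len)
{
        for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                if (nlh->nlmsg_type != RTM_NEWNEIGH)
                        continue; /* a type-0 entry is ignored here */
                printf("RTM_NEWNEIGH entry, len %u\n", nlh->nlmsg_len);
        }
}

int main(void)
{
        /* A minimal, attribute-less message for demonstration. */
        struct nlmsghdr hdr = {
                .nlmsg_len  = NLMSG_LENGTH(0),
                .nlmsg_type = RTM_NEWNEIGH,
        };

        handle_fdb_dump(&hdr, hdr.nlmsg_len);
        return 0;
}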
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 535584c00f91..0c34bfabc11f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -892,13 +892,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                             struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
         const struct inet_diag_handler *handler;
+        int err = 0;
 
         handler = inet_diag_lock_handler(r->sdiag_protocol);
         if (!IS_ERR(handler))
                 handler->dump(skb, cb, r, bc);
+        else
+                err = PTR_ERR(handler);
         inet_diag_unlock_handler(handler);
 
-        return skb->len;
+        return err ? : skb->len;
 }
 
 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
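Previously a failing inet_diag_lock_handler() (say, the diag module for the requested protocol is not available) was swallowed and the dump still reported success; now the PTR_ERR value propagates. The `err ? : skb->len` form is the GNU conditional with the middle operand omitted, equivalent to `err ? err : skb->len`; a standalone demonstration (gcc or clang):

#include <stdio.h>

/* `a ? : b` evaluates a once, yields it if non-zero, else yields b. */
static int dump_result(int err, int len)
{
        return err ? : len; /* same as: err ? err : len */
}

int main(void)
{
        printf("%d\n", dump_result(0, 42));  /* 42: success, report length */
        printf("%d\n", dump_result(-2, 42)); /* -2: the error wins */
        return 0;
}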
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0185679c5f53..d5cb3c4e66f8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1633,9 +1633,9 @@ static size_t ip6gre_get_size(const struct net_device *dev)
                 /* IFLA_GRE_OKEY */
                 nla_total_size(4) +
                 /* IFLA_GRE_LOCAL */
-                nla_total_size(4) +
+                nla_total_size(sizeof(struct in6_addr)) +
                 /* IFLA_GRE_REMOTE */
-                nla_total_size(4) +
+                nla_total_size(sizeof(struct in6_addr)) +
                 /* IFLA_GRE_TTL */
                 nla_total_size(1) +
                 /* IFLA_GRE_TOS */
@@ -1659,8 +1659,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
             nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
             nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
             nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
-            nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
-            nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
+            nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+            nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
             nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
             /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
             nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
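Two separate bugs: ip6gre_get_size() reserved only 4 bytes per 16-byte IPv6 address, and ip6gre_fill_info() reported laddr and raddr swapped, so IFLA_GRE_LOCAL carried the remote address and vice versa. The size shortfall is easy to quantify, since an attribute occupies NLA_ALIGN(NLA_HDRLEN + payload) bytes; a quick userspace check with the uapi macros:

#include <stdio.h>
#include <linux/netlink.h>

int main(void)
{
        /* Space an attribute really needs: header plus aligned payload. */
        printf("4-byte payload reserves: %d bytes\n",
               NLA_ALIGN(NLA_HDRLEN + 4));  /* 8  */
        printf("16-byte in6_addr needs : %d bytes\n",
               NLA_ALIGN(NLA_HDRLEN + 16)); /* 20 */
        return 0;
}

Undersizing by 12 bytes per address can leave the allocated skb without enough tailroom, making the nla_put() calls in fill_info() fail.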
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index ff36194a71aa..2edce30ef733 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -535,7 +535,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 {
         struct inet6_dev *idev;
         struct inet6_ifaddr *ifa;
-        struct in6_addr mcaddr;
+        struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 
         idev = in6_dev_get(dev);
         if (!idev)
@@ -543,7 +543,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 
         read_lock_bh(&idev->lock);
         list_for_each_entry(ifa, &idev->addr_list, if_list) {
-                addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
                 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
                               /*router=*/ !!idev->cnf.forwarding,
                               /*solicited=*/ false, /*override=*/ true,
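Unsolicited neighbour advertisements belong on the all-nodes group ff02::1 (RFC 4861, sec. 7.2.6), which is exactly what IN6ADDR_LINKLOCAL_ALLNODES_INIT expands to; the removed addrconf_addr_solict_mult() call had instead been aiming each NA at the address's solicited-node group, which only nodes resolving that specific address have joined. For contrast, a userspace sketch of the solicited-node mapping (ff02::1:ff plus the low 24 bits of the address):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        struct in6_addr addr, snm;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::12:3456", &addr);
        inet_pton(AF_INET6, "ff02::1:ff00:0", &snm);
        /* Solicited-node group: copy the low 24 bits of the address. */
        memcpy(&snm.s6_addr[13], &addr.s6_addr[13], 3);

        /* Prints ff02::1:ff12:3456 -- not where unsolicited NAs go. */
        printf("%s\n", inet_ntop(AF_INET6, &snm, buf, sizeof(buf)));
        return 0;
}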
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f0dd83cff906..9687fa1c2275 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -84,18 +84,19 @@
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
-#define QFQ_MAX_INDEX           19
-#define QFQ_MAX_WSHIFT          16
+#define QFQ_MAX_INDEX           24
+#define QFQ_MAX_WSHIFT          12
 
 #define QFQ_MAX_WEIGHT          (1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM            (2*QFQ_MAX_WEIGHT)
+#define QFQ_MAX_WSUM            (16*QFQ_MAX_WEIGHT)
 
 #define FRAC_BITS               30      /* fixed point arithmetic */
 #define ONE_FP                  (1UL << FRAC_BITS)
 #define IWSUM                   (ONE_FP/QFQ_MAX_WSUM)
 
-#define QFQ_MTU_SHIFT           11
+#define QFQ_MTU_SHIFT           16      /* to support TSO/GSO */
 #define QFQ_MIN_SLOT_SHIFT      (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+#define QFQ_MIN_LMAX            256     /* min possible lmax for a class */
 
 /*
  * Possible group states. These values are used as indexes for the bitmaps
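The constant changes trade weight range for packet-size range: QFQ_MTU_SHIFT 16 lets the scheduler account for 64 KiB TSO/GSO super-packets, while QFQ_MAX_WSHIFT shrinks to 12 and QFQ_MAX_WSUM grows so the group-index arithmetic still fits QFQ_MAX_INDEX. A standalone sanity check of the derived values (same formulas, plain C):

#include <stdio.h>

#define QFQ_MAX_INDEX           24
#define QFQ_MAX_WSHIFT          12
#define QFQ_MAX_WEIGHT          (1 << QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM            (16 * QFQ_MAX_WEIGHT)
#define FRAC_BITS               30
#define ONE_FP                  (1UL << FRAC_BITS)
#define IWSUM                   (ONE_FP / QFQ_MAX_WSUM)
#define QFQ_MTU_SHIFT           16
#define QFQ_MIN_SLOT_SHIFT      (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)

int main(void)
{
        printf("max packet size : %u bytes\n", 1u << QFQ_MTU_SHIFT); /* 65536 */
        printf("max weight      : %d\n", QFQ_MAX_WEIGHT);            /* 4096  */
        printf("weight sum bound: %d\n", QFQ_MAX_WSUM);              /* 65536 */
        printf("IWSUM           : %lu\n", IWSUM);                    /* 16384 */
        printf("min slot shift  : %d\n", QFQ_MIN_SLOT_SHIFT);        /* 22    */
        return 0;
}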
@@ -231,6 +232,32 @@ static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
         q->wsum += delta_w;
 }
 
+static void qfq_update_reactivate_class(struct qfq_sched *q,
+                                        struct qfq_class *cl,
+                                        u32 inv_w, u32 lmax, int delta_w)
+{
+        bool need_reactivation = false;
+        int i = qfq_calc_index(inv_w, lmax);
+
+        if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+                /*
+                 * shift cl->F back, to not charge the
+                 * class for the not-yet-served head
+                 * packet
+                 */
+                cl->F = cl->S;
+                /* remove class from its slot in the old group */
+                qfq_deactivate_class(q, cl);
+                need_reactivation = true;
+        }
+
+        qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+        if (need_reactivation) /* activate in new group */
+                qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+}
+
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                             struct nlattr **tca, unsigned long *arg)
 {
@@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
         struct qfq_class *cl = (struct qfq_class *)*arg;
         struct nlattr *tb[TCA_QFQ_MAX + 1];
         u32 weight, lmax, inv_w;
-        int i, err;
+        int err;
         int delta_w;
 
         if (tca[TCA_OPTIONS] == NULL) {
@@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
         if (tb[TCA_QFQ_LMAX]) {
                 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-                if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
+                if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
                         pr_notice("qfq: invalid max length %u\n", lmax);
                         return -EINVAL;
                 }
         } else
-                lmax = 1UL << QFQ_MTU_SHIFT;
+                lmax = psched_mtu(qdisc_dev(sch));
 
         if (cl != NULL) {
-                bool need_reactivation = false;
-
                 if (tca[TCA_RATE]) {
                         err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                                     qdisc_root_sleeping_lock(sch),
@@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                 if (lmax == cl->lmax && inv_w == cl->inv_w)
                         return 0; /* nothing to update */
 
-                i = qfq_calc_index(inv_w, lmax);
                 sch_tree_lock(sch);
-                if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-                        /*
-                         * shift cl->F back, to not charge the
-                         * class for the not-yet-served head
-                         * packet
-                         */
-                        cl->F = cl->S;
-                        /* remove class from its slot in the old group */
-                        qfq_deactivate_class(q, cl);
-                        need_reactivation = true;
-                }
-
-                qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
-
-                if (need_reactivation) /* activate in new group */
-                        qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+                qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
                 sch_tree_unlock(sch);
 
                 return 0;
@@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
 
 
 /*
- * XXX we should make sure that slot becomes less than 32.
- * This is guaranteed by the input values.
- * roundedS is always cl->S rounded on grp->slot_shift bits.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ guarantees that the slot index is never higher than
+ * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ *
+ * With the current values of the above constants, the index is
+ * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ *
+ * When the weight of a class is increased or the lmax of the class is
+ * decreased, a new class with smaller slot size may happen to be
+ * activated. The activation of this class should be properly delayed
+ * to when the service of the class has finished in the ideal system
+ * tracked by QFQ. If the activation of the class is not delayed to
+ * this reference time instant, then this class may be unjustly served
+ * before other classes waiting for service. This may (infrequently)
+ * cause the above bound on the slot index to be violated for some of
+ * these unlucky classes.
+ *
+ * Instead of delaying the activation of the new class, which is quite
+ * complex, the following inaccurate but simple solution is used: if
+ * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
+ * of the class are shifted backward so as to let the slot index
+ * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
+ * the slot index is above it, then the data structure implementing
+ * the bucket list either gets immediately corrupted or may get
+ * corrupted on a possible next packet arrival that causes the start
+ * time of the group to be shifted backward.
  */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
                             u64 roundedS)
 {
         u64 slot = (roundedS - grp->S) >> grp->slot_shift;
-        unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+        unsigned int i; /* slot index in the bucket list */
+
+        if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
+                u64 deltaS = roundedS - grp->S -
+                             ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
+                cl->S -= deltaS;
+                cl->F -= deltaS;
+                slot = QFQ_MAX_SLOTS - 2;
+        }
+
+        i = (grp->front + slot) % QFQ_MAX_SLOTS;
 
         hlist_add_head(&cl->next, &grp->slots[i]);
         __set_bit(slot, &grp->full_slots);
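The bound quoted in the new comment checks out numerically, and the QFQ_MAX_SLOTS-2 clamp threshold sits well above it (QFQ_MAX_SLOTS is defined as 32 elsewhere in this file). Note the integer arithmetic: QFQ_MAX_WEIGHT/QFQ_MAX_WSUM truncates to zero on its own, so this sketch multiplies before dividing:

#include <stdio.h>

#define QFQ_MTU_SHIFT   16
#define QFQ_MIN_LMAX    256
#define QFQ_MAX_WEIGHT  (1 << 12)
#define QFQ_MAX_WSUM    (16 * QFQ_MAX_WEIGHT)
#define QFQ_MAX_SLOTS   32

int main(void)
{
        /* 2 + (65536/256) * 4096/65536 = 2 + 256/16 = 18 */
        int bound = 2 + (1 << QFQ_MTU_SHIFT) / QFQ_MIN_LMAX *
                        QFQ_MAX_WEIGHT / QFQ_MAX_WSUM;

        printf("steady-state slot bound: %d\n", bound);             /* 18 */
        printf("clamp threshold        : %d\n", QFQ_MAX_SLOTS - 2); /* 30 */
        return 0;
}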
@@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
         pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
+        if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+                pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+                         cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
+                qfq_update_reactivate_class(q, cl, cl->inv_w,
+                                            qdisc_pkt_len(skb), 0);
+        }
+
         err = qdisc_enqueue(skb, cl->qdisc);
         if (unlikely(err != NET_XMIT_SUCCESS)) {
                 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
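This is the enqueue-side counterpart of the TSO/GSO support: a segmentation-offload skb can legitimately exceed the lmax configured from the device MTU, so the class is re-graded on the fly instead of misaccounting the packet. A toy model of the grow-on-demand check (plain C, invented names, kernel types elided):

#include <stdio.h>

struct toy_class { unsigned int lmax; };

static void toy_enqueue(struct toy_class *cl, unsigned int pkt_len)
{
        /* Mirror of the unlikely() branch above: grow lmax on demand
         * (the real code also moves the class to the matching group). */
        if (cl->lmax < pkt_len) {
                printf("growing maxpkt from %u to %u\n", cl->lmax, pkt_len);
                cl->lmax = pkt_len;
        }
}

int main(void)
{
        struct toy_class cl = { .lmax = 1514 }; /* Ethernet-MTU-sized lmax */

        toy_enqueue(&cl, 65226); /* a ~64 KiB GSO burst arrives */
        return 0;
}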
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 111ff8300ae5..b36f0fcd9bdf 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -116,7 +116,6 @@ void tipc_handler_stop(void)
                 return;
 
         handler_enabled = 0;
-        tasklet_disable(&tipc_tasklet);
         tasklet_kill(&tipc_tasklet);
 
         spin_lock_bh(&qitem_lock);
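tasklet_kill() waits for any pending run to complete, but a tasklet that was disabled first stays marked as scheduled without ever executing, so the removed tasklet_disable() could leave tipc_handler_stop() spinning forever. A userspace model of that interaction (invented names, deliberately not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

struct toy_tasklet { bool scheduled; int disable_count; };

/* One softirq pass: a disabled tasklet stays scheduled but never runs. */
static void softirq_pass(struct toy_tasklet *t)
{
        if (t->scheduled && t->disable_count == 0)
                t->scheduled = false; /* handler consumed the schedule */
}

/* kill() spins until the scheduled flag clears. */
static bool kill_would_hang(struct toy_tasklet *t)
{
        softirq_pass(t); /* give pending work its chance to run */
        return t->scheduled;
}

int main(void)
{
        struct toy_tasklet t = { .scheduled = true, .disable_count = 1 };
        printf("disable then kill hangs: %s\n",
               kill_would_hang(&t) ? "yes" : "no");

        t = (struct toy_tasklet){ .scheduled = true, .disable_count = 0 };
        printf("plain kill hangs       : %s\n",
               kill_would_hang(&t) ? "yes" : "no");
        return 0;
}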