Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c               |   2
-rw-r--r--  net/core/netprio_cgroup.c    | 260
-rw-r--r--  net/core/skbuff.c            |   6
-rw-r--r--  net/ipv4/inet_diag.c         | 154
-rw-r--r--  net/ipv4/ip_fragment.c       |  19
-rw-r--r--  net/ipv4/tcp_input.c         |   6
-rw-r--r--  net/ipv4/tcp_output.c        |  15
-rw-r--r--  net/irda/ircomm/ircomm_tty.c |   1
-rw-r--r--  net/sched/cls_cgroup.c       |  28
9 files changed, 277 insertions(+), 214 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index c0946cb2b354..e5942bf45a6d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3451,6 +3451,8 @@ static int napi_gro_complete(struct sk_buff *skb)
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int err = -ENOENT;
 
+	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
+
 	if (NAPI_GRO_CB(skb)->count == 1) {
 		skb_shinfo(skb)->gso_size = 0;
 		goto out;
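
The BUILD_BUG_ON added above makes the build fail if struct napi_gro_cb ever outgrows the skb->cb scratch area it lives in. A minimal user-space sketch of the same compile-time guard using C11 static_assert (the struct names here are illustrative stand-ins, not kernel types):

#include <assert.h>

struct control_block { char data[48]; };	/* stand-in for skb->cb */
struct gro_state { void *last; int count; unsigned long age; };

/* Fails at compile time if gro_state stops fitting in the scratch area. */
static_assert(sizeof(struct gro_state) <= sizeof(struct control_block),
	      "gro_state must fit inside control_block");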
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 79285a36035f..bde53da9cd86 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -27,11 +27,7 @@
 
 #include <linux/fdtable.h>
 
-#define PRIOIDX_SZ 128
-
-static unsigned long prioidx_map[PRIOIDX_SZ];
-static DEFINE_SPINLOCK(prioidx_map_lock);
-static atomic_t max_prioidx = ATOMIC_INIT(0);
+#define PRIOMAP_MIN_SZ		128
 
 static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
 {
@@ -39,136 +35,157 @@ static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
 		struct cgroup_netprio_state, css);
 }
 
-static int get_prioidx(u32 *prio)
-{
-	unsigned long flags;
-	u32 prioidx;
-
-	spin_lock_irqsave(&prioidx_map_lock, flags);
-	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
-		spin_unlock_irqrestore(&prioidx_map_lock, flags);
-		return -ENOSPC;
-	}
-	set_bit(prioidx, prioidx_map);
-	if (atomic_read(&max_prioidx) < prioidx)
-		atomic_set(&max_prioidx, prioidx);
-	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	*prio = prioidx;
-	return 0;
-}
-
-static void put_prioidx(u32 idx)
+/*
+ * Extend @dev->priomap so that it's large enough to accommodate
+ * @target_idx.  @dev->priomap.priomap_len > @target_idx after successful
+ * return.  Must be called under rtnl lock.
+ */
+static int extend_netdev_table(struct net_device *dev, u32 target_idx)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&prioidx_map_lock, flags);
-	clear_bit(idx, prioidx_map);
-	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-}
+	struct netprio_map *old, *new;
+	size_t new_sz, new_len;
 
-static int extend_netdev_table(struct net_device *dev, u32 new_len)
-{
-	size_t new_size = sizeof(struct netprio_map) +
-			   ((sizeof(u32) * new_len));
-	struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
-	struct netprio_map *old_priomap;
+	/* is the existing priomap large enough? */
+	old = rtnl_dereference(dev->priomap);
+	if (old && old->priomap_len > target_idx)
+		return 0;
 
-	old_priomap = rtnl_dereference(dev->priomap);
+	/*
+	 * Determine the new size.  Let's keep it power-of-two.  We start
+	 * from PRIOMAP_MIN_SZ and double it until it's large enough to
+	 * accommodate @target_idx.
+	 */
+	new_sz = PRIOMAP_MIN_SZ;
+	while (true) {
+		new_len = (new_sz - offsetof(struct netprio_map, priomap)) /
+			sizeof(new->priomap[0]);
+		if (new_len > target_idx)
+			break;
+		new_sz *= 2;
+		/* overflowed? */
+		if (WARN_ON(new_sz < PRIOMAP_MIN_SZ))
+			return -ENOSPC;
+	}
 
-	if (!new_priomap) {
+	/* allocate & copy */
+	new = kzalloc(new_sz, GFP_KERNEL);
+	if (!new) {
 		pr_warn("Unable to alloc new priomap!\n");
 		return -ENOMEM;
 	}
 
-	if (old_priomap)
-		memcpy(new_priomap->priomap, old_priomap->priomap,
-		       old_priomap->priomap_len *
-		       sizeof(old_priomap->priomap[0]));
+	if (old)
+		memcpy(new->priomap, old->priomap,
+		       old->priomap_len * sizeof(old->priomap[0]));
 
-	new_priomap->priomap_len = new_len;
+	new->priomap_len = new_len;
 
-	rcu_assign_pointer(dev->priomap, new_priomap);
-	if (old_priomap)
-		kfree_rcu(old_priomap, rcu);
+	/* install the new priomap */
+	rcu_assign_pointer(dev->priomap, new);
+	if (old)
+		kfree_rcu(old, rcu);
 	return 0;
 }
 
-static int write_update_netdev_table(struct net_device *dev)
+/**
+ * netprio_prio - return the effective netprio of a cgroup-net_device pair
+ * @cgrp: cgroup part of the target pair
+ * @dev: net_device part of the target pair
+ *
+ * Should be called under RCU read or rtnl lock.
+ */
+static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
+{
+	struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
+
+	if (map && cgrp->id < map->priomap_len)
+		return map->priomap[cgrp->id];
+	return 0;
+}
+
+/**
+ * netprio_set_prio - set netprio on a cgroup-net_device pair
+ * @cgrp: cgroup part of the target pair
+ * @dev: net_device part of the target pair
+ * @prio: prio to set
+ *
+ * Set netprio to @prio on @cgrp-@dev pair.  Should be called under rtnl
+ * lock and may fail under memory pressure for non-zero @prio.
+ */
+static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
+			    u32 prio)
 {
-	int ret = 0;
-	u32 max_len;
 	struct netprio_map *map;
+	int ret;
 
-	max_len = atomic_read(&max_prioidx) + 1;
+	/* avoid extending priomap for zero writes */
 	map = rtnl_dereference(dev->priomap);
-	if (!map || map->priomap_len < max_len)
-		ret = extend_netdev_table(dev, max_len);
+	if (!prio && (!map || map->priomap_len <= cgrp->id))
+		return 0;
 
-	return ret;
+	ret = extend_netdev_table(dev, cgrp->id);
+	if (ret)
+		return ret;
+
+	map = rtnl_dereference(dev->priomap);
+	map->priomap[cgrp->id] = prio;
+	return 0;
 }
 
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
-	int ret = -EINVAL;
 
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
-	if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
-		goto out;
-
-	ret = get_prioidx(&cs->prioidx);
-	if (ret < 0) {
-		pr_warn("No space in priority index array\n");
-		goto out;
-	}
-
 	return &cs->css;
-out:
-	kfree(cs);
-	return ERR_PTR(ret);
 }
 
-static void cgrp_destroy(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup *cgrp)
 {
-	struct cgroup_netprio_state *cs;
+	struct cgroup *parent = cgrp->parent;
 	struct net_device *dev;
-	struct netprio_map *map;
+	int ret = 0;
+
+	if (!parent)
+		return 0;
 
-	cs = cgrp_netprio_state(cgrp);
 	rtnl_lock();
+	/*
+	 * Inherit prios from the parent.  As all prios are set during
+	 * onlining, there is no need to clear them on offline.
+	 */
 	for_each_netdev(&init_net, dev) {
-		map = rtnl_dereference(dev->priomap);
-		if (map && cs->prioidx < map->priomap_len)
-			map->priomap[cs->prioidx] = 0;
+		u32 prio = netprio_prio(parent, dev);
+
+		ret = netprio_set_prio(cgrp, dev, prio);
+		if (ret)
+			break;
 	}
 	rtnl_unlock();
-	put_prioidx(cs->prioidx);
-	kfree(cs);
+	return ret;
+}
+
+static void cgrp_css_free(struct cgroup *cgrp)
+{
+	kfree(cgrp_netprio_state(cgrp));
 }
 
 static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
 {
-	return (u64)cgrp_netprio_state(cgrp)->prioidx;
+	return cgrp->id;
 }
 
 static int read_priomap(struct cgroup *cont, struct cftype *cft,
 			struct cgroup_map_cb *cb)
 {
 	struct net_device *dev;
-	u32 prioidx = cgrp_netprio_state(cont)->prioidx;
-	u32 priority;
-	struct netprio_map *map;
 
 	rcu_read_lock();
-	for_each_netdev_rcu(&init_net, dev) {
-		map = rcu_dereference(dev->priomap);
-		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
-		cb->fill(cb, dev->name, priority);
-	}
+	for_each_netdev_rcu(&init_net, dev)
+		cb->fill(cb, dev->name, netprio_prio(cont, dev));
 	rcu_read_unlock();
 	return 0;
 }
@@ -176,66 +193,24 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 			 const char *buffer)
 {
-	char *devname = kstrdup(buffer, GFP_KERNEL);
-	int ret = -EINVAL;
-	u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
-	unsigned long priority;
-	char *priostr;
+	char devname[IFNAMSIZ + 1];
 	struct net_device *dev;
-	struct netprio_map *map;
-
-	if (!devname)
-		return -ENOMEM;
-
-	/*
-	 * Minimally sized valid priomap string
-	 */
-	if (strlen(devname) < 3)
-		goto out_free_devname;
-
-	priostr = strstr(devname, " ");
-	if (!priostr)
-		goto out_free_devname;
-
-	/*
-	 *Separate the devname from the associated priority
-	 *and advance the priostr pointer to the priority value
-	 */
-	*priostr = '\0';
-	priostr++;
-
-	/*
-	 * If the priostr points to NULL, we're at the end of the passed
-	 * in string, and its not a valid write
-	 */
-	if (*priostr == '\0')
-		goto out_free_devname;
-
-	ret = kstrtoul(priostr, 10, &priority);
-	if (ret < 0)
-		goto out_free_devname;
+	u32 prio;
+	int ret;
 
-	ret = -ENODEV;
+	if (sscanf(buffer, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2)
+		return -EINVAL;
 
 	dev = dev_get_by_name(&init_net, devname);
 	if (!dev)
-		goto out_free_devname;
+		return -ENODEV;
 
 	rtnl_lock();
-	ret = write_update_netdev_table(dev);
-	if (ret < 0)
-		goto out_put_dev;
 
-	map = rtnl_dereference(dev->priomap);
-	if (map)
-		map->priomap[prioidx] = priority;
+	ret = netprio_set_prio(cgrp, dev, prio);
 
-out_put_dev:
 	rtnl_unlock();
 	dev_put(dev);
-
-out_free_devname:
-	kfree(devname);
 	return ret;
 }
 
@@ -276,22 +251,13 @@ static struct cftype ss_files[] = {
 
 struct cgroup_subsys net_prio_subsys = {
 	.name		= "net_prio",
-	.create		= cgrp_create,
-	.destroy	= cgrp_destroy,
+	.css_alloc	= cgrp_css_alloc,
+	.css_online	= cgrp_css_online,
+	.css_free	= cgrp_css_free,
 	.attach		= net_prio_attach,
 	.subsys_id	= net_prio_subsys_id,
 	.base_cftypes	= ss_files,
 	.module		= THIS_MODULE,
-
-	/*
-	 * net_prio has artificial limit on the number of cgroups and
-	 * disallows nesting making it impossible to co-mount it with other
-	 * hierarchical subsystems. Remove the artificially low PRIOIDX_SZ
-	 * limit and properly nest configuration such that children follow
-	 * their parents' configurations by default and are allowed to
-	 * override and remove the following.
-	 */
-	.broken_hierarchy = true,
 };
 
 static int netprio_device_event(struct notifier_block *unused,
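
The new extend_netdev_table() above sizes the per-device priomap by doubling the allocation until the flexible array can index target_idx. A minimal user-space sketch of that sizing loop, with a stand-in struct (assumption: the kernel's netprio_map is, in effect, a length-prefixed flexible array of u32 priorities):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct priomap {
	uint32_t len;
	uint32_t prio[];	/* flexible array, like netprio_map.priomap */
};

#define PRIOMAP_MIN_SZ 128

/* Return an allocation size (bytes) whose flex array can index target_idx,
 * growing in powers of two from PRIOMAP_MIN_SZ; 0 if the size wrapped. */
static size_t priomap_size_for(uint32_t target_idx)
{
	size_t sz = PRIOMAP_MIN_SZ;

	for (;;) {
		size_t len = (sz - offsetof(struct priomap, prio)) /
			     sizeof(uint32_t);
		if (len > target_idx)
			return sz;
		sz *= 2;
		if (sz < PRIOMAP_MIN_SZ)	/* overflowed */
			return 0;
	}
}

int main(void)
{
	printf("%zu\n", priomap_size_for(4096));	/* prints 32768 */
	return 0;
}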
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4007c1437fda..3f0636cd76cd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3004,7 +3004,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		skb_shinfo(nskb)->gso_size = pinfo->gso_size;
 		pinfo->gso_size = 0;
 		skb_header_release(p);
-		nskb->prev = p;
+		NAPI_GRO_CB(nskb)->last = p;
 
 		nskb->data_len += p->len;
 		nskb->truesize += p->truesize;
@@ -3030,8 +3030,8 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	p->prev->next = skb;
-	p->prev = skb;
+	NAPI_GRO_CB(p)->last->next = skb;
+	NAPI_GRO_CB(p)->last = skb;
 	skb_header_release(skb);
 
 done:
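
This change stops borrowing skb->prev to remember the tail of a GRO fragment chain and keeps the tail in the per-packet control block instead, preserving O(1) append. A toy sketch of the pattern with hypothetical stand-in types (not the kernel's):

#include <stddef.h>

struct pkt {
	struct pkt *next;
	struct pkt *last;	/* meaningful on the chain head only */
};

/* O(1) append via the cached tail; head->last starts out == head. */
static void chain_append(struct pkt *head, struct pkt *p)
{
	p->next = NULL;
	head->last->next = p;
	head->last = p;
}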
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 0c34bfabc11f..e23e16dc501d 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -44,6 +44,10 @@ struct inet_diag_entry {
 	u16 dport;
 	u16 family;
 	u16 userlocks;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr saddr_storage;	/* for IPv4-mapped-IPv6 addresses */
+	struct in6_addr daddr_storage;	/* for IPv4-mapped-IPv6 addresses */
+#endif
 };
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -428,25 +432,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
 				break;
 			}
 
-			if (cond->prefix_len == 0)
-				break;
-
 			if (op->code == INET_DIAG_BC_S_COND)
 				addr = entry->saddr;
 			else
 				addr = entry->daddr;
 
+			if (cond->family != AF_UNSPEC &&
+			    cond->family != entry->family) {
+				if (entry->family == AF_INET6 &&
+				    cond->family == AF_INET) {
+					if (addr[0] == 0 && addr[1] == 0 &&
+					    addr[2] == htonl(0xffff) &&
+					    bitstring_match(addr + 3,
+							    cond->addr,
+							    cond->prefix_len))
+						break;
+				}
+				yes = 0;
+				break;
+			}
+
+			if (cond->prefix_len == 0)
+				break;
 			if (bitstring_match(addr, cond->addr,
 					    cond->prefix_len))
 				break;
-			if (entry->family == AF_INET6 &&
-			    cond->family == AF_INET) {
-				if (addr[0] == 0 && addr[1] == 0 &&
-				    addr[2] == htonl(0xffff) &&
-				    bitstring_match(addr + 3, cond->addr,
-						    cond->prefix_len))
-					break;
-			}
 			yes = 0;
 			break;
 		}
@@ -509,6 +519,55 @@ static int valid_cc(const void *bc, int len, int cc)
 	return 0;
 }
 
+/* Validate an inet_diag_hostcond. */
+static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
+			   int *min_len)
+{
+	int addr_len;
+	struct inet_diag_hostcond *cond;
+
+	/* Check hostcond space. */
+	*min_len += sizeof(struct inet_diag_hostcond);
+	if (len < *min_len)
+		return false;
+	cond = (struct inet_diag_hostcond *)(op + 1);
+
+	/* Check address family and address length. */
+	switch (cond->family) {
+	case AF_UNSPEC:
+		addr_len = 0;
+		break;
+	case AF_INET:
+		addr_len = sizeof(struct in_addr);
+		break;
+	case AF_INET6:
+		addr_len = sizeof(struct in6_addr);
+		break;
+	default:
+		return false;
+	}
+	*min_len += addr_len;
+	if (len < *min_len)
+		return false;
+
+	/* Check prefix length (in bits) vs address length (in bytes). */
+	if (cond->prefix_len > 8 * addr_len)
+		return false;
+
+	return true;
+}
+
+/* Validate a port comparison operator. */
+static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
+					 int len, int *min_len)
+{
+	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
+	*min_len += sizeof(struct inet_diag_bc_op);
+	if (len < *min_len)
+		return false;
+	return true;
+}
+
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
 	const void *bc = bytecode;
@@ -516,29 +575,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 
 	while (len > 0) {
 		const struct inet_diag_bc_op *op = bc;
+		int min_len = sizeof(struct inet_diag_bc_op);
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
 		switch (op->code) {
-		case INET_DIAG_BC_AUTO:
 		case INET_DIAG_BC_S_COND:
 		case INET_DIAG_BC_D_COND:
+			if (!valid_hostcond(bc, len, &min_len))
+				return -EINVAL;
+			break;
 		case INET_DIAG_BC_S_GE:
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
-		case INET_DIAG_BC_JMP:
-			if (op->no < 4 || op->no > len + 4 || op->no & 3)
-				return -EINVAL;
-			if (op->no < len &&
-			    !valid_cc(bytecode, bytecode_len, len - op->no))
+			if (!valid_port_comparison(bc, len, &min_len))
 				return -EINVAL;
 			break;
+		case INET_DIAG_BC_AUTO:
+		case INET_DIAG_BC_JMP:
 		case INET_DIAG_BC_NOP:
 			break;
 		default:
 			return -EINVAL;
 		}
-		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+
+		if (op->code != INET_DIAG_BC_NOP) {
+			if (op->no < min_len || op->no > len + 4 || op->no & 3)
+				return -EINVAL;
+			if (op->no < len &&
+			    !valid_cc(bytecode, bytecode_len, len - op->no))
+				return -EINVAL;
+		}
+
+		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
 			return -EINVAL;
 		bc += op->yes;
 		len -= op->yes;
@@ -596,6 +665,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
 			      cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
+/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
+ * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
+ */
+static inline void inet_diag_req_addrs(const struct sock *sk,
+				       const struct request_sock *req,
+				       struct inet_diag_entry *entry)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6) {
+		if (req->rsk_ops->family == AF_INET6) {
+			entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
+			entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+		} else if (req->rsk_ops->family == AF_INET) {
+			ipv6_addr_set_v4mapped(ireq->loc_addr,
+					       &entry->saddr_storage);
+			ipv6_addr_set_v4mapped(ireq->rmt_addr,
+					       &entry->daddr_storage);
+			entry->saddr = entry->saddr_storage.s6_addr32;
+			entry->daddr = entry->daddr_storage.s6_addr32;
+		}
+	} else
+#endif
+	{
+		entry->saddr = &ireq->loc_addr;
+		entry->daddr = &ireq->rmt_addr;
+	}
+}
+
 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 			      struct request_sock *req,
 			      struct user_namespace *user_ns,
@@ -637,8 +736,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
 	r->idiag_inode = 0;
 #if IS_ENABLED(CONFIG_IPV6)
 	if (r->idiag_family == AF_INET6) {
-		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
-		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
+		struct inet_diag_entry entry;
+		inet_diag_req_addrs(sk, req, &entry);
+		memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
+		memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
 	}
 #endif
 
@@ -691,18 +792,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 				continue;
 
 			if (bc) {
-				entry.saddr =
-#if IS_ENABLED(CONFIG_IPV6)
-					(entry.family == AF_INET6) ?
-					inet6_rsk(req)->loc_addr.s6_addr32 :
-#endif
-					&ireq->loc_addr;
-				entry.daddr =
-#if IS_ENABLED(CONFIG_IPV6)
-					(entry.family == AF_INET6) ?
-					inet6_rsk(req)->rmt_addr.s6_addr32 :
-#endif
-					&ireq->rmt_addr;
+				inet_diag_req_addrs(sk, req, &entry);
 				entry.dport = ntohs(ireq->rmt_port);
 
 				if (!inet_diag_bc_run(bc, &entry))
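
The audit rewrite above gives every opcode a per-op min_len, so a jump offset can no longer land inside an operation's own payload (for example the port stored in the follow-on inet_diag_bc_op). A rough user-space sketch of the bounds rule being enforced (simplified; the real audit also walks jump targets with valid_cc()):

#include <stdbool.h>

/* A branch target is sane only if it skips at least the operation's own
 * payload (min_len), stays within the remaining bytecode (+4 marks "end"),
 * and is 4-byte aligned -- the same three checks the kernel now applies
 * to both op->yes and op->no. */
static bool target_ok(unsigned off, unsigned min_len, unsigned remaining)
{
	return off >= min_len && off <= remaining + 4 && (off & 3) == 0;
}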
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 448e68546827..8d5cc75dac88 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -707,28 +707,27 @@ EXPORT_SYMBOL(ip_defrag);
 
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
-	const struct iphdr *iph;
+	struct iphdr iph;
 	u32 len;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;
 
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
 		return skb;
 
-	iph = ip_hdr(skb);
-	if (iph->ihl < 5 || iph->version != 4)
+	if (iph.ihl < 5 || iph.version != 4)
 		return skb;
-	if (!pskb_may_pull(skb, iph->ihl*4))
-		return skb;
-	iph = ip_hdr(skb);
-	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < (iph->ihl * 4))
+
+	len = ntohs(iph.tot_len);
+	if (skb->len < len || len < (iph.ihl * 4))
 		return skb;
 
-	if (ip_is_fragment(ip_hdr(skb))) {
+	if (ip_is_fragment(&iph)) {
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (skb) {
+			if (!pskb_may_pull(skb, iph.ihl*4))
+				return skb;
 			if (pskb_trim_rcsum(skb, len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
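
The reworked ip_check_defrag() copies the IP header out of the possibly shared skb with skb_copy_bits() and validates the copy, deferring any skb modification (including pskb_may_pull()) until after skb_share_check(). A user-space sketch of the "validate a private copy before touching shared data" idea, using a hypothetical minimal header struct:

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>

struct iphdr_min {
	uint8_t  ver_ihl;	/* version (high nibble), ihl (low nibble) */
	uint8_t  tos;
	uint16_t tot_len;	/* network byte order */
};

/* Validate the header from a private copy; the shared buffer stays intact. */
static bool ip_header_ok(const uint8_t *buf, size_t buflen)
{
	struct iphdr_min iph;

	if (buflen < sizeof(iph))
		return false;
	memcpy(&iph, buf, sizeof(iph));	/* like skb_copy_bits() */

	if ((iph.ver_ihl >> 4) != 4 || (iph.ver_ihl & 0x0f) < 5)
		return false;
	return ntohs(iph.tot_len) >= (unsigned)(iph.ver_ihl & 0x0f) * 4;
}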
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 609ff98aeb47..181fc8234a52 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5645,7 +5645,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 		tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
 	if (data) { /* Retransmit unacked data in SYN */
-		tcp_retransmit_skb(sk, data);
+		tcp_for_write_queue_from(data, sk) {
+			if (data == tcp_send_head(sk) ||
+			    __tcp_retransmit_skb(sk, data))
+				break;
+		}
 		tcp_rearm_rto(sk);
 		return true;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2798706cb063..948ac275b9b5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
  * state updates are done by the caller. Returns non-zero if an
  * error occurred which prevented the send.
  */
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int cur_mss;
-	int err;
 
 	/* Inconslusive MTU probe */
 	if (icsk->icsk_mtup.probe_size) {
@@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
 						   GFP_ATOMIC);
-		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-			     -ENOBUFS;
+		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			      -ENOBUFS;
 	} else {
-		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
+}
+
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int err = __tcp_retransmit_skb(sk, skb);
 
 	if (err == 0) {
 		/* Update global TCP statistics. */
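
tcp_retransmit_skb() is split in the usual kernel style: a double-underscore worker that only transmits, plus a wrapper that layers the bookkeeping on top, which lets the fast-open path in tcp_input.c retransmit without double-counting statistics. A generic, self-contained sketch of that split with illustrative names:

#include <stddef.h>

static unsigned long frames_resent;	/* bookkeeping the worker skips */

/* Worker: does only the send; returns 0 or a negative error. */
static int __send_frame(const void *buf, size_t len)
{
	return (buf && len) ? 0 : -1;	/* stand-in for the real transmit */
}

/* Wrapper: keeps the old entry point's contract and adds accounting,
 * mirroring tcp_retransmit_skb() calling __tcp_retransmit_skb(). */
static int send_frame(const void *buf, size_t len)
{
	int err = __send_frame(buf, len);

	if (err == 0)
		frames_resent++;	/* only count successful sends */
	return err;
}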
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 496ce2cebcd7..a68c88cdec6e 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -183,6 +183,7 @@ static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
 	ircomm_tty_shutdown(self);
 
 	self->magic = 0;
+	tty_port_destroy(&self->port);
 	kfree(self);
 }
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 2ecde225ae60..31f06b633574 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,21 +34,25 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 		struct cgroup_cls_state, css);
 }
 
-static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
 {
 	struct cgroup_cls_state *cs;
 
 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
+	return &cs->css;
+}
 
+static int cgrp_css_online(struct cgroup *cgrp)
+{
 	if (cgrp->parent)
-		cs->classid = cgrp_cls_state(cgrp->parent)->classid;
-
-	return &cs->css;
+		cgrp_cls_state(cgrp)->classid =
+			cgrp_cls_state(cgrp->parent)->classid;
+	return 0;
 }
 
-static void cgrp_destroy(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup *cgrp)
 {
 	kfree(cgrp_cls_state(cgrp));
 }
@@ -75,20 +79,12 @@ static struct cftype ss_files[] = {
 
 struct cgroup_subsys net_cls_subsys = {
 	.name		= "net_cls",
-	.create		= cgrp_create,
-	.destroy	= cgrp_destroy,
+	.css_alloc	= cgrp_css_alloc,
+	.css_online	= cgrp_css_online,
+	.css_free	= cgrp_css_free,
 	.subsys_id	= net_cls_subsys_id,
 	.base_cftypes	= ss_files,
 	.module		= THIS_MODULE,
-
-	/*
-	 * While net_cls cgroup has the rudimentary hierarchy support of
-	 * inheriting the parent's classid on cgroup creation, it doesn't
-	 * properly propagates config changes in ancestors to their
-	 * descendents.  A child should follow the parent's configuration
-	 * but be allowed to override it.  Fix it and remove the following.
-	 */
-	.broken_hierarchy = true,
 };
 
 struct cls_cgroup_head {
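
Both cgroup controllers in this series move to the split css_alloc/css_online/css_free callbacks: allocation is side-effect free, and inheriting the parent's setting happens in the online step, once the parent is guaranteed to be visible. A compact sketch of that two-phase lifecycle with hypothetical types:

#include <stdlib.h>

struct state {
	struct state *parent;
	unsigned int classid;
};

/* Phase 1: allocate only; no dependence on the parent yet. */
static struct state *state_alloc(void)
{
	return calloc(1, sizeof(struct state));
}

/* Phase 2: the parent is now live; copy its configuration down. */
static int state_online(struct state *s, struct state *parent)
{
	s->parent = parent;
	if (parent)
		s->classid = parent->classid;
	return 0;
}

static void state_free(struct state *s)
{
	free(s);
}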