path: root/net
author    Olof Johansson <olof@lixom.net>    2016-06-19 01:57:48 -0400
committer Olof Johansson <olof@lixom.net>    2016-06-19 01:57:48 -0400
commit    58935f24a996cb55595c29dd5303bd9b778c8b00 (patch)
tree      a234f5328286290c64ca07d508224b19c1d3e89e /net
parent    9503427e916aea7ec2cc429504f82d7200ab4bcd (diff)
parent    6b41d44862e8f3a4b95102c6ff6cad3fccc7994b (diff)
Merge tag 'omap-for-v4.7/fixes-powedomain' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
Fixes for omaps for v4.7-rc cycle:

- Fix dra7 for hardware issues limiting L4Per and L3init power domains
  to on state. Without this the devices may not work correctly after
  some time of use because of asymmetric aging. And related to this,
  let's also remove the unusable states.

- Always select omap interconnect for am43x as otherwise the am43x only
  configurations will not boot properly. This can happen easily for any
  product kernels that leave out other SoCs to save memory.

- Fix DSS PLL2 addresses that have gone unused for now

- Select erratum 430973 for omap3, this is now safe to do and can save
  quite a bit of debugging time for people who may have left it out.

* tag 'omap-for-v4.7/fixes-powedomain' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: OMAP: DRA7: powerdomain data: Remove unused pwrsts_mem_ret
  ARM: OMAP: DRA7: powerdomain data: Remove unused pwrsts_logic_ret
  ARM: OMAP: DRA7: powerdomain data: Set L3init and L4per to ON
  ARM: OMAP2+: Select OMAP_INTERCONNECT for SOC_AM43XX
  ARM: dts: DRA74x: fix DSS PLL2 addresses
  ARM: OMAP2: Enable Errata 430973 for OMAP3
  + Linux 4.7-rc2

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 5
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_dev.c | 20
-rw-r--r--  net/atm/signaling.c | 2
-rw-r--r--  net/atm/svc.c | 4
-rw-r--r--  net/ceph/osd_client.c | 51
-rw-r--r--  net/ceph/osdmap.c | 4
-rw-r--r--  net/core/hwbm.c | 3
-rw-r--r--  net/core/pktgen.c | 8
-rw-r--r--  net/ieee802154/nl802154.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 8
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 4
-rw-r--r--  net/ipv6/Kconfig | 9
-rw-r--r--  net/ipv6/Makefile | 2
-rw-r--r--  net/ipv6/fou6.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 3
-rw-r--r--  net/l2tp/l2tp_ip6.c | 12
-rw-r--r--  net/lapb/lapb_in.c | 5
-rw-r--r--  net/lapb/lapb_out.c | 4
-rw-r--r--  net/lapb/lapb_subr.c | 14
-rw-r--r--  net/openvswitch/actions.c | 20
-rw-r--r--  net/sched/act_police.c | 11
-rw-r--r--  net/sched/sch_api.c | 4
-rw-r--r--  net/sched/sch_htb.c | 13
-rw-r--r--  net/sctp/sctp_diag.c | 3
-rw-r--r--  net/sctp/socket.c | 1
-rw-r--r--  net/tipc/netlink_compat.c | 111
27 files changed, 230 insertions, 99 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a1e273af6fc8..82a116ba590e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
290 if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) 290 if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
291 return; 291 return;
292 292
293 /* vlan continues to inherit address of lower device */
294 if (vlan_dev_inherit_address(vlandev, dev))
295 goto out;
296
293 /* vlan address was different from the old address and is equal to 297 /* vlan address was different from the old address and is equal to
294 * the new address */ 298 * the new address */
295 if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && 299 if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
302 !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) 306 !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
303 dev_uc_add(dev, vlandev->dev_addr); 307 dev_uc_add(dev, vlandev->dev_addr);
304 308
309out:
305 ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); 310 ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
306} 311}
307 312
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09ab98..cc1557978066 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
109void vlan_setup(struct net_device *dev); 109void vlan_setup(struct net_device *dev);
110int register_vlan_dev(struct net_device *dev); 110int register_vlan_dev(struct net_device *dev);
111void unregister_vlan_dev(struct net_device *dev, struct list_head *head); 111void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
112bool vlan_dev_inherit_address(struct net_device *dev,
113 struct net_device *real_dev);
112 114
113static inline u32 vlan_get_ingress_priority(struct net_device *dev, 115static inline u32 vlan_get_ingress_priority(struct net_device *dev,
114 u16 vlan_tci) 116 u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e7e62570bdb8..86ae75b77390 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
245 strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); 245 strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
246} 246}
247 247
248bool vlan_dev_inherit_address(struct net_device *dev,
249 struct net_device *real_dev)
250{
251 if (dev->addr_assign_type != NET_ADDR_STOLEN)
252 return false;
253
254 ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
255 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
256 return true;
257}
258
248static int vlan_dev_open(struct net_device *dev) 259static int vlan_dev_open(struct net_device *dev)
249{ 260{
250 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 261 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev)
255 !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) 266 !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
256 return -ENETDOWN; 267 return -ENETDOWN;
257 268
258 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { 269 if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
270 !vlan_dev_inherit_address(dev, real_dev)) {
259 err = dev_uc_add(real_dev, dev->dev_addr); 271 err = dev_uc_add(real_dev, dev->dev_addr);
260 if (err < 0) 272 if (err < 0)
261 goto out; 273 goto out;
@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev)
560 /* ipv6 shared card related stuff */ 572 /* ipv6 shared card related stuff */
561 dev->dev_id = real_dev->dev_id; 573 dev->dev_id = real_dev->dev_id;
562 574
563 if (is_zero_ether_addr(dev->dev_addr)) 575 if (is_zero_ether_addr(dev->dev_addr)) {
564 eth_hw_addr_inherit(dev, real_dev); 576 ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
577 dev->addr_assign_type = NET_ADDR_STOLEN;
578 }
565 if (is_zero_ether_addr(dev->broadcast)) 579 if (is_zero_ether_addr(dev->broadcast))
566 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 580 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
567 581
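Note on the 8021q change above: the VLAN device records, via NET_ADDR_STOLEN, that its MAC address was copied from the lower device, so vlan_sync_address() can later follow address changes on that device without overwriting an address the administrator set explicitly. A minimal standalone sketch of the same decision logic, with hypothetical types rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ADDR_SET    0   /* address explicitly configured by the admin */
#define ADDR_STOLEN 1   /* address copied from the lower device */

struct dev {
    unsigned char addr[6];
    int addr_assign_type;
};

/* Re-copy the lower device's MAC only if ours was inherited, never if it
 * was explicitly configured. Mirrors the idea of vlan_dev_inherit_address(). */
static bool inherit_address(struct dev *vlan, const struct dev *lower)
{
    if (vlan->addr_assign_type != ADDR_STOLEN)
        return false;
    memcpy(vlan->addr, lower->addr, 6);
    return true;
}

int main(void)
{
    struct dev lower = { {0x02, 0, 0, 0, 0, 1}, ADDR_SET };
    struct dev vlan  = { {0}, ADDR_STOLEN };

    inherit_address(&vlan, &lower);   /* vlan now tracks the lower MAC */
    lower.addr[5] = 0x02;             /* lower device changes its address */
    printf("followed change: %s\n",
           inherit_address(&vlan, &lower) ? "yes" : "no");
    return 0;
}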
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4fd6af47383a..adb6e3d21b1e 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -124,7 +124,7 @@ as_indicate_complete:
124 break; 124 break;
125 case as_addparty: 125 case as_addparty:
126 case as_dropparty: 126 case as_dropparty:
127 sk->sk_err_soft = msg->reply; 127 sk->sk_err_soft = -msg->reply;
128 /* < 0 failure, otherwise ep_ref */ 128 /* < 0 failure, otherwise ep_ref */
129 clear_bit(ATM_VF_WAITING, &vcc->flags); 129 clear_bit(ATM_VF_WAITING, &vcc->flags);
130 break; 130 break;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3fa0a9ee98d1..878563a8354d 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
546 schedule(); 546 schedule();
547 } 547 }
548 finish_wait(sk_sleep(sk), &wait); 548 finish_wait(sk_sleep(sk), &wait);
549 error = xchg(&sk->sk_err_soft, 0); 549 error = -xchg(&sk->sk_err_soft, 0);
550out: 550out:
551 release_sock(sk); 551 release_sock(sk);
552 return error; 552 return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
573 error = -EUNATCH; 573 error = -EUNATCH;
574 goto out; 574 goto out;
575 } 575 }
576 error = xchg(&sk->sk_err_soft, 0); 576 error = -xchg(&sk->sk_err_soft, 0);
577out: 577out:
578 release_sock(sk); 578 release_sock(sk);
579 return error; 579 return error;
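The two ATM hunks fix a sign convention: sk->sk_err_soft holds a positive errno value, while msg->reply carries a negative errno on failure and the value returned to the caller must again be negative. A toy illustration of the store-positive / return-negative pattern, with a simplified single-threaded stand-in for xchg():

#include <stdio.h>

static int err_soft;                 /* stands in for sk->sk_err_soft */

static int xchg_int(int *p, int v)   /* simplified stand-in for xchg() */
{
    int old = *p;
    *p = v;
    return old;
}

static void record_reply(int reply)  /* reply < 0 on failure, e.g. -111 */
{
    err_soft = -reply;               /* store as positive errno */
}

static int consume_error(void)
{
    return -xchg_int(&err_soft, 0);  /* report as negative errno, clear it */
}

int main(void)
{
    record_reply(-111);              /* ECONNREFUSED-style failure */
    printf("%d\n", consume_error()); /* prints -111 */
    return 0;
}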
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0160d7d09a1e..89469592076c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
1276 const struct ceph_osd_request_target *t, 1276 const struct ceph_osd_request_target *t,
1277 struct ceph_pg_pool_info *pi) 1277 struct ceph_pg_pool_info *pi)
1278{ 1278{
1279 bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); 1279 bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1280 bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || 1280 bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1281 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 1281 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1282 __pool_full(pi); 1282 __pool_full(pi);
1283 1283
1284 WARN_ON(pi->id != t->base_oloc.pool); 1284 WARN_ON(pi->id != t->base_oloc.pool);
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1303 bool force_resend = false; 1303 bool force_resend = false;
1304 bool need_check_tiering = false; 1304 bool need_check_tiering = false;
1305 bool need_resend = false; 1305 bool need_resend = false;
1306 bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap, 1306 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1307 CEPH_OSDMAP_SORTBITWISE);
1308 enum calc_target_result ct_res; 1307 enum calc_target_result ct_res;
1309 int ret; 1308 int ret;
1310 1309
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1540 */ 1539 */
1541 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 1540 msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1542 1541
1543 dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__, 1542 dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1544 req, req->r_t.target_oid.name_len, req->r_t.target_oid.name, 1543 req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1545 req->r_t.target_oid.name_len, msg->front.iov_len, data_len); 1544 msg->front.iov_len, data_len);
1546} 1545}
1547 1546
1548/* 1547/*
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
1590 verify_osdc_locked(osdc); 1589 verify_osdc_locked(osdc);
1591 WARN_ON(!osdc->osdmap->epoch); 1590 WARN_ON(!osdc->osdmap->epoch);
1592 1591
1593 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 1592 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1594 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || 1593 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1595 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { 1594 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1596 dout("%s osdc %p continuous\n", __func__, osdc); 1595 dout("%s osdc %p continuous\n", __func__, osdc);
1597 continuous = true; 1596 continuous = true;
1598 } else { 1597 } else {
@@ -1629,19 +1628,19 @@ again:
1629 } 1628 }
1630 1629
1631 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 1630 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1632 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { 1631 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1633 dout("req %p pausewr\n", req); 1632 dout("req %p pausewr\n", req);
1634 req->r_t.paused = true; 1633 req->r_t.paused = true;
1635 maybe_request_map(osdc); 1634 maybe_request_map(osdc);
1636 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 1635 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1637 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { 1636 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1638 dout("req %p pauserd\n", req); 1637 dout("req %p pauserd\n", req);
1639 req->r_t.paused = true; 1638 req->r_t.paused = true;
1640 maybe_request_map(osdc); 1639 maybe_request_map(osdc);
1641 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 1640 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1642 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 1641 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1643 CEPH_OSD_FLAG_FULL_FORCE)) && 1642 CEPH_OSD_FLAG_FULL_FORCE)) &&
1644 (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 1643 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1645 pool_full(osdc, req->r_t.base_oloc.pool))) { 1644 pool_full(osdc, req->r_t.base_oloc.pool))) {
1646 dout("req %p full/pool_full\n", req); 1645 dout("req %p full/pool_full\n", req);
1647 pr_warn_ratelimited("FULL or reached pool quota\n"); 1646 pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2280 struct ceph_osd_request *req = lreq->ping_req; 2279 struct ceph_osd_request *req = lreq->ping_req;
2281 struct ceph_osd_req_op *op = &req->r_ops[0]; 2280 struct ceph_osd_req_op *op = &req->r_ops[0];
2282 2281
2283 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { 2282 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2284 dout("%s PAUSERD\n", __func__); 2283 dout("%s PAUSERD\n", __func__);
2285 return; 2284 return;
2286 } 2285 }
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2893 dout("req %p tid %llu cb\n", req, req->r_tid); 2892 dout("req %p tid %llu cb\n", req, req->r_tid);
2894 __complete_request(req); 2893 __complete_request(req);
2895 } 2894 }
2895 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2896 complete_all(&req->r_safe_completion);
2897 ceph_osdc_put_request(req);
2896 } else { 2898 } else {
2897 if (req->r_unsafe_callback) { 2899 if (req->r_unsafe_callback) {
2898 dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); 2900 dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2901 WARN_ON(1); 2903 WARN_ON(1);
2902 } 2904 }
2903 } 2905 }
2904 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2905 complete_all(&req->r_safe_completion);
2906 2906
2907 ceph_osdc_put_request(req);
2908 return; 2907 return;
2909 2908
2910fail_request: 2909fail_request:
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
3050 bool skipped_map = false; 3049 bool skipped_map = false;
3051 bool was_full; 3050 bool was_full;
3052 3051
3053 was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); 3052 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3054 set_pool_was_full(osdc); 3053 set_pool_was_full(osdc);
3055 3054
3056 if (incremental) 3055 if (incremental)
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
3088 osdc->osdmap = newmap; 3087 osdc->osdmap = newmap;
3089 } 3088 }
3090 3089
3091 was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); 3090 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3092 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, 3091 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3093 need_resend, need_resend_linger); 3092 need_resend, need_resend_linger);
3094 3093
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3174 if (ceph_check_fsid(osdc->client, &fsid) < 0) 3173 if (ceph_check_fsid(osdc->client, &fsid) < 0)
3175 goto bad; 3174 goto bad;
3176 3175
3177 was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); 3176 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3178 was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || 3177 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3179 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 3178 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3180 have_pool_full(osdc); 3179 have_pool_full(osdc);
3181 3180
3182 /* incremental maps */ 3181 /* incremental maps */
@@ -3238,9 +3237,9 @@ done:
3238 * we find out when we are no longer full and stop returning 3237 * we find out when we are no longer full and stop returning
3239 * ENOSPC. 3238 * ENOSPC.
3240 */ 3239 */
3241 pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); 3240 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3242 pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || 3241 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3243 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || 3242 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3244 have_pool_full(osdc); 3243 have_pool_full(osdc);
3245 if (was_pauserd || was_pausewr || pauserd || pausewr) 3244 if (was_pauserd || was_pausewr || pauserd || pausewr)
3246 maybe_request_map(osdc); 3245 maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index cde52e94732f..03062bb763b3 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
1778 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, 1778 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
1779 oid->name_len); 1779 oid->name_len);
1780 1780
1781 dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len, 1781 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
1782 oid->name, raw_pgid->pool, raw_pgid->seed); 1782 raw_pgid->pool, raw_pgid->seed);
1783 return 0; 1783 return 0;
1784} 1784}
1785EXPORT_SYMBOL(ceph_object_locator_to_pg); 1785EXPORT_SYMBOL(ceph_object_locator_to_pg);
diff --git a/net/core/hwbm.c b/net/core/hwbm.c
index 941c28486896..2cab489ae62e 100644
--- a/net/core/hwbm.c
+++ b/net/core/hwbm.c
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
55 spin_lock_irqsave(&bm_pool->lock, flags); 55 spin_lock_irqsave(&bm_pool->lock, flags);
56 if (bm_pool->buf_num == bm_pool->size) { 56 if (bm_pool->buf_num == bm_pool->size) {
57 pr_warn("pool already filled\n"); 57 pr_warn("pool already filled\n");
58 spin_unlock_irqrestore(&bm_pool->lock, flags);
58 return bm_pool->buf_num; 59 return bm_pool->buf_num;
59 } 60 }
60 61
61 if (buf_num + bm_pool->buf_num > bm_pool->size) { 62 if (buf_num + bm_pool->buf_num > bm_pool->size) {
62 pr_warn("cannot allocate %d buffers for pool\n", 63 pr_warn("cannot allocate %d buffers for pool\n",
63 buf_num); 64 buf_num);
65 spin_unlock_irqrestore(&bm_pool->lock, flags);
64 return 0; 66 return 0;
65 } 67 }
66 68
67 if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { 69 if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
68 pr_warn("Adding %d buffers to the %d current buffers will overflow\n", 70 pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
69 buf_num, bm_pool->buf_num); 71 buf_num, bm_pool->buf_num);
72 spin_unlock_irqrestore(&bm_pool->lock, flags);
70 return 0; 73 return 0;
71 } 74 }
72 75
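The hwbm fix adds the missing spin_unlock_irqrestore() so the pool lock is released on every exit path, not just the success path. The same rule in a compact userspace sketch with a pthread mutex and a hypothetical pool type:

#include <pthread.h>

struct pool {
    pthread_mutex_t lock;
    unsigned int buf_num, size;
};

/* Every return path taken after the lock is acquired must drop it. */
static int pool_add(struct pool *p, unsigned int n)
{
    int ret;

    pthread_mutex_lock(&p->lock);
    if (p->buf_num == p->size || p->buf_num + n > p->size) {
        ret = -1;                    /* reject, but still unlock below */
        goto out;
    }
    p->buf_num += n;
    ret = (int)p->buf_num;
out:
    pthread_mutex_unlock(&p->lock);
    return ret;
}

int main(void)
{
    struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, 4 };
    return pool_add(&p, 8) < 0 ? 0 : 1;   /* oversized add is rejected, lock released */
}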
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8604ae245960..8b02df0d354d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2245 hrtimer_set_expires(&t.timer, spin_until); 2245 hrtimer_set_expires(&t.timer, spin_until);
2246 2246
2247 remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); 2247 remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
2248 if (remaining <= 0) { 2248 if (remaining <= 0)
2249 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); 2249 goto out;
2250 return;
2251 }
2252 2250
2253 start_time = ktime_get(); 2251 start_time = ktime_get();
2254 if (remaining < 100000) { 2252 if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2273 } 2271 }
2274 2272
2275 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); 2273 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2274out:
2276 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); 2275 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2276 destroy_hrtimer_on_stack(&t.timer);
2277} 2277}
2278 2278
2279static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) 2279static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index ca207dbf673b..116187b5c267 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
1289 nl802154_dev_addr_policy)) 1289 nl802154_dev_addr_policy))
1290 return -EINVAL; 1290 return -EINVAL;
1291 1291
1292 if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && 1292 if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
1293 !attrs[NL802154_DEV_ADDR_ATTR_MODE] && 1293 !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
1294 !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || 1294 !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
1295 attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) 1295 attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
1296 return -EINVAL; 1296 return -EINVAL;
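The nl802154 change replaces && with || so that a missing PAN ID or a missing MODE attribute is rejected on its own; with the old && logic the check only failed when every attribute was absent. A standalone sketch of the corrected predicate (plain booleans instead of netlink attributes):

#include <stdbool.h>
#include <stdio.h>

/* Corrected check: PAN ID and MODE are mandatory, and at least one of the
 * short or extended address attributes must be present. */
static bool dev_addr_attrs_valid(bool has_pan_id, bool has_mode,
                                 bool has_short, bool has_extended)
{
    if (!has_pan_id || !has_mode || !(has_short || has_extended))
        return false;
    return true;
}

int main(void)
{
    /* With the old "&&" logic this bogus request (no PAN ID) passed. */
    printf("%d\n", dev_addr_attrs_valid(false, true, true, false)); /* 0 */
    printf("%d\n", dev_addr_attrs_valid(true, true, false, true));  /* 1 */
    return 0;
}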
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 377424ea17a4..d39e9e47a26e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
1681 */ 1681 */
1682 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); 1682 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1683 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); 1683 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1684
1685 /* Default values for sysctl-controlled parameters.
1686 * We set them here, in case sysctl is not compiled.
1687 */
1688 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1689 net->ipv4.sysctl_ip_dynaddr = 0;
1690 net->ipv4.sysctl_ip_early_demux = 1;
1691
1684 return 0; 1692 return 0;
1685} 1693}
1686 1694
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index bb0419582b8d..1cb67de106fe 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
999 if (!net->ipv4.sysctl_local_reserved_ports) 999 if (!net->ipv4.sysctl_local_reserved_ports)
1000 goto err_ports; 1000 goto err_ports;
1001 1001
1002 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1003 net->ipv4.sysctl_ip_dynaddr = 0;
1004 net->ipv4.sysctl_ip_early_demux = 1;
1005
1006 return 0; 1002 return 0;
1007 1003
1008err_ports: 1004err_ports:
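Taken together, the af_inet.c and sysctl_net_ipv4.c hunks move the per-namespace defaults out of the sysctl init path, matching the comment added in the diff: the values must exist even when sysctl support is not compiled in, because other code reads them unconditionally. A minimal model of that pitfall; the CONFIG_SYSCTL macro here is purely illustrative:

#include <stdio.h>

/* #define CONFIG_SYSCTL 1 */         /* toggle to model a sysctl-less build */

struct netns { int ip_default_ttl; };

static void init_net_core(struct netns *n)
{
    n->ip_default_ttl = 64;            /* always runs: safe default */
}

static void init_net_sysctl(struct netns *n)
{
#ifdef CONFIG_SYSCTL
    (void)n;                           /* register tables; before the fix the
                                        * defaults were only set here */
#else
    (void)n;                           /* compiled out: must not be the only init */
#endif
}

int main(void)
{
    struct netns n = { 0 };
    init_net_core(&n);
    init_net_sysctl(&n);
    printf("ttl=%d\n", n.ip_default_ttl);  /* 64 regardless of CONFIG_SYSCTL */
    return 0;
}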
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 3f8411328de5..2343e4f2e0bf 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -232,6 +232,15 @@ config IPV6_GRE
232 232
233 Saying M here will produce a module called ip6_gre. If unsure, say N. 233 Saying M here will produce a module called ip6_gre. If unsure, say N.
234 234
235config IPV6_FOU
236 tristate
237 default NET_FOU && IPV6
238
239config IPV6_FOU_TUNNEL
240 tristate
241 default NET_FOU_IP_TUNNELS && IPV6_FOU
242 select IPV6_TUNNEL
243
235config IPV6_MULTIPLE_TABLES 244config IPV6_MULTIPLE_TABLES
236 bool "IPv6: Multiple Routing Tables" 245 bool "IPv6: Multiple Routing Tables"
237 select FIB_RULES 246 select FIB_RULES
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 7ec3129c9ace..6d8ea099213e 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
42obj-$(CONFIG_IPV6_SIT) += sit.o 42obj-$(CONFIG_IPV6_SIT) += sit.o
43obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 43obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
44obj-$(CONFIG_IPV6_GRE) += ip6_gre.o 44obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
45obj-$(CONFIG_NET_FOU) += fou6.o 45obj-$(CONFIG_IPV6_FOU) += fou6.o
46 46
47obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o 47obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
48obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) 48obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index c972d0b52579..9ea249b9451e 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
69} 69}
70EXPORT_SYMBOL(gue6_build_header); 70EXPORT_SYMBOL(gue6_build_header);
71 71
72#ifdef CONFIG_NET_FOU_IP_TUNNELS 72#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
73 73
74static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { 74static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
75 .encap_hlen = fou_encap_hlen, 75 .encap_hlen = fou_encap_hlen,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index af503f518278..f4ac2842d4d9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
712 fl6->daddr = p->raddr; 712 fl6->daddr = p->raddr;
713 fl6->flowi6_oif = p->link; 713 fl6->flowi6_oif = p->link;
714 fl6->flowlabel = 0; 714 fl6->flowlabel = 0;
715 fl6->flowi6_proto = IPPROTO_GRE;
715 716
716 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) 717 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
717 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; 718 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
1027 1028
1028 dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1029 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1029 dev->mtu = ETH_DATA_LEN - t_hlen; 1030 dev->mtu = ETH_DATA_LEN - t_hlen;
1031 if (dev->type == ARPHRD_ETHER)
1032 dev->mtu -= ETH_HLEN;
1030 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1033 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1031 dev->mtu -= 8; 1034 dev->mtu -= 8;
1032 1035
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c6f5df1bed12..6c54e03fe9c1 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
128 */ 128 */
129static int l2tp_ip6_recv(struct sk_buff *skb) 129static int l2tp_ip6_recv(struct sk_buff *skb)
130{ 130{
131 struct net *net = dev_net(skb->dev);
131 struct sock *sk; 132 struct sock *sk;
132 u32 session_id; 133 u32 session_id;
133 u32 tunnel_id; 134 u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
154 } 155 }
155 156
156 /* Ok, this is a data packet. Lookup the session. */ 157 /* Ok, this is a data packet. Lookup the session. */
157 session = l2tp_session_find(&init_net, NULL, session_id); 158 session = l2tp_session_find(net, NULL, session_id);
158 if (session == NULL) 159 if (session == NULL)
159 goto discard; 160 goto discard;
160 161
@@ -188,14 +189,14 @@ pass_up:
188 goto discard; 189 goto discard;
189 190
190 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 191 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
191 tunnel = l2tp_tunnel_find(&init_net, tunnel_id); 192 tunnel = l2tp_tunnel_find(net, tunnel_id);
192 if (tunnel != NULL) 193 if (tunnel != NULL)
193 sk = tunnel->sock; 194 sk = tunnel->sock;
194 else { 195 else {
195 struct ipv6hdr *iph = ipv6_hdr(skb); 196 struct ipv6hdr *iph = ipv6_hdr(skb);
196 197
197 read_lock_bh(&l2tp_ip6_lock); 198 read_lock_bh(&l2tp_ip6_lock);
198 sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, 199 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
199 0, tunnel_id); 200 0, tunnel_id);
200 read_unlock_bh(&l2tp_ip6_lock); 201 read_unlock_bh(&l2tp_ip6_lock);
201 } 202 }
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
263 struct inet_sock *inet = inet_sk(sk); 264 struct inet_sock *inet = inet_sk(sk);
264 struct ipv6_pinfo *np = inet6_sk(sk); 265 struct ipv6_pinfo *np = inet6_sk(sk);
265 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; 266 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
267 struct net *net = sock_net(sk);
266 __be32 v4addr = 0; 268 __be32 v4addr = 0;
267 int addr_type; 269 int addr_type;
268 int err; 270 int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
286 288
287 err = -EADDRINUSE; 289 err = -EADDRINUSE;
288 read_lock_bh(&l2tp_ip6_lock); 290 read_lock_bh(&l2tp_ip6_lock);
289 if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, 291 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
290 sk->sk_bound_dev_if, addr->l2tp_conn_id)) 292 sk->sk_bound_dev_if, addr->l2tp_conn_id))
291 goto out_in_use; 293 goto out_in_use;
292 read_unlock_bh(&l2tp_ip6_lock); 294 read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
456 return 0; 458 return 0;
457 459
458drop: 460drop:
459 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); 461 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
460 kfree_skb(skb); 462 kfree_skb(skb);
461 return -1; 463 return -1;
462} 464}
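The l2tp_ip6 hunks replace hard-coded &init_net with the namespace derived from the receiving device or from the socket, so sessions created in one network namespace are not looked up (or miscounted) in another. A toy model of namespace-keyed lookup, using hypothetical types rather than the l2tp API:

#include <stdio.h>
#include <stddef.h>

struct net { int id; };
struct session { struct net *net; unsigned int session_id; };

/* Lookup must match both the session id and the namespace it was created in. */
static struct session *find_session(struct net *net, unsigned int id,
                                    struct session *tbl, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (tbl[i].net == net && tbl[i].session_id == id)
            return &tbl[i];
    return NULL;
}

int main(void)
{
    struct net init_net = { 0 }, other_net = { 1 };
    struct session sessions[1] = { { &other_net, 42 } };

    /* Searching in init_net misses a session that lives in other_net. */
    printf("%s\n", find_session(&init_net,  42, sessions, 1) ? "found" : "miss");
    printf("%s\n", find_session(&other_net, 42, sessions, 1) ? "found" : "miss");
    return 0;
}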
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 5dba899131b3..182470847fcf 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
444 break; 444 break;
445 445
446 case LAPB_FRMR: 446 case LAPB_FRMR:
447 lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", 447 lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
448 lapb->dev, frame->pf, 448 lapb->dev, frame->pf,
449 skb->data[0], skb->data[1], skb->data[2], 449 skb->data);
450 skb->data[3], skb->data[4]);
451 lapb_establish_data_link(lapb); 450 lapb_establish_data_link(lapb);
452 lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); 451 lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
453 lapb_requeue_frames(lapb); 452 lapb_requeue_frames(lapb);
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index ba4d015bd1a6..482c94d9d958 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
148 } 148 }
149 } 149 }
150 150
151 lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", 151 lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
152 lapb->dev, lapb->state,
153 skb->data[0], skb->data[1], skb->data[2]);
154 152
155 if (!lapb_data_transmit(lapb, skb)) 153 if (!lapb_data_transmit(lapb, skb))
156 kfree_skb(skb); 154 kfree_skb(skb);
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 9d0a426eccbb..3c1914df641f 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
113{ 113{
114 frame->type = LAPB_ILLEGAL; 114 frame->type = LAPB_ILLEGAL;
115 115
116 lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", 116 lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
117 lapb->dev, lapb->state,
118 skb->data[0], skb->data[1], skb->data[2]);
119 117
120 /* We always need to look at 2 bytes, sometimes we need 118 /* We always need to look at 2 bytes, sometimes we need
121 * to look at 3 and those cases are handled below. 119 * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
284 dptr++; 282 dptr++;
285 *dptr++ = lapb->frmr_type; 283 *dptr++ = lapb->frmr_type;
286 284
287 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", 285 lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
288 lapb->dev, lapb->state, 286 lapb->dev, lapb->state,
289 skb->data[1], skb->data[2], skb->data[3], 287 &skb->data[1]);
290 skb->data[4], skb->data[5]);
291 } else { 288 } else {
292 dptr = skb_put(skb, 4); 289 dptr = skb_put(skb, 4);
293 *dptr++ = LAPB_FRMR; 290 *dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
299 dptr++; 296 dptr++;
300 *dptr++ = lapb->frmr_type; 297 *dptr++ = lapb->frmr_type;
301 298
302 lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", 299 lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
303 lapb->dev, lapb->state, skb->data[1], 300 lapb->dev, lapb->state, &skb->data[1]);
304 skb->data[2], skb->data[3]);
305 } 301 }
306 302
307 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); 303 lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
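The lapb changes replace hand-written lists of %02X conversions with the kernel's %Nph printk extension, which prints N bytes of a buffer as space-separated hex (so "%5ph" on {0x01, 0x3f, ...} yields "01 3f ..."). A small userspace equivalent, for illustration only:

#include <stdio.h>

/* Print len bytes as space-separated hex, roughly what the kernel's
 * "%*ph" / "%5ph" printk extension does for small buffers. */
static void print_hex_bytes(const unsigned char *buf, int len)
{
    for (int i = 0; i < len; i++)
        printf(i ? " %02x" : "%02x", buf[i]);
    putchar('\n');
}

int main(void)
{
    unsigned char frame[] = { 0x01, 0x3f, 0x97, 0x00, 0x05 };
    print_hex_bytes(frame, 5);   /* "01 3f 97 00 05" */
    return 0;
}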
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 879185fe183f..9a3eb7a0ebf4 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
137 return !!key->eth.type; 137 return !!key->eth.type;
138} 138}
139 139
140static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
141 __be16 ethertype)
142{
143 if (skb->ip_summed == CHECKSUM_COMPLETE) {
144 __be16 diff[] = { ~(hdr->h_proto), ethertype };
145
146 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
147 ~skb->csum);
148 }
149
150 hdr->h_proto = ethertype;
151}
152
140static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, 153static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
141 const struct ovs_action_push_mpls *mpls) 154 const struct ovs_action_push_mpls *mpls)
142{ 155{
143 __be32 *new_mpls_lse; 156 __be32 *new_mpls_lse;
144 struct ethhdr *hdr;
145 157
146 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ 158 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
147 if (skb->encapsulation) 159 if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
160 172
161 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); 173 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
162 174
163 hdr = eth_hdr(skb); 175 update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
164 hdr->h_proto = mpls->mpls_ethertype;
165
166 if (!skb->inner_protocol) 176 if (!skb->inner_protocol)
167 skb_set_inner_protocol(skb, skb->protocol); 177 skb_set_inner_protocol(skb, skb->protocol);
168 skb->protocol = mpls->mpls_ethertype; 178 skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
193 * field correctly in the presence of VLAN tags. 203 * field correctly in the presence of VLAN tags.
194 */ 204 */
195 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); 205 hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
196 hdr->h_proto = ethertype; 206 update_ethertype(skb, hdr, ethertype);
197 if (eth_p_mpls(skb->protocol)) 207 if (eth_p_mpls(skb->protocol))
198 skb->protocol = ethertype; 208 skb->protocol = ethertype;
199 209
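The new update_ethertype() helper keeps CHECKSUM_COMPLETE packets consistent by folding the old and new h_proto values into skb->csum instead of recomputing the checksum over the whole frame. The same incremental ones'-complement idea (in the spirit of RFC 1624), shown standalone and without the kernel's csum helpers:

#include <stdint.h>
#include <stdio.h>

/* 16-bit ones'-complement sum over a buffer of 16-bit words. */
static uint16_t csum16(const uint16_t *w, int n)
{
    uint32_t s = 0;
    for (int i = 0; i < n; i++)
        s += w[i];
    while (s >> 16)
        s = (s & 0xffff) + (s >> 16);   /* fold end-around carries */
    return (uint16_t)s;
}

/* Incremental update: one word changed from oldw to neww, adjust the sum
 * without touching the rest of the data. */
static uint16_t csum16_replace(uint16_t sum, uint16_t oldw, uint16_t neww)
{
    uint32_t s = sum;
    s += (uint16_t)~oldw;               /* subtract old in ones' complement */
    s += neww;                          /* add new */
    while (s >> 16)
        s = (s & 0xffff) + (s >> 16);
    return (uint16_t)s;
}

int main(void)
{
    uint16_t pkt[] = { 0x0800, 0x1234, 0xbeef, 0x00ff }; /* toy "packet" */
    uint16_t sum = csum16(pkt, 4);

    uint16_t updated = csum16_replace(sum, pkt[0], 0x8847); /* ethertype -> MPLS */
    pkt[0] = 0x8847;

    printf("incremental == full recompute: %s\n",
           updated == csum16(pkt, 4) ? "yes" : "no");
    return 0;
}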
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 330f14e302e8..b884dae692a1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -239,6 +239,8 @@ override:
239 police->tcfp_t_c = ktime_get_ns(); 239 police->tcfp_t_c = ktime_get_ns();
240 police->tcf_index = parm->index ? parm->index : 240 police->tcf_index = parm->index ? parm->index :
241 tcf_hash_new_index(tn); 241 tcf_hash_new_index(tn);
242 police->tcf_tm.install = jiffies;
243 police->tcf_tm.lastuse = jiffies;
242 h = tcf_hash(police->tcf_index, POL_TAB_MASK); 244 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
243 spin_lock_bh(&hinfo->lock); 245 spin_lock_bh(&hinfo->lock);
244 hlist_add_head(&police->tcf_head, &hinfo->htab[h]); 246 hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
268 spin_lock(&police->tcf_lock); 270 spin_lock(&police->tcf_lock);
269 271
270 bstats_update(&police->tcf_bstats, skb); 272 bstats_update(&police->tcf_bstats, skb);
273 tcf_lastuse_update(&police->tcf_tm);
271 274
272 if (police->tcfp_ewma_rate && 275 if (police->tcfp_ewma_rate &&
273 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 276 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
327 .refcnt = police->tcf_refcnt - ref, 330 .refcnt = police->tcf_refcnt - ref,
328 .bindcnt = police->tcf_bindcnt - bind, 331 .bindcnt = police->tcf_bindcnt - bind,
329 }; 332 };
333 struct tcf_t t;
330 334
331 if (police->rate_present) 335 if (police->rate_present)
332 psched_ratecfg_getrate(&opt.rate, &police->rate); 336 psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
340 if (police->tcfp_ewma_rate && 344 if (police->tcfp_ewma_rate &&
341 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) 345 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
342 goto nla_put_failure; 346 goto nla_put_failure;
347
348 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
349 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
350 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
351 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
352 goto nla_put_failure;
353
343 return skb->len; 354 return skb->len;
344 355
345nla_put_failure: 356nla_put_failure:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 64f71a2155f3..ddf047df5361 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr
607 if (throttle) 607 if (throttle)
608 qdisc_throttled(wd->qdisc); 608 qdisc_throttled(wd->qdisc);
609 609
610 if (wd->last_expires == expires)
611 return;
612
613 wd->last_expires = expires;
610 hrtimer_start(&wd->timer, 614 hrtimer_start(&wd->timer,
611 ns_to_ktime(expires), 615 ns_to_ktime(expires),
612 HRTIMER_MODE_ABS_PINNED); 616 HRTIMER_MODE_ABS_PINNED);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f6bf5818ed4d..d4b4218af6b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -928,17 +928,10 @@ ok:
928 } 928 }
929 } 929 }
930 qdisc_qstats_overlimit(sch); 930 qdisc_qstats_overlimit(sch);
931 if (likely(next_event > q->now)) { 931 if (likely(next_event > q->now))
932 if (!test_bit(__QDISC_STATE_DEACTIVATED, 932 qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
933 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { 933 else
934 ktime_t time = ns_to_ktime(next_event);
935 qdisc_throttled(q->watchdog.qdisc);
936 hrtimer_start(&q->watchdog.timer, time,
937 HRTIMER_MODE_ABS_PINNED);
938 }
939 } else {
940 schedule_work(&q->work); 934 schedule_work(&q->work);
941 }
942fin: 935fin:
943 return skb; 936 return skb;
944} 937}
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 8e3e769dc9ea..1ce724b87618 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
356 if (cb->args[4] < cb->args[1]) 356 if (cb->args[4] < cb->args[1])
357 goto next; 357 goto next;
358 358
359 if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
360 goto next;
361
359 if (r->sdiag_family != AF_UNSPEC && 362 if (r->sdiag_family != AF_UNSPEC &&
360 sk->sk_family != r->sdiag_family) 363 sk->sk_family != r->sdiag_family)
361 goto next; 364 goto next;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 777d0324594a..67154b848aa9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4220 info->sctpi_s_disable_fragments = sp->disable_fragments; 4220 info->sctpi_s_disable_fragments = sp->disable_fragments;
4221 info->sctpi_s_v4mapped = sp->v4mapped; 4221 info->sctpi_s_v4mapped = sp->v4mapped;
4222 info->sctpi_s_frag_interleave = sp->frag_interleave; 4222 info->sctpi_s_frag_interleave = sp->frag_interleave;
4223 info->sctpi_s_type = sp->type;
4223 4224
4224 return 0; 4225 return 0;
4225 } 4226 }
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4dfc5c14f8c3..f795b1dd0ccd 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
346 struct nlattr **attrs) 346 struct nlattr **attrs)
347{ 347{
348 struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; 348 struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
349 int err;
350
351 if (!attrs[TIPC_NLA_BEARER])
352 return -EINVAL;
349 353
350 nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], 354 err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
351 NULL); 355 attrs[TIPC_NLA_BEARER], NULL);
356 if (err)
357 return err;
352 358
353 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, 359 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
354 nla_data(bearer[TIPC_NLA_BEARER_NAME]), 360 nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
460 struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; 466 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
461 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; 467 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
462 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; 468 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
469 int err;
463 470
464 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); 471 if (!attrs[TIPC_NLA_LINK])
472 return -EINVAL;
465 473
466 nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], 474 err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
467 NULL); 475 NULL);
476 if (err)
477 return err;
478
479 if (!link[TIPC_NLA_LINK_PROP])
480 return -EINVAL;
468 481
469 nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], 482 err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
470 NULL); 483 link[TIPC_NLA_LINK_PROP], NULL);
484 if (err)
485 return err;
486
487 if (!link[TIPC_NLA_LINK_STATS])
488 return -EINVAL;
489
490 err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
491 link[TIPC_NLA_LINK_STATS], NULL);
492 if (err)
493 return err;
471 494
472 name = (char *)TLV_DATA(msg->req); 495 name = (char *)TLV_DATA(msg->req);
473 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) 496 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
569{ 592{
570 struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; 593 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
571 struct tipc_link_info link_info; 594 struct tipc_link_info link_info;
595 int err;
572 596
573 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); 597 if (!attrs[TIPC_NLA_LINK])
598 return -EINVAL;
599
600 err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
601 NULL);
602 if (err)
603 return err;
574 604
575 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); 605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
576 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); 606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
@@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
758 u32 node, depth, type, lowbound, upbound; 788 u32 node, depth, type, lowbound, upbound;
759 static const char * const scope_str[] = {"", " zone", " cluster", 789 static const char * const scope_str[] = {"", " zone", " cluster",
760 " node"}; 790 " node"};
791 int err;
761 792
762 nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, 793 if (!attrs[TIPC_NLA_NAME_TABLE])
763 attrs[TIPC_NLA_NAME_TABLE], NULL); 794 return -EINVAL;
764 795
765 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], 796 err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
766 NULL); 797 attrs[TIPC_NLA_NAME_TABLE], NULL);
798 if (err)
799 return err;
800
801 if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
802 return -EINVAL;
803
804 err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
805 nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
806 if (err)
807 return err;
767 808
768 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); 809 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
769 810
@@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
815{ 856{
816 u32 type, lower, upper; 857 u32 type, lower, upper;
817 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; 858 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
859 int err;
818 860
819 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); 861 if (!attrs[TIPC_NLA_PUBL])
862 return -EINVAL;
863
864 err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
865 NULL);
866 if (err)
867 return err;
820 868
821 type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); 869 type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
822 lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); 870 lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +924,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
876 u32 sock_ref; 924 u32 sock_ref;
877 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 925 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
878 926
879 nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); 927 if (!attrs[TIPC_NLA_SOCK])
928 return -EINVAL;
929
930 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
931 NULL);
932 if (err)
933 return err;
880 934
881 sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 935 sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
882 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); 936 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
917 struct nlattr **attrs) 971 struct nlattr **attrs)
918{ 972{
919 struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; 973 struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
974 int err;
975
976 if (!attrs[TIPC_NLA_MEDIA])
977 return -EINVAL;
920 978
921 nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], 979 err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
922 NULL); 980 NULL);
981 if (err)
982 return err;
923 983
924 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, 984 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
925 nla_data(media[TIPC_NLA_MEDIA_NAME]), 985 nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
931{ 991{
932 struct tipc_node_info node_info; 992 struct tipc_node_info node_info;
933 struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; 993 struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
994 int err;
934 995
935 nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); 996 if (!attrs[TIPC_NLA_NODE])
997 return -EINVAL;
998
999 err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
1000 NULL);
1001 if (err)
1002 return err;
936 1003
937 node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); 1004 node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
938 node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); 1005 node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
971{ 1038{
972 __be32 id; 1039 __be32 id;
973 struct nlattr *net[TIPC_NLA_NET_MAX + 1]; 1040 struct nlattr *net[TIPC_NLA_NET_MAX + 1];
1041 int err;
1042
1043 if (!attrs[TIPC_NLA_NET])
1044 return -EINVAL;
1045
1046 err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
1047 NULL);
1048 if (err)
1049 return err;
974 1050
975 nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
976 id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); 1051 id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
977 1052
978 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); 1053 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
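All of the netlink_compat changes follow one pattern: check that the expected top-level attribute is actually present before dereferencing it, and propagate the return value of nla_parse_nested() instead of ignoring it. A condensed standalone sketch of that defensive-parsing pattern, with stand-in types and a fake parser rather than the netlink API:

#include <stdio.h>
#include <errno.h>

struct attr { int present; int malformed; };

/* Stand-in for nla_parse_nested(): 0 on success, negative errno on error. */
static int parse_nested(const struct attr *a)
{
    return a->malformed ? -EMSGSIZE : 0;
}

/* Reject a missing top-level attribute up front, and propagate parse
 * errors instead of silently continuing with uninitialized data. */
static int dump_one(const struct attr *top)
{
    int err;

    if (!top || !top->present)
        return -EINVAL;

    err = parse_nested(top);
    if (err)
        return err;

    return 0;   /* only now is the nested data safe to use */
}

int main(void)
{
    struct attr missing = { 0, 0 }, bad = { 1, 1 }, good = { 1, 0 };

    printf("%d %d %d\n", dump_one(&missing), dump_one(&bad), dump_one(&good));
    return 0;
}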