author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-20 15:15:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-20 15:15:20 -0400
commit     505f48b53478d3816d1f3b001815703cfd7afa09 (patch)
tree       9ed8ce0cf58811c70f064c6862cfb68d98178fd8
parent     486cf46f3f9be5f2a966016c1a8fe01e32cde09e (diff)
parent     afaef734e5f0004916d07ecf7d86292cdd00d59b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  fib_rules: fix unresolved_rules counting
  r8169: fix wrong eee setting for rlt8111evl
  r8169: fix driver shutdown WoL regression.
  ehea: Change maintainer to me
  pptp: pptp_rcv_core() misses pskb_may_pull() call
  tproxy: copy transparent flag when creating a time wait
  pptp: fix skb leak in pptp_xmit()
  bonding: use local function pointer of bond->recv_probe in bond_handle_frame
  smsc911x: Add support for SMSC LAN89218
  tg3: negate USE_PHYLIB flag check
  netconsole: enable netconsole can make net_device refcnt incorrent
  bluetooth: Properly clone LSM attributes to newly created child connections
  l2tp: fix a potential skb leak in l2tp_xmit_skb()
  bridge: fix hang on removal of bridge via netlink
  x25: Prevent skb overreads when checking call user data
  x25: Handle undersized/fragmented skbs
  x25: Validate incoming call user data lengths
  udplite: fast-path computation of checksum coverage
  IPVS netns shutdown/startup dead-lock
  netfilter: nf_conntrack: fix event flooding in GRE protocol tracker
-rw-r--r--  MAINTAINERS                              |   2
-rw-r--r--  drivers/net/bonding/bond_main.c          |   7
-rw-r--r--  drivers/net/netconsole.c                 |   5
-rw-r--r--  drivers/net/pptp.c                       |  22
-rw-r--r--  drivers/net/r8169.c                      |  90
-rw-r--r--  drivers/net/smsc911x.c                   |   2
-rw-r--r--  drivers/net/tg3.c                        |   2
-rw-r--r--  include/net/ip_vs.h                      |   1
-rw-r--r--  include/net/udplite.h                    |  63
-rw-r--r--  net/bluetooth/l2cap_sock.c               |   4
-rw-r--r--  net/bluetooth/rfcomm/sock.c              |   3
-rw-r--r--  net/bluetooth/sco.c                      |   5
-rw-r--r--  net/bridge/br_if.c                       |   9
-rw-r--r--  net/bridge/br_netlink.c                  |   1
-rw-r--r--  net/bridge/br_private.h                  |   1
-rw-r--r--  net/core/fib_rules.c                     |   5
-rw-r--r--  net/ipv4/tcp_minisocks.c                 |   1
-rw-r--r--  net/l2tp/l2tp_core.c                     |   4
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c           | 131
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c          |   6
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c   |   4
-rw-r--r--  net/x25/af_x25.c                         |  40
-rw-r--r--  net/x25/x25_dev.c                        |   6
-rw-r--r--  net/x25/x25_facilities.c                 |  10
-rw-r--r--  net/x25/x25_in.c                         |  43
-rw-r--r--  net/x25/x25_link.c                       |   3
-rw-r--r--  net/x25/x25_subr.c                       |  14
-rw-r--r--  security/security.c                      |   1
28 files changed, 330 insertions(+), 155 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5483e0c93b4b..b3bc88d9c03a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2460,7 +2460,7 @@ S: Supported
 F:	drivers/infiniband/hw/ehca/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Breno Leitao <leitao@linux.vnet.ibm.com>
+M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ehea/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6d79b78cfc75..de3d351ccb6b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1435,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
 	struct bonding *bond;
+	void (*recv_probe)(struct sk_buff *, struct bonding *,
+			   struct slave *);
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
@@ -1448,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 	if (bond->params.arp_interval)
 		slave->dev->last_rx = jiffies;
 
-	if (bond->recv_probe) {
+	recv_probe = ACCESS_ONCE(bond->recv_probe);
+	if (recv_probe) {
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
 		if (likely(nskb)) {
-			bond->recv_probe(nskb, bond, slave);
+			recv_probe(nskb, bond, slave);
 			dev_kfree_skb(nskb);
 		}
 	}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ed2a3977c6e7..e8882023576b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
 		return err;
 	if (enabled < 0 || enabled > 1)
 		return -EINVAL;
+	if (enabled == nt->enabled) {
+		printk(KERN_INFO "netconsole: network logging has already %s\n",
+				nt->enabled ? "started" : "stopped");
+		return -EINVAL;
+	}
 
 	if (enabled) {	/* 1 */
 
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index eae542a7e987..89f829f5f725 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	ip_send_check(iph);
 
 	ip_local_out(skb);
+	return 1;
 
 tx_error:
+	kfree_skb(skb);
 	return 1;
 }
 
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
 	}
 
 	header = (struct pptp_gre_header *)(skb->data);
+	headersize = sizeof(*header);
 
 	/* test if acknowledgement present */
 	if (PPTP_GRE_IS_A(header->ver)) {
-		__u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
-			header->ack : header->seq; /* ack in different place if S = 0 */
+		__u32 ack;
+
+		if (!pskb_may_pull(skb, headersize))
+			goto drop;
+		header = (struct pptp_gre_header *)(skb->data);
+
+		/* ack in different place if S = 0 */
+		ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
 
 		ack = ntohl(ack);
 
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
 		/* also handle sequence number wrap-around */
 		if (WRAPPED(ack, opt->ack_recv))
 			opt->ack_recv = ack;
+	} else {
+		headersize -= sizeof(header->ack);
 	}
-
 	/* test if payload present */
 	if (!PPTP_GRE_IS_S(header->flags))
 		goto drop;
 
-	headersize = sizeof(*header);
 	payload_len = ntohs(header->payload_len);
 	seq = ntohl(header->seq);
 
-	/* no ack present? */
-	if (!PPTP_GRE_IS_A(header->ver))
-		headersize -= sizeof(header->ack);
 	/* check for incomplete packet (length smaller than expected) */
-	if (skb->len - headersize < payload_len)
+	if (!pskb_may_pull(skb, headersize + payload_len))
 		goto drop;
 
 	payload = skb->data + headersize;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c23667017922..6d657cabb951 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
 	rtl_writephy(tp, 0x1f, 0x0004);
 	rtl_writephy(tp, 0x1f, 0x0007);
 	rtl_writephy(tp, 0x1e, 0x0020);
-	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
 	rtl_writephy(tp, 0x1f, 0x0002);
 	rtl_writephy(tp, 0x1f, 0x0000);
 	rtl_writephy(tp, 0x0d, 0x0007);
@@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
 	}
 }
 
+static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_29:
+	case RTL_GIGA_MAC_VER_30:
+	case RTL_GIGA_MAC_VER_32:
+	case RTL_GIGA_MAC_VER_33:
+	case RTL_GIGA_MAC_VER_34:
+		RTL_W32(RxConfig, RTL_R32(RxConfig) |
+			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+{
+	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+		return false;
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_writephy(tp, MII_BMCR, 0x0000);
+
+	rtl_wol_suspend_quirk(tp);
+
+	return true;
+}
+
 static void r810x_phy_power_down(struct rtl8169_private *tp)
 {
 	rtl_writephy(tp, 0x1f, 0x0000);
@@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
-	void __iomem *ioaddr = tp->mmio_addr;
-
-	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
-		rtl_writephy(tp, 0x1f, 0x0000);
-		rtl_writephy(tp, MII_BMCR, 0x0000);
-
-		if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
-		    tp->mac_version == RTL_GIGA_MAC_VER_30)
-			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
-				AcceptMulticast | AcceptMyPhys);
+	if (rtl_wol_pll_power_down(tp))
 		return;
-	}
 
 	r810x_phy_power_down(tp);
 }
@@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 	    tp->mac_version == RTL_GIGA_MAC_VER_33)
 		rtl_ephy_write(ioaddr, 0x19, 0xff64);
 
-	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
-		rtl_writephy(tp, 0x1f, 0x0000);
-		rtl_writephy(tp, MII_BMCR, 0x0000);
-
-		if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-		    tp->mac_version == RTL_GIGA_MAC_VER_33 ||
-		    tp->mac_version == RTL_GIGA_MAC_VER_34)
-			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
-				AcceptMulticast | AcceptMyPhys);
+	if (rtl_wol_pll_power_down(tp))
 		return;
-	}
 
 	r8168_phy_power_down(tp);
 
@@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
 
 #endif /* !CONFIG_PM */
 
+static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	/* WoL fails with 8168b when the receiver is disabled. */
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_11:
+	case RTL_GIGA_MAC_VER_12:
+	case RTL_GIGA_MAC_VER_17:
+		pci_clear_master(tp->pci_dev);
+
+		RTL_W8(ChipCmd, CmdRxEnb);
+		/* PCI commit */
+		RTL_R8(ChipCmd);
+		break;
+	default:
+		break;
+	}
+}
+
 static void rtl_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 
 	rtl8169_net_suspend(dev);
 
@@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
 	spin_unlock_irq(&tp->lock);
 
 	if (system_state == SYSTEM_POWER_OFF) {
-		/* WoL fails with 8168b when the receiver is disabled. */
-		if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-		     tp->mac_version == RTL_GIGA_MAC_VER_12 ||
-		     tp->mac_version == RTL_GIGA_MAC_VER_17) &&
-		    (tp->features & RTL_FEATURE_WOL)) {
-			pci_clear_master(pdev);
-
-			RTL_W8(ChipCmd, CmdRxEnb);
-			/* PCI commit */
-			RTL_R8(ChipCmd);
+		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+			rtl_wol_suspend_quirk(tp);
+			rtl_wol_shutdown_quirk(tp);
 		}
 
 		pci_wake_from_d3(pdev, true);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index b9016a30cdc5..c90ddb61cc56 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -26,6 +26,7 @@
  * LAN9215, LAN9216, LAN9217, LAN9218
  * LAN9210, LAN9211
  * LAN9220, LAN9221
+ * LAN89218
  *
  */
 
@@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
 	case 0x01170000:
 	case 0x01160000:
 	case 0x01150000:
+	case 0x218A0000:
 		/* LAN911[5678] family */
 		pdata->generation = pdata->idrev & 0x0000FFFF;
 		break;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4a1374df6084..c11a2b8327f3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 
 	cancel_work_sync(&tp->reset_task);
 
-	if (!tg3_flag(tp, USE_PHYLIB)) {
+	if (tg3_flag(tp, USE_PHYLIB)) {
 		tg3_phy_fini(tp);
 		tg3_mdio_fini(tp);
 	}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915656f3..8fa4430f99c1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
 	volatile int sync_state;
 	volatile int master_syncid;
 	volatile int backup_syncid;
+	struct mutex sync_mutex;
 	/* multicast interface name */
 	char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 	char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024c6b2a..5f097ca7d5c5 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
 	return 0;
 }
 
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 {
+	const struct udp_sock *up = udp_sk(skb->sk);
 	int cscov = up->len;
+	__wsum csum = 0;
 
-	/*
-	 * Sender has set `partial coverage' option on UDP-Lite socket
-	 */
-	if (up->pcflag & UDPLITE_SEND_CC) {
+	if (up->pcflag & UDPLITE_SEND_CC) {
+		/*
+		 * Sender has set `partial coverage' option on UDP-Lite socket.
+		 * The special case "up->pcslen == 0" signifies full coverage.
+		 */
 		if (up->pcslen < up->len) {
-			/* up->pcslen == 0 means that full coverage is required,
-			 * partial coverage only if 0 < up->pcslen < up->len */
-			if (0 < up->pcslen) {
-				cscov = up->pcslen;
-			}
-			uh->len = htons(up->pcslen);
+			if (0 < up->pcslen)
+				cscov = up->pcslen;
+			udp_hdr(skb)->len = htons(up->pcslen);
 		}
 		/*
 		 * NOTE: Causes for the error case `up->pcslen > up->len':
 		 *        (i)  Application error (will not be penalized).
 		 *       (ii)  Payload too big for send buffer: data is split
 		 *             into several packets, each with its own header.
 		 *             In this case (e.g. last segment), coverage may
 		 *             exceed packet length.
 		 *       Since packets with coverage length > packet length are
 		 *       illegal, we fall back to the defaults here.
 		 */
 	}
-	return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
-	int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
-	__wsum csum = 0;
 
 	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
 
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
 	return csum;
 }
 
+/* Fast-path computation of checksum. Socket may not be locked. */
 static inline __wsum udplite_csum(struct sk_buff *skb)
 {
-	struct sock *sk = skb->sk;
-	int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+	const struct udp_sock *up = udp_sk(skb->sk);
 	const int off = skb_transport_offset(skb);
-	const int len = skb->len - off;
+	int len = skb->len - off;
 
+	if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+		if (0 < up->pcslen)
+			len = up->pcslen;
+		udp_hdr(skb)->len = htons(up->pcslen);
+	}
 	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
 
-	return skb_checksum(skb, off, min(cscov, len), 0);
+	return skb_checksum(skb, off, len, 0);
 }
 
 extern void udplite4_register(void);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f623091d..e8292369cdcf 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -26,6 +26,8 @@
 
 /* Bluetooth L2CAP sockets. */
 
+#include <linux/security.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		chan->force_reliable = pchan->force_reliable;
 		chan->flushable = pchan->flushable;
 		chan->force_active = pchan->force_active;
+
+		security_sk_clone(parent, sk);
 	} else {
 
 		switch (sk->sk_type) {
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 482722bbc7a0..5417f6127323 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,6 +42,7 @@
 #include <linux/device.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/security.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
 
 		pi->sec_level = rfcomm_pi(parent)->sec_level;
 		pi->role_switch = rfcomm_pi(parent)->role_switch;
+
+		security_sk_clone(parent, sk);
 	} else {
 		pi->dlc->defer_setup = 0;
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8270f05e3f1f..a324b009e34b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -41,6 +41,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/list.h>
+#include <linux/security.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
 {
 	BT_DBG("sk %p", sk);
 
-	if (parent)
+	if (parent) {
 		sk->sk_type = parent->sk_type;
+		security_sk_clone(parent, sk);
+	}
 }
 
 static struct proto sco_proto = {
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e73815456adf..1d420f64ff27 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p)
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
-/* called with RTNL */
-static void del_br(struct net_bridge *br, struct list_head *head)
+/* Delete bridge device */
+void br_dev_delete(struct net_device *dev, struct list_head *head)
 {
+	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p, *n;
 
 	list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name)
 	}
 
 	else
-		del_br(netdev_priv(dev), NULL);
+		br_dev_delete(dev, NULL);
 
 	rtnl_unlock();
 	return ret;
@@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net)
 	rtnl_lock();
 	for_each_netdev(net, dev)
 		if (dev->priv_flags & IFF_EBRIDGE)
-			del_br(netdev_priv(dev), &list);
+			br_dev_delete(dev, &list);
 
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5b1ed1ba9aa7..e5f9ece3c9a0 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
 	.priv_size	= sizeof(struct net_bridge),
 	.setup		= br_dev_setup,
 	.validate	= br_validate,
+	.dellink	= br_dev_delete,
 };
 
 int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 78cc364997d9..857a021deea9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
 
 /* br_device.c */
 extern void br_dev_setup(struct net_device *dev);
+extern void br_dev_delete(struct net_device *dev, struct list_head *list);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
 			       struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3231b468bb72..27071ee2a4e1 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 	list_del_rcu(&rule->list);
 
-	if (rule->action == FR_ACT_GOTO)
+	if (rule->action == FR_ACT_GOTO) {
 		ops->nr_goto_rules--;
+		if (rtnl_dereference(rule->ctarget) == NULL)
+			ops->unresolved_rules--;
+	}
 
 	/*
 	 * Check if this rule is a target to any of them. If so,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d2fe4e06b472..0ce3d06dce60 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 
+		tw->tw_transparent	= inet_sk(sk)->transparent;
 		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
 		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
 		tcptw->tw_snd_nxt	= tp->snd_nxt;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ad4ac2601a56..34b2ddeacb67 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
 		uhlen + hdr_len;
 	old_headroom = skb_headroom(skb);
-	if (skb_cow_head(skb, headroom))
+	if (skb_cow_head(skb, headroom)) {
+		dev_kfree_skb(skb);
 		goto abort;
+	}
 
 	new_headroom = skb_headroom(skb);
 	skb_orphan(skb);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5290ac353a5e..e3be48bf4dcd 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 	struct ip_vs_service *svc;
 	struct ip_vs_dest_user *udest_compat;
 	struct ip_vs_dest_user_kern udest;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
+	/* Handle daemons since they have another lock */
+	if (cmd == IP_VS_SO_SET_STARTDAEMON ||
+	    cmd == IP_VS_SO_SET_STOPDAEMON) {
+		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
+
+		if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
+			ret = -ERESTARTSYS;
+			goto out_dec;
+		}
+		if (cmd == IP_VS_SO_SET_STARTDAEMON)
+			ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+						dm->syncid);
+		else
+			ret = stop_sync_thread(net, dm->state);
+		mutex_unlock(&ipvs->sync_mutex);
+		goto out_dec;
+	}
+
 	if (mutex_lock_interruptible(&__ip_vs_mutex)) {
 		ret = -ERESTARTSYS;
 		goto out_dec;
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 		/* Set timeout values for (tcp tcpfin udp) */
 		ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
 		goto out_unlock;
-	} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
-		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-		ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
-					dm->syncid);
-		goto out_unlock;
-	} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
-		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-		ret = stop_sync_thread(net, dm->state);
-		goto out_unlock;
 	}
 
 	usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
 	if (copy_from_user(arg, user, copylen) != 0)
 		return -EFAULT;
+	/*
+	 * Handle daemons first since it has its own locking
+	 */
+	if (cmd == IP_VS_SO_GET_DAEMON) {
+		struct ip_vs_daemon_user d[2];
+
+		memset(&d, 0, sizeof(d));
+		if (mutex_lock_interruptible(&ipvs->sync_mutex))
+			return -ERESTARTSYS;
+
+		if (ipvs->sync_state & IP_VS_STATE_MASTER) {
+			d[0].state = IP_VS_STATE_MASTER;
+			strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+				sizeof(d[0].mcast_ifn));
+			d[0].syncid = ipvs->master_syncid;
+		}
+		if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
+			d[1].state = IP_VS_STATE_BACKUP;
+			strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+				sizeof(d[1].mcast_ifn));
+			d[1].syncid = ipvs->backup_syncid;
+		}
+		if (copy_to_user(user, &d, sizeof(d)) != 0)
+			ret = -EFAULT;
+		mutex_unlock(&ipvs->sync_mutex);
+		return ret;
+	}
 
 	if (mutex_lock_interruptible(&__ip_vs_mutex))
 		return -ERESTARTSYS;
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 	}
 	break;
 
-	case IP_VS_SO_GET_DAEMON:
-	{
-		struct ip_vs_daemon_user d[2];
-
-		memset(&d, 0, sizeof(d));
-		if (ipvs->sync_state & IP_VS_STATE_MASTER) {
-			d[0].state = IP_VS_STATE_MASTER;
-			strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
-				sizeof(d[0].mcast_ifn));
-			d[0].syncid = ipvs->master_syncid;
-		}
-		if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
-			d[1].state = IP_VS_STATE_BACKUP;
-			strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
-				sizeof(d[1].mcast_ifn));
-			d[1].syncid = ipvs->backup_syncid;
-		}
-		if (copy_to_user(user, &d, sizeof(d)) != 0)
-			ret = -EFAULT;
-	}
-	break;
-
 	default:
 		ret = -EINVAL;
 	}
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 	struct net *net = skb_sknet(skb);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	mutex_lock(&__ip_vs_mutex);
+	mutex_lock(&ipvs->sync_mutex);
 	if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
 					   ipvs->master_mcast_ifn,
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 	}
 
 nla_put_failure:
-	mutex_unlock(&__ip_vs_mutex);
+	mutex_unlock(&ipvs->sync_mutex);
 
 	return skb->len;
 }
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
 	return ip_vs_set_timeout(net, &t);
 }
 
-static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
 {
-	struct ip_vs_service *svc = NULL;
-	struct ip_vs_service_user_kern usvc;
-	struct ip_vs_dest_user_kern udest;
 	int ret = 0, cmd;
-	int need_full_svc = 0, need_full_dest = 0;
 	struct net *net;
 	struct netns_ipvs *ipvs;
 
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
 	ipvs = net_ipvs(net);
 	cmd = info->genlhdr->cmd;
 
-	mutex_lock(&__ip_vs_mutex);
-
-	if (cmd == IPVS_CMD_FLUSH) {
-		ret = ip_vs_flush(net);
-		goto out;
-	} else if (cmd == IPVS_CMD_SET_CONFIG) {
-		ret = ip_vs_genl_set_config(net, info->attrs);
-		goto out;
-	} else if (cmd == IPVS_CMD_NEW_DAEMON ||
-		   cmd == IPVS_CMD_DEL_DAEMON) {
-
+	if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
 		struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
 
+		mutex_lock(&ipvs->sync_mutex);
 		if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
 		    nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
 				     info->attrs[IPVS_CMD_ATTR_DAEMON],
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
 			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
 		else
 			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
+out:
+		mutex_unlock(&ipvs->sync_mutex);
+	}
+	return ret;
+}
+
+static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	struct ip_vs_service *svc = NULL;
+	struct ip_vs_service_user_kern usvc;
+	struct ip_vs_dest_user_kern udest;
+	int ret = 0, cmd;
+	int need_full_svc = 0, need_full_dest = 0;
+	struct net *net;
+	struct netns_ipvs *ipvs;
+
+	net = skb_sknet(skb);
+	ipvs = net_ipvs(net);
+	cmd = info->genlhdr->cmd;
+
+	mutex_lock(&__ip_vs_mutex);
+
+	if (cmd == IPVS_CMD_FLUSH) {
+		ret = ip_vs_flush(net);
+		goto out;
+	} else if (cmd == IPVS_CMD_SET_CONFIG) {
+		ret = ip_vs_genl_set_config(net, info->attrs);
 		goto out;
 	} else if (cmd == IPVS_CMD_ZERO &&
 		   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
 		.cmd	= IPVS_CMD_NEW_DAEMON,
 		.flags	= GENL_ADMIN_PERM,
 		.policy	= ip_vs_cmd_policy,
-		.doit	= ip_vs_genl_set_cmd,
+		.doit	= ip_vs_genl_set_daemon,
 	},
 	{
 		.cmd	= IPVS_CMD_DEL_DAEMON,
 		.flags	= GENL_ADMIN_PERM,
 		.policy	= ip_vs_cmd_policy,
-		.doit	= ip_vs_genl_set_cmd,
+		.doit	= ip_vs_genl_set_daemon,
 	},
 	{
 		.cmd	= IPVS_CMD_GET_DAEMON,
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7ee7215b8ba0..3cdd479f9b5d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -61,6 +61,7 @@
 
 #define SYNC_PROTO_VER 1	/* Protocol version in header */
 
+static struct lock_class_key __ipvs_sync_key;
 /*
  * IPVS sync connection entry
  * Version 0, i.e. original version.
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+
 	if (state == IP_VS_STATE_MASTER) {
 		if (ipvs->master_thread)
 			return -EEXIST;
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
 	INIT_LIST_HEAD(&ipvs->sync_queue);
 	spin_lock_init(&ipvs->sync_lock);
 	spin_lock_init(&ipvs->sync_buff_lock);
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net)
 void ip_vs_sync_net_cleanup(struct net *net)
 {
 	int retc;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	mutex_lock(&ipvs->sync_mutex);
 	retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Master Daemon\n");
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net)
 	retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Backup Daemon\n");
+	mutex_unlock(&ipvs->sync_mutex);
 }
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index cf616e55ca41..d69facdd9a7a 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct,
 		nf_ct_refresh_acct(ct, ctinfo, skb,
 				   ct->proto.gre.stream_timeout);
 		/* Also, more likely to be important, and not a probe. */
-		set_bit(IPS_ASSURED_BIT, &ct->status);
-		nf_conntrack_event_cache(IPCT_ASSURED, ct);
+		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+			nf_conntrack_event_cache(IPCT_ASSURED, ct);
 	} else
 		nf_ct_refresh_acct(ct, ctinfo, skb,
 				   ct->proto.gre.timeout);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d30615419b4d..5f03e4ea65bf 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
 	int needed;
 	int rc;
 
-	if (skb->len < 1) {
+	if (!pskb_may_pull(skb, 1)) {
 		/* packet has no address block */
 		rc = 0;
 		goto empty;
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
 	len = *skb->data;
 	needed = 1 + (len >> 4) + (len & 0x0f);
 
-	if (skb->len < needed) {
+	if (!pskb_may_pull(skb, needed)) {
 		/* packet is too short to hold the addresses it claims
 		   to hold */
 		rc = -1;
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
 		 * Found a listening socket, now check the incoming
 		 * call user data vs this sockets call user data
 		 */
-		if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
+		if (x25_sk(s)->cudmatchlength > 0 &&
+			skb->len >= x25_sk(s)->cudmatchlength) {
 			if((memcmp(x25_sk(s)->calluserdata.cuddata,
 			   skb->data,
 			   x25_sk(s)->cudmatchlength)) == 0) {
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	 *
 	 *	Facilities length is mandatory in call request packets
 	 */
-	if (skb->len < 1)
+	if (!pskb_may_pull(skb, 1))
 		goto out_clear_request;
 	len = skb->data[0] + 1;
-	if (skb->len < len)
+	if (!pskb_may_pull(skb, len))
 		goto out_clear_request;
 	skb_pull(skb,len);
 
 	/*
+	 *	Ensure that the amount of call user data is valid.
+	 */
+	if (skb->len > X25_MAX_CUD_LEN)
+		goto out_clear_request;
+
+	/*
+	 *	Get all the call user data so it can be used in
+	 *	x25_find_listener and skb_copy_from_linear_data up ahead.
+	 */
+	if (!pskb_may_pull(skb, skb->len))
+		goto out_clear_request;
+
+	/*
 	 *	Find a listener for the particular address/cud pair.
 	 */
 	sk = x25_find_listener(&source_addr,skb);
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	 *	byte of the user data is the logical value of the Q Bit.
 	 */
 	if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+		if (!pskb_may_pull(skb, 1))
+			goto out_kfree_skb;
+
 		qbit = skb->data[0];
 		skb_pull(skb, 1);
 	}
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct x25_sock *x25 = x25_sk(sk);
 	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
 	size_t copied;
-	int qbit;
+	int qbit, header_len = x25->neighbour->extended ?
+		X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
 	struct sk_buff *skb;
 	unsigned char *asmptr;
 	int rc = -ENOTCONN;
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		skb = skb_dequeue(&x25->interrupt_in_queue);
 
+		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+			goto out_free_dgram;
+
 		skb_pull(skb, X25_STD_MIN_LEN);
 
 		/*
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (!skb)
 			goto out;
 
+		if (!pskb_may_pull(skb, header_len))
+			goto out_free_dgram;
+
 		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
 
-		skb_pull(skb, x25->neighbour->extended ?
-				X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
+		skb_pull(skb, header_len);
 
 	if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
 		asmptr = skb_push(skb, 1);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index e547ca1578c3..fa2b41888bd9 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 	unsigned short frametype;
 	unsigned int lci;
 
+	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+		return 0;
+
 	frametype = skb->data[2];
 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
 
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
 		goto drop;
 	}
 
+	if (!pskb_may_pull(skb, 1))
+		return 0;
+
 	switch (skb->data[0]) {
 
 	case X25_IFACE_DATA:
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index f77e4e75f914..36384a1fa9f2 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -44,7 +44,7 @@
 int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 	struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
 {
-	unsigned char *p = skb->data;
+	unsigned char *p;
 	unsigned int len;
 
 	*vc_fac_mask = 0;
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 	memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
 	memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
 
-	if (skb->len < 1)
+	if (!pskb_may_pull(skb, 1))
 		return 0;
 
-	len = *p++;
+	len = skb->data[0];
 
-	if (len >= skb->len)
+	if (!pskb_may_pull(skb, 1 + len))
 		return -1;
 
+	p = skb->data + 1;
+
 	while (len > 0) {
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 0b073b51b183..a49cd4ec551a 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 			/*
 			 *	Parse the data in the frame.
 			 */
+			if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+				goto out_clear;
 			skb_pull(skb, X25_STD_MIN_LEN);
 
 			len = x25_parse_address_block(skb, &source_addr,
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 			 *	Copy any Call User Data.
 			 */
 			if (skb->len > 0) {
-				skb_copy_from_linear_data(skb,
-					x25->calluserdata.cuddata,
-					skb->len);
+				if (skb->len > X25_MAX_CUD_LEN)
+					goto out_clear;
+
+				skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
+					skb->len);
 				x25->calluserdata.cudlength = skb->len;
 			}
 			if (!sock_flag(sk, SOCK_DEAD))
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 				break;
 			}
 		case X25_CLEAR_REQUEST:
+			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+				goto out_clear;
+
 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
 			x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
 			break;
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 	switch (frametype) {
 
 		case X25_CLEAR_REQUEST:
+			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+				goto out_clear;
+
 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
 			break;
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 	}
 
 	return 0;
+
+out_clear:
+	x25_write_internal(sk, X25_CLEAR_REQUEST);
+	x25_start_t23timer(sk);
+	return 0;
 }
 
 /*
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 			break;
 
 		case X25_CLEAR_REQUEST:
+			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+				goto out_clear;
+
 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
 			break;
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 	}
 
 	return queued;
+
+out_clear:
+	x25_write_internal(sk, X25_CLEAR_REQUEST);
+	x25->state = X25_STATE_2;
+	x25_start_t23timer(sk);
+	return 0;
 }
 
 /*
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
  */
 static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
 {
+	struct x25_sock *x25 = x25_sk(sk);
+
 	switch (frametype) {
 
 		case X25_RESET_REQUEST:
 			x25_write_internal(sk, X25_RESET_CONFIRMATION);
 		case X25_RESET_CONFIRMATION: {
-			struct x25_sock *x25 = x25_sk(sk);
-
 			x25_stop_timer(sk);
 			x25->condition = 0x00;
 			x25->va        = 0;
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 			break;
 		}
 		case X25_CLEAR_REQUEST:
+			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+				goto out_clear;
+
 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
 			break;
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 	}
 
 	return 0;
+
+out_clear:
+	x25_write_internal(sk, X25_CLEAR_REQUEST);
+	x25->state = X25_STATE_2;
+	x25_start_t23timer(sk);
+	return 0;
 }
 
 /* Higher level upcall for a LAPB frame */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 037958ff8eed..4acacf3c6617 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
 		break;
 
 	case X25_DIAGNOSTIC:
+		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
+			break;
+
 		printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
 		       skb->data[3], skb->data[4],
 		       skb->data[5], skb->data[6]);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 24a342ebc7f5..5170d52bfd96 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
 		      int *d, int *m)
 {
 	struct x25_sock *x25 = x25_sk(sk);
-	unsigned char *frame = skb->data;
+	unsigned char *frame;
+
+	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+		return X25_ILLEGAL;
+	frame = skb->data;
 
 	*ns = *nr = *q = *d = *m = 0;
 
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
 		if (frame[2] == X25_RR  ||
 		    frame[2] == X25_RNR ||
 		    frame[2] == X25_REJ) {
+			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+				return X25_ILLEGAL;
+			frame = skb->data;
+
 			*nr = (frame[3] >> 1) & 0x7F;
 			return frame[2];
 		}
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
 
 	if (x25->neighbour->extended) {
 		if ((frame[2] & 0x01) == X25_DATA) {
+			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+				return X25_ILLEGAL;
+			frame = skb->data;
+
 			*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
 			*d = (frame[0] & X25_D_BIT) == X25_D_BIT;
 			*m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
diff --git a/security/security.c b/security/security.c
index 0e4fccfef12c..d9e153390926 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1097,6 +1097,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
 {
 	security_ops->sk_clone_security(sk, newsk);
 }
+EXPORT_SYMBOL(security_sk_clone);
 
 void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
 {