author     Russell King <rmk+kernel@arm.linux.org.uk>	2013-04-25 04:42:42 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>	2013-04-25 04:42:42 -0400
commit     a126f7c41d80322b42ae0383ed3dcb17ee0296fc (patch)
tree       67f3605e72e01f7ec0b15af22d9d7b6ef8598b55 /net
parent     0098fc39e6d575f940487f09f303787efbc7a373 (diff)
parent     a7eb7c6f9a657a01a8359edae31bbeacd18b072c (diff)
Merge branch 'mcpm' of git://git.linaro.org/people/nico/linux into devel-stable
Diffstat (limited to 'net')
45 files changed, 401 insertions, 266 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 74dea377fe5b..de2e950a0a7a 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
 	.create = p9_virtio_create,
 	.close = p9_virtio_close,
 	.request = p9_virtio_request,
-	//.zc_request = p9_virtio_zc_request,
+	.zc_request = p9_virtio_zc_request,
 	.cancel = p9_virtio_cancel,
 	/*
 	 * We leave one entry for input and one entry for response
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d5f1d3fd4b28..314c73ed418f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
-	mdst = br_mdb_get(br, skb);
+	mdst = br_mdb_get(br, skb, vid);
 	if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
 		br_multicast_deliver(mdst, skb);
 	else
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 480330151898..828e2bcc1f52 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	if (is_broadcast_ether_addr(dest))
 		skb2 = skb;
 	else if (is_multicast_ether_addr(dest)) {
-		mdst = br_mdb_get(br, skb);
+		mdst = br_mdb_get(br, skb, vid);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 9f97b850fc65..ee79f3f20383 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 		port = p->port;
 		if (port) {
 			struct br_mdb_entry e;
+			memset(&e, 0, sizeof(e));
 			e.ifindex = port->dev->ifindex;
 			e.state = p->state;
 			if (p->addr.proto == htons(ETH_P_IP))
@@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 
 		bpm = nlmsg_data(nlh);
+		memset(bpm, 0, sizeof(*bpm));
 		bpm->ifindex = dev->ifindex;
 		if (br_mdb_fill_info(skb, cb, dev) < 0)
 			goto out;
@@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
 		return -EMSGSIZE;
 
 	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
 	bpm->family = AF_BRIDGE;
 	bpm->ifindex = dev->ifindex;
 	nest = nla_nest_start(skb, MDBA_MDB);
@@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 {
 	struct br_mdb_entry entry;
 
+	memset(&entry, 0, sizeof(entry));
 	entry.ifindex = port->dev->ifindex;
 	entry.addr.proto = group->proto;
 	entry.addr.u.ip4 = group->u.ip4;
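Note: the added memset() calls close a kernel-to-userspace information leak. These structures are filled field by field and then copied out over netlink, so any unset members and compiler-inserted padding bytes would otherwise carry stale kernel stack contents to userspace. A minimal sketch of the zero-then-fill pattern (struct foo_entry and FOO_ATTR_ENTRY are hypothetical names, not from this patch):

    /* hypothetical netlink fill helper illustrating the pattern */
    static int foo_fill_entry(struct sk_buff *skb, int ifindex)
    {
    	struct foo_entry e;		/* stack memory: padding is garbage */
    
    	memset(&e, 0, sizeof(e));	/* zero everything first */
    	e.ifindex = ifindex;		/* then assign the real fields */
    	return nla_put(skb, FOO_ATTR_ENTRY, sizeof(e), &e);
    }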
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 10e6fce1bb62..923fbeaf7afd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 #endif
 
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-					struct sk_buff *skb)
+					struct sk_buff *skb, u16 vid)
 {
 	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
 	struct br_ip ip;
@@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 		return NULL;
 
 	ip.proto = skb->protocol;
+	ip.vid = vid;
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6d314c4e6bcb..3cbf5beb3d4b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br,
 			    struct net_bridge_port *port,
 			    struct sk_buff *skb);
 extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-					       struct sk_buff *skb);
+					       struct sk_buff *skb, u16 vid);
 extern void br_multicast_add_port(struct net_bridge_port *port);
 extern void br_multicast_del_port(struct net_bridge_port *port);
 extern void br_multicast_enable_port(struct net_bridge_port *port);
@@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br,
 }
 
 static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-						      struct sk_buff *skb)
+						      struct sk_buff *skb, u16 vid)
 {
 	return NULL;
 }
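Note: together with the br_device.c, br_input.c and br_multicast.c hunks above, this plumbs the VLAN id through br_mdb_get() so multicast forwarding entries are looked up per VLAN — the vid becomes part of the br_ip lookup key (ip.vid = vid) — and keeps the inline stub used when multicast snooping is compiled out in sync with the real prototype.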
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 1ae1d9cb278d..21760f008974 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	return NULL;
 }
 
-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
 {
 	struct caif_device_entry *caifd;
 	void (*dtor)(struct sk_buff *skb) = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 3ebc8cbc91ff..ef8ebaa993cf 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }
 
-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
-			      u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+				     u8 braddr[ETH_ALEN])
 {
 	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
 
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 69bc4bf89e3e..4543b9aba40c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
 	return 0;
 }
 
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pg)
+{
+	u8 v;
+
+	ceph_decode_need(p, end, 1+8+4+4, bad);
+	v = ceph_decode_8(p);
+	if (v != 1)
+		goto bad;
+	pg->pool = ceph_decode_64(p);
+	pg->seed = ceph_decode_32(p);
+	*p += 4; /* skip preferred */
+	return 0;
+
+bad:
+	dout("error decoding pgid\n");
+	return -EINVAL;
+}
+
 /*
  * decode a full map.
  */
@@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	for (i = 0; i < len; i++) {
 		int n, j;
 		struct ceph_pg pgid;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg_mapping *pg;
 
-		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
 		n = ceph_decode_32(p);
 		err = -EINVAL;
 		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	u16 version;
 
 	ceph_decode_16_safe(p, end, version, bad);
-	if (version > 6) {
-		pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
+	if (version != 6) {
+		pr_warning("got unknown v %d != 6 of inc osdmap\n", version);
 		goto bad;
 	}
 
@@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	while (len--) {
 		struct ceph_pg_mapping *pg;
 		int j;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg pgid;
 		u32 pglen;
-		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
-		pglen = ceph_decode_32(p);
 
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
+		pglen = ceph_decode_32(p);
 		if (pglen) {
 			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
 
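Note: __decode_pgid() centralizes placement-group id decoding (a version byte, 64-bit pool, 32-bit seed, and a skipped "preferred" field) behind a single bounds check, replacing two open-coded copies that still parsed the old struct ceph_pg_v1 wire format. The incremental-osdmap version check also tightens from "> 6" to "!= 6", apparently because this decoder only understands the v6 encoding and older versions would now be misparsed.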
diff --git a/net/core/dev.c b/net/core/dev.c
index a06a7a58dd11..dffbef70cd31 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3444,6 +3444,7 @@ ncls:
 	}
 	switch (rx_handler(&skb)) {
 	case RX_HANDLER_CONSUMED:
+		ret = NET_RX_SUCCESS;
 		goto unlock;
 	case RX_HANDLER_ANOTHER:
 		goto another_round;
@@ -4103,7 +4104,7 @@ static void net_rx_action(struct softirq_action *h)
 		 * Allow this to run for 2 jiffies since which will allow
 		 * an average latency of 1.5/HZ.
 		 */
-		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
 			goto softnet_break;
 
 		local_irq_enable();
@@ -4780,7 +4781,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
 /**
  *	dev_change_carrier - Change device carrier
  *	@dev: device
- *	@new_carries: new value
+ *	@new_carrier: new value
  *
  *	Change device carrier
  */
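Note: three independent fixes here. RX_HANDLER_CONSUMED now sets ret = NET_RX_SUCCESS before jumping to unlock, so a consumed skb is not reported with a stale return value; the NAPI poll loop uses time_after_eq() so the 2-jiffy budget cannot run one tick long; and a kernel-doc parameter typo (@new_carries) is corrected. For reference, the jiffies comparison helpers are defined essentially like this (simplified; the real include/linux/jiffies.h macros also typecheck their arguments):

    #define time_after(a, b)	((long)((b) - (a)) < 0)		/* strictly after b */
    #define time_after_eq(a, b)	((long)((a) - (b)) >= 0)	/* at or after b */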
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b376410ff259..a585d45cc9d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			 * report anything.
 			 */
 			ivi.spoofchk = -1;
+			memset(ivi.mac, 0, sizeof(ivi.mac));
 			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 				break;
 			vf_mac.vf =
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 1b588e23cf80..21291f1abcd6 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
 	if (!netdev->dcbnl_ops->getpermhwaddr)
 		return -EOPNOTSUPP;
 
+	memset(perm_addr, 0, sizeof(perm_addr));
 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
 
 	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getets) {
 		struct ieee_ets ets;
+		memset(&ets, 0, sizeof(ets));
 		err = ops->ieee_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getmaxrate) {
 		struct ieee_maxrate maxrate;
+		memset(&maxrate, 0, sizeof(maxrate));
 		err = ops->ieee_getmaxrate(netdev, &maxrate);
 		if (!err) {
 			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_getpfc) {
 		struct ieee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->ieee_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 	/* get peer info if available */
 	if (ops->ieee_peer_getets) {
 		struct ieee_ets ets;
+		memset(&ets, 0, sizeof(ets));
 		err = ops->ieee_peer_getets(netdev, &ets);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->ieee_peer_getpfc) {
 		struct ieee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->ieee_peer_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 	/* peer info if available */
 	if (ops->cee_peer_getpg) {
 		struct cee_pg pg;
+		memset(&pg, 0, sizeof(pg));
 		err = ops->cee_peer_getpg(netdev, &pg);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
 
 	if (ops->cee_peer_getpfc) {
 		struct cee_pfc pfc;
+		memset(&pfc, 0, sizeof(pfc));
 		err = ops->cee_peer_getpfc(netdev, &pfc);
 		if (!err &&
 		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 8c2251fb0a3f..bba5f8336317 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -84,7 +84,7 @@
 	(memcmp(addr1, addr2, length >> 3) == 0)
 
 /* local link, i.e. FE80::/10 */
-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
 
 /*
  * check whether we can compress the IID to 16 bits,
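Note: s6_addr16[0] holds the first 16 bits of the address in network byte order, so comparing it against the host-order constant 0x80FE only happened to work on little-endian machines; htons(0xFE80) makes the test endian-independent. The comment speaks of fe80::/10 while the test still compares the full first 16 bits; a masked /10 variant would look like this (a sketch, not part of the patch):

    /* true for any address in fe80::/10, not just ones starting exactly fe80 */
    #define is_addr_link_local(a) \
    	(((a)->s6_addr16[0] & htons(0xffc0)) == htons(0xfe80))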
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
  * tcp/dccp_create_openreq_child().
  */
 void inet_csk_prepare_forced_close(struct sock *sk)
+	__releases(&sk->sk_lock.slock)
 {
 	/* sk_clone_lock locked the socket and set refcnt to 2 */
 	bh_unlock_sock(sk);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 				icmp_send(skb, ICMP_DEST_UNREACH,
 					  ICMP_PROT_UNREACH, 0);
 			}
-		} else
+			kfree_skb(skb);
+		} else {
 			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-		kfree_skb(skb);
+			consume_skb(skb);
+		}
 	}
 }
 out:
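Note: kfree_skb() is the "packet dropped" free (it is visible to drop counters and tracepoints), while consume_skb() signals a normally processed packet. The restructured branches keep kfree_skb() for the unhandled-protocol path and switch to consume_skb() once the packet has been counted as delivered.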
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f6289bf6f332..310a3647c83d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
 				put_unaligned_be32(midtime, timeptr);
 				opt->is_changed = 1;
 			}
-		} else {
+		} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
 			unsigned int overflow = optptr[3]>>4;
 			if (overflow == 15) {
 				pp_ptr = optptr + 3;
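Note: per RFC 791, the timestamp option's overflow counter records routers that could not register a timestamp for lack of space. A prespecified-address timestamp (IPOPT_TS_PRESPEC) that simply does not name this host is not an overflow, so the counter (and the overflow == 15 parameter-problem path) must be left alone in that case.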
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		if (tcp_checksum_complete_user(sk, skb))
 			goto csum_error;
 
+		if ((int)skb->truesize > sk->sk_forward_alloc)
+			goto step5;
+
 		/* Predicted packet is in window by definition.
 		 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 		 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 		tcp_rcv_rtt_measure_ts(sk, skb);
 
-		if ((int)skb->truesize > sk->sk_forward_alloc)
-			goto step5;
-
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 		/* Bulk data transfer: receiver */
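Note: the truesize vs. sk_forward_alloc test is hoisted above the header-prediction window check, apparently so that a packet that will be bounced to the slow path for memory-accounting reasons no longer runs the fast-path bookkeeping (timestamp parsing and RTT sampling) first.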
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b10414e619e..e33fe0ab2568 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -241,9 +241,11 @@ resubmit:
 			icmpv6_send(skb, ICMPV6_PARAMPROB,
 				    ICMPV6_UNK_NEXTHDR, nhoff);
 		}
-	} else
+		kfree_skb(skb);
+	} else {
 		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-		kfree_skb(skb);
+		consume_skb(skb);
+	}
 	}
 	rcu_read_unlock();
 	return 0;
@@ -279,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb)
 	 *      IPv6 multicast router mode is now supported ;)
 	 */
 	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
-	    !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
+	    !(ipv6_addr_type(&hdr->daddr) &
+	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
 		/*
 		 * Okay, we try to forward - split and duplicate
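Note: besides the same kfree_skb()/consume_skb() split as the IPv4 hunk above, ip6_mc_input() now also refuses to forward multicast packets whose destination is a loopback address, extending the existing link-local exclusion.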
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 928266569689..e5fe0041adfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
 restart:
 	read_lock_bh(&table->tb6_lock);
 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 			ip6_del_rt(rt);
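Note: accept_ra == 2 means "accept router advertisements even when forwarding is enabled", so default routes learned on interfaces configured that way are deliberately preserved when rt6_purge_dflt_routers() sweeps the table.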
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 9a5fd3c3e530..362ba47968e4 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 	struct tty_port *port = &self->port;
 	DECLARE_WAITQUEUE(wait, current);
 	int retval;
-	int do_clocal = 0, extra_count = 0;
+	int do_clocal = 0;
 	unsigned long flags;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 	 * If non-blocking mode is set, or the port is not enabled,
 	 * then make the check up front and then exit.
 	 */
-	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
-		/* nonblock mode is set or port is not enabled */
+	if (test_bit(TTY_IO_ERROR, &tty->flags)) {
+		port->flags |= ASYNC_NORMAL_ACTIVE;
+		return 0;
+	}
+
+	if (filp->f_flags & O_NONBLOCK) {
+		/* nonblock mode is set */
+		if (tty->termios.c_cflag & CBAUD)
+			tty_port_raise_dtr_rts(port);
 		port->flags |= ASYNC_NORMAL_ACTIVE;
 		IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
 		return 0;
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 		  __FILE__, __LINE__, tty->driver->name, port->count);
 
 	spin_lock_irqsave(&port->lock, flags);
-	if (!tty_hung_up_p(filp)) {
-		extra_count = 1;
+	if (!tty_hung_up_p(filp))
 		port->count--;
-	}
-	spin_unlock_irqrestore(&port->lock, flags);
 	port->blocked_open++;
+	spin_unlock_irqrestore(&port->lock, flags);
 
 	while (1) {
 		if (tty->termios.c_cflag & CBAUD)
 			tty_port_raise_dtr_rts(port);
 
-		current->state = TASK_INTERRUPTIBLE;
+		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (tty_hung_up_p(filp) ||
 		    !test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&port->open_wait, &wait);
 
-	if (extra_count) {
-		/* ++ is not atomic, so this should be protected - Jean II */
-		spin_lock_irqsave(&port->lock, flags);
+	spin_lock_irqsave(&port->lock, flags);
+	if (!tty_hung_up_p(filp))
 		port->count++;
-		spin_unlock_irqrestore(&port->lock, flags);
-	}
 	port->blocked_open--;
+	spin_unlock_irqrestore(&port->lock, flags);
 
 	IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
 		  __FILE__, __LINE__, tty->driver->name, port->count);
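Note: the rewritten block_til_ready() drops the extra_count flag in favour of re-checking tty_hung_up_p() when restoring port->count, keeps the blocked_open updates inside the port lock, raises DTR/RTS on the O_NONBLOCK path too, and replaces the open-coded current->state assignment with set_current_state(), which includes the memory barrier needed between setting the task state and testing the wakeup condition.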
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e71e85ba2bf1..29340a9a6fb9 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
 	/* case CS_ISO_8859_9: */
 	/* case CS_UNICODE: */
 	default:
-		IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-			   __func__, ias_charset_types[charset]);
+		IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+			   __func__, charset,
+			   charset < ARRAY_SIZE(ias_charset_types) ?
+			   ias_charset_types[charset] :
+			   "(unknown)");
 
 		/* Aborting, close connection! */
 		iriap_disconnect_request(self);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 556fdafdd1ea..8555f331ea60 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
 			XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
 	xp->priority = pol->sadb_x_policy_priority;
 
-	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
+	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
 	xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
 	if (!xp->family) {
 		err = -EINVAL;
@@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
 	if (xp->selector.sport)
 		xp->selector.sport_mask = htons(0xffff);
 
-	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
+	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
 	pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
 	xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
 
@@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 
 	memset(&sel, 0, sizeof(sel));
 
-	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
+	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
 	sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
 	sel.prefixlen_s = sa->sadb_address_prefixlen;
 	sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 	if (sel.sport)
 		sel.sport_mask = htons(0xffff);
 
-	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1],
+	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
 	pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
 	sel.prefixlen_d = sa->sadb_address_prefixlen;
 	sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
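Note: each of these four statements ended in a comma instead of a semicolon, fusing it with the following assignment via the comma operator. Because the comma operator evaluates left to right with a sequence point, the compiled behaviour happened to be identical; the fix is purely a correctness/readability cleanup. A two-line illustration:

    int a, b;
    a = 1, b = a + 1;	/* comma operator: same effect as "a = 1; b = a + 1;" */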
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 3f4e3afc191a..6a53371dba1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	l2tp_xmit_skb(session, skb, session->hdr_len);
 
 	sock_put(ps->tunnel_sock);
+	sock_put(sk);
 
 	return error;
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 09d96a8f6c2c..fb306814576a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3285,6 +3285,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
 					struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	int ret = -ENODATA;
 
@@ -3293,6 +3294,16 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
 	if (chanctx_conf) {
 		*chandef = chanctx_conf->def;
 		ret = 0;
+	} else if (local->open_count > 0 &&
+		   local->open_count == local->monitors &&
+		   sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+		if (local->use_chanctx)
+			*chandef = local->monitor_chandef;
+		else
+			cfg80211_chandef_create(chandef,
+						local->_oper_channel,
+						local->_oper_channel_type);
+		ret = 0;
 	}
 	rcu_read_unlock();
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2c059e54e885..baaa8608e52d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 
 	lockdep_assert_held(&local->mtx);
 
-	active = !list_empty(&local->chanctx_list);
+	active = !list_empty(&local->chanctx_list) || local->monitors;
 
 	if (!local->ops->remain_on_channel) {
 		list_for_each_entry(roc, &local->roc_list, list) {
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
 		ieee80211_adjust_monitor_flags(sdata, 1);
 		ieee80211_configure_filter(local);
+		mutex_lock(&local->mtx);
+		ieee80211_recalc_idle(local);
+		mutex_unlock(&local->mtx);
 
 		netif_carrier_on(dev);
 		break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
 		ieee80211_adjust_monitor_flags(sdata, -1);
 		ieee80211_configure_filter(local);
+		mutex_lock(&local->mtx);
+		ieee80211_recalc_idle(local);
+		mutex_unlock(&local->mtx);
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		/* relies on synchronize_rcu() below */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9f6464f3e05f..141577412d84 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
 		our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
 								mask) >> shift;
 
+		if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+			continue;
+
 		switch (ap_mcs) {
 		default:
 			if (our_mcs <= ap_mcs)
@@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
 	/*
+	 * Stop timers before deleting work items, as timers
+	 * could race and re-add the work-items. They will be
+	 * re-established on connection.
+	 */
+	del_timer_sync(&ifmgd->conn_mon_timer);
+	del_timer_sync(&ifmgd->bcn_mon_timer);
+
+	/*
 	 * we need to use atomic bitops for the running bits
 	 * only because both timers might fire at the same
 	 * time -- the code here is properly synchronised.
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 	if (del_timer_sync(&ifmgd->timer))
 		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
-	cancel_work_sync(&ifmgd->chswitch_work);
 	if (del_timer_sync(&ifmgd->chswitch_timer))
 		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-
-	/* these will just be re-established on connection */
-	del_timer_sync(&ifmgd->conn_mon_timer);
-	del_timer_sync(&ifmgd->bcn_mon_timer);
+	cancel_work_sync(&ifmgd->chswitch_work);
 }
 
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+	/*
+	 * Make sure some work items will not run after this,
+	 * they will not do anything but might not have been
+	 * cancelled when disconnecting.
+	 */
+	cancel_work_sync(&ifmgd->monitor_work);
+	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+	cancel_work_sync(&ifmgd->request_smps_work);
+	cancel_work_sync(&ifmgd->csa_connection_drop_work);
+	cancel_work_sync(&ifmgd->chswitch_work);
+
 	mutex_lock(&ifmgd->mtx);
 	if (ifmgd->assoc_data)
 		ieee80211_destroy_assoc_data(sdata, false);
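Note: quiesce now stops the connection/beacon monitor timers before cancelling work items, since a still-armed timer could re-queue work that was just cancelled (the same reasoning moves cancel_work_sync() after the chswitch timer). ieee80211_mgd_stop() additionally cancels every managed-mode work item up front so none of them can run against a half-torn-down interface, and the VHT MCS loop now skips stream counts the local device does not support at all (IEEE80211_VHT_MCS_NOT_SUPPORTED).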
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..8914d2d2881a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
 			if (unlikely(info->flags &
-				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
-				     local->queue_stop_reasons[q] &
-				     ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+				if (local->queue_stop_reasons[q] &
+				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+					/*
+					 * Drop off-channel frames if queues
+					 * are stopped for any reason other
+					 * than off-channel operation. Never
+					 * queue them.
+					 */
+					spin_unlock_irqrestore(
+						&local->queue_stop_reason_lock,
+						flags);
+					ieee80211_purge_tx_queue(&local->hw,
+								 skbs);
+					return true;
+				}
+			} else {
+
 				/*
-				 * Drop off-channel frames if queues are stopped
-				 * for any reason other than off-channel
-				 * operation. Never queue them.
+				 * Since queue is stopped, queue up frames for
+				 * later transmission from the tx-pending
+				 * tasklet when the queue is woken again.
 				 */
-				spin_unlock_irqrestore(
-					&local->queue_stop_reason_lock, flags);
-				ieee80211_purge_tx_queue(&local->hw, skbs);
-				return true;
+				if (txpending)
+					skb_queue_splice_init(skbs,
+							      &local->pending[q]);
+				else
+					skb_queue_splice_tail_init(skbs,
+							&local->pending[q]);
+
+				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+						       flags);
+				return false;
 			}
-
-			/*
-			 * Since queue is stopped, queue up frames for later
-			 * transmission from the tx-pending tasklet when the
-			 * queue is woken again.
-			 */
-			if (txpending)
-				skb_queue_splice_init(skbs, &local->pending[q]);
-			else
-				skb_queue_splice_tail_init(skbs,
-							   &local->pending[q]);
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-			return false;
 		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	}
 
 	if (!is_multicast_ether_addr(skb->data)) {
+		struct sta_info *next_hop;
+		bool mpp_lookup = true;
+
 		mpath = mesh_path_lookup(sdata, skb->data);
-		if (!mpath)
+		if (mpath) {
+			mpp_lookup = false;
+			next_hop = rcu_dereference(mpath->next_hop);
+			if (!next_hop ||
+			    !(mpath->flags & (MESH_PATH_ACTIVE |
+					      MESH_PATH_RESOLVING)))
+				mpp_lookup = true;
+		}
+
+		if (mpp_lookup)
 			mppath = mpp_path_lookup(sdata, skb->data);
+
+		if (mppath && mpath)
+			mesh_path_del(mpath->sdata, mpath->dst);
 	}
 
 	/*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 	if (local->tim_in_locked_section) {
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
 	} else {
-		spin_lock(&local->tim_lock);
+		spin_lock_bh(&local->tim_lock);
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
-		spin_unlock(&local->tim_lock);
+		spin_unlock_bh(&local->tim_lock);
 	}
 
 	return 0;
@@ -2724,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 	}
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
 	if (!ieee80211_tx_prepare(sdata, &tx, skb))
 		break;
 	dev_kfree_skb_any(skb);
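Note: four changes in this file. The off-channel branch of ieee80211_tx_frags() is restructured so frames flagged IEEE80211_TX_INTFL_OFFCHAN_TX_OK are purged only when a queue is stopped for some reason other than off-channel operation, and fall through to immediate transmission (rather than being queued) when only the off-channel stop reason is set. Mesh transmit falls back to an MPP (proxy-path) lookup when a mesh path exists but has no usable next hop, deleting the stale mesh path if a proxy path is found. The TIM helper takes tim_lock with the _bh variants, presumably because the lock is also taken from softirq context. And ieee80211_get_buffered_bc() only re-derives sdata from skb->dev for AP_VLAN interfaces.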
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f82b2e606cfd..1ba9dbc0e107 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
 	if (ret == -EAGAIN)
 		ret = 1;
 
-	return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+	return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+		ret > 0 ? 0 : -IPSET_ERR_EXIST;
 }
 
 /* Get headed data of a set */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a9740bd6fe54..94b4b9853f60 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
 {
 	const struct nf_conn_help *help;
 	const struct nf_conntrack_helper *helper;
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
 	/* Called from the helper function, this call never fails */
 	help = nfct_help(ct);
@@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
 	helper = rcu_dereference(help->helper);
 
 	nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
-		      "nf_ct_%s: dropping packet: %s ", helper->name, fmt);
+		      "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
+
+	va_end(args);
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
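Note: printing the caller's format string with "%s" logged the format itself, never its arguments, even though nf_ct_helper_log() takes varargs. The %pV printk extension expands a nested format/va_list pair carried in struct va_format. A minimal standalone sketch of the same technique:

    #include <linux/kernel.h>
    
    static void my_log(const char *fmt, ...)
    {
    	struct va_format vaf;
    	va_list args;
    
    	va_start(args, fmt);
    	vaf.fmt = fmt;
    	vaf.va = &args;
    	pr_info("prefix: %pV\n", &vaf);	/* printk expands fmt with its args */
    	va_end(args);
    }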
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index d578ec251712..0b1b32cda307 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id)
 }
 EXPORT_SYMBOL_GPL(nfnl_unlock);
 
-static struct mutex *nfnl_get_lock(__u8 subsys_id)
-{
-	return &table[subsys_id].mutex;
-}
-
 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
 {
 	nfnl_lock(n->subsys_id);
@@ -199,7 +194,7 @@ replay:
 		rcu_read_unlock();
 		nfnl_lock(subsys_id);
 		if (rcu_dereference_protected(table[subsys_id].subsys,
-			lockdep_is_held(nfnl_get_lock(subsys_id))) != ss ||
+			lockdep_is_held(&table[subsys_id].mutex)) != ss ||
 		    nfnetlink_find_client(type, ss) != nc)
 			err = -EAGAIN;
 		else if (nc->call)
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index ba92824086f3..3228d7f24eb4 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_audit_info *info = par->targinfo;
 	struct audit_buffer *ab;
 
+	if (audit_enabled == 0)
+		goto errout;
+
 	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
 	if (ab == NULL)
 		goto errout;
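Note: without the early check, audit_log_start() was invoked for every packet hitting an AUDIT target even when auditing is disabled system-wide; bailing out when audit_enabled == 0 avoids that per-packet overhead.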
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 847d495cd4de..8a6c6ea466d8 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 	struct netlbl_unlhsh_walk_arg cb_arg;
 	u32 skip_bkt = cb->args[0];
 	u32 skip_chain = cb->args[1];
-	u32 skip_addr4 = cb->args[2];
-	u32 skip_addr6 = cb->args[3];
 	u32 iter_bkt;
 	u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
 	struct netlbl_unlhsh_iface *iface;
@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 				continue;
 			netlbl_af4list_foreach_rcu(addr4,
 						   &iface->addr4_list) {
-				if (iter_addr4++ < skip_addr4)
+				if (iter_addr4++ < cb->args[2])
 					continue;
 				if (netlbl_unlabel_staticlist_gen(
 					      NLBL_UNLABEL_C_STATICLIST,
@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_IPV6)
 			netlbl_af6list_foreach_rcu(addr6,
 						   &iface->addr6_list) {
-				if (iter_addr6++ < skip_addr6)
+				if (iter_addr6++ < cb->args[3])
 					continue;
 				if (netlbl_unlabel_staticlist_gen(
 					      NLBL_UNLABEL_C_STATICLIST,
@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 
 unlabel_staticlist_return:
 	rcu_read_unlock();
-	cb->args[0] = skip_bkt;
-	cb->args[1] = skip_chain;
-	cb->args[2] = skip_addr4;
-	cb->args[3] = skip_addr6;
+	cb->args[0] = iter_bkt;
+	cb->args[1] = iter_chain;
+	cb->args[2] = iter_addr4;
+	cb->args[3] = iter_addr6;
 	return skb->len;
 }
 
@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 {
 	struct netlbl_unlhsh_walk_arg cb_arg;
 	struct netlbl_unlhsh_iface *iface;
-	u32 skip_addr4 = cb->args[0];
-	u32 skip_addr6 = cb->args[1];
-	u32 iter_addr4 = 0;
+	u32 iter_addr4 = 0, iter_addr6 = 0;
 	struct netlbl_af4list *addr4;
 #if IS_ENABLED(CONFIG_IPV6)
-	u32 iter_addr6 = 0;
 	struct netlbl_af6list *addr6;
 #endif
 
@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 		goto unlabel_staticlistdef_return;
 
 	netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
-		if (iter_addr4++ < skip_addr4)
+		if (iter_addr4++ < cb->args[0])
 			continue;
 		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
 						  iface,
@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
-		if (iter_addr6++ < skip_addr6)
+		if (iter_addr6++ < cb->args[1])
 			continue;
 		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
 						  iface,
@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 
 unlabel_staticlistdef_return:
 	rcu_read_unlock();
-	cb->args[0] = skip_addr4;
-	cb->args[1] = skip_addr6;
+	cb->args[0] = iter_addr4;
+	cb->args[1] = iter_addr6;
 	return skb->len;
 }
 
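Note: a netlink dump callback is re-invoked until the dump completes, with cb->args[] as its only persistent state. These functions saved the stale skip values back into cb->args[] instead of the current iteration counters, so a dump that filled one skb resumed from the wrong position. The general resume pattern looks like this (a generic sketch with hypothetical names — foo_list, foo_fill_one — not the netlabel code):

    static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
    	u32 idx = 0;
    	struct foo *f;
    
    	list_for_each_entry(f, &foo_list, list) {
    		if (idx < cb->args[0]) {	/* skip what an earlier call sent */
    			idx++;
    			continue;
    		}
    		if (foo_fill_one(skb, f) < 0)
    			break;			/* skb full; retry this entry */
    		idx++;
    	}
    	cb->args[0] = idx;			/* save progress, not the old skip value */
    	return skb->len;
    }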
diff --git a/net/rds/message.c b/net/rds/message.c
index f0a4658f3273..aba232f9f308 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
 void rds_message_put(struct rds_message *rm)
 {
 	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-	if (atomic_read(&rm->m_refcount) == 0) {
-		printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-		WARN_ON(1);
-	}
+	WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
 	if (atomic_dec_and_test(&rm->m_refcount)) {
 		BUG_ON(!list_empty(&rm->m_sock_item));
 		BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;
 
+	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+		return NULL;
+
 	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;
diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c | |||
@@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, | |||
87 | for (i = 0; i < nr; i++) { | 87 | for (i = 0; i < nr; i++) { |
88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); | 88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); |
89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); | 89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); |
90 | ctr.name[sizeof(ctr.name) - 1] = '\0'; | ||
90 | ctr.value = values[i]; | 91 | ctr.value = values[i]; |
91 | 92 | ||
92 | rds_info_copy(iter, &ctr, sizeof(ctr)); | 93 | rds_info_copy(iter, &ctr, sizeof(ctr)); |
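The one-line stats fix addresses strncpy()'s well-known sharp edge: when the source is at least as long as the limit, no NUL terminator is written. A small helper showing the full idiom, as a sketch:

    #include <linux/string.h>

    static void copy_counter_name(char *dst, size_t dst_size, const char *src)
    {
            strncpy(dst, src, dst_size - 1);
            dst[dst_size - 1] = '\0';  /* strncpy may leave dst unterminated */
    }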
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | |||
298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ | 298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ |
299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); | 299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); |
300 | 300 | ||
301 | /* The next assignment may let | ||
302 | * agg->initial_budget > agg->budgetmax | ||
303 | * hold; we will take it into account in charge_actual_service(). | ||
304 | */ | ||
301 | agg->budgetmax = new_num_classes * agg->lmax; | 305 | agg->budgetmax = new_num_classes * agg->lmax; |
302 | new_agg_weight = agg->class_weight * new_num_classes; | 306 | new_agg_weight = agg->class_weight * new_num_classes; |
303 | agg->inv_w = ONE_FP/new_agg_weight; | 307 | agg->inv_w = ONE_FP/new_agg_weight; |
@@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) | |||
817 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; | 821 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; |
818 | 822 | ||
819 | if (vslot != old_vslot) { | 823 | if (vslot != old_vslot) { |
820 | unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; | 824 | unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; |
821 | qfq_move_groups(q, mask, IR, ER); | 825 | qfq_move_groups(q, mask, IR, ER); |
822 | qfq_move_groups(q, mask, IB, EB); | 826 | qfq_move_groups(q, mask, IB, EB); |
823 | } | 827 | } |
@@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, | |||
988 | /* Update F according to the actual service received by the aggregate. */ | 992 | /* Update F according to the actual service received by the aggregate. */ |
989 | static inline void charge_actual_service(struct qfq_aggregate *agg) | 993 | static inline void charge_actual_service(struct qfq_aggregate *agg) |
990 | { | 994 | { |
991 | /* compute the service received by the aggregate */ | 995 | /* Compute the service received by the aggregate, taking into |
992 | u32 service_received = agg->initial_budget - agg->budget; | 996 | * account that, after decreasing the number of classes in |
997 | * agg, it may happen that | ||
998 | * agg->initial_budget - agg->budget > agg->budgetmax | ||
999 | */ | ||
1000 | u32 service_received = min(agg->budgetmax, | ||
1001 | agg->initial_budget - agg->budget); | ||
993 | 1002 | ||
994 | agg->F = agg->S + (u64)service_received * agg->inv_w; | 1003 | agg->F = agg->S + (u64)service_received * agg->inv_w; |
995 | } | 1004 | } |
996 | 1005 | ||
1006 | static inline void qfq_update_agg_ts(struct qfq_sched *q, | ||
1007 | struct qfq_aggregate *agg, | ||
1008 | enum update_reason reason); | ||
1009 | |||
1010 | static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); | ||
1011 | |||
997 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | 1012 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) |
998 | { | 1013 | { |
999 | struct qfq_sched *q = qdisc_priv(sch); | 1014 | struct qfq_sched *q = qdisc_priv(sch); |
@@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1021 | in_serv_agg->initial_budget = in_serv_agg->budget = | 1036 | in_serv_agg->initial_budget = in_serv_agg->budget = |
1022 | in_serv_agg->budgetmax; | 1037 | in_serv_agg->budgetmax; |
1023 | 1038 | ||
1024 | if (!list_empty(&in_serv_agg->active)) | 1039 | if (!list_empty(&in_serv_agg->active)) { |
1025 | /* | 1040 | /* |
1026 | * Still active: reschedule for | 1041 | * Still active: reschedule for |
1027 | * service. Possible optimization: if no other | 1042 | * service. Possible optimization: if no other |
@@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1032 | * handle it, we would need to maintain an | 1047 | * handle it, we would need to maintain an |
1033 | * extra num_active_aggs field. | 1048 | * extra num_active_aggs field. |
1034 | */ | 1049 | */ |
1035 | qfq_activate_agg(q, in_serv_agg, requeue); | 1050 | qfq_update_agg_ts(q, in_serv_agg, requeue); |
1036 | else if (sch->q.qlen == 0) { /* no aggregate to serve */ | 1051 | qfq_schedule_agg(q, in_serv_agg); |
1052 | } else if (sch->q.qlen == 0) { /* no aggregate to serve */ | ||
1037 | q->in_serv_agg = NULL; | 1053 | q->in_serv_agg = NULL; |
1038 | return NULL; | 1054 | return NULL; |
1039 | } | 1055 | } |
@@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1052 | qdisc_bstats_update(sch, skb); | 1068 | qdisc_bstats_update(sch, skb); |
1053 | 1069 | ||
1054 | agg_dequeue(in_serv_agg, cl, len); | 1070 | agg_dequeue(in_serv_agg, cl, len); |
1055 | in_serv_agg->budget -= len; | 1071 | /* If lmax is lowered through qfq_change_class for a class |
1072 | * owning pending packets larger than the new value of lmax, | ||
1073 | * then the following condition may hold. | ||
1074 | */ | ||
1075 | if (unlikely(in_serv_agg->budget < len)) | ||
1076 | in_serv_agg->budget = 0; | ||
1077 | else | ||
1078 | in_serv_agg->budget -= len; | ||
1079 | |||
1056 | q->V += (u64)len * IWSUM; | 1080 | q->V += (u64)len * IWSUM; |
1057 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", | 1081 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", |
1058 | len, (unsigned long long) in_serv_agg->F, | 1082 | len, (unsigned long long) in_serv_agg->F, |
@@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1217 | cl->deficit = agg->lmax; | 1241 | cl->deficit = agg->lmax; |
1218 | list_add_tail(&cl->alist, &agg->active); | 1242 | list_add_tail(&cl->alist, &agg->active); |
1219 | 1243 | ||
1220 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) | 1244 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || |
1221 | return err; /* aggregate was not empty, nothing else to do */ | 1245 | q->in_serv_agg == agg) |
1246 | return err; /* non-empty or in service, nothing else to do */ | ||
1222 | 1247 | ||
1223 | /* recharge budget */ | 1248 | qfq_activate_agg(q, agg, enqueue); |
1224 | agg->initial_budget = agg->budget = agg->budgetmax; | ||
1225 | |||
1226 | qfq_update_agg_ts(q, agg, enqueue); | ||
1227 | if (q->in_serv_agg == NULL) | ||
1228 | q->in_serv_agg = agg; | ||
1229 | else if (agg != q->in_serv_agg) | ||
1230 | qfq_schedule_agg(q, agg); | ||
1231 | 1249 | ||
1232 | return err; | 1250 | return err; |
1233 | } | 1251 | } |
@@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
1261 | /* group was surely ineligible, remove */ | 1279 | /* group was surely ineligible, remove */ |
1262 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1280 | __clear_bit(grp->index, &q->bitmaps[IR]); |
1263 | __clear_bit(grp->index, &q->bitmaps[IB]); | 1281 | __clear_bit(grp->index, &q->bitmaps[IB]); |
1264 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) | 1282 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && |
1283 | q->in_serv_agg == NULL) | ||
1265 | q->V = roundedS; | 1284 | q->V = roundedS; |
1266 | 1285 | ||
1267 | grp->S = roundedS; | 1286 | grp->S = roundedS; |
@@ -1284,8 +1303,15 @@ skip_update: | |||
1284 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | 1303 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, |
1285 | enum update_reason reason) | 1304 | enum update_reason reason) |
1286 | { | 1305 | { |
1306 | agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ | ||
1307 | |||
1287 | qfq_update_agg_ts(q, agg, reason); | 1308 | qfq_update_agg_ts(q, agg, reason); |
1288 | qfq_schedule_agg(q, agg); | 1309 | if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ |
1310 | q->in_serv_agg = agg; /* start serving this aggregate */ | ||
1311 | /* update V: to be in service, agg must be eligible */ | ||
1312 | q->oldV = q->V = agg->S; | ||
1313 | } else if (agg != q->in_serv_agg) | ||
1314 | qfq_schedule_agg(q, agg); | ||
1289 | } | 1315 | } |
1290 | 1316 | ||
1291 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, | 1317 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, |
@@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
1357 | __set_bit(grp->index, &q->bitmaps[s]); | 1383 | __set_bit(grp->index, &q->bitmaps[s]); |
1358 | } | 1384 | } |
1359 | } | 1385 | } |
1360 | |||
1361 | qfq_update_eligible(q); | ||
1362 | } | 1386 | } |
1363 | 1387 | ||
1364 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1388 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
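Two of the qfq changes above are defensive clamps against transient inconsistencies: charge_actual_service() now caps the measured service at budgetmax, and the dequeue path saturates the budget at zero rather than letting the unsigned subtraction wrap. The two patterns in isolation, as a sketch rather than scheduler code:

    #include <linux/types.h>

    /* Charge len against budget without wrapping below zero. */
    static inline u32 charge_saturating(u32 budget, u32 len)
    {
            return budget > len ? budget - len : 0;
    }

    /* Clamp a measured value to a known maximum, as done for
     * service_received vs. budgetmax.
     */
    static inline u32 clamp_to_max(u32 value, u32 max)
    {
            return value < max ? value : max;
    }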
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 2b3ef03c6098..12ed45dbe75d 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
155 | 155 | ||
156 | /* SCTP-AUTH extensions */ | 156 | /* SCTP-AUTH extensions */ |
157 | INIT_LIST_HEAD(&ep->endpoint_shared_keys); | 157 | INIT_LIST_HEAD(&ep->endpoint_shared_keys); |
158 | null_key = sctp_auth_shkey_create(0, GFP_KERNEL); | 158 | null_key = sctp_auth_shkey_create(0, gfp); |
159 | if (!null_key) | 159 | if (!null_key) |
160 | goto nomem; | 160 | goto nomem; |
161 | 161 | ||
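The endpoint fix is a gfp-propagation bug: sctp_endpoint_init() receives a gfp argument precisely because it can run in contexts where GFP_KERNEL sleeps are not allowed, so hard-coding GFP_KERNEL in one of its allocations defeated the parameter. The general shape, with hypothetical names:

    #include <linux/slab.h>

    struct shkey;
    extern struct shkey *shkey_create(int id, gfp_t gfp);   /* hypothetical */

    static int ep_init_keys(struct shkey **null_key, gfp_t gfp)
    {
            /* Forward the caller's gfp; never substitute GFP_KERNEL. */
            *null_key = shkey_create(0, gfp);
            return *null_key ? 0 : -ENOMEM;
    }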
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c99458df3f3f..b9070736b8d9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, | |||
5653 | if (len < sizeof(sctp_assoc_t)) | 5653 | if (len < sizeof(sctp_assoc_t)) |
5654 | return -EINVAL; | 5654 | return -EINVAL; |
5655 | 5655 | ||
5656 | /* Allow the struct to grow and fill in as much as possible */ | ||
5657 | len = min_t(size_t, len, sizeof(sas)); | ||
5658 | |||
5656 | if (copy_from_user(&sas, optval, len)) | 5659 | if (copy_from_user(&sas, optval, len)) |
5657 | return -EFAULT; | 5660 | return -EFAULT; |
5658 | 5661 | ||
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, | |||
5686 | /* Mark beginning of a new observation period */ | 5689 | /* Mark beginning of a new observation period */ |
5687 | asoc->stats.max_obs_rto = asoc->rto_min; | 5690 | asoc->stats.max_obs_rto = asoc->rto_min; |
5688 | 5691 | ||
5689 | /* Allow the struct to grow and fill in as much as possible */ | ||
5690 | len = min_t(size_t, len, sizeof(sas)); | ||
5691 | |||
5692 | if (put_user(len, optlen)) | 5692 | if (put_user(len, optlen)) |
5693 | return -EFAULT; | 5693 | return -EFAULT; |
5694 | 5694 | ||
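Reordering the min_t() clamp is the substance of this fix: clamping len only after copy_from_user() meant a user-supplied len larger than sizeof(sas) copied past the end of the on-stack structure. The safe getsockopt shape, reduced to essentials (stats_buf stands in for the real sctp structure):

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct stats_buf {
            __u64 counters[16];     /* illustrative payload */
    };

    static int get_stats(char __user *optval, int __user *optlen, int len)
    {
            struct stats_buf sas;

            if (len < (int)sizeof(int))
                    return -EINVAL;

            /* Clamp BEFORE copying so a large len cannot overrun sas. */
            len = min_t(size_t, len, sizeof(sas));

            if (copy_from_user(&sas, optval, len))
                    return -EFAULT;

            /* ... fill in sas ... */

            if (put_user(len, optlen))
                    return -EFAULT;
            return copy_to_user(optval, &sas, len) ? -EFAULT : 0;
    }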
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index 442ad4ed6315..825ea94415b3 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c | |||
@@ -41,8 +41,6 @@ | |||
41 | #include <net/sctp/sctp.h> | 41 | #include <net/sctp/sctp.h> |
42 | #include <net/sctp/sm.h> | 42 | #include <net/sctp/sm.h> |
43 | 43 | ||
44 | #define MAX_KMALLOC_SIZE 131072 | ||
45 | |||
46 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, | 44 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, |
47 | __u16 out); | 45 | __u16 out); |
48 | 46 | ||
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, | |||
65 | int size; | 63 | int size; |
66 | 64 | ||
67 | size = sctp_ssnmap_size(in, out); | 65 | size = sctp_ssnmap_size(in, out); |
68 | if (size <= MAX_KMALLOC_SIZE) | 66 | if (size <= KMALLOC_MAX_SIZE) |
69 | retval = kmalloc(size, gfp); | 67 | retval = kmalloc(size, gfp); |
70 | else | 68 | else |
71 | retval = (struct sctp_ssnmap *) | 69 | retval = (struct sctp_ssnmap *) |
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, | |||
82 | return retval; | 80 | return retval; |
83 | 81 | ||
84 | fail_map: | 82 | fail_map: |
85 | if (size <= MAX_KMALLOC_SIZE) | 83 | if (size <= KMALLOC_MAX_SIZE) |
86 | kfree(retval); | 84 | kfree(retval); |
87 | else | 85 | else |
88 | free_pages((unsigned long)retval, get_order(size)); | 86 | free_pages((unsigned long)retval, get_order(size)); |
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map) | |||
124 | int size; | 122 | int size; |
125 | 123 | ||
126 | size = sctp_ssnmap_size(map->in.len, map->out.len); | 124 | size = sctp_ssnmap_size(map->in.len, map->out.len); |
127 | if (size <= MAX_KMALLOC_SIZE) | 125 | if (size <= KMALLOC_MAX_SIZE) |
128 | kfree(map); | 126 | kfree(map); |
129 | else | 127 | else |
130 | free_pages((unsigned long)map, get_order(size)); | 128 | free_pages((unsigned long)map, get_order(size)); |
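The ssnmap change retires a private 128 KB constant in favor of the allocator's own KMALLOC_MAX_SIZE, so the kmalloc-vs-pages decision tracks what slab can actually provide. The paired alloc/free helpers in isolation, as a sketch:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    static void *map_alloc(size_t size, gfp_t gfp)
    {
            if (size <= KMALLOC_MAX_SIZE)
                    return kmalloc(size, gfp);
            return (void *)__get_free_pages(gfp, get_order(size));
    }

    static void map_free(void *map, size_t size)
    {
            /* Must mirror map_alloc()'s threshold exactly. */
            if (size <= KMALLOC_MAX_SIZE)
                    kfree(map);
            else
                    free_pages((unsigned long)map, get_order(size));
    }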
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 5f25e0c92c31..396c45174e5b 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -51,7 +51,7 @@ | |||
51 | static void sctp_tsnmap_update(struct sctp_tsnmap *map); | 51 | static void sctp_tsnmap_update(struct sctp_tsnmap *map); |
52 | static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, | 52 | static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, |
53 | __u16 len, __u16 *start, __u16 *end); | 53 | __u16 len, __u16 *start, __u16 *end); |
54 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap); | 54 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size); |
55 | 55 | ||
56 | /* Initialize a block of memory as a tsnmap. */ | 56 | /* Initialize a block of memory as a tsnmap. */ |
57 | struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, | 57 | struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, |
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn, | |||
124 | 124 | ||
125 | gap = tsn - map->base_tsn; | 125 | gap = tsn - map->base_tsn; |
126 | 126 | ||
127 | if (gap >= map->len && !sctp_tsnmap_grow(map, gap)) | 127 | if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1)) |
128 | return -ENOMEM; | 128 | return -ENOMEM; |
129 | 129 | ||
130 | if (!sctp_tsnmap_has_gap(map) && gap == 0) { | 130 | if (!sctp_tsnmap_has_gap(map) && gap == 0) { |
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, | |||
360 | return ngaps; | 360 | return ngaps; |
361 | } | 361 | } |
362 | 362 | ||
363 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap) | 363 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size) |
364 | { | 364 | { |
365 | unsigned long *new; | 365 | unsigned long *new; |
366 | unsigned long inc; | 366 | unsigned long inc; |
367 | u16 len; | 367 | u16 len; |
368 | 368 | ||
369 | if (gap >= SCTP_TSN_MAP_SIZE) | 369 | if (size > SCTP_TSN_MAP_SIZE) |
370 | return 0; | 370 | return 0; |
371 | 371 | ||
372 | inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; | 372 | inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; |
373 | len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); | 373 | len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); |
374 | 374 | ||
375 | new = kzalloc(len>>3, GFP_ATOMIC); | 375 | new = kzalloc(len>>3, GFP_ATOMIC); |
376 | if (!new) | 376 | if (!new) |
377 | return 0; | 377 | return 0; |
378 | 378 | ||
379 | bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn); | 379 | bitmap_copy(new, map->tsn_map, |
380 | map->max_tsn_seen - map->cumulative_tsn_ack_point); | ||
380 | kfree(map->tsn_map); | 381 | kfree(map->tsn_map); |
381 | map->tsn_map = new; | 382 | map->tsn_map = new; |
382 | map->len = len; | 383 | map->len = len; |
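The tsnmap change fixes an off-by-one: gap is a zero-based offset, so storing TSN base+gap needs gap+1 map slots. Passing the required size (and testing size > SCTP_TSN_MAP_SIZE rather than gap >= SCTP_TSN_MAP_SIZE) keeps the comparison in one unit. The corrected boundary check in miniature, with a stand-in limit:

    #include <linux/types.h>

    #define TSN_MAP_MAX 4096        /* stand-in for SCTP_TSN_MAP_SIZE */

    /* A zero-based gap of N occupies slot N, so N + 1 slots must
     * exist: grow to a size of gap + 1 and bounds-check the size,
     * not the gap.
     */
    static int tsn_fits_after_grow(u16 gap)
    {
            u32 size = (u32)gap + 1;

            return size <= TSN_MAP_MAX;
    }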
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ada17464b65b..0fd5b3d2df03 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
106 | { | 106 | { |
107 | struct sk_buff_head temp; | 107 | struct sk_buff_head temp; |
108 | struct sctp_ulpevent *event; | 108 | struct sctp_ulpevent *event; |
109 | int event_eor = 0; | ||
109 | 110 | ||
110 | /* Create an event from the incoming chunk. */ | 111 | /* Create an event from the incoming chunk. */ |
111 | event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); | 112 | event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); |
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
127 | /* Send event to the ULP. 'event' is the sctp_ulpevent for | 128 | /* Send event to the ULP. 'event' is the sctp_ulpevent for |
128 | * the very first SKB on the 'temp' list. | 129 | * the very first SKB on the 'temp' list. |
129 | */ | 130 | */ |
130 | if (event) | 131 | if (event) { |
132 | event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; | ||
131 | sctp_ulpq_tail_event(ulpq, event); | 133 | sctp_ulpq_tail_event(ulpq, event); |
134 | } | ||
132 | 135 | ||
133 | return 0; | 136 | return event_eor; |
134 | } | 137 | } |
135 | 138 | ||
136 | /* Add a new event for propagation to the ULP. */ | 139 | /* Add a new event for propagation to the ULP. */ |
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) | |||
540 | ctsn = cevent->tsn; | 543 | ctsn = cevent->tsn; |
541 | 544 | ||
542 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | 545 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { |
546 | case SCTP_DATA_FIRST_FRAG: | ||
547 | if (!first_frag) | ||
548 | return NULL; | ||
549 | goto done; | ||
543 | case SCTP_DATA_MIDDLE_FRAG: | 550 | case SCTP_DATA_MIDDLE_FRAG: |
544 | if (!first_frag) { | 551 | if (!first_frag) { |
545 | first_frag = pos; | 552 | first_frag = pos; |
546 | next_tsn = ctsn + 1; | 553 | next_tsn = ctsn + 1; |
547 | last_frag = pos; | 554 | last_frag = pos; |
548 | } else if (next_tsn == ctsn) | 555 | } else if (next_tsn == ctsn) { |
549 | next_tsn++; | 556 | next_tsn++; |
550 | else | 557 | last_frag = pos; |
558 | } else | ||
551 | goto done; | 559 | goto done; |
552 | break; | 560 | break; |
553 | case SCTP_DATA_LAST_FRAG: | 561 | case SCTP_DATA_LAST_FRAG: |
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) | |||
651 | } else | 659 | } else |
652 | goto done; | 660 | goto done; |
653 | break; | 661 | break; |
662 | |||
663 | case SCTP_DATA_LAST_FRAG: | ||
664 | if (!first_frag) | ||
665 | return NULL; | ||
666 | else | ||
667 | goto done; | ||
668 | break; | ||
669 | |||
654 | default: | 670 | default: |
655 | return NULL; | 671 | return NULL; |
656 | } | 672 | } |
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, | |||
962 | struct sk_buff_head *list, __u16 needed) | 978 | struct sk_buff_head *list, __u16 needed) |
963 | { | 979 | { |
964 | __u16 freed = 0; | 980 | __u16 freed = 0; |
965 | __u32 tsn; | 981 | __u32 tsn, last_tsn; |
966 | struct sk_buff *skb; | 982 | struct sk_buff *skb, *flist, *last; |
967 | struct sctp_ulpevent *event; | 983 | struct sctp_ulpevent *event; |
968 | struct sctp_tsnmap *tsnmap; | 984 | struct sctp_tsnmap *tsnmap; |
969 | 985 | ||
970 | tsnmap = &ulpq->asoc->peer.tsn_map; | 986 | tsnmap = &ulpq->asoc->peer.tsn_map; |
971 | 987 | ||
972 | while ((skb = __skb_dequeue_tail(list)) != NULL) { | 988 | while ((skb = skb_peek_tail(list)) != NULL) { |
973 | freed += skb_headlen(skb); | ||
974 | event = sctp_skb2event(skb); | 989 | event = sctp_skb2event(skb); |
975 | tsn = event->tsn; | 990 | tsn = event->tsn; |
976 | 991 | ||
992 | /* Don't renege below the Cumulative TSN ACK Point. */ | ||
993 | if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap))) | ||
994 | break; | ||
995 | |||
996 | /* Events in ordering queue may have multiple fragments | ||
997 | * corresponding to additional TSNs. Sum the total | ||
998 | * freed space; find the last TSN. | ||
999 | */ | ||
1000 | freed += skb_headlen(skb); | ||
1001 | flist = skb_shinfo(skb)->frag_list; | ||
1002 | for (last = flist; flist; flist = flist->next) { | ||
1003 | last = flist; | ||
1004 | freed += skb_headlen(last); | ||
1005 | } | ||
1006 | if (last) | ||
1007 | last_tsn = sctp_skb2event(last)->tsn; | ||
1008 | else | ||
1009 | last_tsn = tsn; | ||
1010 | |||
1011 | /* Unlink the event, then renege all applicable TSNs. */ | ||
1012 | __skb_unlink(skb, list); | ||
977 | sctp_ulpevent_free(event); | 1013 | sctp_ulpevent_free(event); |
978 | sctp_tsnmap_renege(tsnmap, tsn); | 1014 | while (TSN_lte(tsn, last_tsn)) { |
1015 | sctp_tsnmap_renege(tsnmap, tsn); | ||
1016 | tsn++; | ||
1017 | } | ||
979 | if (freed >= needed) | 1018 | if (freed >= needed) |
980 | return freed; | 1019 | return freed; |
981 | } | 1020 | } |
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, | |||
1002 | struct sctp_ulpevent *event; | 1041 | struct sctp_ulpevent *event; |
1003 | struct sctp_association *asoc; | 1042 | struct sctp_association *asoc; |
1004 | struct sctp_sock *sp; | 1043 | struct sctp_sock *sp; |
1044 | __u32 ctsn; | ||
1045 | struct sk_buff *skb; | ||
1005 | 1046 | ||
1006 | asoc = ulpq->asoc; | 1047 | asoc = ulpq->asoc; |
1007 | sp = sctp_sk(asoc->base.sk); | 1048 | sp = sctp_sk(asoc->base.sk); |
1008 | 1049 | ||
1009 | /* If the association is already in Partial Delivery mode | 1050 | /* If the association is already in Partial Delivery mode |
1010 | * we have noting to do. | 1051 | * we have nothing to do. |
1011 | */ | 1052 | */ |
1012 | if (ulpq->pd_mode) | 1053 | if (ulpq->pd_mode) |
1013 | return; | 1054 | return; |
1014 | 1055 | ||
1056 | /* Data must be at or below the Cumulative TSN ACK Point to | ||
1057 | * start partial delivery. | ||
1058 | */ | ||
1059 | skb = skb_peek(&asoc->ulpq.reasm); | ||
1060 | if (skb != NULL) { | ||
1061 | ctsn = sctp_skb2event(skb)->tsn; | ||
1062 | if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map))) | ||
1063 | return; | ||
1064 | } | ||
1065 | |||
1015 | /* If the user enabled fragment interleave socket option, | 1066 | /* If the user enabled fragment interleave socket option, |
1016 | * multiple associations can enter partial delivery. | 1067 | * multiple associations can enter partial delivery. |
1017 | * Otherwise, we can only enter partial delivery if the | 1068 | * Otherwise, we can only enter partial delivery if the |
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
1054 | } | 1105 | } |
1055 | /* If able to free enough room, accept this chunk. */ | 1106 | /* If able to free enough room, accept this chunk. */ |
1056 | if (chunk && (freed >= needed)) { | 1107 | if (chunk && (freed >= needed)) { |
1057 | __u32 tsn; | 1108 | int retval; |
1058 | tsn = ntohl(chunk->subh.data_hdr->tsn); | 1109 | retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); |
1059 | sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); | 1110 | /* |
1060 | sctp_ulpq_tail_data(ulpq, chunk, gfp); | 1111 | * Enter partial delivery if chunk has not been |
1061 | 1112 | * delivered; otherwise, drain the reassembly queue. | |
1062 | sctp_ulpq_partial_delivery(ulpq, gfp); | 1113 | */ |
1114 | if (retval <= 0) | ||
1115 | sctp_ulpq_partial_delivery(ulpq, gfp); | ||
1116 | else if (retval == 1) | ||
1117 | sctp_ulpq_reasm_drain(ulpq); | ||
1063 | } | 1118 | } |
1064 | 1119 | ||
1065 | sk_mem_reclaim(asoc->base.sk); | 1120 | sk_mem_reclaim(asoc->base.sk); |
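The renege rewrite has to account for reassembled events whose skb carries a frag_list of additional fragments, each with its own TSN; the loop sums headlen over the chain and remembers the last fragment so the whole TSN range can be reneged. Walking a frag_list in isolation, as a sketch:

    #include <linux/skbuff.h>

    static unsigned int chain_len(struct sk_buff *skb, struct sk_buff **lastp)
    {
            struct sk_buff *frag, *last = NULL;
            unsigned int len = skb_headlen(skb);

            for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                    len += skb_headlen(frag);
                    last = frag;
            }
            *lastp = last;  /* NULL when there is no frag_list */
            return len;
    }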
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index f7d34e7b6f81..5ead60550895 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd, | |||
447 | else { | 447 | else { |
448 | int N, i; | 448 | int N, i; |
449 | 449 | ||
450 | /* | ||
451 | * NOTE: we skip uid_valid()/gid_valid() checks here: | ||
452 | * instead, -1 id's are later mapped to the | ||
453 | * (export-specific) anonymous id by nfsd_setuser. | ||
454 | * | ||
455 | * (But supplementary gid's get no such special | ||
456 | * treatment so are checked for validity here.) | ||
457 | */ | ||
450 | /* uid */ | 458 | /* uid */ |
451 | rsci.cred.cr_uid = make_kuid(&init_user_ns, id); | 459 | rsci.cred.cr_uid = make_kuid(&init_user_ns, id); |
452 | if (!uid_valid(rsci.cred.cr_uid)) | ||
453 | goto out; | ||
454 | 460 | ||
455 | /* gid */ | 461 | /* gid */ |
456 | if (get_int(&mesg, &id)) | 462 | if (get_int(&mesg, &id)) |
457 | goto out; | 463 | goto out; |
458 | rsci.cred.cr_gid = make_kgid(&init_user_ns, id); | 464 | rsci.cred.cr_gid = make_kgid(&init_user_ns, id); |
459 | if (!gid_valid(rsci.cred.cr_gid)) | ||
460 | goto out; | ||
461 | 465 | ||
462 | /* number of additional gid's */ | 466 | /* number of additional gid's */ |
463 | if (get_int(&mesg, &N)) | 467 | if (get_int(&mesg, &N)) |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7b9b40224a27..a9129f8d7070 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -1174,6 +1174,8 @@ static struct file_system_type rpc_pipe_fs_type = { | |||
1174 | .mount = rpc_mount, | 1174 | .mount = rpc_mount, |
1175 | .kill_sb = rpc_kill_sb, | 1175 | .kill_sb = rpc_kill_sb, |
1176 | }; | 1176 | }; |
1177 | MODULE_ALIAS_FS("rpc_pipefs"); | ||
1178 | MODULE_ALIAS("rpc_pipefs"); | ||
1177 | 1179 | ||
1178 | static void | 1180 | static void |
1179 | init_once(void *foo) | 1181 | init_once(void *foo) |
@@ -1218,6 +1220,3 @@ void unregister_rpc_pipefs(void) | |||
1218 | kmem_cache_destroy(rpc_inode_cachep); | 1220 | kmem_cache_destroy(rpc_inode_cachep); |
1219 | unregister_filesystem(&rpc_pipe_fs_type); | 1221 | unregister_filesystem(&rpc_pipe_fs_type); |
1220 | } | 1222 | } |
1221 | |||
1222 | /* Make 'mount -t rpc_pipefs ...' autoload this module. */ | ||
1223 | MODULE_ALIAS("rpc_pipefs"); | ||
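The rpc_pipefs hunks adopt the recently added MODULE_ALIAS_FS(), which expands to a "fs-<name>" alias, the exact string the mount path passes to request_module(), while the bare alias is kept for older userspace. Declaring both next to the file_system_type, as the patch does (examplefs and example_mount are hypothetical):

    #include <linux/fs.h>
    #include <linux/module.h>

    extern struct dentry *example_mount(struct file_system_type *,
                                        int, const char *, void *);

    static struct file_system_type example_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "examplefs",
            .mount   = example_mount,       /* hypothetical */
            .kill_sb = kill_anon_super,
    };
    MODULE_ALIAS_FS("examplefs");   /* matches request_module("fs-examplefs") */
    MODULE_ALIAS("examplefs");      /* legacy alias */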
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c1d8476b7692..3d02130828da 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt) | |||
849 | xs_tcp_shutdown(xprt); | 849 | xs_tcp_shutdown(xprt); |
850 | } | 850 | } |
851 | 851 | ||
852 | static void xs_local_destroy(struct rpc_xprt *xprt) | ||
853 | { | ||
854 | xs_close(xprt); | ||
855 | xs_free_peer_addresses(xprt); | ||
856 | xprt_free(xprt); | ||
857 | module_put(THIS_MODULE); | ||
858 | } | ||
859 | |||
852 | /** | 860 | /** |
853 | * xs_destroy - prepare to shutdown a transport | 861 | * xs_destroy - prepare to shutdown a transport |
854 | * @xprt: doomed transport | 862 | * @xprt: doomed transport |
@@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
862 | 870 | ||
863 | cancel_delayed_work_sync(&transport->connect_worker); | 871 | cancel_delayed_work_sync(&transport->connect_worker); |
864 | 872 | ||
865 | xs_close(xprt); | 873 | xs_local_destroy(xprt); |
866 | xs_free_peer_addresses(xprt); | ||
867 | xprt_free(xprt); | ||
868 | module_put(THIS_MODULE); | ||
869 | } | 874 | } |
870 | 875 | ||
871 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | 876 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) |
@@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = { | |||
2482 | .send_request = xs_local_send_request, | 2487 | .send_request = xs_local_send_request, |
2483 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2488 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
2484 | .close = xs_close, | 2489 | .close = xs_close, |
2485 | .destroy = xs_destroy, | 2490 | .destroy = xs_local_destroy, |
2486 | .print_stats = xs_local_print_stats, | 2491 | .print_stats = xs_local_print_stats, |
2487 | }; | 2492 | }; |
2488 | 2493 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b017..ea4155fe9733 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
367 | rdev->wiphy.rts_threshold = (u32) -1; | 367 | rdev->wiphy.rts_threshold = (u32) -1; |
368 | rdev->wiphy.coverage_class = 0; | 368 | rdev->wiphy.coverage_class = 0; |
369 | 369 | ||
370 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | | 370 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; |
371 | NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; | ||
372 | 371 | ||
373 | return &rdev->wiphy; | 372 | return &rdev->wiphy; |
374 | } | 373 | } |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 35545ccc30fd..d44ab216c0ec 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -554,27 +554,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, | |||
554 | if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && | 554 | if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && |
555 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) | 555 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) |
556 | goto nla_put_failure; | 556 | goto nla_put_failure; |
557 | if (chan->flags & IEEE80211_CHAN_RADAR) { | 557 | if ((chan->flags & IEEE80211_CHAN_RADAR) && |
558 | u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered); | 558 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) |
559 | if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) | ||
560 | goto nla_put_failure; | ||
561 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE, | ||
562 | chan->dfs_state)) | ||
563 | goto nla_put_failure; | ||
564 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time)) | ||
565 | goto nla_put_failure; | ||
566 | } | ||
567 | if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && | ||
568 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) | ||
569 | goto nla_put_failure; | ||
570 | if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && | ||
571 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) | ||
572 | goto nla_put_failure; | ||
573 | if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && | ||
574 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) | ||
575 | goto nla_put_failure; | ||
576 | if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && | ||
577 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) | ||
578 | goto nla_put_failure; | 559 | goto nla_put_failure; |
579 | 560 | ||
580 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, | 561 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, |
@@ -900,9 +881,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, | |||
900 | nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, | 881 | nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, |
901 | c->max_interfaces)) | 882 | c->max_interfaces)) |
902 | goto nla_put_failure; | 883 | goto nla_put_failure; |
903 | if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, | ||
904 | c->radar_detect_widths)) | ||
905 | goto nla_put_failure; | ||
906 | 884 | ||
907 | nla_nest_end(msg, nl_combi); | 885 | nla_nest_end(msg, nl_combi); |
908 | } | 886 | } |
@@ -914,48 +892,6 @@ nla_put_failure: | |||
914 | return -ENOBUFS; | 892 | return -ENOBUFS; |
915 | } | 893 | } |
916 | 894 | ||
917 | #ifdef CONFIG_PM | ||
918 | static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, | ||
919 | struct sk_buff *msg) | ||
920 | { | ||
921 | const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp; | ||
922 | struct nlattr *nl_tcp; | ||
923 | |||
924 | if (!tcp) | ||
925 | return 0; | ||
926 | |||
927 | nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); | ||
928 | if (!nl_tcp) | ||
929 | return -ENOBUFS; | ||
930 | |||
931 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, | ||
932 | tcp->data_payload_max)) | ||
933 | return -ENOBUFS; | ||
934 | |||
935 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, | ||
936 | tcp->data_payload_max)) | ||
937 | return -ENOBUFS; | ||
938 | |||
939 | if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ)) | ||
940 | return -ENOBUFS; | ||
941 | |||
942 | if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, | ||
943 | sizeof(*tcp->tok), tcp->tok)) | ||
944 | return -ENOBUFS; | ||
945 | |||
946 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, | ||
947 | tcp->data_interval_max)) | ||
948 | return -ENOBUFS; | ||
949 | |||
950 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, | ||
951 | tcp->wake_payload_max)) | ||
952 | return -ENOBUFS; | ||
953 | |||
954 | nla_nest_end(msg, nl_tcp); | ||
955 | return 0; | ||
956 | } | ||
957 | #endif | ||
958 | |||
959 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, | 895 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, |
960 | struct cfg80211_registered_device *dev) | 896 | struct cfg80211_registered_device *dev) |
961 | { | 897 | { |
@@ -1330,9 +1266,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1330 | goto nla_put_failure; | 1266 | goto nla_put_failure; |
1331 | } | 1267 | } |
1332 | 1268 | ||
1333 | if (nl80211_send_wowlan_tcp_caps(dev, msg)) | ||
1334 | goto nla_put_failure; | ||
1335 | |||
1336 | nla_nest_end(msg, nl_wowlan); | 1269 | nla_nest_end(msg, nl_wowlan); |
1337 | } | 1270 | } |
1338 | #endif | 1271 | #endif |
@@ -1365,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1365 | dev->wiphy.max_acl_mac_addrs)) | 1298 | dev->wiphy.max_acl_mac_addrs)) |
1366 | goto nla_put_failure; | 1299 | goto nla_put_failure; |
1367 | 1300 | ||
1368 | if (dev->wiphy.extended_capabilities && | ||
1369 | (nla_put(msg, NL80211_ATTR_EXT_CAPA, | ||
1370 | dev->wiphy.extended_capabilities_len, | ||
1371 | dev->wiphy.extended_capabilities) || | ||
1372 | nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, | ||
1373 | dev->wiphy.extended_capabilities_len, | ||
1374 | dev->wiphy.extended_capabilities_mask))) | ||
1375 | goto nla_put_failure; | ||
1376 | |||
1377 | return genlmsg_end(msg, hdr); | 1301 | return genlmsg_end(msg, hdr); |
1378 | 1302 | ||
1379 | nla_put_failure: | 1303 | nla_put_failure: |
@@ -1383,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1383 | 1307 | ||
1384 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | 1308 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) |
1385 | { | 1309 | { |
1386 | int idx = 0; | 1310 | int idx = 0, ret; |
1387 | int start = cb->args[0]; | 1311 | int start = cb->args[0]; |
1388 | struct cfg80211_registered_device *dev; | 1312 | struct cfg80211_registered_device *dev; |
1389 | 1313 | ||
@@ -1393,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
1393 | continue; | 1317 | continue; |
1394 | if (++idx <= start) | 1318 | if (++idx <= start) |
1395 | continue; | 1319 | continue; |
1396 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, | 1320 | ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, |
1397 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1321 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
1398 | dev) < 0) { | 1322 | dev); |
1323 | if (ret < 0) { | ||
1324 | /* | ||
1325 | * If sending the wiphy data didn't fit (ENOBUFS or | ||
1326 | * EMSGSIZE returned), this SKB is still empty (so | ||
1327 | * it's not too big because another wiphy dataset is | ||
1328 | * already in the skb) and we've not tried to adjust | ||
1329 | * the dump allocation yet ... then adjust the alloc | ||
1330 | * size to be bigger, and return 1 but with the empty | ||
1331 | * skb. This results in an empty message being RX'ed | ||
1332 | * in userspace, but that is ignored. | ||
1333 | * | ||
1334 | * We can then retry with the larger buffer. | ||
1335 | */ | ||
1336 | if ((ret == -ENOBUFS || ret == -EMSGSIZE) && | ||
1337 | !skb->len && | ||
1338 | cb->min_dump_alloc < 4096) { | ||
1339 | cb->min_dump_alloc = 4096; | ||
1340 | mutex_unlock(&cfg80211_mutex); | ||
1341 | return 1; | ||
1342 | } | ||
1399 | idx--; | 1343 | idx--; |
1400 | break; | 1344 | break; |
1401 | } | 1345 | } |
@@ -1412,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1412 | struct sk_buff *msg; | 1356 | struct sk_buff *msg; |
1413 | struct cfg80211_registered_device *dev = info->user_ptr[0]; | 1357 | struct cfg80211_registered_device *dev = info->user_ptr[0]; |
1414 | 1358 | ||
1415 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 1359 | msg = nlmsg_new(4096, GFP_KERNEL); |
1416 | if (!msg) | 1360 | if (!msg) |
1417 | return -ENOMEM; | 1361 | return -ENOMEM; |
1418 | 1362 | ||
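The dump rework above introduces a retry idiom worth noting: when a single wiphy's data cannot fit even in an otherwise empty skb, the callback raises cb->min_dump_alloc and returns 1 with the empty skb, so netlink re-enters the dump with a larger buffer. A minimal sketch, with fill_dataset as a hypothetical fill helper returning -EMSGSIZE or -ENOBUFS on overflow and cursor handling elided:

    #include <linux/netlink.h>
    #include <net/netlink.h>

    extern int fill_dataset(struct sk_buff *skb,
                            struct netlink_callback *cb);  /* hypothetical */

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int ret = fill_dataset(skb, cb);

            if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
                !skb->len && cb->min_dump_alloc < 4096) {
                    /* Nothing fit in an empty skb: raise the allocation
                     * hint and let netlink call us again.
                     */
                    cb->min_dump_alloc = 4096;
                    return 1;
            }
            return skb->len;
    }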