| author | David Woodhouse <David.Woodhouse@intel.com> | 2012-11-21 05:38:13 -0500 |
|---|---|---|
| committer | David Woodhouse <David.Woodhouse@intel.com> | 2012-11-21 05:38:13 -0500 |
| commit | 851462444d421c223965b12b836bef63da61b57f (patch) | |
| tree | 495baa14e638817941496c36e1443aed7dae0ea0 /net | |
| parent | 5a6ea4af0907f995dc06df21a9c9ef764c7cd3bc (diff) | |
| parent | 6924d99fcdf1a688538a3cdebd1f135c22eec191 (diff) | |
Merge branch 'for-3.7' of git://git.infradead.org/users/dedekind/l2-mtd
Conflicts:
	drivers/mtd/nand/nand_base.c
Diffstat (limited to 'net')
81 files changed, 952 insertions, 842 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 9096bcb08132..ee070722a3a3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -463,7 +463,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
-		return NOTIFY_BAD;
+		if (vlan_uses_dev(dev))
+			return NOTIFY_BAD;
+		break;
 
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index add69d0fd99d..65e06abe023f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -5,7 +5,7 @@
 #include <linux/export.h>
 #include "vlan.h"
 
-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 	struct vlan_pcpu_stats *rx_stats;
 
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
-	if (!vlan_dev) {
-		/* Only the last call to vlan_do_receive() should change
-		 * pkt_type to PACKET_OTHERHOST
-		 */
-		if (vlan_id && last_handler)
-			skb->pkt_type = PACKET_OTHERHOST;
+	if (!vlan_dev)
 		return false;
-	}
 
 	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
@@ -372,6 +366,13 @@ EXPORT_SYMBOL(vlan_vids_del_by_dev);
 
 bool vlan_uses_dev(const struct net_device *dev)
 {
-	return rtnl_dereference(dev->vlan_info) ? true : false;
+	struct vlan_info *vlan_info;
+
+	ASSERT_RTNL();
+
+	vlan_info = rtnl_dereference(dev->vlan_info);
+	if (!vlan_info)
+		return false;
+	return vlan_info->grp.nr_vlan_devs ? true : false;
 }
 EXPORT_SYMBOL(vlan_uses_dev);
diff --git a/net/9p/client.c b/net/9p/client.c
index 8260f132b32e..34d417670935 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -76,6 +76,20 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
 }
 EXPORT_SYMBOL(p9_is_proto_dotu);
 
+/*
+ * Some error codes are taken directly from the server replies,
+ * make sure they are valid.
+ */
+static int safe_errno(int err)
+{
+	if ((err > 0) || (err < -MAX_ERRNO)) {
+		p9_debug(P9_DEBUG_ERROR, "Invalid error code %d\n", err);
+		return -EPROTO;
+	}
+	return err;
+}
+
+
 /* Interpret mount option for protocol version */
 static int get_protocol_version(char *s)
 {
@@ -782,7 +796,7 @@ again:
 	return req;
 reterr:
 	p9_free_req(c, req);
-	return ERR_PTR(err);
+	return ERR_PTR(safe_errno(err));
 }
 
 /**
@@ -865,7 +879,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
 	return req;
 reterr:
 	p9_free_req(c, req);
-	return ERR_PTR(err);
+	return ERR_PTR(safe_errno(err));
 }
 
 static struct p9_fid *p9_fid_create(struct p9_client *clnt)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 15656b8573f3..02efb25c2957 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -316,8 +316,7 @@ static void p9_read_work(struct work_struct *work)
				 m->rsize - m->rpos);
 	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
 	if (err == -EAGAIN) {
-		clear_bit(Rworksched, &m->wsched);
-		return;
+		goto end_clear;
 	}
 
 	if (err <= 0)
@@ -379,19 +378,20 @@ static void p9_read_work(struct work_struct *work)
 		m->req = NULL;
 	}
 
+end_clear:
+	clear_bit(Rworksched, &m->wsched);
+
 	if (!list_empty(&m->req_list)) {
 		if (test_and_clear_bit(Rpending, &m->wsched))
 			n = POLLIN;
 		else
 			n = p9_fd_poll(m->client, NULL);
 
-		if (n & POLLIN) {
+		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
 			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
 			schedule_work(&m->rq);
-		} else
-			clear_bit(Rworksched, &m->wsched);
-	} else
-		clear_bit(Rworksched, &m->wsched);
+		}
+	}
 
 	return;
 error:
@@ -453,12 +453,13 @@ static void p9_write_work(struct work_struct *work)
 	}
 
 	if (!m->wsize) {
+		spin_lock(&m->client->lock);
 		if (list_empty(&m->unsent_req_list)) {
 			clear_bit(Wworksched, &m->wsched);
+			spin_unlock(&m->client->lock);
 			return;
 		}
 
-		spin_lock(&m->client->lock);
 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
			       req_list);
 		req->status = REQ_STATUS_SENT;
@@ -476,10 +477,9 @@ static void p9_write_work(struct work_struct *work)
 	clear_bit(Wpending, &m->wsched);
 	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
 	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
-	if (err == -EAGAIN) {
-		clear_bit(Wworksched, &m->wsched);
-		return;
-	}
+	if (err == -EAGAIN)
+		goto end_clear;
+
 
 	if (err < 0)
 		goto error;
@@ -492,19 +492,21 @@ static void p9_write_work(struct work_struct *work)
 	if (m->wpos == m->wsize)
 		m->wpos = m->wsize = 0;
 
-	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
+end_clear:
+	clear_bit(Wworksched, &m->wsched);
+
+	if (m->wsize || !list_empty(&m->unsent_req_list)) {
 		if (test_and_clear_bit(Wpending, &m->wsched))
 			n = POLLOUT;
 		else
 			n = p9_fd_poll(m->client, NULL);
 
-		if (n & POLLOUT) {
+		if ((n & POLLOUT) &&
+		    !test_and_set_bit(Wworksched, &m->wsched)) {
 			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
 			schedule_work(&m->wq);
-		} else
-			clear_bit(Wworksched, &m->wsched);
-	} else
-		clear_bit(Wworksched, &m->wsched);
+		}
+	}
 
 	return;
 
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 0a9084ad19a6..fd8d5afec0dd 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1167,6 +1167,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 	uint16_t crc;
 	unsigned long entrytime;
 
+	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
+
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
 	/* setting claim destination address */
@@ -1210,8 +1212,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 /**
  * batadv_bla_check_bcast_duplist
  * @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: originator mac address
- * @hdr_size: maximum length of the frame
+ * @bcast_packet: encapsulated broadcast frame plus batman header
+ * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
  *
  * check if it is on our broadcast list. Another gateway might
  * have sent the same packet because it is connected to the same backbone,
@@ -1224,20 +1226,22 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
  */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct batadv_bcast_packet *bcast_packet,
-				   int hdr_size)
+				   int bcast_packet_len)
 {
-	int i, length, curr;
+	int i, length, curr, ret = 0;
 	uint8_t *content;
 	uint16_t crc;
 	struct batadv_bcast_duplist_entry *entry;
 
-	length = hdr_size - sizeof(*bcast_packet);
+	length = bcast_packet_len - sizeof(*bcast_packet);
 	content = (uint8_t *)bcast_packet;
 	content += sizeof(*bcast_packet);
 
 	/* calculate the crc ... */
 	crc = crc16(0, content, length);
 
+	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
 		curr = (bat_priv->bla.bcast_duplist_curr + i);
 		curr %= BATADV_DUPLIST_SIZE;
@@ -1259,9 +1263,12 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 		/* this entry seems to match: same crc, not too old,
 		 * and from another gw. therefore return 1 to forbid it.
 		 */
-		return 1;
+		ret = 1;
+		goto out;
 	}
-	/* not found, add a new entry (overwrite the oldest entry) */
+	/* not found, add a new entry (overwrite the oldest entry)
+	 * and allow it, its the first occurence.
+	 */
 	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
 	curr %= BATADV_DUPLIST_SIZE;
 	entry = &bat_priv->bla.bcast_duplist[curr];
@@ -1270,8 +1277,10 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
 	bat_priv->bla.bcast_duplist_curr = curr;
 
-	/* allow it, its the first occurence. */
-	return 0;
+out:
+	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+	return ret;
 }
 
 
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 939fc01371df..376b4cc6ca82 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1124,8 +1124,14 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
 
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
+	/* keep skb linear for crc calculation */
+	if (skb_linearize(skb) < 0)
+		goto out;
+
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
+
 	/* check whether this has been sent by another originator before */
-	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
 		goto out;
 
 	/* rebroadcast packet */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 2ed82caacdca..ac1e07a80454 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -205,6 +205,8 @@ struct batadv_priv_bla {
 	struct batadv_hashtable *backbone_hash;
 	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
 	int bcast_duplist_curr;
+	/* protects bcast_duplist and bcast_duplist_curr */
+	spinlock_t bcast_duplist_lock;
 	struct batadv_bla_claim_dst claim_dest;
 	struct delayed_work work;
 };
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 9d49ee6d7219..ba033f09196e 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -591,7 +591,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
-			   sock_i_uid(sk),
+			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   &src_baswapped,
			   &dst_baswapped,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 8c225ef349cd..2ac8d50861e0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -32,6 +32,8 @@
 
 #define SMP_TIMEOUT	msecs_to_jiffies(30000)
 
+#define AUTH_REQ_MASK	0x07
+
 static inline void swap128(u8 src[16], u8 dst[16])
 {
 	int i;
@@ -230,7 +232,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 		req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 		req->init_key_dist = 0;
 		req->resp_key_dist = dist_keys;
-		req->auth_req = authreq;
+		req->auth_req = (authreq & AUTH_REQ_MASK);
 		return;
 	}
 
@@ -239,7 +241,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 	rsp->init_key_dist = 0;
 	rsp->resp_key_dist = req->resp_key_dist & dist_keys;
-	rsp->auth_req = authreq;
+	rsp->auth_req = (authreq & AUTH_REQ_MASK);
 }
 
 static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 68e8f364bbf8..fe43bc7b063f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -265,6 +265,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	u32 len;
 
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		goto inhdr_error;
+
 	iph = ip_hdr(skb);
 	opt = &(IPCB(skb)->opt);
 
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 9da7fdd3cd8a..af14cb425164 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -423,14 +423,15 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
 	}
 }
 
-int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
+int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
 {
 	struct ceph_crypto_key *ckey;
+	size_t datalen = prep->datalen;
 	int ret;
 	void *p;
 
 	ret = -EINVAL;
-	if (datalen <= 0 || datalen > 32767 || !data)
+	if (datalen <= 0 || datalen > 32767 || !prep->data)
 		goto err;
 
 	ret = key_payload_reserve(key, datalen);
@@ -443,8 +444,8 @@ int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
 		goto err;
 
 	/* TODO ceph_crypto_key_decode should really take const input */
-	p = (void *)data;
-	ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
+	p = (void *)prep->data;
+	ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
 	if (ret < 0)
 		goto err_ckey;
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 159aa8bef9e7..3ef1759403b4 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2300,10 +2300,11 @@ restart:
 			mutex_unlock(&con->mutex);
 			return;
 		} else {
-			con->ops->put(con);
 			dout("con_work %p FAILED to back off %lu\n", con,
 			     con->delay);
+			set_bit(CON_FLAG_BACKOFF, &con->flags);
 		}
+		goto done;
 	}
 
 	if (con->state == CON_STATE_STANDBY) {
@@ -2749,7 +2750,8 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
 	msg = con->ops->alloc_msg(con, hdr, skip);
 	mutex_lock(&con->mutex);
 	if (con->state != CON_STATE_OPEN) {
-		ceph_msg_put(msg);
+		if (msg)
+			ceph_msg_put(msg);
 		return -EAGAIN;
 	}
 	con->in_msg = msg;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e0a1847c3bb..09cb3f6dc40c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3300,18 +3300,18 @@ ncls:
				&& !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
-	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (vlan_tx_tag_present(skb)) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = NULL;
 		}
-		if (vlan_do_receive(&skb, !rx_handler))
+		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
 			goto unlock;
 	}
 
+	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (rx_handler) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3331,6 +3331,9 @@ ncls:
 		}
 	}
 
+	if (vlan_tx_nonzero_tag_present(skb))
+		skb->pkt_type = PACKET_OTHERHOST;
+
 	/* deliver only exact match when indicated */
 	null_or_dev = deliver_exact ? skb->dev : NULL;
 
@@ -3471,17 +3474,31 @@ out:
 	return netif_receive_skb(skb);
 }
 
-inline void napi_gro_flush(struct napi_struct *napi)
+/* napi->gro_list contains packets ordered by age.
+ * youngest packets at the head of it.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
-	struct sk_buff *skb, *next;
+	struct sk_buff *skb, *prev = NULL;
 
-	for (skb = napi->gro_list; skb; skb = next) {
-		next = skb->next;
+	/* scan list and build reverse chain */
+	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
+		skb->prev = prev;
+		prev = skb;
+	}
+
+	for (skb = prev; skb; skb = prev) {
 		skb->next = NULL;
+
+		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
+			return;
+
+		prev = skb->prev;
 		napi_gro_complete(skb);
+		napi->gro_count--;
 	}
 
-	napi->gro_count = 0;
 	napi->gro_list = NULL;
 }
 EXPORT_SYMBOL(napi_gro_flush);
@@ -3542,6 +3559,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	napi->gro_count++;
 	NAPI_GRO_CB(skb)->count = 1;
+	NAPI_GRO_CB(skb)->age = jiffies;
 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
@@ -3631,20 +3649,22 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
-void skb_gro_reset_offset(struct sk_buff *skb)
+static void skb_gro_reset_offset(struct sk_buff *skb)
 {
+	const struct skb_shared_info *pinfo = skb_shinfo(skb);
+	const skb_frag_t *frag0 = &pinfo->frags[0];
+
 	NAPI_GRO_CB(skb)->data_offset = 0;
 	NAPI_GRO_CB(skb)->frag0 = NULL;
 	NAPI_GRO_CB(skb)->frag0_len = 0;
 
 	if (skb->mac_header == skb->tail &&
-	    !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
-		NAPI_GRO_CB(skb)->frag0 =
-			skb_frag_address(&skb_shinfo(skb)->frags[0]);
-		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
+	    pinfo->nr_frags &&
+	    !PageHighMem(skb_frag_page(frag0))) {
+		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
 	}
 }
-EXPORT_SYMBOL(skb_gro_reset_offset);
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -3876,7 +3896,7 @@ void napi_complete(struct napi_struct *n)
 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
 		return;
 
-	napi_gro_flush(n);
+	napi_gro_flush(n, false);
 	local_irq_save(flags);
 	__napi_complete(n);
 	local_irq_restore(flags);
@@ -3981,8 +4001,17 @@ static void net_rx_action(struct softirq_action *h)
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
-			} else
+			} else {
+				if (n->gro_list) {
+					/* flush too old packets
+					 * If HZ < 1000, flush all packets.
+					 */
+					local_irq_enable();
+					napi_gro_flush(n, HZ >= 1000);
+					local_irq_disable();
+				}
				list_move_tail(&n->poll_list, &sd->poll_list);
+			}
 		}
 
 		netpoll_poll_unlock(have);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index baca771caae2..22571488730a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1301,8 +1301,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 	if (!dst)
 		goto discard;
 
-	__skb_pull(skb, skb_network_offset(skb));
-
 	if (!neigh_event_send(neigh, skb)) {
 		int err;
 		struct net_device *dev = neigh->dev;
@@ -1312,6 +1310,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 			neigh_hh_init(neigh, dst);
 
 		do {
+			__skb_pull(skb, skb_network_offset(skb));
 			seq = read_seqbegin(&neigh->ha_lock);
 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
@@ -1342,9 +1341,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
 	unsigned int seq;
 	int err;
 
-	__skb_pull(skb, skb_network_offset(skb));
-
 	do {
+		__skb_pull(skb, skb_network_offset(skb));
 		seq = read_seqbegin(&neigh->ha_lock);
 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 148e73d2c451..d1dc14c2aac4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -248,8 +248,8 @@ struct pktgen_dev {
 	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread */
 
-	int min_pkt_size;	/* = ETH_ZLEN; */
-	int max_pkt_size;	/* = ETH_ZLEN; */
+	int min_pkt_size;
+	int max_pkt_size;
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
 	struct page *page;
@@ -449,8 +449,6 @@ static void pktgen_stop_all_threads_ifs(void);
 static void pktgen_stop(struct pktgen_thread *t);
 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
 
-static unsigned int scan_ip6(const char *s, char ip[16]);
-
 /* Module parameters, defaults. */
 static int pg_count_d __read_mostly = 1000;
 static int pg_delay_d __read_mostly;
@@ -702,8 +700,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
			   &pkt_dev->cur_in6_saddr,
			   &pkt_dev->cur_in6_daddr);
 	} else
-		seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
-			   pkt_dev->cur_saddr, pkt_dev->cur_daddr);
+		seq_printf(seq, " cur_saddr: %pI4 cur_daddr: %pI4\n",
+			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);
 
 	seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
@@ -1299,7 +1297,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return -EFAULT;
 		buf[len] = 0;
 
-		scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
+		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
 
 		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
@@ -1322,7 +1320,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return -EFAULT;
 		buf[len] = 0;
 
-		scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
+		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
 
 		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
@@ -1344,7 +1342,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return -EFAULT;
 		buf[len] = 0;
 
-		scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
+		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
 
 		if (debug)
@@ -1365,7 +1363,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return -EFAULT;
 		buf[len] = 0;
 
-		scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
+		in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
 
 		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
@@ -2036,19 +2034,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 	/* Set up Dest MAC */
 	memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
 
-	/* Set up pkt size */
-	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
-
 	if (pkt_dev->flags & F_IPV6) {
-		/*
-		 * Skip this automatic address setting until locks or functions
-		 * gets exported
-		 */
-
-#ifdef NOTNOW
 		int i, set = 0, err = 1;
 		struct inet6_dev *idev;
 
+		if (pkt_dev->min_pkt_size == 0) {
+			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
+						+ sizeof(struct udphdr)
+						+ sizeof(struct pktgen_hdr)
+						+ pkt_dev->pkt_overhead;
+		}
+
 		for (i = 0; i < IN6_ADDR_HSIZE; i++)
 			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
 				set = 1;
@@ -2069,9 +2065,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 			struct inet6_ifaddr *ifp;
 
 			read_lock_bh(&idev->lock);
-			for (ifp = idev->addr_list; ifp;
-			     ifp = ifp->if_next) {
-				if (ifp->scope == IFA_LINK &&
+			list_for_each_entry(ifp, &idev->addr_list, if_list) {
+				if ((ifp->scope & IFA_LINK) &&
				    !(ifp->flags & IFA_F_TENTATIVE)) {
					pkt_dev->cur_in6_saddr = ifp->addr;
					err = 0;
@@ -2084,8 +2079,14 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 			if (err)
				pr_err("ERROR: IPv6 link address not available\n");
 		}
-#endif
 	} else {
+		if (pkt_dev->min_pkt_size == 0) {
+			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
+						+ sizeof(struct udphdr)
+						+ sizeof(struct pktgen_hdr)
+						+ pkt_dev->pkt_overhead;
+		}
+
 		pkt_dev->saddr_min = 0;
 		pkt_dev->saddr_max = 0;
 		if (strlen(pkt_dev->src_min) == 0) {
@@ -2111,6 +2112,10 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
 	}
 	/* Initialize current values. */
+	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
+	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
+		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
+
 	pkt_dev->cur_dst_mac_offset = 0;
 	pkt_dev->cur_src_mac_offset = 0;
 	pkt_dev->cur_saddr = pkt_dev->saddr_min;
@@ -2758,97 +2763,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	return skb;
 }
 
-/*
- * scan_ip6, fmt_ip taken from dietlibc-0.21
- * Author Felix von Leitner <felix-dietlibc@fefe.de>
- *
- * Slightly modified for kernel.
- * Should be candidate for net/ipv4/utils.c
- * --ro
- */
-
-static unsigned int scan_ip6(const char *s, char ip[16])
-{
-	unsigned int i;
-	unsigned int len = 0;
-	unsigned long u;
-	char suffix[16];
-	unsigned int prefixlen = 0;
-	unsigned int suffixlen = 0;
-	__be32 tmp;
-	char *pos;
-
-	for (i = 0; i < 16; i++)
-		ip[i] = 0;
-
-	for (;;) {
-		if (*s == ':') {
-			len++;
-			if (s[1] == ':') {	/* Found "::", skip to part 2 */
-				s += 2;
-				len++;
-				break;
-			}
-			s++;
-		}
-
-		u = simple_strtoul(s, &pos, 16);
-		i = pos - s;
-		if (!i)
-			return 0;
-		if (prefixlen == 12 && s[i] == '.') {
-
-			/* the last 4 bytes may be written as IPv4 address */
-
-			tmp = in_aton(s);
-			memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp));
-			return i + len;
-		}
-		ip[prefixlen++] = (u >> 8);
-		ip[prefixlen++] = (u & 255);
-		s += i;
-		len += i;
-		if (prefixlen == 16)
-			return len;
-	}
-
-	/* part 2, after "::" */
-	for (;;) {
-		if (*s == ':') {
-			if (suffixlen == 0)
-				break;
-			s++;
-			len++;
-		} else if (suffixlen != 0)
-			break;
-
-		u = simple_strtol(s, &pos, 16);
-		i = pos - s;
-		if (!i) {
-			if (*s)
-				len--;
-			break;
-		}
-		if (suffixlen + prefixlen <= 12 && s[i] == '.') {
-			tmp = in_aton(s);
-			memcpy((struct in_addr *)(suffix + suffixlen), &tmp,
-			       sizeof(tmp));
-			suffixlen += 4;
-			len += strlen(s);
-			break;
-		}
-		suffix[suffixlen++] = (u >> 8);
-		suffix[suffixlen++] = (u & 255);
-		s += i;
-		len += i;
-		if (prefixlen + suffixlen == 16)
-			break;
-	}
-	for (i = 0; i < suffixlen; i++)
-		ip[16 - suffixlen + i] = suffix[i];
-	return len;
-}
-
 static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
 {
@@ -2927,7 +2841,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
		  pkt_dev->pkt_overhead;
 
-	if (datalen < sizeof(struct pktgen_hdr)) {
+	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
 		datalen = sizeof(struct pktgen_hdr);
 		net_info_ratelimited("increased datalen to %d\n", datalen);
 	}
@@ -3548,8 +3462,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 	}
 
 	pkt_dev->removal_mark = 0;
-	pkt_dev->min_pkt_size = ETH_ZLEN;
-	pkt_dev->max_pkt_size = ETH_ZLEN;
 	pkt_dev->nfrags = 0;
 	pkt_dev->delay = pg_delay_d;
 	pkt_dev->count = pg_count_d;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cdc28598f4ef..4007c1437fda 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -655,53 +655,6 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
-/**
- *	skb_recycle - clean up an skb for reuse
- *	@skb: buffer
- *
- *	Recycles the skb to be reused as a receive buffer. This
- *	function does any necessary reference count dropping, and
- *	cleans up the skbuff as if it just came from __alloc_skb().
- */
-void skb_recycle(struct sk_buff *skb)
-{
-	struct skb_shared_info *shinfo;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
- *	skb_recycle_check - check if skb can be reused for receive
- *	@skb: buffer
- *	@skb_size: minimum receive buffer size
- *
- *	Checks that the skb passed in is not shared or cloned, and
- *	that it is linear and its head portion at least as large as
- *	skb_size so that it can be recycled as a receive buffer.
- *	If these conditions are met, this function does any necessary
- *	reference count dropping and cleans up the skbuff as if it
- *	just came from __alloc_skb().
- */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
-	if (!skb_is_recycleable(skb, skb_size))
-		return false;
-
-	skb_recycle(skb);
-
-	return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
-
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp = old->tstamp;
@@ -3426,10 +3379,12 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
 
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
-	if (head_stolen)
+	if (head_stolen) {
+		skb_release_head_state(skb);
 		kmem_cache_free(skbuff_head_cache, skb);
-	else
+	} else {
 		__kfree_skb(skb);
+	}
 }
 EXPORT_SYMBOL(kfree_skb_partial);
 
diff --git a/net/core/utils.c b/net/core/utils.c
index f5613d569c23..e3487e461939 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -107,6 +107,18 @@ static inline int xdigit2bin(char c, int delim)
 	return IN6PTON_UNKNOWN;
 }
 
+/**
+ * in4_pton - convert an IPv4 address from literal to binary representation
+ * @src: the start of the IPv4 address string
+ * @srclen: the length of the string, -1 means strlen(src)
+ * @dst: the binary (u8[4] array) representation of the IPv4 address
+ * @delim: the delimiter of the IPv4 address in @src, -1 means no delimiter
+ * @end: A pointer to the end of the parsed string will be placed here
+ *
+ * Return one on success, return zero when any error occurs
+ * and @end will point to the end of the parsed string.
+ *
+ */
 int in4_pton(const char *src, int srclen,
	     u8 *dst,
	     int delim, const char **end)
@@ -161,6 +173,18 @@ out:
 }
 EXPORT_SYMBOL(in4_pton);
 
+/**
+ * in6_pton - convert an IPv6 address from literal to binary representation
+ * @src: the start of the IPv6 address string
+ * @srclen: the length of the string, -1 means strlen(src)
+ * @dst: the binary (u8[16] array) representation of the IPv6 address
+ * @delim: the delimiter of the IPv6 address in @src, -1 means no delimiter
+ * @end: A pointer to the end of the parsed string will be placed here
+ *
+ * Return one on success, return zero when any error occurs
+ * and @end will point to the end of the parsed string.
+ *
+ */
 int in6_pton(const char *src, int srclen,
	     u8 *dst,
	     int delim, const char **end)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 9807945a56d9..8aa4b1115384 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -59,13 +59,13 @@ const struct cred *dns_resolver_cache;
  *	"ip1,ip2,...#foo=bar"
  */
 static int
-dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
+dns_resolver_instantiate(struct key *key, struct key_preparsed_payload *prep)
 {
 	struct user_key_payload *upayload;
 	unsigned long derrno;
 	int ret;
-	size_t result_len = 0;
-	const char *data = _data, *end, *opt;
+	size_t datalen = prep->datalen, result_len = 0;
+	const char *data = prep->data, *end, *opt;
 
 	kenter("%%%d,%s,'%*.*s',%zu",
	       key->serial, key->description,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 68c93d1bb03a..825c608826de 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -322,7 +322,8 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 {
 	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
 
-	if (!r && !fib_num_tclassid_users(dev_net(dev))) {
+	if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
+	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
 		*itag = 0;
 		return 0;
 	}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 267753060ffc..71b125cd5db1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -840,6 +840,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	change_nexthops(fi) {
 		nexthop_nh->nh_parent = fi;
 		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
+		if (!nexthop_nh->nh_pcpu_rth_output)
+			goto failure;
 	} endfor_nexthops(fi)
 
 	if (cfg->fc_mx) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f0c5b9c1a957..d34ce2972c8f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -406,7 +406,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto route_err;
 	return &rt->dst;
 
@@ -442,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto route_err;
 	rcu_read_unlock();
 	return &rt->dst;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index ab09b126423c..694de3b7aebf 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -85,7 +85,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
+	if (opt->is_strictroute && rt->rt_uses_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 24a29a39e9a8..6537a408a4fb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -193,7 +193,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	}
 
 	rcu_read_lock_bh();
-	nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
+	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
@@ -371,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 		skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
-	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway)
+	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto no_route;
 
 	/* OK, we know where to send it, allocate and build IP header. */
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 978bca4818ae..1831092f999f 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -374,7 +374,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	memset(&fl4, 0, sizeof(fl4));
 	flowi4_init_output(&fl4, tunnel->parms.link,
-			   htonl(tunnel->parms.i_key), RT_TOS(tos),
+			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
			   RT_SCOPE_UNIVERSE,
			   IPPROTO_IPIP, 0,
			   dst, tiph->saddr, 0, 0);
@@ -441,7 +441,7 @@ static int vti_tunnel_bind_dev(struct net_device *dev)
 		struct flowi4 fl4;
 		memset(&fl4, 0, sizeof(fl4));
 		flowi4_init_output(&fl4, tunnel->parms.link,
-				   htonl(tunnel->parms.i_key),
+				   be32_to_cpu(tunnel->parms.i_key),
				   RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
				   IPPROTO_IPIP, 0,
				   iph->daddr, iph->saddr, 0, 0);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 9e0ffaf1d942..a82047282dbb 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -184,7 +184,8 @@ nf_nat_ipv4_out(unsigned int hooknum,
 
 	if ((ct->tuplehash[dir].tuple.src.u3.ip !=
	     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
-	    (ct->tuplehash[dir].tuple.src.u.all !=
+	    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+	     ct->tuplehash[dir].tuple.src.u.all !=
	     ct->tuplehash[!dir].tuple.dst.u.all))
 		if (nf_xfrm_me_harder(skb, AF_INET) < 0)
 			ret = NF_DROP;
@@ -221,6 +222,7 @@ nf_nat_ipv4_local_fn(unsigned int hooknum,
 	}
 #ifdef CONFIG_XFRM
 	else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+		 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
		 ct->tuplehash[dir].tuple.dst.u.all !=
		 ct->tuplehash[!dir].tuple.src.u.all)
 		if (nf_xfrm_me_harder(skb, AF_INET) < 0)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ff622069fcef..a8c651216fa6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -802,7 +802,8 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
| 802 | net = dev_net(rt->dst.dev); | 802 | net = dev_net(rt->dst.dev); |
| 803 | peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); | 803 | peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); |
| 804 | if (!peer) { | 804 | if (!peer) { |
| 805 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 805 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, |
| 806 | rt_nexthop(rt, ip_hdr(skb)->daddr)); | ||
| 806 | return; | 807 | return; |
| 807 | } | 808 | } |
| 808 | 809 | ||
| @@ -827,7 +828,9 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
| 827 | time_after(jiffies, | 828 | time_after(jiffies, |
| 828 | (peer->rate_last + | 829 | (peer->rate_last + |
| 829 | (ip_rt_redirect_load << peer->rate_tokens)))) { | 830 | (ip_rt_redirect_load << peer->rate_tokens)))) { |
| 830 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 831 | __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); |
| 832 | |||
| 833 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); | ||
| 831 | peer->rate_last = jiffies; | 834 | peer->rate_last = jiffies; |
| 832 | ++peer->rate_tokens; | 835 | ++peer->rate_tokens; |
| 833 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 836 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
| @@ -835,7 +838,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
| 835 | peer->rate_tokens == ip_rt_redirect_number) | 838 | peer->rate_tokens == ip_rt_redirect_number) |
| 836 | net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", | 839 | net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", |
| 837 | &ip_hdr(skb)->saddr, inet_iif(skb), | 840 | &ip_hdr(skb)->saddr, inet_iif(skb), |
| 838 | &ip_hdr(skb)->daddr, &rt->rt_gateway); | 841 | &ip_hdr(skb)->daddr, &gw); |
| 839 | #endif | 842 | #endif |
| 840 | } | 843 | } |
| 841 | out_put_peer: | 844 | out_put_peer: |
| @@ -904,22 +907,32 @@ out: kfree_skb(skb); | |||
| 904 | return 0; | 907 | return 0; |
| 905 | } | 908 | } |
| 906 | 909 | ||
| 907 | static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | 910 | static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) |
| 908 | { | 911 | { |
| 912 | struct dst_entry *dst = &rt->dst; | ||
| 909 | struct fib_result res; | 913 | struct fib_result res; |
| 910 | 914 | ||
| 915 | if (dst->dev->mtu < mtu) | ||
| 916 | return; | ||
| 917 | |||
| 911 | if (mtu < ip_rt_min_pmtu) | 918 | if (mtu < ip_rt_min_pmtu) |
| 912 | mtu = ip_rt_min_pmtu; | 919 | mtu = ip_rt_min_pmtu; |
| 913 | 920 | ||
| 921 | if (!rt->rt_pmtu) { | ||
| 922 | dst->obsolete = DST_OBSOLETE_KILL; | ||
| 923 | } else { | ||
| 924 | rt->rt_pmtu = mtu; | ||
| 925 | dst->expires = max(1UL, jiffies + ip_rt_mtu_expires); | ||
| 926 | } | ||
| 927 | |||
| 914 | rcu_read_lock(); | 928 | rcu_read_lock(); |
| 915 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { | 929 | if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) { |
| 916 | struct fib_nh *nh = &FIB_RES_NH(res); | 930 | struct fib_nh *nh = &FIB_RES_NH(res); |
| 917 | 931 | ||
| 918 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, | 932 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, |
| 919 | jiffies + ip_rt_mtu_expires); | 933 | jiffies + ip_rt_mtu_expires); |
| 920 | } | 934 | } |
| 921 | rcu_read_unlock(); | 935 | rcu_read_unlock(); |
| 922 | return mtu; | ||
| 923 | } | 936 | } |
| 924 | 937 | ||
| 925 | static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | 938 | static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
| @@ -929,14 +942,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | |||
| 929 | struct flowi4 fl4; | 942 | struct flowi4 fl4; |
| 930 | 943 | ||
| 931 | ip_rt_build_flow_key(&fl4, sk, skb); | 944 | ip_rt_build_flow_key(&fl4, sk, skb); |
| 932 | mtu = __ip_rt_update_pmtu(rt, &fl4, mtu); | 945 | __ip_rt_update_pmtu(rt, &fl4, mtu); |
| 933 | |||
| 934 | if (!rt->rt_pmtu) { | ||
| 935 | dst->obsolete = DST_OBSOLETE_KILL; | ||
| 936 | } else { | ||
| 937 | rt->rt_pmtu = mtu; | ||
| 938 | rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); | ||
| 939 | } | ||
| 940 | } | 946 | } |
| 941 | 947 | ||
| 942 | void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, | 948 | void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, |
| @@ -1120,7 +1126,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) | |||
| 1120 | mtu = dst->dev->mtu; | 1126 | mtu = dst->dev->mtu; |
| 1121 | 1127 | ||
| 1122 | if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { | 1128 | if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { |
| 1123 | if (rt->rt_gateway && mtu > 576) | 1129 | if (rt->rt_uses_gateway && mtu > 576) |
| 1124 | mtu = 576; | 1130 | mtu = 576; |
| 1125 | } | 1131 | } |
| 1126 | 1132 | ||
| @@ -1157,8 +1163,12 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, | |||
| 1157 | spin_lock_bh(&fnhe_lock); | 1163 | spin_lock_bh(&fnhe_lock); |
| 1158 | 1164 | ||
| 1159 | if (daddr == fnhe->fnhe_daddr) { | 1165 | if (daddr == fnhe->fnhe_daddr) { |
| 1160 | struct rtable *orig; | 1166 | struct rtable *orig = rcu_dereference(fnhe->fnhe_rth); |
| 1161 | 1167 | if (orig && rt_is_expired(orig)) { | |
| 1168 | fnhe->fnhe_gw = 0; | ||
| 1169 | fnhe->fnhe_pmtu = 0; | ||
| 1170 | fnhe->fnhe_expires = 0; | ||
| 1171 | } | ||
| 1162 | if (fnhe->fnhe_pmtu) { | 1172 | if (fnhe->fnhe_pmtu) { |
| 1163 | unsigned long expires = fnhe->fnhe_expires; | 1173 | unsigned long expires = fnhe->fnhe_expires; |
| 1164 | unsigned long diff = expires - jiffies; | 1174 | unsigned long diff = expires - jiffies; |
| @@ -1171,22 +1181,16 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, | |||
| 1171 | if (fnhe->fnhe_gw) { | 1181 | if (fnhe->fnhe_gw) { |
| 1172 | rt->rt_flags |= RTCF_REDIRECTED; | 1182 | rt->rt_flags |= RTCF_REDIRECTED; |
| 1173 | rt->rt_gateway = fnhe->fnhe_gw; | 1183 | rt->rt_gateway = fnhe->fnhe_gw; |
| 1174 | } | 1184 | rt->rt_uses_gateway = 1; |
| 1185 | } else if (!rt->rt_gateway) | ||
| 1186 | rt->rt_gateway = daddr; | ||
| 1175 | 1187 | ||
| 1176 | orig = rcu_dereference(fnhe->fnhe_rth); | ||
| 1177 | rcu_assign_pointer(fnhe->fnhe_rth, rt); | 1188 | rcu_assign_pointer(fnhe->fnhe_rth, rt); |
| 1178 | if (orig) | 1189 | if (orig) |
| 1179 | rt_free(orig); | 1190 | rt_free(orig); |
| 1180 | 1191 | ||
| 1181 | fnhe->fnhe_stamp = jiffies; | 1192 | fnhe->fnhe_stamp = jiffies; |
| 1182 | ret = true; | 1193 | ret = true; |
| 1183 | } else { | ||
| 1184 | /* Routes we intend to cache in nexthop exception have | ||
| 1185 | * the DST_NOCACHE bit clear. However, if we are | ||
| 1186 | * unsuccessful at storing this route into the cache | ||
| 1187 | * we really need to set it. | ||
| 1188 | */ | ||
| 1189 | rt->dst.flags |= DST_NOCACHE; | ||
| 1190 | } | 1194 | } |
| 1191 | spin_unlock_bh(&fnhe_lock); | 1195 | spin_unlock_bh(&fnhe_lock); |
| 1192 | 1196 | ||
| @@ -1201,8 +1205,6 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) | |||
| 1201 | if (rt_is_input_route(rt)) { | 1205 | if (rt_is_input_route(rt)) { |
| 1202 | p = (struct rtable **)&nh->nh_rth_input; | 1206 | p = (struct rtable **)&nh->nh_rth_input; |
| 1203 | } else { | 1207 | } else { |
| 1204 | if (!nh->nh_pcpu_rth_output) | ||
| 1205 | goto nocache; | ||
| 1206 | p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); | 1208 | p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); |
| 1207 | } | 1209 | } |
| 1208 | orig = *p; | 1210 | orig = *p; |
| @@ -1211,16 +1213,8 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) | |||
| 1211 | if (prev == orig) { | 1213 | if (prev == orig) { |
| 1212 | if (orig) | 1214 | if (orig) |
| 1213 | rt_free(orig); | 1215 | rt_free(orig); |
| 1214 | } else { | 1216 | } else |
| 1215 | /* Routes we intend to cache in the FIB nexthop have | ||
| 1216 | * the DST_NOCACHE bit clear. However, if we are | ||
| 1217 | * unsuccessful at storing this route into the cache | ||
| 1218 | * we really need to set it. | ||
| 1219 | */ | ||
| 1220 | nocache: | ||
| 1221 | rt->dst.flags |= DST_NOCACHE; | ||
| 1222 | ret = false; | 1217 | ret = false; |
| 1223 | } | ||
| 1224 | 1218 | ||
| 1225 | return ret; | 1219 | return ret; |
| 1226 | } | 1220 | } |
| @@ -1281,8 +1275,10 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, | |||
| 1281 | if (fi) { | 1275 | if (fi) { |
| 1282 | struct fib_nh *nh = &FIB_RES_NH(*res); | 1276 | struct fib_nh *nh = &FIB_RES_NH(*res); |
| 1283 | 1277 | ||
| 1284 | if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) | 1278 | if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { |
| 1285 | rt->rt_gateway = nh->nh_gw; | 1279 | rt->rt_gateway = nh->nh_gw; |
| 1280 | rt->rt_uses_gateway = 1; | ||
| 1281 | } | ||
| 1286 | dst_init_metrics(&rt->dst, fi->fib_metrics, true); | 1282 | dst_init_metrics(&rt->dst, fi->fib_metrics, true); |
| 1287 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1283 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| 1288 | rt->dst.tclassid = nh->nh_tclassid; | 1284 | rt->dst.tclassid = nh->nh_tclassid; |
| @@ -1291,8 +1287,18 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, | |||
| 1291 | cached = rt_bind_exception(rt, fnhe, daddr); | 1287 | cached = rt_bind_exception(rt, fnhe, daddr); |
| 1292 | else if (!(rt->dst.flags & DST_NOCACHE)) | 1288 | else if (!(rt->dst.flags & DST_NOCACHE)) |
| 1293 | cached = rt_cache_route(nh, rt); | 1289 | cached = rt_cache_route(nh, rt); |
| 1294 | } | 1290 | if (unlikely(!cached)) { |
| 1295 | if (unlikely(!cached)) | 1291 | /* Routes we intend to cache in nexthop exception or |
| 1292 | * FIB nexthop have the DST_NOCACHE bit clear. | ||
| 1293 | * However, if we are unsuccessful at storing this | ||
| 1294 | * route into the cache we really need to set it. | ||
| 1295 | */ | ||
| 1296 | rt->dst.flags |= DST_NOCACHE; | ||
| 1297 | if (!rt->rt_gateway) | ||
| 1298 | rt->rt_gateway = daddr; | ||
| 1299 | rt_add_uncached_list(rt); | ||
| 1300 | } | ||
| 1301 | } else | ||
| 1296 | rt_add_uncached_list(rt); | 1302 | rt_add_uncached_list(rt); |
| 1297 | 1303 | ||
| 1298 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1304 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| @@ -1360,6 +1366,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
| 1360 | rth->rt_iif = 0; | 1366 | rth->rt_iif = 0; |
| 1361 | rth->rt_pmtu = 0; | 1367 | rth->rt_pmtu = 0; |
| 1362 | rth->rt_gateway = 0; | 1368 | rth->rt_gateway = 0; |
| 1369 | rth->rt_uses_gateway = 0; | ||
| 1363 | INIT_LIST_HEAD(&rth->rt_uncached); | 1370 | INIT_LIST_HEAD(&rth->rt_uncached); |
| 1364 | if (our) { | 1371 | if (our) { |
| 1365 | rth->dst.input= ip_local_deliver; | 1372 | rth->dst.input= ip_local_deliver; |
| @@ -1429,7 +1436,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 1429 | return -EINVAL; | 1436 | return -EINVAL; |
| 1430 | } | 1437 | } |
| 1431 | 1438 | ||
| 1432 | |||
| 1433 | err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), | 1439 | err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), |
| 1434 | in_dev->dev, in_dev, &itag); | 1440 | in_dev->dev, in_dev, &itag); |
| 1435 | if (err < 0) { | 1441 | if (err < 0) { |
| @@ -1439,10 +1445,13 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 1439 | goto cleanup; | 1445 | goto cleanup; |
| 1440 | } | 1446 | } |
| 1441 | 1447 | ||
| 1442 | if (out_dev == in_dev && err && | 1448 | do_cache = res->fi && !itag; |
| 1449 | if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && | ||
| 1443 | (IN_DEV_SHARED_MEDIA(out_dev) || | 1450 | (IN_DEV_SHARED_MEDIA(out_dev) || |
| 1444 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) | 1451 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { |
| 1445 | flags |= RTCF_DOREDIRECT; | 1452 | flags |= RTCF_DOREDIRECT; |
| 1453 | do_cache = false; | ||
| 1454 | } | ||
| 1446 | 1455 | ||
| 1447 | if (skb->protocol != htons(ETH_P_IP)) { | 1456 | if (skb->protocol != htons(ETH_P_IP)) { |
| 1448 | /* Not IP (i.e. ARP). Do not create route, if it is | 1457 | /* Not IP (i.e. ARP). Do not create route, if it is |
| @@ -1459,15 +1468,11 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 1459 | } | 1468 | } |
| 1460 | } | 1469 | } |
| 1461 | 1470 | ||
| 1462 | do_cache = false; | 1471 | if (do_cache) { |
| 1463 | if (res->fi) { | 1472 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); |
| 1464 | if (!itag) { | 1473 | if (rt_cache_valid(rth)) { |
| 1465 | rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); | 1474 | skb_dst_set_noref(skb, &rth->dst); |
| 1466 | if (rt_cache_valid(rth)) { | 1475 | goto out; |
| 1467 | skb_dst_set_noref(skb, &rth->dst); | ||
| 1468 | goto out; | ||
| 1469 | } | ||
| 1470 | do_cache = true; | ||
| 1471 | } | 1476 | } |
| 1472 | } | 1477 | } |
| 1473 | 1478 | ||
| @@ -1486,6 +1491,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 1486 | rth->rt_iif = 0; | 1491 | rth->rt_iif = 0; |
| 1487 | rth->rt_pmtu = 0; | 1492 | rth->rt_pmtu = 0; |
| 1488 | rth->rt_gateway = 0; | 1493 | rth->rt_gateway = 0; |
| 1494 | rth->rt_uses_gateway = 0; | ||
| 1489 | INIT_LIST_HEAD(&rth->rt_uncached); | 1495 | INIT_LIST_HEAD(&rth->rt_uncached); |
| 1490 | 1496 | ||
| 1491 | rth->dst.input = ip_forward; | 1497 | rth->dst.input = ip_forward; |
| @@ -1656,6 +1662,7 @@ local_input: | |||
| 1656 | rth->rt_iif = 0; | 1662 | rth->rt_iif = 0; |
| 1657 | rth->rt_pmtu = 0; | 1663 | rth->rt_pmtu = 0; |
| 1658 | rth->rt_gateway = 0; | 1664 | rth->rt_gateway = 0; |
| 1665 | rth->rt_uses_gateway = 0; | ||
| 1659 | INIT_LIST_HEAD(&rth->rt_uncached); | 1666 | INIT_LIST_HEAD(&rth->rt_uncached); |
| 1660 | if (res.type == RTN_UNREACHABLE) { | 1667 | if (res.type == RTN_UNREACHABLE) { |
| 1661 | rth->dst.input= ip_error; | 1668 | rth->dst.input= ip_error; |
| @@ -1758,6 +1765,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
| 1758 | struct in_device *in_dev; | 1765 | struct in_device *in_dev; |
| 1759 | u16 type = res->type; | 1766 | u16 type = res->type; |
| 1760 | struct rtable *rth; | 1767 | struct rtable *rth; |
| 1768 | bool do_cache; | ||
| 1761 | 1769 | ||
| 1762 | in_dev = __in_dev_get_rcu(dev_out); | 1770 | in_dev = __in_dev_get_rcu(dev_out); |
| 1763 | if (!in_dev) | 1771 | if (!in_dev) |
| @@ -1794,24 +1802,36 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
| 1794 | } | 1802 | } |
| 1795 | 1803 | ||
| 1796 | fnhe = NULL; | 1804 | fnhe = NULL; |
| 1805 | do_cache = fi != NULL; | ||
| 1797 | if (fi) { | 1806 | if (fi) { |
| 1798 | struct rtable __rcu **prth; | 1807 | struct rtable __rcu **prth; |
| 1808 | struct fib_nh *nh = &FIB_RES_NH(*res); | ||
| 1799 | 1809 | ||
| 1800 | fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr); | 1810 | fnhe = find_exception(nh, fl4->daddr); |
| 1801 | if (fnhe) | 1811 | if (fnhe) |
| 1802 | prth = &fnhe->fnhe_rth; | 1812 | prth = &fnhe->fnhe_rth; |
| 1803 | else | 1813 | else { |
| 1804 | prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output); | 1814 | if (unlikely(fl4->flowi4_flags & |
| 1815 | FLOWI_FLAG_KNOWN_NH && | ||
| 1816 | !(nh->nh_gw && | ||
| 1817 | nh->nh_scope == RT_SCOPE_LINK))) { | ||
| 1818 | do_cache = false; | ||
| 1819 | goto add; | ||
| 1820 | } | ||
| 1821 | prth = __this_cpu_ptr(nh->nh_pcpu_rth_output); | ||
| 1822 | } | ||
| 1805 | rth = rcu_dereference(*prth); | 1823 | rth = rcu_dereference(*prth); |
| 1806 | if (rt_cache_valid(rth)) { | 1824 | if (rt_cache_valid(rth)) { |
| 1807 | dst_hold(&rth->dst); | 1825 | dst_hold(&rth->dst); |
| 1808 | return rth; | 1826 | return rth; |
| 1809 | } | 1827 | } |
| 1810 | } | 1828 | } |
| 1829 | |||
| 1830 | add: | ||
| 1811 | rth = rt_dst_alloc(dev_out, | 1831 | rth = rt_dst_alloc(dev_out, |
| 1812 | IN_DEV_CONF_GET(in_dev, NOPOLICY), | 1832 | IN_DEV_CONF_GET(in_dev, NOPOLICY), |
| 1813 | IN_DEV_CONF_GET(in_dev, NOXFRM), | 1833 | IN_DEV_CONF_GET(in_dev, NOXFRM), |
| 1814 | fi); | 1834 | do_cache); |
| 1815 | if (!rth) | 1835 | if (!rth) |
| 1816 | return ERR_PTR(-ENOBUFS); | 1836 | return ERR_PTR(-ENOBUFS); |
| 1817 | 1837 | ||
| @@ -1824,6 +1844,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
| 1824 | rth->rt_iif = orig_oif ? : 0; | 1844 | rth->rt_iif = orig_oif ? : 0; |
| 1825 | rth->rt_pmtu = 0; | 1845 | rth->rt_pmtu = 0; |
| 1826 | rth->rt_gateway = 0; | 1846 | rth->rt_gateway = 0; |
| 1847 | rth->rt_uses_gateway = 0; | ||
| 1827 | INIT_LIST_HEAD(&rth->rt_uncached); | 1848 | INIT_LIST_HEAD(&rth->rt_uncached); |
| 1828 | 1849 | ||
| 1829 | RT_CACHE_STAT_INC(out_slow_tot); | 1850 | RT_CACHE_STAT_INC(out_slow_tot); |
| @@ -2102,6 +2123,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
| 2102 | rt->rt_flags = ort->rt_flags; | 2123 | rt->rt_flags = ort->rt_flags; |
| 2103 | rt->rt_type = ort->rt_type; | 2124 | rt->rt_type = ort->rt_type; |
| 2104 | rt->rt_gateway = ort->rt_gateway; | 2125 | rt->rt_gateway = ort->rt_gateway; |
| 2126 | rt->rt_uses_gateway = ort->rt_uses_gateway; | ||
| 2105 | 2127 | ||
| 2106 | INIT_LIST_HEAD(&rt->rt_uncached); | 2128 | INIT_LIST_HEAD(&rt->rt_uncached); |
| 2107 | 2129 | ||
| @@ -2180,28 +2202,31 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
| 2180 | if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) | 2202 | if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) |
| 2181 | goto nla_put_failure; | 2203 | goto nla_put_failure; |
| 2182 | } | 2204 | } |
| 2183 | if (rt->rt_gateway && | 2205 | if (rt->rt_uses_gateway && |
| 2184 | nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) | 2206 | nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) |
| 2185 | goto nla_put_failure; | 2207 | goto nla_put_failure; |
| 2186 | 2208 | ||
| 2209 | expires = rt->dst.expires; | ||
| 2210 | if (expires) { | ||
| 2211 | unsigned long now = jiffies; | ||
| 2212 | |||
| 2213 | if (time_before(now, expires)) | ||
| 2214 | expires -= now; | ||
| 2215 | else | ||
| 2216 | expires = 0; | ||
| 2217 | } | ||
| 2218 | |||
| 2187 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); | 2219 | memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); |
| 2188 | if (rt->rt_pmtu) | 2220 | if (rt->rt_pmtu && expires) |
| 2189 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; | 2221 | metrics[RTAX_MTU - 1] = rt->rt_pmtu; |
| 2190 | if (rtnetlink_put_metrics(skb, metrics) < 0) | 2222 | if (rtnetlink_put_metrics(skb, metrics) < 0) |
| 2191 | goto nla_put_failure; | 2223 | goto nla_put_failure; |
| 2192 | 2224 | ||
| 2193 | if (fl4->flowi4_mark && | 2225 | if (fl4->flowi4_mark && |
| 2194 | nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark)) | 2226 | nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) |
| 2195 | goto nla_put_failure; | 2227 | goto nla_put_failure; |
| 2196 | 2228 | ||
| 2197 | error = rt->dst.error; | 2229 | error = rt->dst.error; |
| 2198 | expires = rt->dst.expires; | ||
| 2199 | if (expires) { | ||
| 2200 | if (time_before(jiffies, expires)) | ||
| 2201 | expires -= jiffies; | ||
| 2202 | else | ||
| 2203 | expires = 0; | ||
| 2204 | } | ||
| 2205 | 2230 | ||
| 2206 | if (rt_is_input_route(rt)) { | 2231 | if (rt_is_input_route(rt)) { |
| 2207 | if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) | 2232 | if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) |
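In the route.c diff above, rt_fill_info() now computes the expiry before the metrics so a learned PMTU is only reported while its exception is still valid: the absolute dst.expires deadline becomes a remaining time, clamped to zero once it has passed, and rt_pmtu is copied into RTAX_MTU only when that remainder is non-zero. A small userspace sketch of the deadline-to-remaining conversion, using the same wraparound-safe comparison idea as the kernel's time_before(); the function names are mine:

#include <stdio.h>

/* Same idea as the kernel's time_before(): wraparound-safe signed compare. */
static int before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* Turn an absolute deadline (in jiffies) into a remaining time, zero once past. */
static unsigned long remaining(unsigned long now, unsigned long deadline)
{
	return before(now, deadline) ? deadline - now : 0;
}

int main(void)
{
	printf("%lu\n", remaining(1000, 1500));  /* 500: still valid          */
	printf("%lu\n", remaining(2000, 1500));  /* 0: expired, do not report */
	return 0;
}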
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 9205e492dc9d..63d4eccc674d 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -248,6 +248,8 @@ int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer, | |||
| 248 | ctxt = rcu_dereference(tcp_fastopen_ctx); | 248 | ctxt = rcu_dereference(tcp_fastopen_ctx); |
| 249 | if (ctxt) | 249 | if (ctxt) |
| 250 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); | 250 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); |
| 251 | else | ||
| 252 | memset(user_key, 0, sizeof(user_key)); | ||
| 251 | rcu_read_unlock(); | 253 | rcu_read_unlock(); |
| 252 | 254 | ||
| 253 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", | 255 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", |
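The sysctl hunk above zeroes user_key when no TCP Fast Open context has been installed yet, so the snprintf() that follows never formats uninitialized stack memory. A hedged userspace sketch of the same initialize-on-every-path pattern, with hypothetical names standing in for the kernel structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the Fast Open context; may be absent (NULL). */
struct fastopen_ctx_sketch {
	uint32_t key[4];
};

static void format_key(const struct fastopen_ctx_sketch *ctx, char *out, size_t len)
{
	uint32_t key[4];

	if (ctx)
		memcpy(key, ctx->key, sizeof(key));
	else
		memset(key, 0, sizeof(key));  /* never format stack garbage */

	snprintf(out, len, "%08x-%08x-%08x-%08x", key[0], key[1], key[2], key[3]);
}

int main(void)
{
	char buf[40];

	format_key(NULL, buf, sizeof(buf));   /* no key installed yet */
	puts(buf);                            /* 00000000-00000000-00000000-00000000 */
	return 0;
}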
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f32c02e2a543..197c0008503c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -549,14 +549,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
| 549 | !tp->urg_data || | 549 | !tp->urg_data || |
| 550 | before(tp->urg_seq, tp->copied_seq) || | 550 | before(tp->urg_seq, tp->copied_seq) || |
| 551 | !before(tp->urg_seq, tp->rcv_nxt)) { | 551 | !before(tp->urg_seq, tp->rcv_nxt)) { |
| 552 | struct sk_buff *skb; | ||
| 553 | 552 | ||
| 554 | answ = tp->rcv_nxt - tp->copied_seq; | 553 | answ = tp->rcv_nxt - tp->copied_seq; |
| 555 | 554 | ||
| 556 | /* Subtract 1, if FIN is in queue. */ | 555 | /* Subtract 1, if FIN was received */ |
| 557 | skb = skb_peek_tail(&sk->sk_receive_queue); | 556 | if (answ && sock_flag(sk, SOCK_DONE)) |
| 558 | if (answ && skb) | 557 | answ--; |
| 559 | answ -= tcp_hdr(skb)->fin; | ||
| 560 | } else | 558 | } else |
| 561 | answ = tp->urg_seq - tp->copied_seq; | 559 | answ = tp->urg_seq - tp->copied_seq; |
| 562 | release_sock(sk); | 560 | release_sock(sk); |
| @@ -2766,6 +2764,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info) | |||
| 2766 | info->tcpi_options |= TCPI_OPT_ECN; | 2764 | info->tcpi_options |= TCPI_OPT_ECN; |
| 2767 | if (tp->ecn_flags & TCP_ECN_SEEN) | 2765 | if (tp->ecn_flags & TCP_ECN_SEEN) |
| 2768 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; | 2766 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; |
| 2767 | if (tp->syn_data_acked) | ||
| 2768 | info->tcpi_options |= TCPI_OPT_SYN_DATA; | ||
| 2769 | 2769 | ||
| 2770 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); | 2770 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
| 2771 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); | 2771 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); |
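The tcp_ioctl() hunk above computes SIOCINQ without peeking at the tail skb: the readable byte count is the sequence span between rcv_nxt and copied_seq, minus one when a FIN has been received, because the FIN consumes a sequence number but carries no data. A small sketch of that arithmetic (plain integers, no socket state):

#include <stdint.h>
#include <stdio.h>

/* Bytes the application can still read: sequence space between what arrived
 * in order (rcv_nxt) and what was already copied out (copied_seq), minus one
 * if a FIN was received, since the FIN takes a sequence number but no data. */
static uint32_t inq_bytes(uint32_t rcv_nxt, uint32_t copied_seq, int fin_received)
{
	uint32_t answ = rcv_nxt - copied_seq;

	if (answ && fin_received)
		answ--;
	return answ;
}

int main(void)
{
	printf("%u\n", inq_bytes(1010, 1000, 0));  /* 10 bytes of data       */
	printf("%u\n", inq_bytes(1011, 1000, 1));  /* 10 bytes of data + FIN */
	return 0;
}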
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 813b43a76fec..834857f3c871 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c | |||
| @@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext, | |||
| 313 | .tcpv_rttcnt = ca->cnt_rtt, | 313 | .tcpv_rttcnt = ca->cnt_rtt, |
| 314 | .tcpv_minrtt = ca->base_rtt, | 314 | .tcpv_minrtt = ca->base_rtt, |
| 315 | }; | 315 | }; |
| 316 | u64 t = ca->sum_rtt; | ||
| 317 | 316 | ||
| 318 | do_div(t, ca->cnt_rtt); | 317 | if (info.tcpv_rttcnt > 0) { |
| 319 | info.tcpv_rtt = t; | 318 | u64 t = ca->sum_rtt; |
| 320 | 319 | ||
| 320 | do_div(t, info.tcpv_rttcnt); | ||
| 321 | info.tcpv_rtt = t; | ||
| 322 | } | ||
| 321 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); | 323 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
| 322 | } | 324 | } |
| 323 | } | 325 | } |
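The tcp_illinois hunk above guards the division: do_div() is the kernel's 64-by-32 division helper for 32-bit architectures, and dividing the accumulated RTT sum by a zero sample count is undefined, so the mean is now computed only when at least one RTT sample exists. A userspace sketch of the same guard, with ordinary division standing in for do_div():

#include <stdint.h>
#include <stdio.h>

/* Report the mean RTT only when at least one sample was collected;
 * otherwise keep the field at its default instead of dividing by zero. */
static uint64_t mean_rtt(uint64_t sum_rtt, uint32_t cnt_rtt, uint64_t fallback)
{
	if (cnt_rtt == 0)
		return fallback;
	return sum_rtt / cnt_rtt;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)mean_rtt(3000, 3, 0)); /* 1000           */
	printf("%llu\n", (unsigned long long)mean_rtt(0, 0, 0));    /* 0, no division */
	return 0;
}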
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 432c36649db3..2c2b13a999ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -4529,6 +4529,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | |||
| 4529 | struct tcphdr *th; | 4529 | struct tcphdr *th; |
| 4530 | bool fragstolen; | 4530 | bool fragstolen; |
| 4531 | 4531 | ||
| 4532 | if (size == 0) | ||
| 4533 | return 0; | ||
| 4534 | |||
| 4532 | skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); | 4535 | skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); |
| 4533 | if (!skb) | 4536 | if (!skb) |
| 4534 | goto err; | 4537 | goto err; |
| @@ -5646,6 +5649,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, | |||
| 5646 | tcp_rearm_rto(sk); | 5649 | tcp_rearm_rto(sk); |
| 5647 | return true; | 5650 | return true; |
| 5648 | } | 5651 | } |
| 5652 | tp->syn_data_acked = tp->syn_data; | ||
| 5649 | return false; | 5653 | return false; |
| 5650 | } | 5654 | } |
| 5651 | 5655 | ||
| @@ -5963,7 +5967,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5963 | 5967 | ||
| 5964 | req = tp->fastopen_rsk; | 5968 | req = tp->fastopen_rsk; |
| 5965 | if (req != NULL) { | 5969 | if (req != NULL) { |
| 5966 | BUG_ON(sk->sk_state != TCP_SYN_RECV && | 5970 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && |
| 5967 | sk->sk_state != TCP_FIN_WAIT1); | 5971 | sk->sk_state != TCP_FIN_WAIT1); |
| 5968 | 5972 | ||
| 5969 | if (tcp_check_req(sk, skb, req, NULL, true) == NULL) | 5973 | if (tcp_check_req(sk, skb, req, NULL, true) == NULL) |
| @@ -6052,7 +6056,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 6052 | * ACK we have received, this would have acknowledged | 6056 | * ACK we have received, this would have acknowledged |
| 6053 | * our SYNACK so stop the SYNACK timer. | 6057 | * our SYNACK so stop the SYNACK timer. |
| 6054 | */ | 6058 | */ |
| 6055 | if (acceptable && req != NULL) { | 6059 | if (req != NULL) { |
| 6060 | /* Return RST if ack_seq is invalid. | ||
| 6061 | * Note that RFC793 only says to generate a | ||
| 6062 | * DUPACK for it but for TCP Fast Open it seems | ||
| 6063 | * better to treat this case like TCP_SYN_RECV | ||
| 6064 | * above. | ||
| 6065 | */ | ||
| 6066 | if (!acceptable) | ||
| 6067 | return 1; | ||
| 6056 | /* We no longer need the request sock. */ | 6068 | /* We no longer need the request sock. */ |
| 6057 | reqsk_fastopen_remove(sk, req, false); | 6069 | reqsk_fastopen_remove(sk, req, false); |
| 6058 | tcp_rearm_rto(sk); | 6070 | tcp_rearm_rto(sk); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 75735c9a6a9d..0c4a64355603 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -708,10 +708,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
| 708 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 708 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
| 709 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; | 709 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; |
| 710 | /* When socket is gone, all binding information is lost. | 710 | /* When socket is gone, all binding information is lost. |
| 711 | * routing might fail in this case. using iif for oif to | 711 | * routing might fail in this case. No choice here, if we choose to force |
| 712 | * make sure we can deliver it | 712 | * input interface, we will misroute in case of asymmetric route. |
| 713 | */ | 713 | */ |
| 714 | arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); | 714 | if (sk) |
| 715 | arg.bound_dev_if = sk->sk_bound_dev_if; | ||
| 715 | 716 | ||
| 716 | net = dev_net(skb_dst(skb)->dev); | 717 | net = dev_net(skb_dst(skb)->dev); |
| 717 | arg.tos = ip_hdr(skb)->tos; | 718 | arg.tos = ip_hdr(skb)->tos; |
| @@ -1460,6 +1461,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, | |||
| 1460 | skb_set_owner_r(skb, child); | 1461 | skb_set_owner_r(skb, child); |
| 1461 | __skb_queue_tail(&child->sk_receive_queue, skb); | 1462 | __skb_queue_tail(&child->sk_receive_queue, skb); |
| 1462 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 1463 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
| 1464 | tp->syn_data_acked = 1; | ||
| 1463 | } | 1465 | } |
| 1464 | sk->sk_data_ready(sk, 0); | 1466 | sk->sk_data_ready(sk, 0); |
| 1465 | bh_unlock_sock(child); | 1467 | bh_unlock_sock(child); |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 4c752a6e0bcd..53bc5847bfa8 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
| @@ -864,7 +864,7 @@ static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, | |||
| 864 | } | 864 | } |
| 865 | a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6]; | 865 | a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6]; |
| 866 | if (a) { | 866 | if (a) { |
| 867 | if (nla_len(a) != sizeof(sizeof(struct in6_addr))) | 867 | if (nla_len(a) != sizeof(struct in6_addr)) |
| 868 | return -EINVAL; | 868 | return -EINVAL; |
| 869 | addr->family = AF_INET6; | 869 | addr->family = AF_INET6; |
| 870 | memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6)); | 870 | memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6)); |
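The tcp_metrics fix above removes a doubled sizeof: sizeof(sizeof(struct in6_addr)) evaluates to the size of a size_t rather than the 16-byte address, so the old length check could never match a real IPv6 address attribute. A two-line userspace demonstration of the pitfall:

#include <stdio.h>
#include <netinet/in.h>   /* struct in6_addr */

int main(void)
{
	/* The buggy check compared nla_len() against the inner value below. */
	printf("sizeof(struct in6_addr)         = %zu\n", sizeof(struct in6_addr));          /* 16 */
	printf("sizeof(sizeof(struct in6_addr)) = %zu\n", sizeof(sizeof(struct in6_addr)));  /* size of a size_t */
	return 0;
}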
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 27536ba16c9d..a7302d974f32 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
| @@ -510,6 +510,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
| 510 | newtp->rx_opt.mss_clamp = req->mss; | 510 | newtp->rx_opt.mss_clamp = req->mss; |
| 511 | TCP_ECN_openreq_child(newtp, req); | 511 | TCP_ECN_openreq_child(newtp, req); |
| 512 | newtp->fastopen_rsk = NULL; | 512 | newtp->fastopen_rsk = NULL; |
| 513 | newtp->syn_data_acked = 0; | ||
| 513 | 514 | ||
| 514 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); | 515 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); |
| 515 | } | 516 | } |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index fc04711e80c8..d47c1b4421a3 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -347,8 +347,8 @@ void tcp_retransmit_timer(struct sock *sk) | |||
| 347 | return; | 347 | return; |
| 348 | } | 348 | } |
| 349 | if (tp->fastopen_rsk) { | 349 | if (tp->fastopen_rsk) { |
| 350 | BUG_ON(sk->sk_state != TCP_SYN_RECV && | 350 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && |
| 351 | sk->sk_state != TCP_FIN_WAIT1); | 351 | sk->sk_state != TCP_FIN_WAIT1); |
| 352 | tcp_fastopen_synack_timer(sk); | 352 | tcp_fastopen_synack_timer(sk); |
| 353 | /* Before we receive ACK to our SYN-ACK don't retransmit | 353 | /* Before we receive ACK to our SYN-ACK don't retransmit |
| 354 | * anything else (e.g., data or FIN segments). | 354 | * anything else (e.g., data or FIN segments). |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 681ea2f413e2..05c5ab8d983c 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
| @@ -91,6 +91,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
| 91 | RTCF_LOCAL); | 91 | RTCF_LOCAL); |
| 92 | xdst->u.rt.rt_type = rt->rt_type; | 92 | xdst->u.rt.rt_type = rt->rt_type; |
| 93 | xdst->u.rt.rt_gateway = rt->rt_gateway; | 93 | xdst->u.rt.rt_gateway = rt->rt_gateway; |
| 94 | xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; | ||
| 94 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; | 95 | xdst->u.rt.rt_pmtu = rt->rt_pmtu; |
| 95 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); | 96 | INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); |
| 96 | 97 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d7c56f8a5b4e..0424e4e27414 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -3064,14 +3064,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) | |||
| 3064 | struct hlist_node *n; | 3064 | struct hlist_node *n; |
| 3065 | hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], | 3065 | hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], |
| 3066 | addr_lst) { | 3066 | addr_lst) { |
| 3067 | if (!net_eq(dev_net(ifa->idev->dev), net)) | ||
| 3068 | continue; | ||
| 3067 | /* sync with offset */ | 3069 | /* sync with offset */ |
| 3068 | if (p < state->offset) { | 3070 | if (p < state->offset) { |
| 3069 | p++; | 3071 | p++; |
| 3070 | continue; | 3072 | continue; |
| 3071 | } | 3073 | } |
| 3072 | state->offset++; | 3074 | state->offset++; |
| 3073 | if (net_eq(dev_net(ifa->idev->dev), net)) | 3075 | return ifa; |
| 3074 | return ifa; | ||
| 3075 | } | 3076 | } |
| 3076 | 3077 | ||
| 3077 | /* prepare for next bucket */ | 3078 | /* prepare for next bucket */ |
| @@ -3089,18 +3090,20 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
| 3089 | struct hlist_node *n = &ifa->addr_lst; | 3090 | struct hlist_node *n = &ifa->addr_lst; |
| 3090 | 3091 | ||
| 3091 | hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { | 3092 | hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) { |
| 3093 | if (!net_eq(dev_net(ifa->idev->dev), net)) | ||
| 3094 | continue; | ||
| 3092 | state->offset++; | 3095 | state->offset++; |
| 3093 | if (net_eq(dev_net(ifa->idev->dev), net)) | 3096 | return ifa; |
| 3094 | return ifa; | ||
| 3095 | } | 3097 | } |
| 3096 | 3098 | ||
| 3097 | while (++state->bucket < IN6_ADDR_HSIZE) { | 3099 | while (++state->bucket < IN6_ADDR_HSIZE) { |
| 3098 | state->offset = 0; | 3100 | state->offset = 0; |
| 3099 | hlist_for_each_entry_rcu_bh(ifa, n, | 3101 | hlist_for_each_entry_rcu_bh(ifa, n, |
| 3100 | &inet6_addr_lst[state->bucket], addr_lst) { | 3102 | &inet6_addr_lst[state->bucket], addr_lst) { |
| 3103 | if (!net_eq(dev_net(ifa->idev->dev), net)) | ||
| 3104 | continue; | ||
| 3101 | state->offset++; | 3105 | state->offset++; |
| 3102 | if (net_eq(dev_net(ifa->idev->dev), net)) | 3106 | return ifa; |
| 3103 | return ifa; | ||
| 3104 | } | 3107 | } |
| 3105 | } | 3108 | } |
| 3106 | 3109 | ||
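The addrconf seq_file hunks above move the network-namespace check ahead of the position bookkeeping, so addresses belonging to other namespaces are skipped before p and state->offset advance; counting entries the reader never sees would make a resumed read land on the wrong address. A generic userspace sketch of the filter-before-count iteration pattern; the array and the match() predicate are stand-ins:

#include <stddef.h>
#include <stdio.h>

/* Return the pos-th element that matches, counting only matching elements. */
static const int *nth_match(const int *items, size_t n, size_t pos,
			    int (*match)(int))
{
	size_t seen = 0;

	for (size_t i = 0; i < n; i++) {
		if (!match(items[i]))
			continue;          /* filter first ... */
		if (seen++ == pos)         /* ... then count */
			return &items[i];
	}
	return NULL;
}

static int is_even(int v) { return (v & 1) == 0; }

int main(void)
{
	int items[] = { 1, 2, 3, 4, 5, 6 };
	const int *p = nth_match(items, 6, 1, is_even);  /* second matching item */

	if (p)
		printf("%d\n", *p);   /* 4 */
	return 0;
}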
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e22e6d88bac6..a974247a9ae4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
| @@ -822,13 +822,6 @@ out: | |||
| 822 | return segs; | 822 | return segs; |
| 823 | } | 823 | } |
| 824 | 824 | ||
| 825 | struct ipv6_gro_cb { | ||
| 826 | struct napi_gro_cb napi; | ||
| 827 | int proto; | ||
| 828 | }; | ||
| 829 | |||
| 830 | #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) | ||
| 831 | |||
| 832 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | 825 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, |
| 833 | struct sk_buff *skb) | 826 | struct sk_buff *skb) |
| 834 | { | 827 | { |
| @@ -874,28 +867,31 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | |||
| 874 | iph = ipv6_hdr(skb); | 867 | iph = ipv6_hdr(skb); |
| 875 | } | 868 | } |
| 876 | 869 | ||
| 877 | IPV6_GRO_CB(skb)->proto = proto; | 870 | NAPI_GRO_CB(skb)->proto = proto; |
| 878 | 871 | ||
| 879 | flush--; | 872 | flush--; |
| 880 | nlen = skb_network_header_len(skb); | 873 | nlen = skb_network_header_len(skb); |
| 881 | 874 | ||
| 882 | for (p = *head; p; p = p->next) { | 875 | for (p = *head; p; p = p->next) { |
| 883 | struct ipv6hdr *iph2; | 876 | const struct ipv6hdr *iph2; |
| 877 | __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ | ||
| 884 | 878 | ||
| 885 | if (!NAPI_GRO_CB(p)->same_flow) | 879 | if (!NAPI_GRO_CB(p)->same_flow) |
| 886 | continue; | 880 | continue; |
| 887 | 881 | ||
| 888 | iph2 = ipv6_hdr(p); | 882 | iph2 = ipv6_hdr(p); |
| 883 | first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; | ||
| 889 | 884 | ||
| 890 | /* All fields must match except length. */ | 885 | /* All fields must match except length and Traffic Class. */ |
| 891 | if (nlen != skb_network_header_len(p) || | 886 | if (nlen != skb_network_header_len(p) || |
| 892 | memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || | 887 | (first_word & htonl(0xF00FFFFF)) || |
| 893 | memcmp(&iph->nexthdr, &iph2->nexthdr, | 888 | memcmp(&iph->nexthdr, &iph2->nexthdr, |
| 894 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | 889 | nlen - offsetof(struct ipv6hdr, nexthdr))) { |
| 895 | NAPI_GRO_CB(p)->same_flow = 0; | 890 | NAPI_GRO_CB(p)->same_flow = 0; |
| 896 | continue; | 891 | continue; |
| 897 | } | 892 | } |
| 898 | 893 | /* flush if Traffic Class fields are different */ | |
| 894 | NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); | ||
| 899 | NAPI_GRO_CB(p)->flush |= flush; | 895 | NAPI_GRO_CB(p)->flush |= flush; |
| 900 | } | 896 | } |
| 901 | 897 | ||
| @@ -927,7 +923,7 @@ static int ipv6_gro_complete(struct sk_buff *skb) | |||
| 927 | sizeof(*iph)); | 923 | sizeof(*iph)); |
| 928 | 924 | ||
| 929 | rcu_read_lock(); | 925 | rcu_read_lock(); |
| 930 | ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); | 926 | ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]); |
| 931 | if (WARN_ON(!ops || !ops->gro_complete)) | 927 | if (WARN_ON(!ops || !ops->gro_complete)) |
| 932 | goto out_unlock; | 928 | goto out_unlock; |
| 933 | 929 | ||
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index e418bd6350a4..d57dab17a182 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c | |||
| @@ -186,7 +186,8 @@ nf_nat_ipv6_out(unsigned int hooknum, | |||
| 186 | 186 | ||
| 187 | if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, | 187 | if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, |
| 188 | &ct->tuplehash[!dir].tuple.dst.u3) || | 188 | &ct->tuplehash[!dir].tuple.dst.u3) || |
| 189 | (ct->tuplehash[dir].tuple.src.u.all != | 189 | (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && |
| 190 | ct->tuplehash[dir].tuple.src.u.all != | ||
| 190 | ct->tuplehash[!dir].tuple.dst.u.all)) | 191 | ct->tuplehash[!dir].tuple.dst.u.all)) |
| 191 | if (nf_xfrm_me_harder(skb, AF_INET6) < 0) | 192 | if (nf_xfrm_me_harder(skb, AF_INET6) < 0) |
| 192 | ret = NF_DROP; | 193 | ret = NF_DROP; |
| @@ -222,6 +223,7 @@ nf_nat_ipv6_local_fn(unsigned int hooknum, | |||
| 222 | } | 223 | } |
| 223 | #ifdef CONFIG_XFRM | 224 | #ifdef CONFIG_XFRM |
| 224 | else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 225 | else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
| 226 | ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && | ||
| 225 | ct->tuplehash[dir].tuple.dst.u.all != | 227 | ct->tuplehash[dir].tuple.dst.u.all != |
| 226 | ct->tuplehash[!dir].tuple.src.u.all) | 228 | ct->tuplehash[!dir].tuple.src.u.all) |
| 227 | if (nf_xfrm_me_harder(skb, AF_INET6)) | 229 | if (nf_xfrm_me_harder(skb, AF_INET6)) |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 18bd9bbbd1c6..22c8ea951185 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -85,7 +85,7 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = { | |||
| 85 | { } | 85 | { } |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | static int __net_init nf_ct_frag6_sysctl_register(struct net *net) | 88 | static int nf_ct_frag6_sysctl_register(struct net *net) |
| 89 | { | 89 | { |
| 90 | struct ctl_table *table; | 90 | struct ctl_table *table; |
| 91 | struct ctl_table_header *hdr; | 91 | struct ctl_table_header *hdr; |
| @@ -127,7 +127,7 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | #else | 129 | #else |
| 130 | static int __net_init nf_ct_frag6_sysctl_register(struct net *net) | 130 | static int nf_ct_frag6_sysctl_register(struct net *net) |
| 131 | { | 131 | { |
| 132 | return 0; | 132 | return 0; |
| 133 | } | 133 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7c7e963260e1..b1e6cf0b95fd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -219,7 +219,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
| 219 | }; | 219 | }; |
| 220 | 220 | ||
| 221 | static const u32 ip6_template_metrics[RTAX_MAX] = { | 221 | static const u32 ip6_template_metrics[RTAX_MAX] = { |
| 222 | [RTAX_HOPLIMIT - 1] = 255, | 222 | [RTAX_HOPLIMIT - 1] = 0, |
| 223 | }; | 223 | }; |
| 224 | 224 | ||
| 225 | static const struct rt6_info ip6_null_entry_template = { | 225 | static const struct rt6_info ip6_null_entry_template = { |
| @@ -1232,7 +1232,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
| 1232 | rt->rt6i_dst.addr = fl6->daddr; | 1232 | rt->rt6i_dst.addr = fl6->daddr; |
| 1233 | rt->rt6i_dst.plen = 128; | 1233 | rt->rt6i_dst.plen = 128; |
| 1234 | rt->rt6i_idev = idev; | 1234 | rt->rt6i_idev = idev; |
| 1235 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); | 1235 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); |
| 1236 | 1236 | ||
| 1237 | spin_lock_bh(&icmp6_dst_lock); | 1237 | spin_lock_bh(&icmp6_dst_lock); |
| 1238 | rt->dst.next = icmp6_dst_gc_list; | 1238 | rt->dst.next = icmp6_dst_gc_list; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 49c890386ce9..26175bffbaa0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -877,7 +877,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
| 877 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); | 877 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); |
| 878 | 878 | ||
| 879 | fl6.flowi6_proto = IPPROTO_TCP; | 879 | fl6.flowi6_proto = IPPROTO_TCP; |
| 880 | fl6.flowi6_oif = inet6_iif(skb); | 880 | if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) |
| 881 | fl6.flowi6_oif = inet6_iif(skb); | ||
| 881 | fl6.fl6_dport = t1->dest; | 882 | fl6.fl6_dport = t1->dest; |
| 882 | fl6.fl6_sport = t1->source; | 883 | fl6.fl6_sport = t1->source; |
| 883 | security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); | 884 | security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 95a3a7a336ba..496ce2cebcd7 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
| @@ -421,6 +421,8 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) | |||
| 421 | hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); | 421 | hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | tty->driver_data = self; | ||
| 425 | |||
| 424 | return tty_port_install(&self->port, driver, tty); | 426 | return tty_port_install(&self->port, driver, tty); |
| 425 | } | 427 | } |
| 426 | 428 | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 37b8b8ba31f7..76125c57ee6d 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
| @@ -291,6 +291,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p | |||
| 291 | 291 | ||
| 292 | out_del_dev: | 292 | out_del_dev: |
| 293 | free_netdev(dev); | 293 | free_netdev(dev); |
| 294 | spriv->dev = NULL; | ||
| 294 | out_del_session: | 295 | out_del_session: |
| 295 | l2tp_session_delete(session); | 296 | l2tp_session_delete(session); |
| 296 | out: | 297 | out: |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 5f3620f0bc0a..bf87c70ac6c5 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
| @@ -1108,7 +1108,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
| 1108 | sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; | 1108 | sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; |
| 1109 | sdata->u.ibss.ibss_join_req = jiffies; | 1109 | sdata->u.ibss.ibss_join_req = jiffies; |
| 1110 | 1110 | ||
| 1111 | memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); | 1111 | memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); |
| 1112 | sdata->u.ibss.ssid_len = params->ssid_len; | 1112 | sdata->u.ibss.ssid_len = params->ssid_len; |
| 1113 | 1113 | ||
| 1114 | mutex_unlock(&sdata->u.ibss.mtx); | 1114 | mutex_unlock(&sdata->u.ibss.mtx); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 6f8a73c64fb3..7de7717ad67d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -853,7 +853,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 853 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 853 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 854 | if (info->control.vif == &sdata->vif) { | 854 | if (info->control.vif == &sdata->vif) { |
| 855 | __skb_unlink(skb, &local->pending[i]); | 855 | __skb_unlink(skb, &local->pending[i]); |
| 856 | dev_kfree_skb_irq(skb); | 856 | ieee80211_free_txskb(&local->hw, skb); |
| 857 | } | 857 | } |
| 858 | } | 858 | } |
| 859 | } | 859 | } |
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index accfa00ffcdf..a16b7b4b1e02 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c | |||
| @@ -56,7 +56,6 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | |||
| 56 | u64 tsfdelta; | 56 | u64 tsfdelta; |
| 57 | 57 | ||
| 58 | spin_lock_bh(&ifmsh->sync_offset_lock); | 58 | spin_lock_bh(&ifmsh->sync_offset_lock); |
| 59 | |||
| 60 | if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { | 59 | if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { |
| 61 | msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n", | 60 | msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n", |
| 62 | (long long) ifmsh->sync_offset_clockdrift_max); | 61 | (long long) ifmsh->sync_offset_clockdrift_max); |
| @@ -69,11 +68,11 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | |||
| 69 | tsfdelta = -beacon_int_fraction; | 68 | tsfdelta = -beacon_int_fraction; |
| 70 | ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; | 69 | ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; |
| 71 | } | 70 | } |
| 71 | spin_unlock_bh(&ifmsh->sync_offset_lock); | ||
| 72 | 72 | ||
| 73 | tsf = drv_get_tsf(local, sdata); | 73 | tsf = drv_get_tsf(local, sdata); |
| 74 | if (tsf != -1ULL) | 74 | if (tsf != -1ULL) |
| 75 | drv_set_tsf(local, sdata, tsf + tsfdelta); | 75 | drv_set_tsf(local, sdata, tsf + tsfdelta); |
| 76 | spin_unlock_bh(&ifmsh->sync_offset_lock); | ||
| 77 | } | 76 | } |
| 78 | 77 | ||
| 79 | static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | 78 | static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e714ed8bb198..1b7eed252fe9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -3099,22 +3099,32 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
| 3099 | ht_cfreq, ht_oper->primary_chan, | 3099 | ht_cfreq, ht_oper->primary_chan, |
| 3100 | cbss->channel->band); | 3100 | cbss->channel->band); |
| 3101 | ht_oper = NULL; | 3101 | ht_oper = NULL; |
| 3102 | } else { | ||
| 3103 | channel_type = NL80211_CHAN_HT20; | ||
| 3102 | } | 3104 | } |
| 3103 | } | 3105 | } |
| 3104 | 3106 | ||
| 3105 | if (ht_oper) { | 3107 | if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { |
| 3106 | channel_type = NL80211_CHAN_HT20; | 3108 | /* |
| 3109 | * cfg80211 already verified that the channel itself can | ||
| 3110 | * be used, but it didn't check that we can do the right | ||
| 3111 | * HT type, so do that here as well. If HT40 isn't allowed | ||
| 3112 | * on this channel, disable 40 MHz operation. | ||
| 3113 | */ | ||
| 3107 | 3114 | ||
| 3108 | if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { | 3115 | switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { |
| 3109 | switch (ht_oper->ht_param & | 3116 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: |
| 3110 | IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { | 3117 | if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS) |
| 3111 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: | 3118 | ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; |
| 3119 | else | ||
| 3112 | channel_type = NL80211_CHAN_HT40PLUS; | 3120 | channel_type = NL80211_CHAN_HT40PLUS; |
| 3113 | break; | 3121 | break; |
| 3114 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: | 3122 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: |
| 3123 | if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS) | ||
| 3124 | ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; | ||
| 3125 | else | ||
| 3115 | channel_type = NL80211_CHAN_HT40MINUS; | 3126 | channel_type = NL80211_CHAN_HT40MINUS; |
| 3116 | break; | 3127 | break; |
| 3117 | } | ||
| 3118 | } | 3128 | } |
| 3119 | } | 3129 | } |
| 3120 | 3130 | ||
| @@ -3549,6 +3559,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
| 3549 | { | 3559 | { |
| 3550 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 3560 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 3551 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; | 3561 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
| 3562 | bool tx = !req->local_state_change; | ||
| 3552 | 3563 | ||
| 3553 | mutex_lock(&ifmgd->mtx); | 3564 | mutex_lock(&ifmgd->mtx); |
| 3554 | 3565 | ||
| @@ -3565,12 +3576,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
| 3565 | if (ifmgd->associated && | 3576 | if (ifmgd->associated && |
| 3566 | ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { | 3577 | ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { |
| 3567 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | 3578 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, |
| 3568 | req->reason_code, true, frame_buf); | 3579 | req->reason_code, tx, frame_buf); |
| 3569 | } else { | 3580 | } else { |
| 3570 | drv_mgd_prepare_tx(sdata->local, sdata); | 3581 | drv_mgd_prepare_tx(sdata->local, sdata); |
| 3571 | ieee80211_send_deauth_disassoc(sdata, req->bssid, | 3582 | ieee80211_send_deauth_disassoc(sdata, req->bssid, |
| 3572 | IEEE80211_STYPE_DEAUTH, | 3583 | IEEE80211_STYPE_DEAUTH, |
| 3573 | req->reason_code, true, | 3584 | req->reason_code, tx, |
| 3574 | frame_buf); | 3585 | frame_buf); |
| 3575 | } | 3586 | } |
| 3576 | 3587 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 61c621e9273f..00ade7feb2e3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -531,6 +531,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
| 531 | 531 | ||
| 532 | if (ieee80211_is_action(hdr->frame_control)) { | 532 | if (ieee80211_is_action(hdr->frame_control)) { |
| 533 | u8 category; | 533 | u8 category; |
| 534 | |||
| 535 | /* make sure category field is present */ | ||
| 536 | if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) | ||
| 537 | return RX_DROP_MONITOR; | ||
| 538 | |||
| 534 | mgmt = (struct ieee80211_mgmt *)hdr; | 539 | mgmt = (struct ieee80211_mgmt *)hdr; |
| 535 | category = mgmt->u.action.category; | 540 | category = mgmt->u.action.category; |
| 536 | if (category != WLAN_CATEGORY_MESH_ACTION && | 541 | if (category != WLAN_CATEGORY_MESH_ACTION && |
| @@ -883,14 +888,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
| 883 | */ | 888 | */ |
| 884 | if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && | 889 | if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && |
| 885 | ieee80211_is_data_present(hdr->frame_control)) { | 890 | ieee80211_is_data_present(hdr->frame_control)) { |
| 886 | u16 ethertype; | 891 | unsigned int hdrlen; |
| 887 | u8 *payload; | 892 | __be16 ethertype; |
| 888 | 893 | ||
| 889 | payload = rx->skb->data + | 894 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
| 890 | ieee80211_hdrlen(hdr->frame_control); | 895 | |
| 891 | ethertype = (payload[6] << 8) | payload[7]; | 896 | if (rx->skb->len < hdrlen + 8) |
| 892 | if (cpu_to_be16(ethertype) == | 897 | return RX_DROP_MONITOR; |
| 893 | rx->sdata->control_port_protocol) | 898 | |
| 899 | skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2); | ||
| 900 | if (ethertype == rx->sdata->control_port_protocol) | ||
| 894 | return RX_CONTINUE; | 901 | return RX_CONTINUE; |
| 895 | } | 902 | } |
| 896 | 903 | ||
| @@ -1462,11 +1469,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
| 1462 | 1469 | ||
| 1463 | hdr = (struct ieee80211_hdr *)rx->skb->data; | 1470 | hdr = (struct ieee80211_hdr *)rx->skb->data; |
| 1464 | fc = hdr->frame_control; | 1471 | fc = hdr->frame_control; |
| 1472 | |||
| 1473 | if (ieee80211_is_ctl(fc)) | ||
| 1474 | return RX_CONTINUE; | ||
| 1475 | |||
| 1465 | sc = le16_to_cpu(hdr->seq_ctrl); | 1476 | sc = le16_to_cpu(hdr->seq_ctrl); |
| 1466 | frag = sc & IEEE80211_SCTL_FRAG; | 1477 | frag = sc & IEEE80211_SCTL_FRAG; |
| 1467 | 1478 | ||
| 1468 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || | 1479 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || |
| 1469 | (rx->skb)->len < 24 || | ||
| 1470 | is_multicast_ether_addr(hdr->addr1))) { | 1480 | is_multicast_ether_addr(hdr->addr1))) { |
| 1471 | /* not fragmented */ | 1481 | /* not fragmented */ |
| 1472 | goto out; | 1482 | goto out; |
| @@ -1889,6 +1899,20 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
| 1889 | 1899 | ||
| 1890 | hdr = (struct ieee80211_hdr *) skb->data; | 1900 | hdr = (struct ieee80211_hdr *) skb->data; |
| 1891 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 1901 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
| 1902 | |||
| 1903 | /* make sure fixed part of mesh header is there, also checks skb len */ | ||
| 1904 | if (!pskb_may_pull(rx->skb, hdrlen + 6)) | ||
| 1905 | return RX_DROP_MONITOR; | ||
| 1906 | |||
| 1907 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | ||
| 1908 | |||
| 1909 | /* make sure full mesh header is there, also checks skb len */ | ||
| 1910 | if (!pskb_may_pull(rx->skb, | ||
| 1911 | hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) | ||
| 1912 | return RX_DROP_MONITOR; | ||
| 1913 | |||
| 1914 | /* reload pointers */ | ||
| 1915 | hdr = (struct ieee80211_hdr *) skb->data; | ||
| 1892 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 1916 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
| 1893 | 1917 | ||
| 1894 | /* frame is in RMC, don't forward */ | 1918 | /* frame is in RMC, don't forward */ |
| @@ -1897,7 +1921,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
| 1897 | mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) | 1921 | mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) |
| 1898 | return RX_DROP_MONITOR; | 1922 | return RX_DROP_MONITOR; |
| 1899 | 1923 | ||
| 1900 | if (!ieee80211_is_data(hdr->frame_control)) | 1924 | if (!ieee80211_is_data(hdr->frame_control) || |
| 1925 | !(status->rx_flags & IEEE80211_RX_RA_MATCH)) | ||
| 1901 | return RX_CONTINUE; | 1926 | return RX_CONTINUE; |
| 1902 | 1927 | ||
| 1903 | if (!mesh_hdr->ttl) | 1928 | if (!mesh_hdr->ttl) |
| @@ -1911,9 +1936,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
| 1911 | if (is_multicast_ether_addr(hdr->addr1)) { | 1936 | if (is_multicast_ether_addr(hdr->addr1)) { |
| 1912 | mpp_addr = hdr->addr3; | 1937 | mpp_addr = hdr->addr3; |
| 1913 | proxied_addr = mesh_hdr->eaddr1; | 1938 | proxied_addr = mesh_hdr->eaddr1; |
| 1914 | } else { | 1939 | } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { |
| 1940 | /* has_a4 already checked in ieee80211_rx_mesh_check */ | ||
| 1915 | mpp_addr = hdr->addr4; | 1941 | mpp_addr = hdr->addr4; |
| 1916 | proxied_addr = mesh_hdr->eaddr2; | 1942 | proxied_addr = mesh_hdr->eaddr2; |
| 1943 | } else { | ||
| 1944 | return RX_DROP_MONITOR; | ||
| 1917 | } | 1945 | } |
| 1918 | 1946 | ||
| 1919 | rcu_read_lock(); | 1947 | rcu_read_lock(); |
| @@ -1941,12 +1969,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
| 1941 | } | 1969 | } |
| 1942 | skb_set_queue_mapping(skb, q); | 1970 | skb_set_queue_mapping(skb, q); |
| 1943 | 1971 | ||
| 1944 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) | ||
| 1945 | goto out; | ||
| 1946 | |||
| 1947 | if (!--mesh_hdr->ttl) { | 1972 | if (!--mesh_hdr->ttl) { |
| 1948 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); | 1973 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); |
| 1949 | return RX_DROP_MONITOR; | 1974 | goto out; |
| 1950 | } | 1975 | } |
| 1951 | 1976 | ||
| 1952 | if (!ifmsh->mshcfg.dot11MeshForwarding) | 1977 | if (!ifmsh->mshcfg.dot11MeshForwarding) |
| @@ -2353,6 +2378,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 2353 | } | 2378 | } |
| 2354 | break; | 2379 | break; |
| 2355 | case WLAN_CATEGORY_SELF_PROTECTED: | 2380 | case WLAN_CATEGORY_SELF_PROTECTED: |
| 2381 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
| 2382 | sizeof(mgmt->u.action.u.self_prot.action_code))) | ||
| 2383 | break; | ||
| 2384 | |||
| 2356 | switch (mgmt->u.action.u.self_prot.action_code) { | 2385 | switch (mgmt->u.action.u.self_prot.action_code) { |
| 2357 | case WLAN_SP_MESH_PEERING_OPEN: | 2386 | case WLAN_SP_MESH_PEERING_OPEN: |
| 2358 | case WLAN_SP_MESH_PEERING_CLOSE: | 2387 | case WLAN_SP_MESH_PEERING_CLOSE: |
| @@ -2371,6 +2400,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 2371 | } | 2400 | } |
| 2372 | break; | 2401 | break; |
| 2373 | case WLAN_CATEGORY_MESH_ACTION: | 2402 | case WLAN_CATEGORY_MESH_ACTION: |
| 2403 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
| 2404 | sizeof(mgmt->u.action.u.mesh_action.action_code))) | ||
| 2405 | break; | ||
| 2406 | |||
| 2374 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | 2407 | if (!ieee80211_vif_is_mesh(&sdata->vif)) |
| 2375 | break; | 2408 | break; |
| 2376 | if (mesh_action_is_path_sel(mgmt) && | 2409 | if (mesh_action_is_path_sel(mgmt) && |
| @@ -2913,10 +2946,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
| 2913 | if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) | 2946 | if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) |
| 2914 | local->dot11ReceivedFragmentCount++; | 2947 | local->dot11ReceivedFragmentCount++; |
| 2915 | 2948 | ||
| 2916 | if (ieee80211_is_mgmt(fc)) | 2949 | if (ieee80211_is_mgmt(fc)) { |
| 2917 | err = skb_linearize(skb); | 2950 | /* drop frame if too short for header */ |
| 2918 | else | 2951 | if (skb->len < ieee80211_hdrlen(fc)) |
| 2952 | err = -ENOBUFS; | ||
| 2953 | else | ||
| 2954 | err = skb_linearize(skb); | ||
| 2955 | } else { | ||
| 2919 | err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); | 2956 | err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); |
| 2957 | } | ||
| 2920 | 2958 | ||
| 2921 | if (err) { | 2959 | if (err) { |
| 2922 | dev_kfree_skb(skb); | 2960 | dev_kfree_skb(skb); |
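
Note on the rx.c hunks above: the mesh-forwarding path now pulls the fixed 6-byte mesh header first, derives the full header length from it, pulls again, and only then reloads the header pointers (pskb_may_pull() may relocate skb data); the action-frame and management-frame hunks add similar minimum-length checks before any field is read. A minimal userspace sketch of the same two-stage length check follows — the offsets and flag encoding are illustrative, not the exact 802.11s layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Validate the 6-byte fixed part first, then use it to size the rest. */
    static int parse_mesh_hdr(const uint8_t *buf, size_t len, size_t offset)
    {
        uint8_t flags, ttl;
        size_t ext;

        /* stage 1: is the fixed part of the header present at all? */
        if (len < offset + 6)
            return -1;
        flags = buf[offset];
        ttl   = buf[offset + 1];

        /* stage 2: the flags say how much address extension follows */
        switch (flags & 0x3) {
        case 0:  ext = 0;  break;
        case 1:  ext = 6;  break;   /* one extended address */
        case 2:  ext = 12; break;   /* two extended addresses */
        default: return -1;         /* reserved combination */
        }
        if (len < offset + 6 + ext)
            return -1;

        printf("mesh hdr ok: ttl=%u, %zu extension bytes\n",
               (unsigned)ttl, ext);
        return 0;
    }

    int main(void)
    {
        uint8_t frame[64] = { 0 };

        frame[24] = 0x1;            /* pretend: one extended address */
        return parse_mesh_hdr(frame, sizeof(frame), 24) ? 1 : 0;
    }
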
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 797dd36a220d..0a4e4c04db89 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -650,7 +650,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, | |||
| 650 | */ | 650 | */ |
| 651 | if (!skb) | 651 | if (!skb) |
| 652 | break; | 652 | break; |
| 653 | dev_kfree_skb(skb); | 653 | ieee80211_free_txskb(&local->hw, skb); |
| 654 | } | 654 | } |
| 655 | 655 | ||
| 656 | /* | 656 | /* |
| @@ -679,7 +679,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, | |||
| 679 | local->total_ps_buffered--; | 679 | local->total_ps_buffered--; |
| 680 | ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", | 680 | ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", |
| 681 | sta->sta.addr); | 681 | sta->sta.addr); |
| 682 | dev_kfree_skb(skb); | 682 | ieee80211_free_txskb(&local->hw, skb); |
| 683 | } | 683 | } |
| 684 | 684 | ||
| 685 | /* | 685 | /* |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 2ce89732d0f2..3af0cc4130f1 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
| @@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, | |||
| 34 | skb_queue_len(&local->skb_queue_unreliable); | 34 | skb_queue_len(&local->skb_queue_unreliable); |
| 35 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && | 35 | while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && |
| 36 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 36 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
| 37 | dev_kfree_skb_irq(skb); | 37 | ieee80211_free_txskb(hw, skb); |
| 38 | tmp--; | 38 | tmp--; |
| 39 | I802_DEBUG_INC(local->tx_status_drop); | 39 | I802_DEBUG_INC(local->tx_status_drop); |
| 40 | } | 40 | } |
| @@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
| 159 | "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", | 159 | "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n", |
| 160 | skb_queue_len(&sta->tx_filtered[ac]), | 160 | skb_queue_len(&sta->tx_filtered[ac]), |
| 161 | !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); | 161 | !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies); |
| 162 | dev_kfree_skb(skb); | 162 | ieee80211_free_txskb(&local->hw, skb); |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) | 165 | static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e0e0d1d0e830..c9bf83f36657 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
| 354 | total += skb_queue_len(&sta->ps_tx_buf[ac]); | 354 | total += skb_queue_len(&sta->ps_tx_buf[ac]); |
| 355 | if (skb) { | 355 | if (skb) { |
| 356 | purged++; | 356 | purged++; |
| 357 | dev_kfree_skb(skb); | 357 | ieee80211_free_txskb(&local->hw, skb); |
| 358 | break; | 358 | break; |
| 359 | } | 359 | } |
| 360 | } | 360 | } |
| @@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 466 | ps_dbg(tx->sdata, | 466 | ps_dbg(tx->sdata, |
| 467 | "STA %pM TX buffer for AC %d full - dropping oldest frame\n", | 467 | "STA %pM TX buffer for AC %d full - dropping oldest frame\n", |
| 468 | sta->sta.addr, ac); | 468 | sta->sta.addr, ac); |
| 469 | dev_kfree_skb(old); | 469 | ieee80211_free_txskb(&local->hw, old); |
| 470 | } else | 470 | } else |
| 471 | tx->local->total_ps_buffered++; | 471 | tx->local->total_ps_buffered++; |
| 472 | 472 | ||
| @@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | |||
| 1103 | spin_unlock(&tx->sta->lock); | 1103 | spin_unlock(&tx->sta->lock); |
| 1104 | 1104 | ||
| 1105 | if (purge_skb) | 1105 | if (purge_skb) |
| 1106 | dev_kfree_skb(purge_skb); | 1106 | ieee80211_free_txskb(&tx->local->hw, purge_skb); |
| 1107 | } | 1107 | } |
| 1108 | 1108 | ||
| 1109 | /* reset session timer */ | 1109 | /* reset session timer */ |
| @@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, | |||
| 1214 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1214 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
| 1215 | if (WARN_ON_ONCE(q >= local->hw.queues)) { | 1215 | if (WARN_ON_ONCE(q >= local->hw.queues)) { |
| 1216 | __skb_unlink(skb, skbs); | 1216 | __skb_unlink(skb, skbs); |
| 1217 | dev_kfree_skb(skb); | 1217 | ieee80211_free_txskb(&local->hw, skb); |
| 1218 | continue; | 1218 | continue; |
| 1219 | } | 1219 | } |
| 1220 | #endif | 1220 | #endif |
| @@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
| 1356 | if (unlikely(res == TX_DROP)) { | 1356 | if (unlikely(res == TX_DROP)) { |
| 1357 | I802_DEBUG_INC(tx->local->tx_handlers_drop); | 1357 | I802_DEBUG_INC(tx->local->tx_handlers_drop); |
| 1358 | if (tx->skb) | 1358 | if (tx->skb) |
| 1359 | dev_kfree_skb(tx->skb); | 1359 | ieee80211_free_txskb(&tx->local->hw, tx->skb); |
| 1360 | else | 1360 | else |
| 1361 | __skb_queue_purge(&tx->skbs); | 1361 | __skb_queue_purge(&tx->skbs); |
| 1362 | return -1; | 1362 | return -1; |
| @@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | |||
| 1393 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); | 1393 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); |
| 1394 | 1394 | ||
| 1395 | if (unlikely(res_prepare == TX_DROP)) { | 1395 | if (unlikely(res_prepare == TX_DROP)) { |
| 1396 | dev_kfree_skb(skb); | 1396 | ieee80211_free_txskb(&local->hw, skb); |
| 1397 | goto out; | 1397 | goto out; |
| 1398 | } else if (unlikely(res_prepare == TX_QUEUED)) { | 1398 | } else if (unlikely(res_prepare == TX_QUEUED)) { |
| 1399 | goto out; | 1399 | goto out; |
| @@ -1465,7 +1465,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
| 1465 | headroom = max_t(int, 0, headroom); | 1465 | headroom = max_t(int, 0, headroom); |
| 1466 | 1466 | ||
| 1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { | 1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { |
| 1468 | dev_kfree_skb(skb); | 1468 | ieee80211_free_txskb(&local->hw, skb); |
| 1469 | rcu_read_unlock(); | 1469 | rcu_read_unlock(); |
| 1470 | return; | 1470 | return; |
| 1471 | } | 1471 | } |
| @@ -2050,8 +2050,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
| 2050 | head_need += IEEE80211_ENCRYPT_HEADROOM; | 2050 | head_need += IEEE80211_ENCRYPT_HEADROOM; |
| 2051 | head_need += local->tx_headroom; | 2051 | head_need += local->tx_headroom; |
| 2052 | head_need = max_t(int, 0, head_need); | 2052 | head_need = max_t(int, 0, head_need); |
| 2053 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) | 2053 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) { |
| 2054 | goto fail; | 2054 | ieee80211_free_txskb(&local->hw, skb); |
| 2055 | return NETDEV_TX_OK; | ||
| 2056 | } | ||
| 2055 | } | 2057 | } |
| 2056 | 2058 | ||
| 2057 | if (encaps_data) { | 2059 | if (encaps_data) { |
| @@ -2184,7 +2186,7 @@ void ieee80211_tx_pending(unsigned long data) | |||
| 2184 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2186 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 2185 | 2187 | ||
| 2186 | if (WARN_ON(!info->control.vif)) { | 2188 | if (WARN_ON(!info->control.vif)) { |
| 2187 | kfree_skb(skb); | 2189 | ieee80211_free_txskb(&local->hw, skb); |
| 2188 | continue; | 2190 | continue; |
| 2189 | } | 2191 | } |
| 2190 | 2192 | ||
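
The sta_info.c, status.c and tx.c hunks all make the same substitution: every place that drops a queued transmit frame now goes through ieee80211_free_txskb() rather than dev_kfree_skb(), so a TX status is still reported for the discarded frame instead of it vanishing silently. A small userspace sketch of that "free with status report" idea, with made-up names:

    #include <stdio.h>
    #include <stdlib.h>

    struct txbuf {
        int id;
        void (*complete)(struct txbuf *b, int acked);   /* status consumer */
    };

    /* Route every drop through one helper that still notifies the waiter. */
    static void report_and_free(struct txbuf *b)
    {
        if (b->complete)
            b->complete(b, 0);      /* 0 = not acked / dropped */
        free(b);
    }

    static void on_complete(struct txbuf *b, int acked)
    {
        printf("buffer %d finished, acked=%d\n", b->id, acked);
    }

    int main(void)
    {
        struct txbuf *b = malloc(sizeof(*b));

        if (!b)
            return 1;
        b->id = 42;
        b->complete = on_complete;

        /* a queue-full or headroom-resize failure path would land here */
        report_and_free(b);
        return 0;
    }
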
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 22ca35054dd0..239391807ca9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
| @@ -406,7 +406,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, | |||
| 406 | int queue = info->hw_queue; | 406 | int queue = info->hw_queue; |
| 407 | 407 | ||
| 408 | if (WARN_ON(!info->control.vif)) { | 408 | if (WARN_ON(!info->control.vif)) { |
| 409 | kfree_skb(skb); | 409 | ieee80211_free_txskb(&local->hw, skb); |
| 410 | return; | 410 | return; |
| 411 | } | 411 | } |
| 412 | 412 | ||
| @@ -431,7 +431,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, | |||
| 431 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 431 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 432 | 432 | ||
| 433 | if (WARN_ON(!info->control.vif)) { | 433 | if (WARN_ON(!info->control.vif)) { |
| 434 | kfree_skb(skb); | 434 | ieee80211_free_txskb(&local->hw, skb); |
| 435 | continue; | 435 | continue; |
| 436 | } | 436 | } |
| 437 | 437 | ||
| @@ -643,13 +643,41 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | |||
| 643 | break; | 643 | break; |
| 644 | } | 644 | } |
| 645 | 645 | ||
| 646 | if (id != WLAN_EID_VENDOR_SPECIFIC && | 646 | switch (id) { |
| 647 | id != WLAN_EID_QUIET && | 647 | case WLAN_EID_SSID: |
| 648 | test_bit(id, seen_elems)) { | 648 | case WLAN_EID_SUPP_RATES: |
| 649 | elems->parse_error = true; | 649 | case WLAN_EID_FH_PARAMS: |
| 650 | left -= elen; | 650 | case WLAN_EID_DS_PARAMS: |
| 651 | pos += elen; | 651 | case WLAN_EID_CF_PARAMS: |
| 652 | continue; | 652 | case WLAN_EID_TIM: |
| 653 | case WLAN_EID_IBSS_PARAMS: | ||
| 654 | case WLAN_EID_CHALLENGE: | ||
| 655 | case WLAN_EID_RSN: | ||
| 656 | case WLAN_EID_ERP_INFO: | ||
| 657 | case WLAN_EID_EXT_SUPP_RATES: | ||
| 658 | case WLAN_EID_HT_CAPABILITY: | ||
| 659 | case WLAN_EID_HT_OPERATION: | ||
| 660 | case WLAN_EID_VHT_CAPABILITY: | ||
| 661 | case WLAN_EID_VHT_OPERATION: | ||
| 662 | case WLAN_EID_MESH_ID: | ||
| 663 | case WLAN_EID_MESH_CONFIG: | ||
| 664 | case WLAN_EID_PEER_MGMT: | ||
| 665 | case WLAN_EID_PREQ: | ||
| 666 | case WLAN_EID_PREP: | ||
| 667 | case WLAN_EID_PERR: | ||
| 668 | case WLAN_EID_RANN: | ||
| 669 | case WLAN_EID_CHANNEL_SWITCH: | ||
| 670 | case WLAN_EID_EXT_CHANSWITCH_ANN: | ||
| 671 | case WLAN_EID_COUNTRY: | ||
| 672 | case WLAN_EID_PWR_CONSTRAINT: | ||
| 673 | case WLAN_EID_TIMEOUT_INTERVAL: | ||
| 674 | if (test_bit(id, seen_elems)) { | ||
| 675 | elems->parse_error = true; | ||
| 676 | left -= elen; | ||
| 677 | pos += elen; | ||
| 678 | continue; | ||
| 679 | } | ||
| 680 | break; | ||
| 653 | } | 681 | } |
| 654 | 682 | ||
| 655 | if (calc_crc && id < 64 && (filter & (1ULL << id))) | 683 | if (calc_crc && id < 64 && (filter & (1ULL << id))) |
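
The util.c hunk narrows the duplicate-element check in ieee802_11_parse_elems_crc(): instead of flagging any repeated ID except vendor-specific and Quiet, only the listed element IDs (which may legitimately appear at most once) are checked against the seen_elems bitmap. A compact sketch of the pattern, with an abbreviated whitelist:

    #include <stdint.h>
    #include <stdio.h>

    static int id_must_be_unique(uint8_t id)
    {
        switch (id) {
        case 0:  /* SSID */
        case 1:  /* supported rates */
        case 5:  /* TIM */
        case 48: /* RSN */
            return 1;
        default: /* vendor-specific etc. may repeat */
            return 0;
        }
    }

    static int parse_tlvs(const uint8_t *buf, size_t len)
    {
        uint8_t seen[256 / 8] = { 0 };
        size_t pos = 0;
        int parse_error = 0;

        while (len - pos >= 2) {
            uint8_t id = buf[pos], elen = buf[pos + 1];

            if (len - pos - 2 < elen)
                break;              /* truncated element */

            if (id_must_be_unique(id) &&
                (seen[id / 8] & (1u << (id % 8)))) {
                parse_error = 1;    /* duplicate: skip it */
                pos += 2 + elen;
                continue;
            }
            seen[id / 8] |= 1u << (id % 8);

            printf("element %u, %u bytes\n", id, elen);
            pos += 2 + elen;
        }
        return parse_error;
    }

    int main(void)
    {
        const uint8_t frame[] = { 0, 3, 'f', 'o', 'o',  0, 3, 'b', 'a', 'r' };

        return parse_tlvs(frame, sizeof(frame));   /* second SSID is flagged */
    }
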
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index bdb53aba888e..8bd2f5c6a56e 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
| @@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
| 106 | if (status->flag & RX_FLAG_MMIC_ERROR) | 106 | if (status->flag & RX_FLAG_MMIC_ERROR) |
| 107 | goto mic_fail; | 107 | goto mic_fail; |
| 108 | 108 | ||
| 109 | if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key) | 109 | if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && |
| 110 | rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) | ||
| 110 | goto update_iv; | 111 | goto update_iv; |
| 111 | 112 | ||
| 112 | return RX_CONTINUE; | 113 | return RX_CONTINUE; |
| @@ -545,14 +546,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
| 545 | 546 | ||
| 546 | static void bip_aad(struct sk_buff *skb, u8 *aad) | 547 | static void bip_aad(struct sk_buff *skb, u8 *aad) |
| 547 | { | 548 | { |
| 549 | __le16 mask_fc; | ||
| 550 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
| 551 | |||
| 548 | /* BIP AAD: FC(masked) || A1 || A2 || A3 */ | 552 | /* BIP AAD: FC(masked) || A1 || A2 || A3 */ |
| 549 | 553 | ||
| 550 | /* FC type/subtype */ | 554 | /* FC type/subtype */ |
| 551 | aad[0] = skb->data[0]; | ||
| 552 | /* Mask FC Retry, PwrMgt, MoreData flags to zero */ | 555 | /* Mask FC Retry, PwrMgt, MoreData flags to zero */ |
| 553 | aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6)); | 556 | mask_fc = hdr->frame_control; |
| 557 | mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | | ||
| 558 | IEEE80211_FCTL_MOREDATA); | ||
| 559 | put_unaligned(mask_fc, (__le16 *) &aad[0]); | ||
| 554 | /* A1 || A2 || A3 */ | 560 | /* A1 || A2 || A3 */ |
| 555 | memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN); | 561 | memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN); |
| 556 | } | 562 | } |
| 557 | 563 | ||
| 558 | 564 | ||
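
In the wpa.c hunks, the Michael MIC path only updates the IV for TKIP keys, and bip_aad() now builds the masked frame-control field from hdr->frame_control as a little-endian 16-bit value (clearing the Retry, PwrMgt and MoreData bits) before appending A1 || A2 || A3. A simplified sketch of that AAD construction, with the offsets written out explicitly:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define FCTL_RETRY    0x0800
    #define FCTL_PM       0x1000
    #define FCTL_MOREDATA 0x2000
    #define ETH_ALEN      6

    static void build_bip_aad(const uint8_t *hdr, uint8_t *aad /* 20 bytes */)
    {
        /* frame control is the first two bytes, little-endian on the air */
        uint16_t fc = (uint16_t)hdr[0] | ((uint16_t)hdr[1] << 8);

        fc &= ~(FCTL_RETRY | FCTL_PM | FCTL_MOREDATA);
        aad[0] = fc & 0xff;
        aad[1] = fc >> 8;
        /* A1 || A2 || A3 start at byte 4 of the 802.11 header */
        memcpy(aad + 2, hdr + 4, 3 * ETH_ALEN);
    }

    int main(void)
    {
        uint8_t hdr[24] = { 0xc0, 0x38 };   /* retry+pm+moredata set */
        uint8_t aad[20];

        build_bip_aad(hdr, aad);
        printf("masked fc bytes: %02x %02x\n", aad[0], aad[1]);
        return 0;
    }
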
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7e7198b51c06..c4ee43710aab 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -2589,6 +2589,8 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u) | |||
| 2589 | struct ip_vs_proto_data *pd; | 2589 | struct ip_vs_proto_data *pd; |
| 2590 | #endif | 2590 | #endif |
| 2591 | 2591 | ||
| 2592 | memset(u, 0, sizeof (*u)); | ||
| 2593 | |||
| 2592 | #ifdef CONFIG_IP_VS_PROTO_TCP | 2594 | #ifdef CONFIG_IP_VS_PROTO_TCP |
| 2593 | pd = ip_vs_proto_data_get(net, IPPROTO_TCP); | 2595 | pd = ip_vs_proto_data_get(net, IPPROTO_TCP); |
| 2594 | u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; | 2596 | u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; |
| @@ -2766,7 +2768,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
| 2766 | { | 2768 | { |
| 2767 | struct ip_vs_timeout_user t; | 2769 | struct ip_vs_timeout_user t; |
| 2768 | 2770 | ||
| 2769 | memset(&t, 0, sizeof(t)); | ||
| 2770 | __ip_vs_get_timeouts(net, &t); | 2771 | __ip_vs_get_timeouts(net, &t); |
| 2771 | if (copy_to_user(user, &t, sizeof(t)) != 0) | 2772 | if (copy_to_user(user, &t, sizeof(t)) != 0) |
| 2772 | ret = -EFAULT; | 2773 | ret = -EFAULT; |
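
The ip_vs_ctl.c change moves the zeroing into __ip_vs_get_timeouts() itself and drops the now-redundant memset in the caller, so any field left untouched (for instance a protocol compiled out) can never leak stale stack contents to userspace. A tiny sketch of this "zero at the producer" pattern:

    #include <stdio.h>
    #include <string.h>

    struct timeouts {
        int tcp;
        int tcp_fin;
        int udp;
    };

    static void get_timeouts(struct timeouts *t)
    {
        memset(t, 0, sizeof(*t));   /* callers no longer need to do this */
        t->tcp = 900;
        t->tcp_fin = 120;
    #ifdef WITH_UDP
        t->udp = 300;               /* only set when the protocol is built in */
    #endif
    }

    int main(void)
    {
        struct timeouts t;          /* deliberately not initialised here */

        get_timeouts(&t);
        printf("tcp=%d fin=%d udp=%d\n", t.tcp, t.tcp_fin, t.udp);
        return 0;
    }
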
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 56f6d5d81a77..cc4c8095681a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -50,6 +50,7 @@ enum { | |||
| 50 | * local | 50 | * local |
| 51 | */ | 51 | */ |
| 52 | IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ | 52 | IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ |
| 53 | IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */ | ||
| 53 | }; | 54 | }; |
| 54 | 55 | ||
| 55 | /* | 56 | /* |
| @@ -113,6 +114,8 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, | |||
| 113 | fl4.daddr = daddr; | 114 | fl4.daddr = daddr; |
| 114 | fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; | 115 | fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; |
| 115 | fl4.flowi4_tos = rtos; | 116 | fl4.flowi4_tos = rtos; |
| 117 | fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? | ||
| 118 | FLOWI_FLAG_KNOWN_NH : 0; | ||
| 116 | 119 | ||
| 117 | retry: | 120 | retry: |
| 118 | rt = ip_route_output_key(net, &fl4); | 121 | rt = ip_route_output_key(net, &fl4); |
| @@ -1061,7 +1064,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
| 1061 | if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, | 1064 | if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, |
| 1062 | RT_TOS(iph->tos), | 1065 | RT_TOS(iph->tos), |
| 1063 | IP_VS_RT_MODE_LOCAL | | 1066 | IP_VS_RT_MODE_LOCAL | |
| 1064 | IP_VS_RT_MODE_NON_LOCAL, NULL))) | 1067 | IP_VS_RT_MODE_NON_LOCAL | |
| 1068 | IP_VS_RT_MODE_KNOWN_NH, NULL))) | ||
| 1065 | goto tx_error_icmp; | 1069 | goto tx_error_icmp; |
| 1066 | if (rt->rt_flags & RTCF_LOCAL) { | 1070 | if (rt->rt_flags & RTCF_LOCAL) { |
| 1067 | ip_rt_put(rt); | 1071 | ip_rt_put(rt); |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 1b30b0dee708..962795e839ab 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
| @@ -753,7 +753,8 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 753 | flowi4_to_flowi(&fl1), false)) { | 753 | flowi4_to_flowi(&fl1), false)) { |
| 754 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, | 754 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 755 | flowi4_to_flowi(&fl2), false)) { | 755 | flowi4_to_flowi(&fl2), false)) { |
| 756 | if (rt1->rt_gateway == rt2->rt_gateway && | 756 | if (rt_nexthop(rt1, fl1.daddr) == |
| 757 | rt_nexthop(rt2, fl2.daddr) && | ||
| 757 | rt1->dst.dev == rt2->dst.dev) | 758 | rt1->dst.dev == rt2->dst.dev) |
| 758 | ret = 1; | 759 | ret = 1; |
| 759 | dst_release(&rt2->dst); | 760 | dst_release(&rt2->dst); |
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 16c712563860..ae7f5daeee43 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
| @@ -180,9 +180,9 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
| 180 | typeof(nf_ct_timeout_find_get_hook) timeout_find_get; | 180 | typeof(nf_ct_timeout_find_get_hook) timeout_find_get; |
| 181 | struct ctnl_timeout *timeout; | 181 | struct ctnl_timeout *timeout; |
| 182 | struct nf_conn_timeout *timeout_ext; | 182 | struct nf_conn_timeout *timeout_ext; |
| 183 | const struct ipt_entry *e = par->entryinfo; | ||
| 184 | struct nf_conntrack_l4proto *l4proto; | 183 | struct nf_conntrack_l4proto *l4proto; |
| 185 | int ret = 0; | 184 | int ret = 0; |
| 185 | u8 proto; | ||
| 186 | 186 | ||
| 187 | rcu_read_lock(); | 187 | rcu_read_lock(); |
| 188 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); | 188 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); |
| @@ -192,9 +192,11 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
| 192 | goto out; | 192 | goto out; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | if (e->ip.invflags & IPT_INV_PROTO) { | 195 | proto = xt_ct_find_proto(par); |
| 196 | if (!proto) { | ||
| 196 | ret = -EINVAL; | 197 | ret = -EINVAL; |
| 197 | pr_info("You cannot use inversion on L4 protocol\n"); | 198 | pr_info("You must specify a L4 protocol, and not use " |
| 199 | "inversions on it.\n"); | ||
| 198 | goto out; | 200 | goto out; |
| 199 | } | 201 | } |
| 200 | 202 | ||
| @@ -214,7 +216,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
| 214 | /* Make sure the timeout policy matches any existing protocol tracker, | 216 | /* Make sure the timeout policy matches any existing protocol tracker, |
| 215 | * otherwise default to generic. | 217 | * otherwise default to generic. |
| 216 | */ | 218 | */ |
| 217 | l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto); | 219 | l4proto = __nf_ct_l4proto_find(par->family, proto); |
| 218 | if (timeout->l4proto->l4proto != l4proto->l4proto) { | 220 | if (timeout->l4proto->l4proto != l4proto->l4proto) { |
| 219 | ret = -EINVAL; | 221 | ret = -EINVAL; |
| 220 | pr_info("Timeout policy `%s' can only be used by L4 protocol " | 222 | pr_info("Timeout policy `%s' can only be used by L4 protocol " |
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index ee2e5bc5a8c7..bd93e51d30ac 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c | |||
| @@ -70,6 +70,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) | |||
| 70 | fl4.daddr = info->gw.ip; | 70 | fl4.daddr = info->gw.ip; |
| 71 | fl4.flowi4_tos = RT_TOS(iph->tos); | 71 | fl4.flowi4_tos = RT_TOS(iph->tos); |
| 72 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | 72 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
| 73 | fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH; | ||
| 73 | rt = ip_route_output_key(net, &fl4); | 74 | rt = ip_route_output_key(net, &fl4); |
| 74 | if (IS_ERR(rt)) | 75 | if (IS_ERR(rt)) |
| 75 | return false; | 76 | return false; |
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c index 81aafa8e4fef..bea7464cc43f 100644 --- a/net/netfilter/xt_nat.c +++ b/net/netfilter/xt_nat.c | |||
| @@ -111,7 +111,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = { | |||
| 111 | .family = NFPROTO_IPV4, | 111 | .family = NFPROTO_IPV4, |
| 112 | .table = "nat", | 112 | .table = "nat", |
| 113 | .hooks = (1 << NF_INET_POST_ROUTING) | | 113 | .hooks = (1 << NF_INET_POST_ROUTING) | |
| 114 | (1 << NF_INET_LOCAL_OUT), | 114 | (1 << NF_INET_LOCAL_IN), |
| 115 | .me = THIS_MODULE, | 115 | .me = THIS_MODULE, |
| 116 | }, | 116 | }, |
| 117 | { | 117 | { |
| @@ -123,7 +123,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = { | |||
| 123 | .family = NFPROTO_IPV4, | 123 | .family = NFPROTO_IPV4, |
| 124 | .table = "nat", | 124 | .table = "nat", |
| 125 | .hooks = (1 << NF_INET_PRE_ROUTING) | | 125 | .hooks = (1 << NF_INET_PRE_ROUTING) | |
| 126 | (1 << NF_INET_LOCAL_IN), | 126 | (1 << NF_INET_LOCAL_OUT), |
| 127 | .me = THIS_MODULE, | 127 | .me = THIS_MODULE, |
| 128 | }, | 128 | }, |
| 129 | { | 129 | { |
| @@ -133,7 +133,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = { | |||
| 133 | .targetsize = sizeof(struct nf_nat_range), | 133 | .targetsize = sizeof(struct nf_nat_range), |
| 134 | .table = "nat", | 134 | .table = "nat", |
| 135 | .hooks = (1 << NF_INET_POST_ROUTING) | | 135 | .hooks = (1 << NF_INET_POST_ROUTING) | |
| 136 | (1 << NF_INET_LOCAL_OUT), | 136 | (1 << NF_INET_LOCAL_IN), |
| 137 | .me = THIS_MODULE, | 137 | .me = THIS_MODULE, |
| 138 | }, | 138 | }, |
| 139 | { | 139 | { |
| @@ -143,7 +143,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = { | |||
| 143 | .targetsize = sizeof(struct nf_nat_range), | 143 | .targetsize = sizeof(struct nf_nat_range), |
| 144 | .table = "nat", | 144 | .table = "nat", |
| 145 | .hooks = (1 << NF_INET_PRE_ROUTING) | | 145 | .hooks = (1 << NF_INET_PRE_ROUTING) | |
| 146 | (1 << NF_INET_LOCAL_IN), | 146 | (1 << NF_INET_LOCAL_OUT), |
| 147 | .me = THIS_MODULE, | 147 | .me = THIS_MODULE, |
| 148 | }, | 148 | }, |
| 149 | }; | 149 | }; |
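
The xt_nat.c hunks correct which netfilter hooks each NAT target may attach to: source NAT belongs on POST_ROUTING/LOCAL_IN and destination NAT on PRE_ROUTING/LOCAL_OUT. A short sketch of the hook bitmask convention (the enum values mirror the kernel's NF_INET_* ordering, written out here for illustration):

    #include <stdio.h>

    enum {
        HOOK_PRE_ROUTING = 0,
        HOOK_LOCAL_IN,
        HOOK_FORWARD,
        HOOK_LOCAL_OUT,
        HOOK_POST_ROUTING,
    };

    int main(void)
    {
        /* source NAT: after routing, or on locally delivered replies */
        unsigned int snat_hooks = (1u << HOOK_POST_ROUTING) |
                                  (1u << HOOK_LOCAL_IN);
        /* destination NAT: before routing, or on locally generated traffic */
        unsigned int dnat_hooks = (1u << HOOK_PRE_ROUTING) |
                                  (1u << HOOK_LOCAL_OUT);

        printf("SNAT hook mask: 0x%02x\n", snat_hooks);
        printf("DNAT hook mask: 0x%02x\n", dnat_hooks);
        return 0;
    }
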
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0f2e3ad69c47..4da797fa5ec5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -138,6 +138,8 @@ static int netlink_dump(struct sock *sk); | |||
| 138 | static DEFINE_RWLOCK(nl_table_lock); | 138 | static DEFINE_RWLOCK(nl_table_lock); |
| 139 | static atomic_t nl_table_users = ATOMIC_INIT(0); | 139 | static atomic_t nl_table_users = ATOMIC_INIT(0); |
| 140 | 140 | ||
| 141 | #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock)); | ||
| 142 | |||
| 141 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); | 143 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); |
| 142 | 144 | ||
| 143 | static inline u32 netlink_group_mask(u32 group) | 145 | static inline u32 netlink_group_mask(u32 group) |
| @@ -169,6 +171,8 @@ static void netlink_sock_destruct(struct sock *sk) | |||
| 169 | if (nlk->cb) { | 171 | if (nlk->cb) { |
| 170 | if (nlk->cb->done) | 172 | if (nlk->cb->done) |
| 171 | nlk->cb->done(nlk->cb); | 173 | nlk->cb->done(nlk->cb); |
| 174 | |||
| 175 | module_put(nlk->cb->module); | ||
| 172 | netlink_destroy_callback(nlk->cb); | 176 | netlink_destroy_callback(nlk->cb); |
| 173 | } | 177 | } |
| 174 | 178 | ||
| @@ -343,6 +347,11 @@ netlink_update_listeners(struct sock *sk) | |||
| 343 | struct hlist_node *node; | 347 | struct hlist_node *node; |
| 344 | unsigned long mask; | 348 | unsigned long mask; |
| 345 | unsigned int i; | 349 | unsigned int i; |
| 350 | struct listeners *listeners; | ||
| 351 | |||
| 352 | listeners = nl_deref_protected(tbl->listeners); | ||
| 353 | if (!listeners) | ||
| 354 | return; | ||
| 346 | 355 | ||
| 347 | for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { | 356 | for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { |
| 348 | mask = 0; | 357 | mask = 0; |
| @@ -350,7 +359,7 @@ netlink_update_listeners(struct sock *sk) | |||
| 350 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) | 359 | if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) |
| 351 | mask |= nlk_sk(sk)->groups[i]; | 360 | mask |= nlk_sk(sk)->groups[i]; |
| 352 | } | 361 | } |
| 353 | tbl->listeners->masks[i] = mask; | 362 | listeners->masks[i] = mask; |
| 354 | } | 363 | } |
| 355 | /* this function is only called with the netlink table "grabbed", which | 364 | /* this function is only called with the netlink table "grabbed", which |
| 356 | * makes sure updates are visible before bind or setsockopt return. */ | 365 | * makes sure updates are visible before bind or setsockopt return. */ |
| @@ -534,7 +543,11 @@ static int netlink_release(struct socket *sock) | |||
| 534 | if (netlink_is_kernel(sk)) { | 543 | if (netlink_is_kernel(sk)) { |
| 535 | BUG_ON(nl_table[sk->sk_protocol].registered == 0); | 544 | BUG_ON(nl_table[sk->sk_protocol].registered == 0); |
| 536 | if (--nl_table[sk->sk_protocol].registered == 0) { | 545 | if (--nl_table[sk->sk_protocol].registered == 0) { |
| 537 | kfree(nl_table[sk->sk_protocol].listeners); | 546 | struct listeners *old; |
| 547 | |||
| 548 | old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); | ||
| 549 | RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); | ||
| 550 | kfree_rcu(old, rcu); | ||
| 538 | nl_table[sk->sk_protocol].module = NULL; | 551 | nl_table[sk->sk_protocol].module = NULL; |
| 539 | nl_table[sk->sk_protocol].bind = NULL; | 552 | nl_table[sk->sk_protocol].bind = NULL; |
| 540 | nl_table[sk->sk_protocol].flags = 0; | 553 | nl_table[sk->sk_protocol].flags = 0; |
| @@ -980,7 +993,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group) | |||
| 980 | rcu_read_lock(); | 993 | rcu_read_lock(); |
| 981 | listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); | 994 | listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); |
| 982 | 995 | ||
| 983 | if (group - 1 < nl_table[sk->sk_protocol].groups) | 996 | if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) |
| 984 | res = test_bit(group - 1, listeners->masks); | 997 | res = test_bit(group - 1, listeners->masks); |
| 985 | 998 | ||
| 986 | rcu_read_unlock(); | 999 | rcu_read_unlock(); |
| @@ -1623,7 +1636,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups) | |||
| 1623 | new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); | 1636 | new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); |
| 1624 | if (!new) | 1637 | if (!new) |
| 1625 | return -ENOMEM; | 1638 | return -ENOMEM; |
| 1626 | old = rcu_dereference_protected(tbl->listeners, 1); | 1639 | old = nl_deref_protected(tbl->listeners); |
| 1627 | memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); | 1640 | memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); |
| 1628 | rcu_assign_pointer(tbl->listeners, new); | 1641 | rcu_assign_pointer(tbl->listeners, new); |
| 1629 | 1642 | ||
| @@ -1758,6 +1771,7 @@ static int netlink_dump(struct sock *sk) | |||
| 1758 | nlk->cb = NULL; | 1771 | nlk->cb = NULL; |
| 1759 | mutex_unlock(nlk->cb_mutex); | 1772 | mutex_unlock(nlk->cb_mutex); |
| 1760 | 1773 | ||
| 1774 | module_put(cb->module); | ||
| 1761 | netlink_consume_callback(cb); | 1775 | netlink_consume_callback(cb); |
| 1762 | return 0; | 1776 | return 0; |
| 1763 | 1777 | ||
| @@ -1767,9 +1781,9 @@ errout_skb: | |||
| 1767 | return err; | 1781 | return err; |
| 1768 | } | 1782 | } |
| 1769 | 1783 | ||
| 1770 | int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | 1784 | int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, |
| 1771 | const struct nlmsghdr *nlh, | 1785 | const struct nlmsghdr *nlh, |
| 1772 | struct netlink_dump_control *control) | 1786 | struct netlink_dump_control *control) |
| 1773 | { | 1787 | { |
| 1774 | struct netlink_callback *cb; | 1788 | struct netlink_callback *cb; |
| 1775 | struct sock *sk; | 1789 | struct sock *sk; |
| @@ -1784,6 +1798,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1784 | cb->done = control->done; | 1798 | cb->done = control->done; |
| 1785 | cb->nlh = nlh; | 1799 | cb->nlh = nlh; |
| 1786 | cb->data = control->data; | 1800 | cb->data = control->data; |
| 1801 | cb->module = control->module; | ||
| 1787 | cb->min_dump_alloc = control->min_dump_alloc; | 1802 | cb->min_dump_alloc = control->min_dump_alloc; |
| 1788 | atomic_inc(&skb->users); | 1803 | atomic_inc(&skb->users); |
| 1789 | cb->skb = skb; | 1804 | cb->skb = skb; |
| @@ -1794,19 +1809,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1794 | return -ECONNREFUSED; | 1809 | return -ECONNREFUSED; |
| 1795 | } | 1810 | } |
| 1796 | nlk = nlk_sk(sk); | 1811 | nlk = nlk_sk(sk); |
| 1797 | /* A dump is in progress... */ | 1812 | |
| 1798 | mutex_lock(nlk->cb_mutex); | 1813 | mutex_lock(nlk->cb_mutex); |
| 1814 | /* A dump is in progress... */ | ||
| 1799 | if (nlk->cb) { | 1815 | if (nlk->cb) { |
| 1800 | mutex_unlock(nlk->cb_mutex); | 1816 | mutex_unlock(nlk->cb_mutex); |
| 1801 | netlink_destroy_callback(cb); | 1817 | netlink_destroy_callback(cb); |
| 1802 | sock_put(sk); | 1818 | ret = -EBUSY; |
| 1803 | return -EBUSY; | 1819 | goto out; |
| 1804 | } | 1820 | } |
| 1821 | /* add reference of module which cb->dump belongs to */ | ||
| 1822 | if (!try_module_get(cb->module)) { | ||
| 1823 | mutex_unlock(nlk->cb_mutex); | ||
| 1824 | netlink_destroy_callback(cb); | ||
| 1825 | ret = -EPROTONOSUPPORT; | ||
| 1826 | goto out; | ||
| 1827 | } | ||
| 1828 | |||
| 1805 | nlk->cb = cb; | 1829 | nlk->cb = cb; |
| 1806 | mutex_unlock(nlk->cb_mutex); | 1830 | mutex_unlock(nlk->cb_mutex); |
| 1807 | 1831 | ||
| 1808 | ret = netlink_dump(sk); | 1832 | ret = netlink_dump(sk); |
| 1809 | 1833 | out: | |
| 1810 | sock_put(sk); | 1834 | sock_put(sk); |
| 1811 | 1835 | ||
| 1812 | if (ret) | 1836 | if (ret) |
| @@ -1817,7 +1841,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1817 | */ | 1841 | */ |
| 1818 | return -EINTR; | 1842 | return -EINTR; |
| 1819 | } | 1843 | } |
| 1820 | EXPORT_SYMBOL(netlink_dump_start); | 1844 | EXPORT_SYMBOL(__netlink_dump_start); |
| 1821 | 1845 | ||
| 1822 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | 1846 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) |
| 1823 | { | 1847 | { |
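
Two themes run through the af_netlink.c hunks: the listeners array is now read and replaced only through proper RCU accessors (nl_deref_protected, NULL checks, kfree_rcu), and the module that provides a dump callback is pinned with try_module_get() before the dump starts and released again when it ends or the socket is destroyed, so the module cannot be unloaded while a dump is outstanding. A userspace sketch of the pin/release idea, with made-up names:

    #include <stdio.h>

    struct owner {
        const char *name;
        int refs;
        int unloading;
    };

    static int try_get(struct owner *o)
    {
        if (o->unloading)
            return 0;               /* refuse: owner is going away */
        o->refs++;
        return 1;
    }

    static void put_owner(struct owner *o)
    {
        o->refs--;
    }

    struct dump {
        struct owner *module;
        int (*cb)(struct dump *d);
    };

    static int start_dump(struct dump *d)
    {
        if (!try_get(d->module))
            return -1;              /* -EPROTONOSUPPORT in the real code */
        return d->cb(d);
    }

    static void finish_dump(struct dump *d)
    {
        put_owner(d->module);       /* mirrors module_put() at dump end */
    }

    static int my_cb(struct dump *d)
    {
        printf("dumping on behalf of %s (refs=%d)\n",
               d->module->name, d->module->refs);
        return 0;
    }

    int main(void)
    {
        struct owner mod = { .name = "example", .refs = 1, .unloading = 0 };
        struct dump d = { .module = &mod, .cb = my_cb };

        if (start_dump(&d) == 0)
            finish_dump(&d);
        printf("refs back to %d\n", mod.refs);
        return 0;
    }
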
diff --git a/net/rds/send.c b/net/rds/send.c index 96531d4033a2..88eace57dd6b 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
| @@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) | |||
| 1122 | rds_stats_inc(s_send_pong); | 1122 | rds_stats_inc(s_send_pong); |
| 1123 | 1123 | ||
| 1124 | if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) | 1124 | if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) |
| 1125 | rds_send_xmit(conn); | 1125 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
| 1126 | 1126 | ||
| 1127 | rds_message_put(rm); | 1127 | rds_message_put(rm); |
| 1128 | return 0; | 1128 | return 0; |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 011d2384b115..7633a752c65e 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
| @@ -26,8 +26,8 @@ | |||
| 26 | #include "ar-internal.h" | 26 | #include "ar-internal.h" |
| 27 | 27 | ||
| 28 | static int rxrpc_vet_description_s(const char *); | 28 | static int rxrpc_vet_description_s(const char *); |
| 29 | static int rxrpc_instantiate(struct key *, const void *, size_t); | 29 | static int rxrpc_instantiate(struct key *, struct key_preparsed_payload *); |
| 30 | static int rxrpc_instantiate_s(struct key *, const void *, size_t); | 30 | static int rxrpc_instantiate_s(struct key *, struct key_preparsed_payload *); |
| 31 | static void rxrpc_destroy(struct key *); | 31 | static void rxrpc_destroy(struct key *); |
| 32 | static void rxrpc_destroy_s(struct key *); | 32 | static void rxrpc_destroy_s(struct key *); |
| 33 | static void rxrpc_describe(const struct key *, struct seq_file *); | 33 | static void rxrpc_describe(const struct key *, struct seq_file *); |
| @@ -678,7 +678,7 @@ error: | |||
| 678 | * | 678 | * |
| 679 | * if no data is provided, then a no-security key is made | 679 | * if no data is provided, then a no-security key is made |
| 680 | */ | 680 | */ |
| 681 | static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | 681 | static int rxrpc_instantiate(struct key *key, struct key_preparsed_payload *prep) |
| 682 | { | 682 | { |
| 683 | const struct rxrpc_key_data_v1 *v1; | 683 | const struct rxrpc_key_data_v1 *v1; |
| 684 | struct rxrpc_key_token *token, **pp; | 684 | struct rxrpc_key_token *token, **pp; |
| @@ -686,26 +686,26 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
| 686 | u32 kver; | 686 | u32 kver; |
| 687 | int ret; | 687 | int ret; |
| 688 | 688 | ||
| 689 | _enter("{%x},,%zu", key_serial(key), datalen); | 689 | _enter("{%x},,%zu", key_serial(key), prep->datalen); |
| 690 | 690 | ||
| 691 | /* handle a no-security key */ | 691 | /* handle a no-security key */ |
| 692 | if (!data && datalen == 0) | 692 | if (!prep->data && prep->datalen == 0) |
| 693 | return 0; | 693 | return 0; |
| 694 | 694 | ||
| 695 | /* determine if the XDR payload format is being used */ | 695 | /* determine if the XDR payload format is being used */ |
| 696 | if (datalen > 7 * 4) { | 696 | if (prep->datalen > 7 * 4) { |
| 697 | ret = rxrpc_instantiate_xdr(key, data, datalen); | 697 | ret = rxrpc_instantiate_xdr(key, prep->data, prep->datalen); |
| 698 | if (ret != -EPROTO) | 698 | if (ret != -EPROTO) |
| 699 | return ret; | 699 | return ret; |
| 700 | } | 700 | } |
| 701 | 701 | ||
| 702 | /* get the key interface version number */ | 702 | /* get the key interface version number */ |
| 703 | ret = -EINVAL; | 703 | ret = -EINVAL; |
| 704 | if (datalen <= 4 || !data) | 704 | if (prep->datalen <= 4 || !prep->data) |
| 705 | goto error; | 705 | goto error; |
| 706 | memcpy(&kver, data, sizeof(kver)); | 706 | memcpy(&kver, prep->data, sizeof(kver)); |
| 707 | data += sizeof(kver); | 707 | prep->data += sizeof(kver); |
| 708 | datalen -= sizeof(kver); | 708 | prep->datalen -= sizeof(kver); |
| 709 | 709 | ||
| 710 | _debug("KEY I/F VERSION: %u", kver); | 710 | _debug("KEY I/F VERSION: %u", kver); |
| 711 | 711 | ||
| @@ -715,11 +715,11 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
| 715 | 715 | ||
| 716 | /* deal with a version 1 key */ | 716 | /* deal with a version 1 key */ |
| 717 | ret = -EINVAL; | 717 | ret = -EINVAL; |
| 718 | if (datalen < sizeof(*v1)) | 718 | if (prep->datalen < sizeof(*v1)) |
| 719 | goto error; | 719 | goto error; |
| 720 | 720 | ||
| 721 | v1 = data; | 721 | v1 = prep->data; |
| 722 | if (datalen != sizeof(*v1) + v1->ticket_length) | 722 | if (prep->datalen != sizeof(*v1) + v1->ticket_length) |
| 723 | goto error; | 723 | goto error; |
| 724 | 724 | ||
| 725 | _debug("SCIX: %u", v1->security_index); | 725 | _debug("SCIX: %u", v1->security_index); |
| @@ -784,17 +784,17 @@ error: | |||
| 784 | * instantiate a server secret key | 784 | * instantiate a server secret key |
| 785 | * data should be a pointer to the 8-byte secret key | 785 | * data should be a pointer to the 8-byte secret key |
| 786 | */ | 786 | */ |
| 787 | static int rxrpc_instantiate_s(struct key *key, const void *data, | 787 | static int rxrpc_instantiate_s(struct key *key, |
| 788 | size_t datalen) | 788 | struct key_preparsed_payload *prep) |
| 789 | { | 789 | { |
| 790 | struct crypto_blkcipher *ci; | 790 | struct crypto_blkcipher *ci; |
| 791 | 791 | ||
| 792 | _enter("{%x},,%zu", key_serial(key), datalen); | 792 | _enter("{%x},,%zu", key_serial(key), prep->datalen); |
| 793 | 793 | ||
| 794 | if (datalen != 8) | 794 | if (prep->datalen != 8) |
| 795 | return -EINVAL; | 795 | return -EINVAL; |
| 796 | 796 | ||
| 797 | memcpy(&key->type_data, data, 8); | 797 | memcpy(&key->type_data, prep->data, 8); |
| 798 | 798 | ||
| 799 | ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); | 799 | ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); |
| 800 | if (IS_ERR(ci)) { | 800 | if (IS_ERR(ci)) { |
| @@ -802,7 +802,7 @@ static int rxrpc_instantiate_s(struct key *key, const void *data, | |||
| 802 | return PTR_ERR(ci); | 802 | return PTR_ERR(ci); |
| 803 | } | 803 | } |
| 804 | 804 | ||
| 805 | if (crypto_blkcipher_setkey(ci, data, 8) < 0) | 805 | if (crypto_blkcipher_setkey(ci, prep->data, 8) < 0) |
| 806 | BUG(); | 806 | BUG(); |
| 807 | 807 | ||
| 808 | key->payload.data = ci; | 808 | key->payload.data = ci; |
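
The ar-key.c hunks adapt rxrpc to the keys API change: the instantiate hooks receive one struct key_preparsed_payload instead of separate (data, datalen) arguments, so future payload attributes can be added without touching every key type again. A sketch of that "bundle the parameters in a struct" refactor, with illustrative stand-in types:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct prep_payload {
        const void *data;
        size_t datalen;
        /* later API versions can grow extra members here */
    };

    static int instantiate_secret(const struct prep_payload *prep)
    {
        unsigned char secret[8];

        if (prep->datalen != sizeof(secret))
            return -1;              /* -EINVAL in the real hook */
        memcpy(secret, prep->data, sizeof(secret));
        printf("installed %zu-byte secret\n", sizeof(secret));
        return 0;
    }

    int main(void)
    {
        static const unsigned char key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        struct prep_payload prep = { .data = key, .datalen = sizeof(key) };

        return instantiate_secret(&prep) ? 1 : 0;
    }
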
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 57f7de839b03..6773d7803627 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
| @@ -1642,8 +1642,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
| 1642 | asoc->outqueue.outstanding_bytes; | 1642 | asoc->outqueue.outstanding_bytes; |
| 1643 | sackh.num_gap_ack_blocks = 0; | 1643 | sackh.num_gap_ack_blocks = 0; |
| 1644 | sackh.num_dup_tsns = 0; | 1644 | sackh.num_dup_tsns = 0; |
| 1645 | chunk->subh.sack_hdr = &sackh; | ||
| 1645 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | 1646 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, |
| 1646 | SCTP_SACKH(&sackh)); | 1647 | SCTP_CHUNK(chunk)); |
| 1647 | break; | 1648 | break; |
| 1648 | 1649 | ||
| 1649 | case SCTP_CMD_DISCARD_PACKET: | 1650 | case SCTP_CMD_DISCARD_PACKET: |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 59d16ea927f0..a60d1f8b41c5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -974,7 +974,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, | |||
| 974 | void *addr_buf; | 974 | void *addr_buf; |
| 975 | struct sctp_af *af; | 975 | struct sctp_af *af; |
| 976 | 976 | ||
| 977 | SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p" | 977 | SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p" |
| 978 | " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); | 978 | " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); |
| 979 | 979 | ||
| 980 | if (unlikely(addrs_size <= 0)) | 980 | if (unlikely(addrs_size <= 0)) |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 34c522021004..909dc0c31aab 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -239,7 +239,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
| 239 | } | 239 | } |
| 240 | return q; | 240 | return q; |
| 241 | err: | 241 | err: |
| 242 | dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p)); | 242 | dprintk("RPC: %s returning %ld\n", __func__, -PTR_ERR(p)); |
| 243 | return p; | 243 | return p; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| @@ -301,10 +301,10 @@ __gss_find_upcall(struct rpc_pipe *pipe, uid_t uid) | |||
| 301 | if (pos->uid != uid) | 301 | if (pos->uid != uid) |
| 302 | continue; | 302 | continue; |
| 303 | atomic_inc(&pos->count); | 303 | atomic_inc(&pos->count); |
| 304 | dprintk("RPC: gss_find_upcall found msg %p\n", pos); | 304 | dprintk("RPC: %s found msg %p\n", __func__, pos); |
| 305 | return pos; | 305 | return pos; |
| 306 | } | 306 | } |
| 307 | dprintk("RPC: gss_find_upcall found nothing\n"); | 307 | dprintk("RPC: %s found nothing\n", __func__); |
| 308 | return NULL; | 308 | return NULL; |
| 309 | } | 309 | } |
| 310 | 310 | ||
| @@ -507,8 +507,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 507 | struct rpc_pipe *pipe; | 507 | struct rpc_pipe *pipe; |
| 508 | int err = 0; | 508 | int err = 0; |
| 509 | 509 | ||
| 510 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | 510 | dprintk("RPC: %5u %s for uid %u\n", |
| 511 | cred->cr_uid); | 511 | task->tk_pid, __func__, cred->cr_uid); |
| 512 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | 512 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); |
| 513 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 513 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
| 514 | /* XXX: warning on the first, under the assumption we | 514 | /* XXX: warning on the first, under the assumption we |
| @@ -539,8 +539,8 @@ gss_refresh_upcall(struct rpc_task *task) | |||
| 539 | spin_unlock(&pipe->lock); | 539 | spin_unlock(&pipe->lock); |
| 540 | gss_release_msg(gss_msg); | 540 | gss_release_msg(gss_msg); |
| 541 | out: | 541 | out: |
| 542 | dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", | 542 | dprintk("RPC: %5u %s for uid %u result %d\n", |
| 543 | task->tk_pid, cred->cr_uid, err); | 543 | task->tk_pid, __func__, cred->cr_uid, err); |
| 544 | return err; | 544 | return err; |
| 545 | } | 545 | } |
| 546 | 546 | ||
| @@ -553,7 +553,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) | |||
| 553 | DEFINE_WAIT(wait); | 553 | DEFINE_WAIT(wait); |
| 554 | int err = 0; | 554 | int err = 0; |
| 555 | 555 | ||
| 556 | dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); | 556 | dprintk("RPC: %s for uid %u\n", __func__, cred->cr_uid); |
| 557 | retry: | 557 | retry: |
| 558 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); | 558 | gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); |
| 559 | if (PTR_ERR(gss_msg) == -EAGAIN) { | 559 | if (PTR_ERR(gss_msg) == -EAGAIN) { |
| @@ -594,8 +594,8 @@ out_intr: | |||
| 594 | finish_wait(&gss_msg->waitqueue, &wait); | 594 | finish_wait(&gss_msg->waitqueue, &wait); |
| 595 | gss_release_msg(gss_msg); | 595 | gss_release_msg(gss_msg); |
| 596 | out: | 596 | out: |
| 597 | dprintk("RPC: gss_create_upcall for uid %u result %d\n", | 597 | dprintk("RPC: %s for uid %u result %d\n", |
| 598 | cred->cr_uid, err); | 598 | __func__, cred->cr_uid, err); |
| 599 | return err; | 599 | return err; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| @@ -681,7 +681,7 @@ err_put_ctx: | |||
| 681 | err: | 681 | err: |
| 682 | kfree(buf); | 682 | kfree(buf); |
| 683 | out: | 683 | out: |
| 684 | dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); | 684 | dprintk("RPC: %s returning %Zd\n", __func__, err); |
| 685 | return err; | 685 | return err; |
| 686 | } | 686 | } |
| 687 | 687 | ||
| @@ -747,8 +747,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) | |||
| 747 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); | 747 | struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); |
| 748 | 748 | ||
| 749 | if (msg->errno < 0) { | 749 | if (msg->errno < 0) { |
| 750 | dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", | 750 | dprintk("RPC: %s releasing msg %p\n", |
| 751 | gss_msg); | 751 | __func__, gss_msg); |
| 752 | atomic_inc(&gss_msg->count); | 752 | atomic_inc(&gss_msg->count); |
| 753 | gss_unhash_msg(gss_msg); | 753 | gss_unhash_msg(gss_msg); |
| 754 | if (msg->errno == -ETIMEDOUT) | 754 | if (msg->errno == -ETIMEDOUT) |
| @@ -976,7 +976,7 @@ gss_destroying_context(struct rpc_cred *cred) | |||
| 976 | static void | 976 | static void |
| 977 | gss_do_free_ctx(struct gss_cl_ctx *ctx) | 977 | gss_do_free_ctx(struct gss_cl_ctx *ctx) |
| 978 | { | 978 | { |
| 979 | dprintk("RPC: gss_free_ctx\n"); | 979 | dprintk("RPC: %s\n", __func__); |
| 980 | 980 | ||
| 981 | gss_delete_sec_context(&ctx->gc_gss_ctx); | 981 | gss_delete_sec_context(&ctx->gc_gss_ctx); |
| 982 | kfree(ctx->gc_wire_ctx.data); | 982 | kfree(ctx->gc_wire_ctx.data); |
| @@ -999,7 +999,7 @@ gss_free_ctx(struct gss_cl_ctx *ctx) | |||
| 999 | static void | 999 | static void |
| 1000 | gss_free_cred(struct gss_cred *gss_cred) | 1000 | gss_free_cred(struct gss_cred *gss_cred) |
| 1001 | { | 1001 | { |
| 1002 | dprintk("RPC: gss_free_cred %p\n", gss_cred); | 1002 | dprintk("RPC: %s cred=%p\n", __func__, gss_cred); |
| 1003 | kfree(gss_cred); | 1003 | kfree(gss_cred); |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| @@ -1049,8 +1049,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 1049 | struct gss_cred *cred = NULL; | 1049 | struct gss_cred *cred = NULL; |
| 1050 | int err = -ENOMEM; | 1050 | int err = -ENOMEM; |
| 1051 | 1051 | ||
| 1052 | dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", | 1052 | dprintk("RPC: %s for uid %d, flavor %d\n", |
| 1053 | acred->uid, auth->au_flavor); | 1053 | __func__, acred->uid, auth->au_flavor); |
| 1054 | 1054 | ||
| 1055 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) | 1055 | if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS))) |
| 1056 | goto out_err; | 1056 | goto out_err; |
| @@ -1069,7 +1069,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 1069 | return &cred->gc_base; | 1069 | return &cred->gc_base; |
| 1070 | 1070 | ||
| 1071 | out_err: | 1071 | out_err: |
| 1072 | dprintk("RPC: gss_create_cred failed with error %d\n", err); | 1072 | dprintk("RPC: %s failed with error %d\n", __func__, err); |
| 1073 | return ERR_PTR(err); | 1073 | return ERR_PTR(err); |
| 1074 | } | 1074 | } |
| 1075 | 1075 | ||
| @@ -1127,7 +1127,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 1127 | struct kvec iov; | 1127 | struct kvec iov; |
| 1128 | struct xdr_buf verf_buf; | 1128 | struct xdr_buf verf_buf; |
| 1129 | 1129 | ||
| 1130 | dprintk("RPC: %5u gss_marshal\n", task->tk_pid); | 1130 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
| 1131 | 1131 | ||
| 1132 | *p++ = htonl(RPC_AUTH_GSS); | 1132 | *p++ = htonl(RPC_AUTH_GSS); |
| 1133 | cred_len = p++; | 1133 | cred_len = p++; |
| @@ -1253,7 +1253,7 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 1253 | u32 flav,len; | 1253 | u32 flav,len; |
| 1254 | u32 maj_stat; | 1254 | u32 maj_stat; |
| 1255 | 1255 | ||
| 1256 | dprintk("RPC: %5u gss_validate\n", task->tk_pid); | 1256 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
| 1257 | 1257 | ||
| 1258 | flav = ntohl(*p++); | 1258 | flav = ntohl(*p++); |
| 1259 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) | 1259 | if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE) |
| @@ -1271,20 +1271,20 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 1271 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1271 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
| 1272 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | 1272 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); |
| 1273 | if (maj_stat) { | 1273 | if (maj_stat) { |
| 1274 | dprintk("RPC: %5u gss_validate: gss_verify_mic returned " | 1274 | dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n", |
| 1275 | "error 0x%08x\n", task->tk_pid, maj_stat); | 1275 | task->tk_pid, __func__, maj_stat); |
| 1276 | goto out_bad; | 1276 | goto out_bad; |
| 1277 | } | 1277 | } |
| 1278 | /* We leave it to unwrap to calculate au_rslack. For now we just | 1278 | /* We leave it to unwrap to calculate au_rslack. For now we just |
| 1279 | * calculate the length of the verifier: */ | 1279 | * calculate the length of the verifier: */ |
| 1280 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; | 1280 | cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
| 1281 | gss_put_ctx(ctx); | 1281 | gss_put_ctx(ctx); |
| 1282 | dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", | 1282 | dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", |
| 1283 | task->tk_pid); | 1283 | task->tk_pid, __func__); |
| 1284 | return p + XDR_QUADLEN(len); | 1284 | return p + XDR_QUADLEN(len); |
| 1285 | out_bad: | 1285 | out_bad: |
| 1286 | gss_put_ctx(ctx); | 1286 | gss_put_ctx(ctx); |
| 1287 | dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid); | 1287 | dprintk("RPC: %5u %s failed.\n", task->tk_pid, __func__); |
| 1288 | return NULL; | 1288 | return NULL; |
| 1289 | } | 1289 | } |
| 1290 | 1290 | ||
| @@ -1466,7 +1466,7 @@ gss_wrap_req(struct rpc_task *task, | |||
| 1466 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1466 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
| 1467 | int status = -EIO; | 1467 | int status = -EIO; |
| 1468 | 1468 | ||
| 1469 | dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid); | 1469 | dprintk("RPC: %5u %s\n", task->tk_pid, __func__); |
| 1470 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { | 1470 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) { |
| 1471 | /* The spec seems a little ambiguous here, but I think that not | 1471 | /* The spec seems a little ambiguous here, but I think that not |
| 1472 | * wrapping context destruction requests makes the most sense. | 1472 | * wrapping context destruction requests makes the most sense. |
| @@ -1489,7 +1489,7 @@ gss_wrap_req(struct rpc_task *task, | |||
| 1489 | } | 1489 | } |
| 1490 | out: | 1490 | out: |
| 1491 | gss_put_ctx(ctx); | 1491 | gss_put_ctx(ctx); |
| 1492 | dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status); | 1492 | dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status); |
| 1493 | return status; | 1493 | return status; |
| 1494 | } | 1494 | } |
| 1495 | 1495 | ||
| @@ -1604,8 +1604,8 @@ out_decode: | |||
| 1604 | status = gss_unwrap_req_decode(decode, rqstp, p, obj); | 1604 | status = gss_unwrap_req_decode(decode, rqstp, p, obj); |
| 1605 | out: | 1605 | out: |
| 1606 | gss_put_ctx(ctx); | 1606 | gss_put_ctx(ctx); |
| 1607 | dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, | 1607 | dprintk("RPC: %5u %s returning %d\n", |
| 1608 | status); | 1608 | task->tk_pid, __func__, status); |
| 1609 | return status; | 1609 | return status; |
| 1610 | } | 1610 | } |
| 1611 | 1611 | ||
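
The auth_gss.c hunks replace hard-coded function names in dprintk() format strings with __func__, so renamed or copied functions never log a stale name. A minimal sketch of the same idea as a logging macro (using the GNU comma-paste variadic extension, as kernel code does):

    #include <stdio.h>

    #define debug(fmt, ...) \
        printf("RPC: %s " fmt "\n", __func__, ##__VA_ARGS__)

    static int refresh_upcall(unsigned int uid)
    {
        debug("for uid %u", uid);
        /* ... do the work ... */
        debug("for uid %u result %d", uid, 0);
        return 0;
    }

    int main(void)
    {
        return refresh_upcall(1000);
    }
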
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 5a3d675d2f2f..a9c0bbccad6b 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
| @@ -172,7 +172,7 @@ out_free: | |||
| 172 | xprt_free_allocation(req); | 172 | xprt_free_allocation(req); |
| 173 | 173 | ||
| 174 | dprintk("RPC: setup backchannel transport failed\n"); | 174 | dprintk("RPC: setup backchannel transport failed\n"); |
| 175 | return -1; | 175 | return -ENOMEM; |
| 176 | } | 176 | } |
| 177 | EXPORT_SYMBOL_GPL(xprt_setup_backchannel); | 177 | EXPORT_SYMBOL_GPL(xprt_setup_backchannel); |
| 178 | 178 | ||
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 2a68bb3db772..fc2f7aa4dca7 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -1409,11 +1409,11 @@ static ssize_t read_flush(struct file *file, char __user *buf, | |||
| 1409 | size_t count, loff_t *ppos, | 1409 | size_t count, loff_t *ppos, |
| 1410 | struct cache_detail *cd) | 1410 | struct cache_detail *cd) |
| 1411 | { | 1411 | { |
| 1412 | char tbuf[20]; | 1412 | char tbuf[22]; |
| 1413 | unsigned long p = *ppos; | 1413 | unsigned long p = *ppos; |
| 1414 | size_t len; | 1414 | size_t len; |
| 1415 | 1415 | ||
| 1416 | sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time)); | 1416 | snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time)); |
| 1417 | len = strlen(tbuf); | 1417 | len = strlen(tbuf); |
| 1418 | if (p >= len) | 1418 | if (p >= len) |
| 1419 | return 0; | 1419 | return 0; |
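
The cache.c change sizes the buffer for the worst case: a 64-bit unsigned long can need 20 decimal digits, so with the trailing newline and NUL terminator 22 bytes are required, and snprintf() bounds the write even if the estimate is ever wrong again. A quick demonstration of the arithmetic:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        char tbuf[22];
        unsigned long flush_time = ULONG_MAX;   /* worst case on 64-bit */
        int len;

        /* 20 digits + '\n' + NUL fit exactly in 22 bytes */
        len = snprintf(tbuf, sizeof(tbuf), "%lu\n", flush_time);
        printf("wrote %d bytes: %s", len, tbuf);
        return 0;
    }
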
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index fa48c60aef23..cdc7564b4512 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -490,61 +490,86 @@ EXPORT_SYMBOL_GPL(rpc_create); | |||
| 490 | * same transport while varying parameters such as the authentication | 490 | * same transport while varying parameters such as the authentication |
| 491 | * flavour. | 491 | * flavour. |
| 492 | */ | 492 | */ |
| 493 | struct rpc_clnt * | 493 | static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, |
| 494 | rpc_clone_client(struct rpc_clnt *clnt) | 494 | struct rpc_clnt *clnt) |
| 495 | { | 495 | { |
| 496 | struct rpc_clnt *new; | ||
| 497 | struct rpc_xprt *xprt; | 496 | struct rpc_xprt *xprt; |
| 498 | int err = -ENOMEM; | 497 | struct rpc_clnt *new; |
| 498 | int err; | ||
| 499 | 499 | ||
| 500 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); | 500 | err = -ENOMEM; |
| 501 | if (!new) | ||
| 502 | goto out_no_clnt; | ||
| 503 | new->cl_parent = clnt; | ||
| 504 | /* Turn off autobind on clones */ | ||
| 505 | new->cl_autobind = 0; | ||
| 506 | INIT_LIST_HEAD(&new->cl_tasks); | ||
| 507 | spin_lock_init(&new->cl_lock); | ||
| 508 | rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval); | ||
| 509 | new->cl_metrics = rpc_alloc_iostats(clnt); | ||
| 510 | if (new->cl_metrics == NULL) | ||
| 511 | goto out_no_stats; | ||
| 512 | if (clnt->cl_principal) { | ||
| 513 | new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); | ||
| 514 | if (new->cl_principal == NULL) | ||
| 515 | goto out_no_principal; | ||
| 516 | } | ||
| 517 | rcu_read_lock(); | 501 | rcu_read_lock(); |
| 518 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); | 502 | xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); |
| 519 | rcu_read_unlock(); | 503 | rcu_read_unlock(); |
| 520 | if (xprt == NULL) | 504 | if (xprt == NULL) |
| 521 | goto out_no_transport; | 505 | goto out_err; |
| 522 | rcu_assign_pointer(new->cl_xprt, xprt); | 506 | args->servername = xprt->servername; |
| 523 | atomic_set(&new->cl_count, 1); | 507 | |
| 524 | err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); | 508 | new = rpc_new_client(args, xprt); |
| 525 | if (err != 0) | 509 | if (IS_ERR(new)) { |
| 526 | goto out_no_path; | 510 | err = PTR_ERR(new); |
| 527 | rpc_clnt_set_nodename(new, utsname()->nodename); | 511 | goto out_put; |
| 528 | if (new->cl_auth) | 512 | } |
| 529 | atomic_inc(&new->cl_auth->au_count); | 513 | |
| 530 | atomic_inc(&clnt->cl_count); | 514 | atomic_inc(&clnt->cl_count); |
| 531 | rpc_register_client(new); | 515 | new->cl_parent = clnt; |
| 532 | rpciod_up(); | 516 | |
| 517 | /* Turn off autobind on clones */ | ||
| 518 | new->cl_autobind = 0; | ||
| 519 | new->cl_softrtry = clnt->cl_softrtry; | ||
| 520 | new->cl_discrtry = clnt->cl_discrtry; | ||
| 521 | new->cl_chatty = clnt->cl_chatty; | ||
| 533 | return new; | 522 | return new; |
| 534 | out_no_path: | 523 | |
| 524 | out_put: | ||
| 535 | xprt_put(xprt); | 525 | xprt_put(xprt); |
| 536 | out_no_transport: | 526 | out_err: |
| 537 | kfree(new->cl_principal); | ||
| 538 | out_no_principal: | ||
| 539 | rpc_free_iostats(new->cl_metrics); | ||
| 540 | out_no_stats: | ||
| 541 | kfree(new); | ||
| 542 | out_no_clnt: | ||
| 543 | dprintk("RPC: %s: returned error %d\n", __func__, err); | 527 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
| 544 | return ERR_PTR(err); | 528 | return ERR_PTR(err); |
| 545 | } | 529 | } |
| 530 | |||
| 531 | /** | ||
| 532 | * rpc_clone_client - Clone an RPC client structure | ||
| 533 | * | ||
| 534 | * @clnt: RPC client whose parameters are copied | ||
| 535 | * | ||
| 536 | * Returns a fresh RPC client or an ERR_PTR. | ||
| 537 | */ | ||
| 538 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) | ||
| 539 | { | ||
| 540 | struct rpc_create_args args = { | ||
| 541 | .program = clnt->cl_program, | ||
| 542 | .prognumber = clnt->cl_prog, | ||
| 543 | .version = clnt->cl_vers, | ||
| 544 | .authflavor = clnt->cl_auth->au_flavor, | ||
| 545 | .client_name = clnt->cl_principal, | ||
| 546 | }; | ||
| 547 | return __rpc_clone_client(&args, clnt); | ||
| 548 | } | ||
| 546 | EXPORT_SYMBOL_GPL(rpc_clone_client); | 549 | EXPORT_SYMBOL_GPL(rpc_clone_client); |
| 547 | 550 | ||
| 551 | /** | ||
| 552 | * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth | ||
| 553 | * | ||
| 554 | * @clnt: RPC client whose parameters are copied | ||
| 555 | * @auth: security flavor for new client | ||
| 556 | * | ||
| 557 | * Returns a fresh RPC client or an ERR_PTR. | ||
| 558 | */ | ||
| 559 | struct rpc_clnt * | ||
| 560 | rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | ||
| 561 | { | ||
| 562 | struct rpc_create_args args = { | ||
| 563 | .program = clnt->cl_program, | ||
| 564 | .prognumber = clnt->cl_prog, | ||
| 565 | .version = clnt->cl_vers, | ||
| 566 | .authflavor = flavor, | ||
| 567 | .client_name = clnt->cl_principal, | ||
| 568 | }; | ||
| 569 | return __rpc_clone_client(&args, clnt); | ||
| 570 | } | ||
| 571 | EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); | ||
| 572 | |||
| 548 | /* | 573 | /* |
| 549 | * Kill all tasks for the given client. | 574 | * Kill all tasks for the given client. |
| 550 | * XXX: kill their descendants as well? | 575 | * XXX: kill their descendants as well? |
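The rewritten clone path above no longer duplicates rpc_new_client()'s field-by-field setup: rpc_clone_client() and rpc_clone_client_set_auth() each just fill an rpc_create_args and hand it to a shared __rpc_clone_client() helper. Below is a minimal userspace C sketch of that "argument struct plus one shared constructor" shape; the struct, field and function names are illustrative stand-ins, not the SUNRPC API.

/* Sketch of the "args struct + shared constructor" pattern behind the
 * reworked rpc_clone_client()/rpc_clone_client_set_auth().  All names
 * here are illustrative stand-ins, not the kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct create_args {
	const char *program;
	unsigned int version;
	unsigned int authflavor;
};

struct client {
	struct create_args args;	/* settings captured at creation */
	int autobind;
};

/* The one place that knows how to build a client from its args. */
static struct client *new_client(const struct create_args *args)
{
	struct client *clnt = calloc(1, sizeof(*clnt));

	if (!clnt)
		return NULL;
	clnt->args = *args;
	clnt->autobind = 1;
	return clnt;
}

static struct client *__clone_client(const struct create_args *args,
				     struct client *parent)
{
	struct client *new = new_client(args);

	if (!new)
		return NULL;
	new->autobind = 0;	/* turn off autobind on clones */
	(void)parent;		/* a full version would record the parent
				 * and take a reference on it here */
	return new;
}

/* The two exported variants differ only in how the args are filled in. */
static struct client *clone_client(struct client *clnt)
{
	struct create_args args = clnt->args;

	return __clone_client(&args, clnt);
}

static struct client *clone_client_set_auth(struct client *clnt,
					    unsigned int flavor)
{
	struct create_args args = clnt->args;

	args.authflavor = flavor;	/* only the security flavor changes */
	return __clone_client(&args, clnt);
}

int main(void)
{
	struct create_args args = { .program = "demo", .version = 3 };
	struct client *parent = new_client(&args);
	struct client *plain = clone_client(parent);
	struct client *gss = clone_client_set_auth(parent, 6 /* e.g. RPC_AUTH_GSS */);

	printf("plain clone flavor %u, gss clone flavor %u, autobind %d\n",
	       plain->args.authflavor, gss->args.authflavor, gss->autobind);
	free(gss);
	free(plain);
	free(parent);
	return 0;
}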
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 21fde99e5c56..80f5dd23417d 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -1119,8 +1119,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) | |||
| 1119 | return -ENOMEM; | 1119 | return -ENOMEM; |
| 1120 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) | 1120 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) |
| 1121 | return -ENOMEM; | 1121 | return -ENOMEM; |
| 1122 | dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, | 1122 | dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", |
| 1123 | NET_NAME(net)); | 1123 | net, NET_NAME(net)); |
| 1124 | sn->pipefs_sb = sb; | 1124 | sn->pipefs_sb = sb; |
| 1125 | err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, | 1125 | err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, |
| 1126 | RPC_PIPEFS_MOUNT, | 1126 | RPC_PIPEFS_MOUNT, |
| @@ -1155,8 +1155,8 @@ static void rpc_kill_sb(struct super_block *sb) | |||
| 1155 | sn->pipefs_sb = NULL; | 1155 | sn->pipefs_sb = NULL; |
| 1156 | mutex_unlock(&sn->pipefs_sb_lock); | 1156 | mutex_unlock(&sn->pipefs_sb_lock); |
| 1157 | put_net(net); | 1157 | put_net(net); |
| 1158 | dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net, | 1158 | dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", |
| 1159 | NET_NAME(net)); | 1159 | net, NET_NAME(net)); |
| 1160 | blocking_notifier_call_chain(&rpc_pipefs_notifier_list, | 1160 | blocking_notifier_call_chain(&rpc_pipefs_notifier_list, |
| 1161 | RPC_PIPEFS_UMOUNT, | 1161 | RPC_PIPEFS_UMOUNT, |
| 1162 | sb); | 1162 | sb); |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 128494ec9a64..6357fcb00c7e 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -1022,7 +1022,7 @@ static int rpciod_start(void) | |||
| 1022 | * Create the rpciod thread and wait for it to start. | 1022 | * Create the rpciod thread and wait for it to start. |
| 1023 | */ | 1023 | */ |
| 1024 | dprintk("RPC: creating workqueue rpciod\n"); | 1024 | dprintk("RPC: creating workqueue rpciod\n"); |
| 1025 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0); | 1025 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1); |
| 1026 | rpciod_workqueue = wq; | 1026 | rpciod_workqueue = wq; |
| 1027 | return rpciod_workqueue != NULL; | 1027 | return rpciod_workqueue != NULL; |
| 1028 | } | 1028 | } |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index bac973a31367..194d865fae72 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -208,6 +208,35 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | |||
| 208 | return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); | 208 | return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | /* | ||
| 212 | * svc_xprt_received conditionally queues the transport for processing | ||
| 213 | * by another thread. The caller must hold the XPT_BUSY bit and must | ||
| 214 | * not thereafter touch transport data. | ||
| 215 | * | ||
| 216 | * Note: XPT_DATA only gets cleared when a read-attempt finds no (or | ||
| 217 | * insufficient) data. | ||
| 218 | */ | ||
| 219 | static void svc_xprt_received(struct svc_xprt *xprt) | ||
| 220 | { | ||
| 221 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | ||
| 222 | /* As soon as we clear busy, the xprt could be closed and | ||
| 223 | * 'put', so we need a reference to call svc_xprt_enqueue with: | ||
| 224 | */ | ||
| 225 | svc_xprt_get(xprt); | ||
| 226 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 227 | svc_xprt_enqueue(xprt); | ||
| 228 | svc_xprt_put(xprt); | ||
| 229 | } | ||
| 230 | |||
| 231 | void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new) | ||
| 232 | { | ||
| 233 | clear_bit(XPT_TEMP, &new->xpt_flags); | ||
| 234 | spin_lock_bh(&serv->sv_lock); | ||
| 235 | list_add(&new->xpt_list, &serv->sv_permsocks); | ||
| 236 | spin_unlock_bh(&serv->sv_lock); | ||
| 237 | svc_xprt_received(new); | ||
| 238 | } | ||
| 239 | |||
| 211 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | 240 | int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, |
| 212 | struct net *net, const int family, | 241 | struct net *net, const int family, |
| 213 | const unsigned short port, int flags) | 242 | const unsigned short port, int flags) |
| @@ -232,13 +261,8 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | |||
| 232 | module_put(xcl->xcl_owner); | 261 | module_put(xcl->xcl_owner); |
| 233 | return PTR_ERR(newxprt); | 262 | return PTR_ERR(newxprt); |
| 234 | } | 263 | } |
| 235 | 264 | svc_add_new_perm_xprt(serv, newxprt); | |
| 236 | clear_bit(XPT_TEMP, &newxprt->xpt_flags); | ||
| 237 | spin_lock_bh(&serv->sv_lock); | ||
| 238 | list_add(&newxprt->xpt_list, &serv->sv_permsocks); | ||
| 239 | spin_unlock_bh(&serv->sv_lock); | ||
| 240 | newport = svc_xprt_local_port(newxprt); | 265 | newport = svc_xprt_local_port(newxprt); |
| 241 | clear_bit(XPT_BUSY, &newxprt->xpt_flags); | ||
| 242 | return newport; | 266 | return newport; |
| 243 | } | 267 | } |
| 244 | err: | 268 | err: |
| @@ -394,27 +418,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) | |||
| 394 | return xprt; | 418 | return xprt; |
| 395 | } | 419 | } |
| 396 | 420 | ||
| 397 | /* | ||
| 398 | * svc_xprt_received conditionally queues the transport for processing | ||
| 399 | * by another thread. The caller must hold the XPT_BUSY bit and must | ||
| 400 | * not thereafter touch transport data. | ||
| 401 | * | ||
| 402 | * Note: XPT_DATA only gets cleared when a read-attempt finds no (or | ||
| 403 | * insufficient) data. | ||
| 404 | */ | ||
| 405 | void svc_xprt_received(struct svc_xprt *xprt) | ||
| 406 | { | ||
| 407 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | ||
| 408 | /* As soon as we clear busy, the xprt could be closed and | ||
| 409 | * 'put', so we need a reference to call svc_xprt_enqueue with: | ||
| 410 | */ | ||
| 411 | svc_xprt_get(xprt); | ||
| 412 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 413 | svc_xprt_enqueue(xprt); | ||
| 414 | svc_xprt_put(xprt); | ||
| 415 | } | ||
| 416 | EXPORT_SYMBOL_GPL(svc_xprt_received); | ||
| 417 | |||
| 418 | /** | 421 | /** |
| 419 | * svc_reserve - change the space reserved for the reply to a request. | 422 | * svc_reserve - change the space reserved for the reply to a request. |
| 420 | * @rqstp: The request in question | 423 | * @rqstp: The request in question |
| @@ -565,33 +568,12 @@ static void svc_check_conn_limits(struct svc_serv *serv) | |||
| 565 | } | 568 | } |
| 566 | } | 569 | } |
| 567 | 570 | ||
| 568 | /* | 571 | int svc_alloc_arg(struct svc_rqst *rqstp) |
| 569 | * Receive the next request on any transport. This code is carefully | ||
| 570 | * organised not to touch any cachelines in the shared svc_serv | ||
| 571 | * structure, only cachelines in the local svc_pool. | ||
| 572 | */ | ||
| 573 | int svc_recv(struct svc_rqst *rqstp, long timeout) | ||
| 574 | { | 572 | { |
| 575 | struct svc_xprt *xprt = NULL; | 573 | struct svc_serv *serv = rqstp->rq_server; |
| 576 | struct svc_serv *serv = rqstp->rq_server; | 574 | struct xdr_buf *arg; |
| 577 | struct svc_pool *pool = rqstp->rq_pool; | 575 | int pages; |
| 578 | int len, i; | 576 | int i; |
| 579 | int pages; | ||
| 580 | struct xdr_buf *arg; | ||
| 581 | DECLARE_WAITQUEUE(wait, current); | ||
| 582 | long time_left; | ||
| 583 | |||
| 584 | dprintk("svc: server %p waiting for data (to = %ld)\n", | ||
| 585 | rqstp, timeout); | ||
| 586 | |||
| 587 | if (rqstp->rq_xprt) | ||
| 588 | printk(KERN_ERR | ||
| 589 | "svc_recv: service %p, transport not NULL!\n", | ||
| 590 | rqstp); | ||
| 591 | if (waitqueue_active(&rqstp->rq_wait)) | ||
| 592 | printk(KERN_ERR | ||
| 593 | "svc_recv: service %p, wait queue active!\n", | ||
| 594 | rqstp); | ||
| 595 | 577 | ||
| 596 | /* now allocate needed pages. If we get a failure, sleep briefly */ | 578 | /* now allocate needed pages. If we get a failure, sleep briefly */ |
| 597 | pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; | 579 | pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; |
| @@ -621,11 +603,15 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 621 | arg->page_len = (pages-2)*PAGE_SIZE; | 603 | arg->page_len = (pages-2)*PAGE_SIZE; |
| 622 | arg->len = (pages-1)*PAGE_SIZE; | 604 | arg->len = (pages-1)*PAGE_SIZE; |
| 623 | arg->tail[0].iov_len = 0; | 605 | arg->tail[0].iov_len = 0; |
| 606 | return 0; | ||
| 607 | } | ||
| 624 | 608 | ||
| 625 | try_to_freeze(); | 609 | struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) |
| 626 | cond_resched(); | 610 | { |
| 627 | if (signalled() || kthread_should_stop()) | 611 | struct svc_xprt *xprt; |
| 628 | return -EINTR; | 612 | struct svc_pool *pool = rqstp->rq_pool; |
| 613 | DECLARE_WAITQUEUE(wait, current); | ||
| 614 | long time_left; | ||
| 629 | 615 | ||
| 630 | /* Normally we will wait up to 5 seconds for any required | 616 | /* Normally we will wait up to 5 seconds for any required |
| 631 | * cache information to be provided. | 617 | * cache information to be provided. |
| @@ -663,7 +649,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 663 | if (kthread_should_stop()) { | 649 | if (kthread_should_stop()) { |
| 664 | set_current_state(TASK_RUNNING); | 650 | set_current_state(TASK_RUNNING); |
| 665 | spin_unlock_bh(&pool->sp_lock); | 651 | spin_unlock_bh(&pool->sp_lock); |
| 666 | return -EINTR; | 652 | return ERR_PTR(-EINTR); |
| 667 | } | 653 | } |
| 668 | 654 | ||
| 669 | add_wait_queue(&rqstp->rq_wait, &wait); | 655 | add_wait_queue(&rqstp->rq_wait, &wait); |
| @@ -684,48 +670,58 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 684 | spin_unlock_bh(&pool->sp_lock); | 670 | spin_unlock_bh(&pool->sp_lock); |
| 685 | dprintk("svc: server %p, no data yet\n", rqstp); | 671 | dprintk("svc: server %p, no data yet\n", rqstp); |
| 686 | if (signalled() || kthread_should_stop()) | 672 | if (signalled() || kthread_should_stop()) |
| 687 | return -EINTR; | 673 | return ERR_PTR(-EINTR); |
| 688 | else | 674 | else |
| 689 | return -EAGAIN; | 675 | return ERR_PTR(-EAGAIN); |
| 690 | } | 676 | } |
| 691 | } | 677 | } |
| 692 | spin_unlock_bh(&pool->sp_lock); | 678 | spin_unlock_bh(&pool->sp_lock); |
| 679 | return xprt; | ||
| 680 | } | ||
| 681 | |||
| 682 | void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) | ||
| 683 | { | ||
| 684 | spin_lock_bh(&serv->sv_lock); | ||
| 685 | set_bit(XPT_TEMP, &newxpt->xpt_flags); | ||
| 686 | list_add(&newxpt->xpt_list, &serv->sv_tempsocks); | ||
| 687 | serv->sv_tmpcnt++; | ||
| 688 | if (serv->sv_temptimer.function == NULL) { | ||
| 689 | /* setup timer to age temp transports */ | ||
| 690 | setup_timer(&serv->sv_temptimer, svc_age_temp_xprts, | ||
| 691 | (unsigned long)serv); | ||
| 692 | mod_timer(&serv->sv_temptimer, | ||
| 693 | jiffies + svc_conn_age_period * HZ); | ||
| 694 | } | ||
| 695 | spin_unlock_bh(&serv->sv_lock); | ||
| 696 | svc_xprt_received(newxpt); | ||
| 697 | } | ||
| 698 | |||
| 699 | static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) | ||
| 700 | { | ||
| 701 | struct svc_serv *serv = rqstp->rq_server; | ||
| 702 | int len = 0; | ||
| 693 | 703 | ||
| 694 | len = 0; | ||
| 695 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 704 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
| 696 | dprintk("svc_recv: found XPT_CLOSE\n"); | 705 | dprintk("svc_recv: found XPT_CLOSE\n"); |
| 697 | svc_delete_xprt(xprt); | 706 | svc_delete_xprt(xprt); |
| 698 | /* Leave XPT_BUSY set on the dead xprt: */ | 707 | /* Leave XPT_BUSY set on the dead xprt: */ |
| 699 | goto out; | 708 | return 0; |
| 700 | } | 709 | } |
| 701 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | 710 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { |
| 702 | struct svc_xprt *newxpt; | 711 | struct svc_xprt *newxpt; |
| 712 | /* | ||
| 713 | * We know this module_get will succeed because the | ||
| 714 | * listener holds a reference too | ||
| 715 | */ | ||
| 716 | __module_get(xprt->xpt_class->xcl_owner); | ||
| 717 | svc_check_conn_limits(xprt->xpt_server); | ||
| 703 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 718 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
| 704 | if (newxpt) { | 719 | if (newxpt) |
| 705 | /* | 720 | svc_add_new_temp_xprt(serv, newxpt); |
| 706 | * We know this module_get will succeed because the | ||
| 707 | * listener holds a reference too | ||
| 708 | */ | ||
| 709 | __module_get(newxpt->xpt_class->xcl_owner); | ||
| 710 | svc_check_conn_limits(xprt->xpt_server); | ||
| 711 | spin_lock_bh(&serv->sv_lock); | ||
| 712 | set_bit(XPT_TEMP, &newxpt->xpt_flags); | ||
| 713 | list_add(&newxpt->xpt_list, &serv->sv_tempsocks); | ||
| 714 | serv->sv_tmpcnt++; | ||
| 715 | if (serv->sv_temptimer.function == NULL) { | ||
| 716 | /* setup timer to age temp transports */ | ||
| 717 | setup_timer(&serv->sv_temptimer, | ||
| 718 | svc_age_temp_xprts, | ||
| 719 | (unsigned long)serv); | ||
| 720 | mod_timer(&serv->sv_temptimer, | ||
| 721 | jiffies + svc_conn_age_period * HZ); | ||
| 722 | } | ||
| 723 | spin_unlock_bh(&serv->sv_lock); | ||
| 724 | svc_xprt_received(newxpt); | ||
| 725 | } | ||
| 726 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { | 721 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { |
| 722 | /* XPT_DATA|XPT_DEFERRED case: */ | ||
| 727 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 723 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
| 728 | rqstp, pool->sp_id, xprt, | 724 | rqstp, rqstp->rq_pool->sp_id, xprt, |
| 729 | atomic_read(&xprt->xpt_ref.refcount)); | 725 | atomic_read(&xprt->xpt_ref.refcount)); |
| 730 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); | 726 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); |
| 731 | if (rqstp->rq_deferred) | 727 | if (rqstp->rq_deferred) |
| @@ -736,10 +732,51 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 736 | rqstp->rq_reserved = serv->sv_max_mesg; | 732 | rqstp->rq_reserved = serv->sv_max_mesg; |
| 737 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 733 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| 738 | } | 734 | } |
| 735 | /* clear XPT_BUSY: */ | ||
| 739 | svc_xprt_received(xprt); | 736 | svc_xprt_received(xprt); |
| 737 | return len; | ||
| 738 | } | ||
| 739 | |||
| 740 | /* | ||
| 741 | * Receive the next request on any transport. This code is carefully | ||
| 742 | * organised not to touch any cachelines in the shared svc_serv | ||
| 743 | * structure, only cachelines in the local svc_pool. | ||
| 744 | */ | ||
| 745 | int svc_recv(struct svc_rqst *rqstp, long timeout) | ||
| 746 | { | ||
| 747 | struct svc_xprt *xprt = NULL; | ||
| 748 | struct svc_serv *serv = rqstp->rq_server; | ||
| 749 | int len, err; | ||
| 750 | |||
| 751 | dprintk("svc: server %p waiting for data (to = %ld)\n", | ||
| 752 | rqstp, timeout); | ||
| 753 | |||
| 754 | if (rqstp->rq_xprt) | ||
| 755 | printk(KERN_ERR | ||
| 756 | "svc_recv: service %p, transport not NULL!\n", | ||
| 757 | rqstp); | ||
| 758 | if (waitqueue_active(&rqstp->rq_wait)) | ||
| 759 | printk(KERN_ERR | ||
| 760 | "svc_recv: service %p, wait queue active!\n", | ||
| 761 | rqstp); | ||
| 762 | |||
| 763 | err = svc_alloc_arg(rqstp); | ||
| 764 | if (err) | ||
| 765 | return err; | ||
| 766 | |||
| 767 | try_to_freeze(); | ||
| 768 | cond_resched(); | ||
| 769 | if (signalled() || kthread_should_stop()) | ||
| 770 | return -EINTR; | ||
| 771 | |||
| 772 | xprt = svc_get_next_xprt(rqstp, timeout); | ||
| 773 | if (IS_ERR(xprt)) | ||
| 774 | return PTR_ERR(xprt); | ||
| 775 | |||
| 776 | len = svc_handle_xprt(rqstp, xprt); | ||
| 740 | 777 | ||
| 741 | /* No data, incomplete (TCP) read, or accept() */ | 778 | /* No data, incomplete (TCP) read, or accept() */ |
| 742 | if (len == 0 || len == -EAGAIN) | 779 | if (len <= 0) |
| 743 | goto out; | 780 | goto out; |
| 744 | 781 | ||
| 745 | clear_bit(XPT_OLD, &xprt->xpt_flags); | 782 | clear_bit(XPT_OLD, &xprt->xpt_flags); |
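In the reorganised receive path above, svc_recv() is reduced to composing svc_alloc_arg(), svc_get_next_xprt() and svc_handle_xprt(), and the wait helper now signals failure through the returned pointer itself (ERR_PTR(-EINTR) / ERR_PTR(-EAGAIN)) rather than a plain int. Below is a simplified, self-contained userspace take on that pointer-encoded error convention; it mimics the usual trick of reserving the top 4095 pointer values for errno codes, but it is a sketch, not the kernel's <linux/err.h> implementation.

/* Simplified userspace version of the ERR_PTR()/IS_ERR()/PTR_ERR()
 * convention that the reworked svc_get_next_xprt() uses to report
 * -EINTR/-EAGAIN through its return value. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* errno codes occupy the last page of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct xprt { int id; };

/* Stand-in for svc_get_next_xprt(): either hand back work or fail. */
static struct xprt *get_next_xprt(int interrupted)
{
	static struct xprt ready = { .id = 1 };

	if (interrupted)
		return ERR_PTR(-EINTR);
	return &ready;
}

int main(void)
{
	struct xprt *xprt = get_next_xprt(1);

	if (IS_ERR(xprt))
		printf("no transport, error %ld\n", PTR_ERR(xprt));

	xprt = get_next_xprt(0);
	if (!IS_ERR(xprt))
		printf("got transport %d\n", xprt->id);
	return 0;
}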
| @@ -917,16 +954,18 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
| 917 | } | 954 | } |
| 918 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 955 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
| 919 | 956 | ||
| 920 | static void svc_close_list(struct list_head *xprt_list, struct net *net) | 957 | static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) |
| 921 | { | 958 | { |
| 922 | struct svc_xprt *xprt; | 959 | struct svc_xprt *xprt; |
| 923 | 960 | ||
| 961 | spin_lock(&serv->sv_lock); | ||
| 924 | list_for_each_entry(xprt, xprt_list, xpt_list) { | 962 | list_for_each_entry(xprt, xprt_list, xpt_list) { |
| 925 | if (xprt->xpt_net != net) | 963 | if (xprt->xpt_net != net) |
| 926 | continue; | 964 | continue; |
| 927 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 965 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
| 928 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 966 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
| 929 | } | 967 | } |
| 968 | spin_unlock(&serv->sv_lock); | ||
| 930 | } | 969 | } |
| 931 | 970 | ||
| 932 | static void svc_clear_pools(struct svc_serv *serv, struct net *net) | 971 | static void svc_clear_pools(struct svc_serv *serv, struct net *net) |
| @@ -949,24 +988,28 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net) | |||
| 949 | } | 988 | } |
| 950 | } | 989 | } |
| 951 | 990 | ||
| 952 | static void svc_clear_list(struct list_head *xprt_list, struct net *net) | 991 | static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) |
| 953 | { | 992 | { |
| 954 | struct svc_xprt *xprt; | 993 | struct svc_xprt *xprt; |
| 955 | struct svc_xprt *tmp; | 994 | struct svc_xprt *tmp; |
| 995 | LIST_HEAD(victims); | ||
| 956 | 996 | ||
| 997 | spin_lock(&serv->sv_lock); | ||
| 957 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 998 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { |
| 958 | if (xprt->xpt_net != net) | 999 | if (xprt->xpt_net != net) |
| 959 | continue; | 1000 | continue; |
| 960 | svc_delete_xprt(xprt); | 1001 | list_move(&xprt->xpt_list, &victims); |
| 961 | } | 1002 | } |
| 962 | list_for_each_entry(xprt, xprt_list, xpt_list) | 1003 | spin_unlock(&serv->sv_lock); |
| 963 | BUG_ON(xprt->xpt_net == net); | 1004 | |
| 1005 | list_for_each_entry_safe(xprt, tmp, &victims, xpt_list) | ||
| 1006 | svc_delete_xprt(xprt); | ||
| 964 | } | 1007 | } |
| 965 | 1008 | ||
| 966 | void svc_close_net(struct svc_serv *serv, struct net *net) | 1009 | void svc_close_net(struct svc_serv *serv, struct net *net) |
| 967 | { | 1010 | { |
| 968 | svc_close_list(&serv->sv_tempsocks, net); | 1011 | svc_close_list(serv, &serv->sv_tempsocks, net); |
| 969 | svc_close_list(&serv->sv_permsocks, net); | 1012 | svc_close_list(serv, &serv->sv_permsocks, net); |
| 970 | 1013 | ||
| 971 | svc_clear_pools(serv, net); | 1014 | svc_clear_pools(serv, net); |
| 972 | /* | 1015 | /* |
| @@ -974,8 +1017,8 @@ void svc_close_net(struct svc_serv *serv, struct net *net) | |||
| 974 | * svc_xprt_enqueue will not add new entries without taking the | 1017 | * svc_xprt_enqueue will not add new entries without taking the |
| 975 | * sp_lock and checking XPT_BUSY. | 1018 | * sp_lock and checking XPT_BUSY. |
| 976 | */ | 1019 | */ |
| 977 | svc_clear_list(&serv->sv_tempsocks, net); | 1020 | svc_clear_list(serv, &serv->sv_tempsocks, net); |
| 978 | svc_clear_list(&serv->sv_permsocks, net); | 1021 | svc_clear_list(serv, &serv->sv_permsocks, net); |
| 979 | } | 1022 | } |
| 980 | 1023 | ||
| 981 | /* | 1024 | /* |
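svc_clear_list() above now takes serv->sv_lock, moves the matching transports onto a private victims list, drops the lock, and only then runs svc_delete_xprt() on each one, keeping the teardown work outside the spinlock. Here is a minimal userspace sketch of that collect-under-lock / destroy-outside-lock shape, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel's spinlock and list_head.

/* Collect under the lock, tear down outside it, as in the reworked
 * svc_clear_list().  teardown() stands in for svc_delete_xprt(). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int net;		/* which namespace this item belongs to */
	struct item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *items;	/* protected by "lock" */

static void teardown(struct item *it)
{
	/* may block or take other locks, so runs without "lock" held */
	printf("deleting item for net %d\n", it->net);
	free(it);
}

static void clear_list(int net)
{
	struct item *victims = NULL, **pp, *it;

	pthread_mutex_lock(&lock);
	pp = &items;
	while ((it = *pp) != NULL) {
		if (it->net != net) {
			pp = &it->next;
			continue;
		}
		*pp = it->next;		/* unlink under the lock ... */
		it->next = victims;	/* ... and park on a private list */
		victims = it;
	}
	pthread_mutex_unlock(&lock);

	while ((it = victims) != NULL) {	/* heavy work, lock dropped */
		victims = it->next;
		teardown(it);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct item *it = malloc(sizeof(*it));

		it->net = i % 2;
		it->next = items;
		items = it;
	}
	clear_list(1);
	return 0;
}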
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 998aa8c1807c..03827cef1fa7 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -59,7 +59,7 @@ | |||
| 59 | 59 | ||
| 60 | 60 | ||
| 61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, | 61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, |
| 62 | int *errp, int flags); | 62 | int flags); |
| 63 | static void svc_udp_data_ready(struct sock *, int); | 63 | static void svc_udp_data_ready(struct sock *, int); |
| 64 | static int svc_udp_recvfrom(struct svc_rqst *); | 64 | static int svc_udp_recvfrom(struct svc_rqst *); |
| 65 | static int svc_udp_sendto(struct svc_rqst *); | 65 | static int svc_udp_sendto(struct svc_rqst *); |
| @@ -305,57 +305,6 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) | |||
| 305 | return len; | 305 | return len; |
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | /** | ||
| 309 | * svc_sock_names - construct a list of listener names in a string | ||
| 310 | * @serv: pointer to RPC service | ||
| 311 | * @buf: pointer to a buffer to fill in with socket names | ||
| 312 | * @buflen: size of the buffer to be filled | ||
| 313 | * @toclose: pointer to '\0'-terminated C string containing the name | ||
| 314 | * of a listener to be closed | ||
| 315 | * | ||
| 316 | * Fills in @buf with a '\n'-separated list of names of listener | ||
| 317 | * sockets. If @toclose is not NULL, the socket named by @toclose | ||
| 318 | * is closed, and is not included in the output list. | ||
| 319 | * | ||
| 320 | * Returns positive length of the socket name string, or a negative | ||
| 321 | * errno value on error. | ||
| 322 | */ | ||
| 323 | int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, | ||
| 324 | const char *toclose) | ||
| 325 | { | ||
| 326 | struct svc_sock *svsk, *closesk = NULL; | ||
| 327 | int len = 0; | ||
| 328 | |||
| 329 | if (!serv) | ||
| 330 | return 0; | ||
| 331 | |||
| 332 | spin_lock_bh(&serv->sv_lock); | ||
| 333 | list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) { | ||
| 334 | int onelen = svc_one_sock_name(svsk, buf + len, buflen - len); | ||
| 335 | if (onelen < 0) { | ||
| 336 | len = onelen; | ||
| 337 | break; | ||
| 338 | } | ||
| 339 | if (toclose && strcmp(toclose, buf + len) == 0) { | ||
| 340 | closesk = svsk; | ||
| 341 | svc_xprt_get(&closesk->sk_xprt); | ||
| 342 | } else | ||
| 343 | len += onelen; | ||
| 344 | } | ||
| 345 | spin_unlock_bh(&serv->sv_lock); | ||
| 346 | |||
| 347 | if (closesk) { | ||
| 348 | /* Should unregister with portmap, but you cannot | ||
| 349 | * unregister just one protocol... | ||
| 350 | */ | ||
| 351 | svc_close_xprt(&closesk->sk_xprt); | ||
| 352 | svc_xprt_put(&closesk->sk_xprt); | ||
| 353 | } else if (toclose) | ||
| 354 | return -ENOENT; | ||
| 355 | return len; | ||
| 356 | } | ||
| 357 | EXPORT_SYMBOL_GPL(svc_sock_names); | ||
| 358 | |||
| 359 | /* | 308 | /* |
| 360 | * Check input queue length | 309 | * Check input queue length |
| 361 | */ | 310 | */ |
| @@ -598,11 +547,9 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
| 598 | dprintk("svc: recvfrom returned error %d\n", -err); | 547 | dprintk("svc: recvfrom returned error %d\n", -err); |
| 599 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 548 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
| 600 | } | 549 | } |
| 601 | return -EAGAIN; | 550 | return 0; |
| 602 | } | 551 | } |
| 603 | len = svc_addr_len(svc_addr(rqstp)); | 552 | len = svc_addr_len(svc_addr(rqstp)); |
| 604 | if (len == 0) | ||
| 605 | return -EAFNOSUPPORT; | ||
| 606 | rqstp->rq_addrlen = len; | 553 | rqstp->rq_addrlen = len; |
| 607 | if (skb->tstamp.tv64 == 0) { | 554 | if (skb->tstamp.tv64 == 0) { |
| 608 | skb->tstamp = ktime_get_real(); | 555 | skb->tstamp = ktime_get_real(); |
| @@ -620,10 +567,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
| 620 | if (!svc_udp_get_dest_address(rqstp, cmh)) { | 567 | if (!svc_udp_get_dest_address(rqstp, cmh)) { |
| 621 | net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", | 568 | net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", |
| 622 | cmh->cmsg_level, cmh->cmsg_type); | 569 | cmh->cmsg_level, cmh->cmsg_type); |
| 623 | out_free: | 570 | goto out_free; |
| 624 | trace_kfree_skb(skb, svc_udp_recvfrom); | ||
| 625 | skb_free_datagram_locked(svsk->sk_sk, skb); | ||
| 626 | return 0; | ||
| 627 | } | 571 | } |
| 628 | rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); | 572 | rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp)); |
| 629 | 573 | ||
| @@ -662,6 +606,10 @@ out_free: | |||
| 662 | serv->sv_stats->netudpcnt++; | 606 | serv->sv_stats->netudpcnt++; |
| 663 | 607 | ||
| 664 | return len; | 608 | return len; |
| 609 | out_free: | ||
| 610 | trace_kfree_skb(skb, svc_udp_recvfrom); | ||
| 611 | skb_free_datagram_locked(svsk->sk_sk, skb); | ||
| 612 | return 0; | ||
| 665 | } | 613 | } |
| 666 | 614 | ||
| 667 | static int | 615 | static int |
| @@ -900,8 +848,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) | |||
| 900 | */ | 848 | */ |
| 901 | newsock->sk->sk_sndtimeo = HZ*30; | 849 | newsock->sk->sk_sndtimeo = HZ*30; |
| 902 | 850 | ||
| 903 | if (!(newsvsk = svc_setup_socket(serv, newsock, &err, | 851 | newsvsk = svc_setup_socket(serv, newsock, |
| 904 | (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)))) | 852 | (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)); |
| 853 | if (IS_ERR(newsvsk)) | ||
| 905 | goto failed; | 854 | goto failed; |
| 906 | svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); | 855 | svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); |
| 907 | err = kernel_getsockname(newsock, sin, &slen); | 856 | err = kernel_getsockname(newsock, sin, &slen); |
| @@ -1174,13 +1123,13 @@ error: | |||
| 1174 | if (len != -EAGAIN) | 1123 | if (len != -EAGAIN) |
| 1175 | goto err_other; | 1124 | goto err_other; |
| 1176 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | 1125 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); |
| 1177 | return -EAGAIN; | 1126 | return 0; |
| 1178 | err_other: | 1127 | err_other: |
| 1179 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | 1128 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", |
| 1180 | svsk->sk_xprt.xpt_server->sv_name, -len); | 1129 | svsk->sk_xprt.xpt_server->sv_name, -len); |
| 1181 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 1130 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
| 1182 | err_noclose: | 1131 | err_noclose: |
| 1183 | return -EAGAIN; /* record not complete */ | 1132 | return 0; /* record not complete */ |
| 1184 | } | 1133 | } |
| 1185 | 1134 | ||
| 1186 | /* | 1135 | /* |
| @@ -1383,29 +1332,29 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs); | |||
| 1383 | */ | 1332 | */ |
| 1384 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | 1333 | static struct svc_sock *svc_setup_socket(struct svc_serv *serv, |
| 1385 | struct socket *sock, | 1334 | struct socket *sock, |
| 1386 | int *errp, int flags) | 1335 | int flags) |
| 1387 | { | 1336 | { |
| 1388 | struct svc_sock *svsk; | 1337 | struct svc_sock *svsk; |
| 1389 | struct sock *inet; | 1338 | struct sock *inet; |
| 1390 | int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); | 1339 | int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); |
| 1340 | int err = 0; | ||
| 1391 | 1341 | ||
| 1392 | dprintk("svc: svc_setup_socket %p\n", sock); | 1342 | dprintk("svc: svc_setup_socket %p\n", sock); |
| 1393 | if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { | 1343 | svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); |
| 1394 | *errp = -ENOMEM; | 1344 | if (!svsk) |
| 1395 | return NULL; | 1345 | return ERR_PTR(-ENOMEM); |
| 1396 | } | ||
| 1397 | 1346 | ||
| 1398 | inet = sock->sk; | 1347 | inet = sock->sk; |
| 1399 | 1348 | ||
| 1400 | /* Register socket with portmapper */ | 1349 | /* Register socket with portmapper */ |
| 1401 | if (*errp >= 0 && pmap_register) | 1350 | if (pmap_register) |
| 1402 | *errp = svc_register(serv, sock_net(sock->sk), inet->sk_family, | 1351 | err = svc_register(serv, sock_net(sock->sk), inet->sk_family, |
| 1403 | inet->sk_protocol, | 1352 | inet->sk_protocol, |
| 1404 | ntohs(inet_sk(inet)->inet_sport)); | 1353 | ntohs(inet_sk(inet)->inet_sport)); |
| 1405 | 1354 | ||
| 1406 | if (*errp < 0) { | 1355 | if (err < 0) { |
| 1407 | kfree(svsk); | 1356 | kfree(svsk); |
| 1408 | return NULL; | 1357 | return ERR_PTR(err); |
| 1409 | } | 1358 | } |
| 1410 | 1359 | ||
| 1411 | inet->sk_user_data = svsk; | 1360 | inet->sk_user_data = svsk; |
| @@ -1450,42 +1399,38 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, | |||
| 1450 | int err = 0; | 1399 | int err = 0; |
| 1451 | struct socket *so = sockfd_lookup(fd, &err); | 1400 | struct socket *so = sockfd_lookup(fd, &err); |
| 1452 | struct svc_sock *svsk = NULL; | 1401 | struct svc_sock *svsk = NULL; |
| 1402 | struct sockaddr_storage addr; | ||
| 1403 | struct sockaddr *sin = (struct sockaddr *)&addr; | ||
| 1404 | int salen; | ||
| 1453 | 1405 | ||
| 1454 | if (!so) | 1406 | if (!so) |
| 1455 | return err; | 1407 | return err; |
| 1408 | err = -EAFNOSUPPORT; | ||
| 1456 | if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) | 1409 | if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) |
| 1457 | err = -EAFNOSUPPORT; | 1410 | goto out; |
| 1458 | else if (so->sk->sk_protocol != IPPROTO_TCP && | 1411 | err = -EPROTONOSUPPORT; |
| 1412 | if (so->sk->sk_protocol != IPPROTO_TCP && | ||
| 1459 | so->sk->sk_protocol != IPPROTO_UDP) | 1413 | so->sk->sk_protocol != IPPROTO_UDP) |
| 1460 | err = -EPROTONOSUPPORT; | 1414 | goto out; |
| 1461 | else if (so->state > SS_UNCONNECTED) | 1415 | err = -EISCONN; |
| 1462 | err = -EISCONN; | 1416 | if (so->state > SS_UNCONNECTED) |
| 1463 | else { | 1417 | goto out; |
| 1464 | if (!try_module_get(THIS_MODULE)) | 1418 | err = -ENOENT; |
| 1465 | err = -ENOENT; | 1419 | if (!try_module_get(THIS_MODULE)) |
| 1466 | else | 1420 | goto out; |
| 1467 | svsk = svc_setup_socket(serv, so, &err, | 1421 | svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS); |
| 1468 | SVC_SOCK_DEFAULTS); | 1422 | if (IS_ERR(svsk)) { |
| 1469 | if (svsk) { | 1423 | module_put(THIS_MODULE); |
| 1470 | struct sockaddr_storage addr; | 1424 | err = PTR_ERR(svsk); |
| 1471 | struct sockaddr *sin = (struct sockaddr *)&addr; | 1425 | goto out; |
| 1472 | int salen; | ||
| 1473 | if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) | ||
| 1474 | svc_xprt_set_local(&svsk->sk_xprt, sin, salen); | ||
| 1475 | clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags); | ||
| 1476 | spin_lock_bh(&serv->sv_lock); | ||
| 1477 | list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks); | ||
| 1478 | spin_unlock_bh(&serv->sv_lock); | ||
| 1479 | svc_xprt_received(&svsk->sk_xprt); | ||
| 1480 | err = 0; | ||
| 1481 | } else | ||
| 1482 | module_put(THIS_MODULE); | ||
| 1483 | } | ||
| 1484 | if (err) { | ||
| 1485 | sockfd_put(so); | ||
| 1486 | return err; | ||
| 1487 | } | 1426 | } |
| 1427 | if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) | ||
| 1428 | svc_xprt_set_local(&svsk->sk_xprt, sin, salen); | ||
| 1429 | svc_add_new_perm_xprt(serv, &svsk->sk_xprt); | ||
| 1488 | return svc_one_sock_name(svsk, name_return, len); | 1430 | return svc_one_sock_name(svsk, name_return, len); |
| 1431 | out: | ||
| 1432 | sockfd_put(so); | ||
| 1433 | return err; | ||
| 1489 | } | 1434 | } |
| 1490 | EXPORT_SYMBOL_GPL(svc_addsock); | 1435 | EXPORT_SYMBOL_GPL(svc_addsock); |
| 1491 | 1436 | ||
| @@ -1563,11 +1508,13 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, | |||
| 1563 | goto bummer; | 1508 | goto bummer; |
| 1564 | } | 1509 | } |
| 1565 | 1510 | ||
| 1566 | if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { | 1511 | svsk = svc_setup_socket(serv, sock, flags); |
| 1567 | svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); | 1512 | if (IS_ERR(svsk)) { |
| 1568 | return (struct svc_xprt *)svsk; | 1513 | error = PTR_ERR(svsk); |
| 1514 | goto bummer; | ||
| 1569 | } | 1515 | } |
| 1570 | 1516 | svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); | |
| 1517 | return (struct svc_xprt *)svsk; | ||
| 1571 | bummer: | 1518 | bummer: |
| 1572 | dprintk("svc: svc_create_socket error = %d\n", -error); | 1519 | dprintk("svc: svc_create_socket error = %d\n", -error); |
| 1573 | sock_release(sock); | 1520 | sock_release(sock); |
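Two related cleanups run through the svcsock.c hunks above: svc_setup_socket() reports failure via ERR_PTR() instead of an int out-parameter, and svc_addsock() is flattened into the familiar "set err, goto out" single-exit style so the socket reference is dropped on every failure path. A small standalone C sketch of that unwind style follows; the resource and names are illustrative only, with a FILE* standing in for the sockfd_lookup()/sockfd_put() reference.

/* "Set err, goto out" single-exit error handling, as in the reworked
 * svc_addsock(). */
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int add_socket(const char *path, int family, int protocol)
{
	int err;
	FILE *f = fopen(path, "r");	/* take the reference */

	if (!f)
		return -errno;

	err = -EAFNOSUPPORT;
	if (family != AF_INET && family != AF_INET6)
		goto out;
	err = -EPROTONOSUPPORT;
	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
		goto out;

	printf("registered %s\n", path);
	fclose(f);	/* in the real code the new svc_sock keeps the ref */
	return 0;
out:
	fclose(f);	/* every error path drops the reference here */
	return err;
}

int main(void)
{
	/* wrong protocol: the single "out" label still releases the file */
	printf("rc = %d\n", add_socket("/etc/hostname", AF_INET, IPPROTO_ICMP));
	return 0;
}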
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 0afba1b4b656..08f50afd5f2a 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -730,19 +730,24 @@ static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) | |||
| 730 | 730 | ||
| 731 | if (xdr->nwords == 0) | 731 | if (xdr->nwords == 0) |
| 732 | return 0; | 732 | return 0; |
| 733 | if (nwords > xdr->nwords) { | ||
| 734 | nwords = xdr->nwords; | ||
| 735 | len = nwords << 2; | ||
| 736 | } | ||
| 737 | /* Realign pages to current pointer position */ | 733 | /* Realign pages to current pointer position */ |
| 738 | iov = buf->head; | 734 | iov = buf->head; |
| 739 | if (iov->iov_len > cur) | 735 | if (iov->iov_len > cur) { |
| 740 | xdr_shrink_bufhead(buf, iov->iov_len - cur); | 736 | xdr_shrink_bufhead(buf, iov->iov_len - cur); |
| 737 | xdr->nwords = XDR_QUADLEN(buf->len - cur); | ||
| 738 | } | ||
| 741 | 739 | ||
| 742 | /* Truncate page data and move it into the tail */ | 740 | if (nwords > xdr->nwords) { |
| 743 | if (buf->page_len > len) | 741 | nwords = xdr->nwords; |
| 742 | len = nwords << 2; | ||
| 743 | } | ||
| 744 | if (buf->page_len <= len) | ||
| 745 | len = buf->page_len; | ||
| 746 | else if (nwords < xdr->nwords) { | ||
| 747 | /* Truncate page data and move it into the tail */ | ||
| 744 | xdr_shrink_pagelen(buf, buf->page_len - len); | 748 | xdr_shrink_pagelen(buf, buf->page_len - len); |
| 745 | xdr->nwords = XDR_QUADLEN(buf->len - cur); | 749 | xdr->nwords = XDR_QUADLEN(buf->len - cur); |
| 750 | } | ||
| 746 | return len; | 751 | return len; |
| 747 | } | 752 | } |
| 748 | 753 | ||
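The xdr_align_pages() rework above recomputes xdr->nwords as XDR_QUADLEN(buf->len - cur) immediately after shrinking the head, and only then decides how much page data to keep or truncate. A short worked example of that quad-word arithmetic, assuming XDR_QUADLEN keeps its usual round-up-to-4-bytes definition:

/* XDR lengths are counted in 4-byte words; XDR_QUADLEN rounds a byte
 * count up to whole words.  The definition below is assumed to match
 * the kernel's ((l) + 3) >> 2. */
#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int buf_len = 4100;	/* total reply bytes */
	unsigned int cur = 122;		/* bytes already consumed */

	/* after shrinking the head, remaining data is re-measured in words */
	unsigned int nwords = XDR_QUADLEN(buf_len - cur);

	printf("%u bytes left -> %u XDR words (%u padded bytes)\n",
	       buf_len - cur, nwords, nwords << 2);
	return 0;
}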
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5d7f61d7559c..bd462a532acf 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -231,7 +231,7 @@ EXPORT_SYMBOL_GPL(xprt_reserve_xprt); | |||
| 231 | static void xprt_clear_locked(struct rpc_xprt *xprt) | 231 | static void xprt_clear_locked(struct rpc_xprt *xprt) |
| 232 | { | 232 | { |
| 233 | xprt->snd_task = NULL; | 233 | xprt->snd_task = NULL; |
| 234 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { | 234 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { |
| 235 | smp_mb__before_clear_bit(); | 235 | smp_mb__before_clear_bit(); |
| 236 | clear_bit(XPRT_LOCKED, &xprt->state); | 236 | clear_bit(XPRT_LOCKED, &xprt->state); |
| 237 | smp_mb__after_clear_bit(); | 237 | smp_mb__after_clear_bit(); |
| @@ -504,9 +504,6 @@ EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); | |||
| 504 | */ | 504 | */ |
| 505 | void xprt_write_space(struct rpc_xprt *xprt) | 505 | void xprt_write_space(struct rpc_xprt *xprt) |
| 506 | { | 506 | { |
| 507 | if (unlikely(xprt->shutdown)) | ||
| 508 | return; | ||
| 509 | |||
| 510 | spin_lock_bh(&xprt->transport_lock); | 507 | spin_lock_bh(&xprt->transport_lock); |
| 511 | if (xprt->snd_task) { | 508 | if (xprt->snd_task) { |
| 512 | dprintk("RPC: write space: waking waiting task on " | 509 | dprintk("RPC: write space: waking waiting task on " |
| @@ -679,7 +676,7 @@ xprt_init_autodisconnect(unsigned long data) | |||
| 679 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; | 676 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; |
| 680 | 677 | ||
| 681 | spin_lock(&xprt->transport_lock); | 678 | spin_lock(&xprt->transport_lock); |
| 682 | if (!list_empty(&xprt->recv) || xprt->shutdown) | 679 | if (!list_empty(&xprt->recv)) |
| 683 | goto out_abort; | 680 | goto out_abort; |
| 684 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 681 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
| 685 | goto out_abort; | 682 | goto out_abort; |
| @@ -1262,7 +1259,6 @@ out: | |||
| 1262 | static void xprt_destroy(struct rpc_xprt *xprt) | 1259 | static void xprt_destroy(struct rpc_xprt *xprt) |
| 1263 | { | 1260 | { |
| 1264 | dprintk("RPC: destroying transport %p\n", xprt); | 1261 | dprintk("RPC: destroying transport %p\n", xprt); |
| 1265 | xprt->shutdown = 1; | ||
| 1266 | del_timer_sync(&xprt->timer); | 1262 | del_timer_sync(&xprt->timer); |
| 1267 | 1263 | ||
| 1268 | rpc_destroy_wait_queue(&xprt->binding); | 1264 | rpc_destroy_wait_queue(&xprt->binding); |
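With the xprt->shutdown flag gone from the hunks above, xprt_init_autodisconnect() is left relying on the non-empty recv list check and the test_and_set_bit(XPRT_LOCKED, ...) try-lock to decide whether it may queue the disconnect work. The sketch below shows the same try-lock-or-bail shape with a C11 atomic_flag; it is a userspace analogue, not the transport locking code itself.

/* Try-lock-or-bail, as in xprt_init_autodisconnect(): the timer only
 * proceeds if it can grab the lock bit in one atomic step. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag xprt_locked = ATOMIC_FLAG_INIT;

/* returns true if we got the lock (the flag was previously clear) */
static bool try_lock_xprt(void)
{
	return !atomic_flag_test_and_set(&xprt_locked);
}

static void autodisconnect_timer(void)
{
	if (!try_lock_xprt()) {
		printf("transport busy, skip autodisconnect\n");
		return;
	}
	printf("idle, queueing disconnect work\n");
	atomic_flag_clear(&xprt_locked);	/* analogue of xprt_clear_locked() */
}

int main(void)
{
	autodisconnect_timer();			/* gets the lock */
	atomic_flag_test_and_set(&xprt_locked);	/* someone else holds it */
	autodisconnect_timer();			/* bails out */
	return 0;
}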
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 73b428bef598..62e4f9bcc387 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
| @@ -578,10 +578,6 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird) | |||
| 578 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); | 578 | list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); |
| 579 | spin_unlock_bh(&listen_xprt->sc_lock); | 579 | spin_unlock_bh(&listen_xprt->sc_lock); |
| 580 | 580 | ||
| 581 | /* | ||
| 582 | * Can't use svc_xprt_received here because we are not on a | ||
| 583 | * rqstp thread | ||
| 584 | */ | ||
| 585 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); | 581 | set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); |
| 586 | svc_xprt_enqueue(&listen_xprt->sc_xprt); | 582 | svc_xprt_enqueue(&listen_xprt->sc_xprt); |
| 587 | } | 583 | } |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 5d9202dc7cb1..c9aa7a35f3bf 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -199,21 +199,15 @@ xprt_rdma_connect_worker(struct work_struct *work) | |||
| 199 | struct rpc_xprt *xprt = &r_xprt->xprt; | 199 | struct rpc_xprt *xprt = &r_xprt->xprt; |
| 200 | int rc = 0; | 200 | int rc = 0; |
| 201 | 201 | ||
| 202 | if (!xprt->shutdown) { | 202 | current->flags |= PF_FSTRANS; |
| 203 | current->flags |= PF_FSTRANS; | 203 | xprt_clear_connected(xprt); |
| 204 | xprt_clear_connected(xprt); | 204 | |
| 205 | 205 | dprintk("RPC: %s: %sconnect\n", __func__, | |
| 206 | dprintk("RPC: %s: %sconnect\n", __func__, | 206 | r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); |
| 207 | r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); | 207 | rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); |
| 208 | rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); | 208 | if (rc) |
| 209 | if (rc) | 209 | xprt_wake_pending_tasks(xprt, rc); |
| 210 | goto out; | ||
| 211 | } | ||
| 212 | goto out_clear; | ||
| 213 | 210 | ||
| 214 | out: | ||
| 215 | xprt_wake_pending_tasks(xprt, rc); | ||
| 216 | out_clear: | ||
| 217 | dprintk("RPC: %s: exit\n", __func__); | 211 | dprintk("RPC: %s: exit\n", __func__); |
| 218 | xprt_clear_connecting(xprt); | 212 | xprt_clear_connecting(xprt); |
| 219 | current->flags &= ~PF_FSTRANS; | 213 | current->flags &= ~PF_FSTRANS; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a35b8e52e551..75853cabf4c9 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -254,7 +254,6 @@ struct sock_xprt { | |||
| 254 | void (*old_data_ready)(struct sock *, int); | 254 | void (*old_data_ready)(struct sock *, int); |
| 255 | void (*old_state_change)(struct sock *); | 255 | void (*old_state_change)(struct sock *); |
| 256 | void (*old_write_space)(struct sock *); | 256 | void (*old_write_space)(struct sock *); |
| 257 | void (*old_error_report)(struct sock *); | ||
| 258 | }; | 257 | }; |
| 259 | 258 | ||
| 260 | /* | 259 | /* |
| @@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
| 737 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | 736 | dprintk("RPC: sendmsg returned unrecognized error %d\n", |
| 738 | -status); | 737 | -status); |
| 739 | case -ECONNRESET: | 738 | case -ECONNRESET: |
| 740 | case -EPIPE: | ||
| 741 | xs_tcp_shutdown(xprt); | 739 | xs_tcp_shutdown(xprt); |
| 742 | case -ECONNREFUSED: | 740 | case -ECONNREFUSED: |
| 743 | case -ENOTCONN: | 741 | case -ENOTCONN: |
| 742 | case -EPIPE: | ||
| 744 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 743 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); |
| 745 | } | 744 | } |
| 746 | 745 | ||
| @@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) | |||
| 781 | transport->old_data_ready = sk->sk_data_ready; | 780 | transport->old_data_ready = sk->sk_data_ready; |
| 782 | transport->old_state_change = sk->sk_state_change; | 781 | transport->old_state_change = sk->sk_state_change; |
| 783 | transport->old_write_space = sk->sk_write_space; | 782 | transport->old_write_space = sk->sk_write_space; |
| 784 | transport->old_error_report = sk->sk_error_report; | ||
| 785 | } | 783 | } |
| 786 | 784 | ||
| 787 | static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) | 785 | static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) |
| @@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s | |||
| 789 | sk->sk_data_ready = transport->old_data_ready; | 787 | sk->sk_data_ready = transport->old_data_ready; |
| 790 | sk->sk_state_change = transport->old_state_change; | 788 | sk->sk_state_change = transport->old_state_change; |
| 791 | sk->sk_write_space = transport->old_write_space; | 789 | sk->sk_write_space = transport->old_write_space; |
| 792 | sk->sk_error_report = transport->old_error_report; | ||
| 793 | } | 790 | } |
| 794 | 791 | ||
| 795 | static void xs_reset_transport(struct sock_xprt *transport) | 792 | static void xs_reset_transport(struct sock_xprt *transport) |
| @@ -917,9 +914,6 @@ static void xs_local_data_ready(struct sock *sk, int len) | |||
| 917 | if (skb == NULL) | 914 | if (skb == NULL) |
| 918 | goto out; | 915 | goto out; |
| 919 | 916 | ||
| 920 | if (xprt->shutdown) | ||
| 921 | goto dropit; | ||
| 922 | |||
| 923 | repsize = skb->len - sizeof(rpc_fraghdr); | 917 | repsize = skb->len - sizeof(rpc_fraghdr); |
| 924 | if (repsize < 4) { | 918 | if (repsize < 4) { |
| 925 | dprintk("RPC: impossible RPC reply size %d\n", repsize); | 919 | dprintk("RPC: impossible RPC reply size %d\n", repsize); |
| @@ -981,9 +975,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
| 981 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) | 975 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) |
| 982 | goto out; | 976 | goto out; |
| 983 | 977 | ||
| 984 | if (xprt->shutdown) | ||
| 985 | goto dropit; | ||
| 986 | |||
| 987 | repsize = skb->len - sizeof(struct udphdr); | 978 | repsize = skb->len - sizeof(struct udphdr); |
| 988 | if (repsize < 4) { | 979 | if (repsize < 4) { |
| 989 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); | 980 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); |
| @@ -1025,6 +1016,16 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
| 1025 | read_unlock_bh(&sk->sk_callback_lock); | 1016 | read_unlock_bh(&sk->sk_callback_lock); |
| 1026 | } | 1017 | } |
| 1027 | 1018 | ||
| 1019 | /* | ||
| 1020 | * Helper function to force a TCP close if the server is sending | ||
| 1021 | * junk and/or it has put us in CLOSE_WAIT | ||
| 1022 | */ | ||
| 1023 | static void xs_tcp_force_close(struct rpc_xprt *xprt) | ||
| 1024 | { | ||
| 1025 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
| 1026 | xprt_force_disconnect(xprt); | ||
| 1027 | } | ||
| 1028 | |||
| 1028 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) | 1029 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
| 1029 | { | 1030 | { |
| 1030 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1031 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
| @@ -1051,7 +1052,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea | |||
| 1051 | /* Sanity check of the record length */ | 1052 | /* Sanity check of the record length */ |
| 1052 | if (unlikely(transport->tcp_reclen < 8)) { | 1053 | if (unlikely(transport->tcp_reclen < 8)) { |
| 1053 | dprintk("RPC: invalid TCP record fragment length\n"); | 1054 | dprintk("RPC: invalid TCP record fragment length\n"); |
| 1054 | xprt_force_disconnect(xprt); | 1055 | xs_tcp_force_close(xprt); |
| 1055 | return; | 1056 | return; |
| 1056 | } | 1057 | } |
| 1057 | dprintk("RPC: reading TCP record fragment of length %d\n", | 1058 | dprintk("RPC: reading TCP record fragment of length %d\n", |
| @@ -1132,7 +1133,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, | |||
| 1132 | break; | 1133 | break; |
| 1133 | default: | 1134 | default: |
| 1134 | dprintk("RPC: invalid request message type\n"); | 1135 | dprintk("RPC: invalid request message type\n"); |
| 1135 | xprt_force_disconnect(&transport->xprt); | 1136 | xs_tcp_force_close(&transport->xprt); |
| 1136 | } | 1137 | } |
| 1137 | xs_tcp_check_fraghdr(transport); | 1138 | xs_tcp_check_fraghdr(transport); |
| 1138 | } | 1139 | } |
| @@ -1402,9 +1403,6 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
| 1402 | read_lock_bh(&sk->sk_callback_lock); | 1403 | read_lock_bh(&sk->sk_callback_lock); |
| 1403 | if (!(xprt = xprt_from_sock(sk))) | 1404 | if (!(xprt = xprt_from_sock(sk))) |
| 1404 | goto out; | 1405 | goto out; |
| 1405 | if (xprt->shutdown) | ||
| 1406 | goto out; | ||
| 1407 | |||
| 1408 | /* Any data means we had a useful conversation, so | 1406 | * we don't need to delay the next reconnect |
| 1409 | * we don't need to delay the next reconnect | 1407 | */ |
| 1410 | */ | 1408 | */ |
| @@ -1452,12 +1450,19 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) | |||
| 1452 | xprt_clear_connecting(xprt); | 1450 | xprt_clear_connecting(xprt); |
| 1453 | } | 1451 | } |
| 1454 | 1452 | ||
| 1455 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) | 1453 | static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) |
| 1456 | { | 1454 | { |
| 1457 | smp_mb__before_clear_bit(); | 1455 | smp_mb__before_clear_bit(); |
| 1456 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | ||
| 1457 | clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
| 1458 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 1458 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
| 1459 | clear_bit(XPRT_CLOSING, &xprt->state); | 1459 | clear_bit(XPRT_CLOSING, &xprt->state); |
| 1460 | smp_mb__after_clear_bit(); | 1460 | smp_mb__after_clear_bit(); |
| 1461 | } | ||
| 1462 | |||
| 1463 | static void xs_sock_mark_closed(struct rpc_xprt *xprt) | ||
| 1464 | { | ||
| 1465 | xs_sock_reset_connection_flags(xprt); | ||
| 1461 | /* Mark transport as closed and wake up all pending tasks */ | 1466 | /* Mark transport as closed and wake up all pending tasks */ |
| 1462 | xprt_disconnect_done(xprt); | 1467 | xprt_disconnect_done(xprt); |
| 1463 | } | 1468 | } |
| @@ -1512,8 +1517,9 @@ static void xs_tcp_state_change(struct sock *sk) | |||
| 1512 | break; | 1517 | break; |
| 1513 | case TCP_CLOSE_WAIT: | 1518 | case TCP_CLOSE_WAIT: |
| 1514 | /* The server initiated a shutdown of the socket */ | 1519 | /* The server initiated a shutdown of the socket */ |
| 1515 | xprt_force_disconnect(xprt); | ||
| 1516 | xprt->connect_cookie++; | 1520 | xprt->connect_cookie++; |
| 1521 | clear_bit(XPRT_CONNECTED, &xprt->state); | ||
| 1522 | xs_tcp_force_close(xprt); | ||
| 1517 | case TCP_CLOSING: | 1523 | case TCP_CLOSING: |
| 1518 | /* | 1524 | /* |
| 1519 | * If the server closed down the connection, make sure that | 1525 | * If the server closed down the connection, make sure that |
| @@ -1537,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk) | |||
| 1537 | read_unlock_bh(&sk->sk_callback_lock); | 1543 | read_unlock_bh(&sk->sk_callback_lock); |
| 1538 | } | 1544 | } |
| 1539 | 1545 | ||
| 1540 | /** | ||
| 1541 | * xs_error_report - callback mainly for catching socket errors | ||
| 1542 | * @sk: socket | ||
| 1543 | */ | ||
| 1544 | static void xs_error_report(struct sock *sk) | ||
| 1545 | { | ||
| 1546 | struct rpc_xprt *xprt; | ||
| 1547 | |||
| 1548 | read_lock_bh(&sk->sk_callback_lock); | ||
| 1549 | if (!(xprt = xprt_from_sock(sk))) | ||
| 1550 | goto out; | ||
| 1551 | dprintk("RPC: %s client %p...\n" | ||
| 1552 | "RPC: error %d\n", | ||
| 1553 | __func__, xprt, sk->sk_err); | ||
| 1554 | xprt_wake_pending_tasks(xprt, -EAGAIN); | ||
| 1555 | out: | ||
| 1556 | read_unlock_bh(&sk->sk_callback_lock); | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | static void xs_write_space(struct sock *sk) | 1546 | static void xs_write_space(struct sock *sk) |
| 1560 | { | 1547 | { |
| 1561 | struct socket *sock; | 1548 | struct socket *sock; |
| @@ -1855,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, | |||
| 1855 | sk->sk_user_data = xprt; | 1842 | sk->sk_user_data = xprt; |
| 1856 | sk->sk_data_ready = xs_local_data_ready; | 1843 | sk->sk_data_ready = xs_local_data_ready; |
| 1857 | sk->sk_write_space = xs_udp_write_space; | 1844 | sk->sk_write_space = xs_udp_write_space; |
| 1858 | sk->sk_error_report = xs_error_report; | ||
| 1859 | sk->sk_allocation = GFP_ATOMIC; | 1845 | sk->sk_allocation = GFP_ATOMIC; |
| 1860 | 1846 | ||
| 1861 | xprt_clear_connected(xprt); | 1847 | xprt_clear_connected(xprt); |
| @@ -1889,9 +1875,6 @@ static void xs_local_setup_socket(struct work_struct *work) | |||
| 1889 | struct socket *sock; | 1875 | struct socket *sock; |
| 1890 | int status = -EIO; | 1876 | int status = -EIO; |
| 1891 | 1877 | ||
| 1892 | if (xprt->shutdown) | ||
| 1893 | goto out; | ||
| 1894 | |||
| 1895 | current->flags |= PF_FSTRANS; | 1878 | current->flags |= PF_FSTRANS; |
| 1896 | 1879 | ||
| 1897 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | 1880 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); |
| @@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
| 1983 | sk->sk_user_data = xprt; | 1966 | sk->sk_user_data = xprt; |
| 1984 | sk->sk_data_ready = xs_udp_data_ready; | 1967 | sk->sk_data_ready = xs_udp_data_ready; |
| 1985 | sk->sk_write_space = xs_udp_write_space; | 1968 | sk->sk_write_space = xs_udp_write_space; |
| 1986 | sk->sk_error_report = xs_error_report; | ||
| 1987 | sk->sk_no_check = UDP_CSUM_NORCV; | 1969 | sk->sk_no_check = UDP_CSUM_NORCV; |
| 1988 | sk->sk_allocation = GFP_ATOMIC; | 1970 | sk->sk_allocation = GFP_ATOMIC; |
| 1989 | 1971 | ||
| @@ -2008,9 +1990,6 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
| 2008 | struct socket *sock = transport->sock; | 1990 | struct socket *sock = transport->sock; |
| 2009 | int status = -EIO; | 1991 | int status = -EIO; |
| 2010 | 1992 | ||
| 2011 | if (xprt->shutdown) | ||
| 2012 | goto out; | ||
| 2013 | |||
| 2014 | current->flags |= PF_FSTRANS; | 1993 | current->flags |= PF_FSTRANS; |
| 2015 | 1994 | ||
| 2016 | /* Start by resetting any existing state */ | 1995 | /* Start by resetting any existing state */ |
| @@ -2053,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport) | |||
| 2053 | any.sa_family = AF_UNSPEC; | 2032 | any.sa_family = AF_UNSPEC; |
| 2054 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); | 2033 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
| 2055 | if (!result) | 2034 | if (!result) |
| 2056 | xs_sock_mark_closed(&transport->xprt); | 2035 | xs_sock_reset_connection_flags(&transport->xprt); |
| 2057 | else | 2036 | dprintk("RPC: AF_UNSPEC connect return code %d\n", result); |
| 2058 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | ||
| 2059 | result); | ||
| 2060 | } | 2037 | } |
| 2061 | 2038 | ||
| 2062 | static void xs_tcp_reuse_connection(struct sock_xprt *transport) | 2039 | static void xs_tcp_reuse_connection(struct sock_xprt *transport) |
| @@ -2101,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
| 2101 | sk->sk_data_ready = xs_tcp_data_ready; | 2078 | sk->sk_data_ready = xs_tcp_data_ready; |
| 2102 | sk->sk_state_change = xs_tcp_state_change; | 2079 | sk->sk_state_change = xs_tcp_state_change; |
| 2103 | sk->sk_write_space = xs_tcp_write_space; | 2080 | sk->sk_write_space = xs_tcp_write_space; |
| 2104 | sk->sk_error_report = xs_error_report; | ||
| 2105 | sk->sk_allocation = GFP_ATOMIC; | 2081 | sk->sk_allocation = GFP_ATOMIC; |
| 2106 | 2082 | ||
| 2107 | /* socket options */ | 2083 | /* socket options */ |
| @@ -2156,9 +2132,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
| 2156 | struct rpc_xprt *xprt = &transport->xprt; | 2132 | struct rpc_xprt *xprt = &transport->xprt; |
| 2157 | int status = -EIO; | 2133 | int status = -EIO; |
| 2158 | 2134 | ||
| 2159 | if (xprt->shutdown) | ||
| 2160 | goto out; | ||
| 2161 | |||
| 2162 | current->flags |= PF_FSTRANS; | 2135 | current->flags |= PF_FSTRANS; |
| 2163 | 2136 | ||
| 2164 | if (!sock) { | 2137 | if (!sock) { |
| @@ -2199,8 +2172,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
| 2199 | /* We're probably in TIME_WAIT. Get rid of existing socket, | 2172 | /* We're probably in TIME_WAIT. Get rid of existing socket, |
| 2200 | * and retry | 2173 | * and retry |
| 2201 | */ | 2174 | */ |
| 2202 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | 2175 | xs_tcp_force_close(xprt); |
| 2203 | xprt_force_disconnect(xprt); | ||
| 2204 | break; | 2176 | break; |
| 2205 | case -ECONNREFUSED: | 2177 | case -ECONNREFUSED: |
| 2206 | case -ECONNRESET: | 2178 | case -ECONNRESET: |
| @@ -2528,6 +2500,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
| 2528 | static struct rpc_xprt_ops bc_tcp_ops = { | 2500 | static struct rpc_xprt_ops bc_tcp_ops = { |
| 2529 | .reserve_xprt = xprt_reserve_xprt, | 2501 | .reserve_xprt = xprt_reserve_xprt, |
| 2530 | .release_xprt = xprt_release_xprt, | 2502 | .release_xprt = xprt_release_xprt, |
| 2503 | .alloc_slot = xprt_alloc_slot, | ||
| 2531 | .rpcbind = xs_local_rpcbind, | 2504 | .rpcbind = xs_local_rpcbind, |
| 2532 | .buf_alloc = bc_malloc, | 2505 | .buf_alloc = bc_malloc, |
| 2533 | .buf_free = bc_free, | 2506 | .buf_free = bc_free, |
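One detail worth calling out in the xprtsock.c hunks above: xs_tcp_send_request() moves the -EPIPE case out of the group that calls xs_tcp_shutdown() and into the fall-through group that only clears SOCK_ASYNC_NOSPACE, so the case ordering alone decides which errors tear the socket down. A small standalone illustration of that deliberate fall-through grouping (error values real, handlers obviously simplified):

/* Deliberate switch fall-through groups, as in xs_tcp_send_request():
 * which cases sit above the "heavy" action decides who triggers it. */
#include <errno.h>
#include <stdio.h>

static void shutdown_socket(void)	{ printf("  shutdown socket\n"); }
static void clear_nospace(void)		{ printf("  clear NOSPACE flag\n"); }

static void handle_send_error(int status)
{
	printf("status %d:\n", status);
	switch (status) {
	case -ECONNRESET:
		shutdown_socket();
		/* fall through: a reset connection also clears NOSPACE */
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EPIPE:			/* grouped here: no shutdown for EPIPE */
		clear_nospace();
	}
}

int main(void)
{
	handle_send_error(-ECONNRESET);
	handle_send_error(-EPIPE);
	return 0;
}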
diff --git a/net/wireless/core.c b/net/wireless/core.c index 443d4d7deea2..3f7253052088 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -526,8 +526,7 @@ int wiphy_register(struct wiphy *wiphy) | |||
| 526 | for (i = 0; i < sband->n_channels; i++) { | 526 | for (i = 0; i < sband->n_channels; i++) { |
| 527 | sband->channels[i].orig_flags = | 527 | sband->channels[i].orig_flags = |
| 528 | sband->channels[i].flags; | 528 | sband->channels[i].flags; |
| 529 | sband->channels[i].orig_mag = | 529 | sband->channels[i].orig_mag = INT_MAX; |
| 530 | sband->channels[i].max_antenna_gain; | ||
| 531 | sband->channels[i].orig_mpwr = | 530 | sband->channels[i].orig_mpwr = |
| 532 | sband->channels[i].max_power; | 531 | sband->channels[i].max_power; |
| 533 | sband->channels[i].band = band; | 532 | sband->channels[i].band = band; |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 8016fee0752b..904a7f368325 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
| @@ -457,20 +457,14 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
| 457 | .reason_code = reason, | 457 | .reason_code = reason, |
| 458 | .ie = ie, | 458 | .ie = ie, |
| 459 | .ie_len = ie_len, | 459 | .ie_len = ie_len, |
| 460 | .local_state_change = local_state_change, | ||
| 460 | }; | 461 | }; |
| 461 | 462 | ||
| 462 | ASSERT_WDEV_LOCK(wdev); | 463 | ASSERT_WDEV_LOCK(wdev); |
| 463 | 464 | ||
| 464 | if (local_state_change) { | 465 | if (local_state_change && (!wdev->current_bss || |
| 465 | if (wdev->current_bss && | 466 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) |
| 466 | ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { | ||
| 467 | cfg80211_unhold_bss(wdev->current_bss); | ||
| 468 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
| 469 | wdev->current_bss = NULL; | ||
| 470 | } | ||
| 471 | |||
| 472 | return 0; | 467 | return 0; |
| 473 | } | ||
| 474 | 468 | ||
| 475 | return rdev->ops->deauth(&rdev->wiphy, dev, &req); | 469 | return rdev->ops->deauth(&rdev->wiphy, dev, &req); |
| 476 | } | 470 | } |
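The __cfg80211_mlme_deauth() hunk moves local_state_change into the cfg80211_deauth_request itself and turns the old BSS-dropping branch into a plain early return: a purely local deauth is now a no-op unless it names the currently associated BSS. The new condition can be read as the hypothetical predicate below (the helper name is illustrative; the test is taken verbatim from the new code and needs <net/cfg80211.h> and <linux/etherdevice.h>):

	static bool deauth_is_local_noop(struct wireless_dev *wdev,
					 const u8 *bssid, bool local_state_change)
	{
		return local_state_change &&
		       (!wdev->current_bss ||
			!ether_addr_equal(wdev->current_bss->pub.bssid, bssid));
	}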
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3b8cbbc214db..bcc7d7ee5a51 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -908,7 +908,7 @@ static void handle_channel(struct wiphy *wiphy, | |||
| 908 | map_regdom_flags(reg_rule->flags) | bw_flags; | 908 | map_regdom_flags(reg_rule->flags) | bw_flags; |
| 909 | chan->max_antenna_gain = chan->orig_mag = | 909 | chan->max_antenna_gain = chan->orig_mag = |
| 910 | (int) MBI_TO_DBI(power_rule->max_antenna_gain); | 910 | (int) MBI_TO_DBI(power_rule->max_antenna_gain); |
| 911 | chan->max_power = chan->orig_mpwr = | 911 | chan->max_reg_power = chan->max_power = chan->orig_mpwr = |
| 912 | (int) MBM_TO_DBM(power_rule->max_eirp); | 912 | (int) MBM_TO_DBM(power_rule->max_eirp); |
| 913 | return; | 913 | return; |
| 914 | } | 914 | } |
| @@ -1331,7 +1331,8 @@ static void handle_channel_custom(struct wiphy *wiphy, | |||
| 1331 | 1331 | ||
| 1332 | chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; | 1332 | chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; |
| 1333 | chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); | 1333 | chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); |
| 1334 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); | 1334 | chan->max_reg_power = chan->max_power = |
| 1335 | (int) MBM_TO_DBM(power_rule->max_eirp); | ||
| 1335 | } | 1336 | } |
| 1336 | 1337 | ||
| 1337 | static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band, | 1338 | static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band, |
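Both handle_channel() and handle_channel_custom() now seed chan->max_reg_power alongside max_power from the rule's maximum EIRP. Regulatory rules carry power in mBm (1/100 dBm) and antenna gain in mBi (1/100 dBi); as far as I can tell, MBM_TO_DBM()/MBI_TO_DBI() are plain integer divisions by 100, which the standalone sketch below illustrates with made-up values:

	#include <stdio.h>

	int main(void)
	{
		int max_eirp_mbm = 2000;	/* 20.00 dBm in a regulatory rule */
		int max_gain_mbi = 600;		/*  6.00 dBi                      */

		/* Equivalent of MBM_TO_DBM()/MBI_TO_DBI(): divide by 100. */
		printf("max_reg_power    = %d dBm\n", max_eirp_mbm / 100);
		printf("max_antenna_gain = %d dBi\n", max_gain_mbi / 100);
		return 0;
	}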
diff --git a/net/wireless/util.c b/net/wireless/util.c index ef35f4ef2aa6..2762e8329986 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -309,23 +309,21 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) | |||
| 309 | } | 309 | } |
| 310 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); | 310 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); |
| 311 | 311 | ||
| 312 | static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) | 312 | unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) |
| 313 | { | 313 | { |
| 314 | int ae = meshhdr->flags & MESH_FLAGS_AE; | 314 | int ae = meshhdr->flags & MESH_FLAGS_AE; |
| 315 | /* 7.1.3.5a.2 */ | 315 | /* 802.11-2012, 8.2.4.7.3 */ |
| 316 | switch (ae) { | 316 | switch (ae) { |
| 317 | default: | ||
| 317 | case 0: | 318 | case 0: |
| 318 | return 6; | 319 | return 6; |
| 319 | case MESH_FLAGS_AE_A4: | 320 | case MESH_FLAGS_AE_A4: |
| 320 | return 12; | 321 | return 12; |
| 321 | case MESH_FLAGS_AE_A5_A6: | 322 | case MESH_FLAGS_AE_A5_A6: |
| 322 | return 18; | 323 | return 18; |
| 323 | case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): | ||
| 324 | return 24; | ||
| 325 | default: | ||
| 326 | return 6; | ||
| 327 | } | 324 | } |
| 328 | } | 325 | } |
| 326 | EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); | ||
| 329 | 327 | ||
| 330 | int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | 328 | int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, |
| 331 | enum nl80211_iftype iftype) | 329 | enum nl80211_iftype iftype) |
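This hunk makes ieee80211_get_mesh_hdrlen() non-static and exports it, with the invalid A4+A5/A6 address-extension combination now falling through to the 6-byte default; the hunks that follow make ieee80211_data_to_8023() reject data frames whose mesh AE flags contradict the outer 802.11 header. With the symbol exported, a caller could size the full 802.11s header roughly as follows (a hedged sketch, assuming the declaration is picked up via <net/cfg80211.h>; the helper name is illustrative):

	#include <net/cfg80211.h>
	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	static unsigned int mesh_total_hdrlen(const struct sk_buff *skb)
	{
		unsigned int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		struct ieee80211s_hdr *meshhdr =
			(struct ieee80211s_hdr *)(skb->data + hdrlen);

		/* 802.11 header plus the 6/12/18-byte mesh control field. */
		return hdrlen + ieee80211_get_mesh_hdrlen(meshhdr);
	}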
| @@ -373,6 +371,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | |||
| 373 | /* make sure meshdr->flags is on the linear part */ | 371 | /* make sure meshdr->flags is on the linear part */ |
| 374 | if (!pskb_may_pull(skb, hdrlen + 1)) | 372 | if (!pskb_may_pull(skb, hdrlen + 1)) |
| 375 | return -1; | 373 | return -1; |
| 374 | if (meshdr->flags & MESH_FLAGS_AE_A4) | ||
| 375 | return -1; | ||
| 376 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { | 376 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { |
| 377 | skb_copy_bits(skb, hdrlen + | 377 | skb_copy_bits(skb, hdrlen + |
| 378 | offsetof(struct ieee80211s_hdr, eaddr1), | 378 | offsetof(struct ieee80211s_hdr, eaddr1), |
| @@ -397,6 +397,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | |||
| 397 | /* make sure meshdr->flags is on the linear part */ | 397 | /* make sure meshdr->flags is on the linear part */ |
| 398 | if (!pskb_may_pull(skb, hdrlen + 1)) | 398 | if (!pskb_may_pull(skb, hdrlen + 1)) |
| 399 | return -1; | 399 | return -1; |
| 400 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) | ||
| 401 | return -1; | ||
| 400 | if (meshdr->flags & MESH_FLAGS_AE_A4) | 402 | if (meshdr->flags & MESH_FLAGS_AE_A4) |
| 401 | skb_copy_bits(skb, hdrlen + | 403 | skb_copy_bits(skb, hdrlen + |
| 402 | offsetof(struct ieee80211s_hdr, eaddr1), | 404 | offsetof(struct ieee80211s_hdr, eaddr1), |
