Diffstat (limited to 'net')
 38 files changed, 365 insertions(+), 185 deletions(-)
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4d638944d933..34e42968b477 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
 	proto = find_snap_client(skb->h.raw);
 	if (proto) {
 		/* Pass the frame on. */
+		u8 *hdr = skb->data;
 		skb->h.raw += 5;
 		skb_pull(skb, 5);
+		skb_postpull_rcsum(skb, hdr, 5);
 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
 	} else {
 		skb->sk = NULL;
diff --git a/net/Kconfig b/net/Kconfig
index bc603d9aea56..5126f58d9c44 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -27,6 +27,13 @@ if NET
 
 menu "Networking options"
 
+config NETDEBUG
+	bool "Network packet debugging"
+	help
+	  You can say Y here if you want to get additional messages useful in
+	  debugging bad packets, but can overwhelm logs under denial of service
+	  attacks.
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ba442883e877..da687c8dc6ff 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -104,6 +104,7 @@ static void destroy_nbp(struct net_bridge_port *p)
 {
 	struct net_device *dev = p->dev;
 
+	dev->br_port = NULL;
 	p->br = NULL;
 	p->dev = NULL;
 	dev_put(dev);
@@ -118,13 +119,24 @@ static void destroy_nbp_rcu(struct rcu_head *head)
 	destroy_nbp(p);
 }
 
-/* called with RTNL */
+/* Delete port(interface) from bridge is done in two steps.
+ * via RCU. First step, marks device as down. That deletes
+ * all the timers and stops new packets from flowing through.
+ *
+ * Final cleanup doesn't occur until after all CPU's finished
+ * processing packets.
+ *
+ * Protected from multiple admin operations by RTNL mutex
+ */
 static void del_nbp(struct net_bridge_port *p)
 {
 	struct net_bridge *br = p->br;
 	struct net_device *dev = p->dev;
 
-	dev->br_port = NULL;
+	/* Race between RTNL notify and RCU callback */
+	if (p->deleted)
+		return;
+
 	dev_set_promiscuity(dev, -1);
 
 	cancel_delayed_work(&p->carrier_check);
@@ -132,16 +144,13 @@ static void del_nbp(struct net_bridge_port *p)
 
 	spin_lock_bh(&br->lock);
 	br_stp_disable_port(p);
+	p->deleted = 1;
 	spin_unlock_bh(&br->lock);
 
 	br_fdb_delete_by_port(br, p);
 
 	list_del_rcu(&p->list);
 
-	del_timer_sync(&p->message_age_timer);
-	del_timer_sync(&p->forward_delay_timer);
-	del_timer_sync(&p->hold_timer);
-
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c5bd631ffcd5..e330b17b6d81 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -68,6 +68,7 @@ struct net_bridge_port
 	/* STP */
 	u8 priority;
 	u8 state;
+	u8 deleted;
 	u16 port_no;
 	unsigned char topology_change_ack;
 	unsigned char config_pending;
diff --git a/net/core/dev.c b/net/core/dev.c
index fd070a098f20..ffb82073056e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
 	case SIOCBONDENSLAVE:
 	case SIOCBONDRELEASE:
 	case SIOCBONDSETHWADDR:
-	case SIOCBONDSLAVEINFOQUERY:
-	case SIOCBONDINFOQUERY:
 	case SIOCBONDCHANGEACTIVE:
 	case SIOCBRADDIF:
 	case SIOCBRDELIF:
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
+		/* fall through */
+	case SIOCBONDSLAVEINFOQUERY:
+	case SIOCBONDINFOQUERY:
 		dev_load(ifr.ifr_name);
 		rtnl_lock();
 		ret = dev_ifsioc(&ifr, cmd);
diff --git a/net/core/filter.c b/net/core/filter.c
index 9540946a48f3..93fbd01d2259 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
 }
 
 /**
  * sk_run_filter - run a filter on a socket
  * @skb: buffer to run the filter on
  * @filter: filter to apply
  * @flen: length of filter
@@ -78,8 +78,8 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 {
 	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
 	u32 tmp;
 	int k;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d0732e9c8560..6766f118f070 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int fclone)
 {
+	kmem_cache_t *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
 
+	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
 	if (!skb)
 		goto out;
 
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 out:
 	return skb;
 nodata:
-	kmem_cache_free(skbuff_head_cache, skb);
+	kmem_cache_free(cache, skb);
 	skb = NULL;
 	goto out;
 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00f983226672..dc0487b5bace 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -119,7 +119,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err != 0)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
+				sk);
 	if (err != 0)
 		goto failure;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index df074259f9c3..80c4d048869e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -468,6 +468,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
 
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 7a121802faa9..960aa78cdb97 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -350,6 +350,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 	u8 src[ETH_ALEN];
 	struct ieee80211_crypt_data *crypt = NULL;
 	int keyidx = 0;
+	int can_be_decrypted = 0;
 
 	hdr = (struct ieee80211_hdr_4addr *)skb->data;
 	stats = &ieee->stats;
@@ -410,12 +411,23 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 		return 1;
 	}
 
-	if (is_multicast_ether_addr(hdr->addr1)
-	    ? ieee->host_mc_decrypt : ieee->host_decrypt) {
+	can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
+			    is_broadcast_ether_addr(hdr->addr2)) ?
+		ieee->host_mc_decrypt : ieee->host_decrypt;
+
+	if (can_be_decrypted) {
 		int idx = 0;
-		if (skb->len >= hdrlen + 3)
+		if (skb->len >= hdrlen + 3) {
+			/* Top two-bits of byte 3 are the key index */
 			idx = skb->data[hdrlen + 3] >> 6;
+		}
+
+		/* ieee->crypt[] is WEP_KEY (4) in length. Given that idx
+		 * is only allowed 2-bits of storage, no value of idx can
+		 * be provided via above code that would result in idx
+		 * being out of range */
 		crypt = ieee->crypt[idx];
+
 #ifdef NOT_YET
 		sta = NULL;
 
@@ -553,7 +565,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
 
-	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+	if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
 	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
 		goto rx_dropped;
 
@@ -617,7 +629,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
 	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
 	 * encrypted/authenticated */
-	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+	if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
 	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
 		goto rx_dropped;
 
@@ -1439,7 +1451,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
 		break;
 
 	case IEEE80211_STYPE_PROBE_REQ:
-		IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+		IEEE80211_DEBUG_MGMT("received auth (%d)\n",
 				     WLAN_FC_GET_STYPE(le16_to_cpu
 						       (header->frame_ctl)));
 
@@ -1473,7 +1485,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
 		break;
 	case IEEE80211_STYPE_AUTH:
 
-		IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+		IEEE80211_DEBUG_MGMT("received auth (%d)\n",
 				     WLAN_FC_GET_STYPE(le16_to_cpu
 						       (header->frame_ctl)));
 
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 23e1630f50b7..f87c6b89f845 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -232,15 +232,18 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
 	return start;
 }
 
+#define SCAN_ITEM_SIZE 128
+
 int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 			  struct iw_request_info *info,
 			  union iwreq_data *wrqu, char *extra)
 {
 	struct ieee80211_network *network;
 	unsigned long flags;
+	int err = 0;
 
 	char *ev = extra;
-	char *stop = ev + IW_SCAN_MAX_DATA;
+	char *stop = ev + wrqu->data.length;
 	int i = 0;
 
 	IEEE80211_DEBUG_WX("Getting scan\n");
@@ -249,6 +252,11 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
 	list_for_each_entry(network, &ieee->network_list, list) {
 		i++;
+		if (stop - ev < SCAN_ITEM_SIZE) {
+			err = -E2BIG;
+			break;
+		}
+
 		if (ieee->scan_age == 0 ||
 		    time_after(network->last_scanned + ieee->scan_age, jiffies))
 			ev = ipw2100_translate_scan(ieee, ev, stop, network);
@@ -270,7 +278,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
 	IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
 
-	return 0;
+	return err;
 }
 
 int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 105039eb7629..6bc0887b0834 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	u32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
-		goto out;
+		return;
 
 	if (icmp_xmit_lock())
 		return;
@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	ip_rt_put(rt);
 out_unlock:
 	icmp_xmit_unlock();
-out:;
 }
 
 
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d8ce7133cd8f..0b4e95f93dad 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -970,7 +970,7 @@ int igmp_rcv(struct sk_buff *skb)
 	case IGMP_MTRACE_RESP:
 		break;
 	default:
-		NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
+		break;
 	}
 
 drop:
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index d34a9fa608e0..342d0b9098f5 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	struct multipath_dest *d, *target_dest = NULL;
 
 	/* store the weight information for a certain route */
-	spin_lock(&state[state_idx].lock);
+	spin_lock_bh(&state[state_idx].lock);
 
 	/* find state entry for gateway or add one if necessary */
 	list_for_each_entry_rcu(r, &state[state_idx].head, list) {
@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	 * we are finished
 	 */
 
-	spin_unlock(&state[state_idx].lock);
+	spin_unlock_bh(&state[state_idx].lock);
 }
 
 static void __multipath_free(struct rcu_head *head)
@@ -302,7 +302,7 @@ static void wrandom_flush(void)
 	for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
 		struct multipath_route *r;
 
-		spin_lock(&state[i].lock);
+		spin_lock_bh(&state[i].lock);
 		list_for_each_entry_rcu(r, &state[i].head, list) {
 			struct multipath_dest *d;
 			list_for_each_entry_rcu(d, &r->dests, list) {
@@ -315,7 +315,7 @@ static void wrandom_flush(void)
 					__multipath_free);
 		}
 
-		spin_unlock(&state[i].lock);
+		spin_unlock_bh(&state[i].lock);
 	}
 }
 
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 3284cfb993e6..128de4d7c0b7 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
-			ca->ccount++;
 		}
 	}
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6ea353907af5..233bdf259965 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -236,7 +236,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
 	if (err)
 		goto failure;
 
@@ -1845,7 +1845,6 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d328d5986143..1db50487916b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3321,9 +3321,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 	switch (event) {
 	case RTM_NEWADDR:
-		dst_hold(&ifp->rt->u.dst);
-		if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL))
-			dst_release(&ifp->rt->u.dst);
+		ip6_ins_rt(ifp->rt, NULL, NULL, NULL);
 		if (ifp->idev->cnf.forwarding)
 			addrconf_join_anycast(ifp);
 		break;
@@ -3334,8 +3332,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 		dst_hold(&ifp->rt->u.dst);
 		if (ip6_del_rt(ifp->rt, NULL, NULL, NULL))
 			dst_free(&ifp->rt->u.dst);
-		else
-			dst_release(&ifp->rt->u.dst);
 		break;
 	}
 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 064ffab82a9f..6c9711ac1c03 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk)
 	struct sk_buff *skb;
 	struct ipv6_txoptions *opt;
 
-	/*
-	 *	Release destination entry
-	 */
-
-	sk_dst_reset(sk);
-
 	/* Release rx options */
 
 	if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c05c7978bef..4420948a1bfe 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
 		}
 	} else {
 		for (ma = idev->mc_list; ma; ma=ma->next) {
-			if (group_type != IPV6_ADDR_ANY &&
-			    !ipv6_addr_equal(group, &ma->mca_addr))
+			if (!ipv6_addr_equal(group, &ma->mca_addr))
 				continue;
 			spin_lock_bh(&ma->mca_lock);
 			if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
 				ma->mca_flags &= ~MAF_GSQUERY;
 			}
 			if (!(ma->mca_flags & MAF_GSQUERY) ||
 			    mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
 				igmp6_group_queried(ma, max_delay);
 			spin_unlock_bh(&ma->mca_lock);
-			if (group_type != IPV6_ADDR_ANY)
-				break;
+			break;
 		}
 	}
 	read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
 		 * in all filters
 		 */
 		if (psf->sf_count[MCAST_INCLUDE])
-			return 0;
+			return type == MLD2_MODE_IS_INCLUDE;
 		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 			psf->sf_count[MCAST_EXCLUDE];
 	}
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
 
 static int sf_setstate(struct ifmcaddr6 *pmc)
 {
-	struct ip6_sf_list *psf;
+	struct ip6_sf_list *psf, *dpsf;
 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
 	int qrv = pmc->idev->mc_qrv;
 	int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
 				!psf->sf_count[MCAST_INCLUDE];
 		} else
 			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-		if (new_in != psf->sf_oldin) {
-			psf->sf_crcount = qrv;
+		if (new_in) {
+			if (!psf->sf_oldin) {
+				struct ip6_sf_list *prev = 0;
+
+				for (dpsf=pmc->mca_tomb; dpsf;
+				     dpsf=dpsf->sf_next) {
+					if (ipv6_addr_equal(&dpsf->sf_addr,
+					    &psf->sf_addr))
+						break;
+					prev = dpsf;
+				}
+				if (dpsf) {
+					if (prev)
+						prev->sf_next = dpsf->sf_next;
+					else
+						pmc->mca_tomb = dpsf->sf_next;
+					kfree(dpsf);
+				}
+				psf->sf_crcount = qrv;
+				rv++;
+			}
+		} else if (psf->sf_oldin) {
+			psf->sf_crcount = 0;
+			/*
+			 * add or update "delete" records if an active filter
+			 * is now inactive
+			 */
+			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+				if (ipv6_addr_equal(&dpsf->sf_addr,
+				    &psf->sf_addr))
+					break;
+			if (!dpsf) {
+				dpsf = (struct ip6_sf_list *)
+					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				if (!dpsf)
+					continue;
+				*dpsf = *psf;
+				/* pmc->mca_lock held by callers */
+				dpsf->sf_next = pmc->mca_tomb;
+				pmc->mca_tomb = dpsf;
+			}
+			dpsf->sf_crcount = qrv;
 			rv++;
 		}
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 66d04004afda..ca9cf6853755 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -515,6 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43f1ce74187d..ae86d237a456 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
 	hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+	hdr->sadb_msg_type = SADB_FLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
 	if (!skb_out)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+	hdr->sadb_msg_type = SADB_X_SPDFLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee93abc71cb8..9db7dbdb16e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 	 */
 
 	err = -EMSGSIZE;
-	if(len>dev->mtu+dev->hard_header_len)
+	if (len > dev->mtu + dev->hard_header_len)
 		goto out_unlock;
 
 	err = -ENOBUFS;
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
 	 * Check legality
 	 */
 
-	if(addr_len!=sizeof(struct sockaddr))
+	if (addr_len != sizeof(struct sockaddr))
 		return -EINVAL;
 	strlcpy(name,uaddr->sa_data,sizeof(name));
 
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * retries.
 	 */
 
-	if(skb==NULL)
+	if (skb == NULL)
 		goto out;
 
 	/*
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
 
-	if (get_user(len,optlen))
+	if (get_user(len, optlen))
 		return -EFAULT;
 
 	if (len < 0)
 		return -EINVAL;
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		return -ENOPROTOOPT;
 	}
 
 	if (put_user(len, optlen))
 		return -EFAULT;
 	return 0;
 }
 
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4aa6fc60357c..cb78b50868ee 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
+	/* It is possible that the association could have moved to a different
+	 * socket if it is peeled off. If so, update the sk.
+	 */
+	if (sk != rcvr->sk) {
+		sctp_bh_lock_sock(rcvr->sk);
+		sctp_bh_unlock_sock(sk);
+		sk = rcvr->sk;
+	}
+
 	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, skb);
 	else
 		sctp_backlog_rcv(sk, skb);
 
-	/* Release the sock and any reference counts we took in the
-	 * lookup calls.
+	/* Release the sock and the sock ref we took in the lookup calls.
+	 * The asoc/ep ref will be released in sctp_backlog_rcv.
 	 */
 	sctp_bh_unlock_sock(sk);
-	if (asoc)
-		sctp_association_put(asoc);
-	else
-		sctp_endpoint_put(ep);
 	sock_put(sk);
+
 	return ret;
 
 discard_it:
@@ -296,12 +302,50 @@ discard_release:
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
-
-	sctp_inq_push(inqueue, chunk);
+	struct sctp_inq *inqueue = NULL;
+	struct sctp_ep_common *rcvr = NULL;
+
+	rcvr = chunk->rcvr;
+
+	BUG_TRAP(rcvr->sk == sk);
+
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+	} else {
+		inqueue = &chunk->rcvr->inqueue;
+		sctp_inq_push(inqueue, chunk);
+	}
+
+	/* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else
+		sctp_endpoint_put(sctp_ep(rcvr));
+
 	return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk)
+{
+	struct sk_buff *skb;
+	struct sctp_chunk *chunk;
+
+	skb = oldsk->sk_backlog.head;
+	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+	while (skb != NULL) {
+		struct sk_buff *next = skb->next;
+
+		chunk = SCTP_INPUT_CB(skb)->chunk;
+		skb->next = NULL;
+		if (&assoc->base == chunk->rcvr)
+			sk_add_backlog(newsk, skb);
+		else
+			sk_add_backlog(oldsk, skb);
+		skb = next;
+	}
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 			   struct sctp_transport *t, __u32 pmtu)
@@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb)
 	sctp_errhdr_t *err;
 
 	ch = (sctp_chunkhdr_t *) skb->data;
-	ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
 
 	/* Scan through all the chunks in the packet.  */
-	while (ch_end > (__u8 *)ch && ch_end < skb->tail) {
+	do {
+		/* Break out if chunk length is less then minimal. */
+		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+			break;
+
+		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+		if (ch_end > skb->tail)
+			break;
 
 		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
 		 * receiver MUST silently discard the OOTB packet and take no
@@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
 	}
 
 		ch = (sctp_chunkhdr_t *) ch_end;
-		ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
-	}
+	} while (ch_end < skb->tail);
 
 	return 0;
 
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 2d33922c044b..297b8951463e 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue)
 	/* If there is a packet which is currently being worked on,
 	 * free it as well.
 	 */
-	if (queue->in_progress)
+	if (queue->in_progress) {
 		sctp_chunk_free(queue->in_progress);
+		queue->in_progress = NULL;
+	}
 
 	if (queue->malloced) {
 		/* Dump the master memory segment. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a40991ef72c9..437cba7260a4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 	 * When a Fast Retransmit is being performed the sender SHOULD
 	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
 	 */
-	if (!chunk->fast_retransmit)
+	if (chunk->fast_retransmit <= 0)
 		if (transport->flight_size >= transport->cwnd) {
 			retval = SCTP_XMIT_RWND_FULL;
 			goto finish;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index efb72faba20c..f148f9576dd2 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 	 * chunks that are not yet acked should be added to the
 	 * retransmit queue.
 	 */
-	if ((fast_retransmit && chunk->fast_retransmit) ||
+	if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 	    (!fast_retransmit && !chunk->tsn_gap_acked)) {
 		/* RFC 2960 6.2.1 Processing a Received SACK
 		 *
@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		/* Mark the chunk as ineligible for fast retransmit
 		 * after it is retransmitted.
 		 */
-		chunk->fast_retransmit = 0;
+		if (chunk->fast_retransmit > 0)
+			chunk->fast_retransmit = -1;
 
 		*start_timer = 1;
 		q->empty = 0;
@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			list_for_each(lchunk1, lqueue) {
 				chunk1 = list_entry(lchunk1, struct sctp_chunk,
 						    transmitted_list);
-				chunk1->fast_retransmit = 0;
+				if (chunk1->fast_retransmit > 0)
+					chunk1->fast_retransmit = -1;
 			}
 		}
 	}
@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
 		/*
 		 * M4) If any DATA chunk is found to have a
 		 * 'TSN.Missing.Report'
-		 * value larger than or equal to 4, mark that chunk for
+		 * value larger than or equal to 3, mark that chunk for
 		 * retransmission and start the fast retransmit procedure.
 		 */
 
-		if (chunk->tsn_missing_report >= 4) {
+		if (chunk->tsn_missing_report >= 3) {
 			chunk->fast_retransmit = 1;
 			do_fast_retransmit = 1;
 		}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 6e4dc28874d7..d47a52c303a8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
 
 static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (*pos > sctp_ep_hashsize)
+	if (*pos >= sctp_ep_hashsize)
 		return NULL;
 
 	if (*pos < 0)
@@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
 
-	++*pos;
-
 	return (void *)pos;
 }
 
@@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
 
 static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	if (*pos > sctp_ep_hashsize)
+	if (++*pos >= sctp_ep_hashsize)
 		return NULL;
 
-	++*pos;
-
 	return pos;
 }
 
@@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
 	struct sock *sk;
-	int hash = *(int *)v;
+	int hash = *(loff_t *)v;
 
-	if (hash > sctp_ep_hashsize)
+	if (hash >= sctp_ep_hashsize)
 		return -ENOMEM;
 
-	head = &sctp_ep_hashtable[hash-1];
+	head = &sctp_ep_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
 		ep = sctp_ep(epb);
 		sk = epb->sk;
 		seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
-			   sctp_sk(sk)->type, sk->sk_state, hash-1,
+			   sctp_sk(sk)->type, sk->sk_state, hash,
			   epb->bind_addr.port,
			   sock_i_uid(sk), sock_i_ino(sk));
 
@@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void)
 
 static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (*pos > sctp_assoc_hashsize)
+	if (*pos >= sctp_assoc_hashsize)
 		return NULL;
 
 	if (*pos < 0)
@@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
 		seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
				"RPORT LADDRS <-> RADDRS\n");
 
-	++*pos;
-
 	return (void *)pos;
 }
 
@@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
 
 static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	if (*pos > sctp_assoc_hashsize)
+	if (++*pos >= sctp_assoc_hashsize)
 		return NULL;
 
-	++*pos;
-
 	return pos;
 }
 
@@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_association *assoc;
 	struct sock *sk;
-	int hash = *(int *)v;
+	int hash = *(loff_t *)v;
 
-	if (hash > sctp_assoc_hashsize)
+	if (hash >= sctp_assoc_hashsize)
 		return -ENOMEM;
 
-	head = &sctp_assoc_hashtable[hash-1];
+	head = &sctp_assoc_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
@@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq,
			   "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
			   assoc, sk, sctp_sk(sk)->type, sk->sk_state,
-			   assoc->state, hash-1, assoc->assoc_id,
+			   assoc->state, hash, assoc->assoc_id,
			   (sk->sk_rcvbuf - assoc->rwnd),
			   assoc->sndbuf_used,
			   sock_i_uid(sk), sock_i_ino(sk),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 556c495c6922..5e0de3c0eead 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1275,7 +1275,12 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 	unsigned int keylen;
 	char *key;
 
-	headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE;
+	/* Header size is static data prior to the actual cookie, including
+	 * any padding.
+	 */
+	headersize = sizeof(sctp_paramhdr_t) +
+		     (sizeof(struct sctp_signed_cookie) -
+		      sizeof(struct sctp_cookie));
 	bodysize = sizeof(struct sctp_cookie)
	    + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
 
@@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie(
 	struct sctp_signed_cookie *cookie;
 	struct sctp_cookie *bear_cookie;
 	int headersize, bodysize, fixed_size;
-	__u8 digest[SCTP_SIGNATURE_SIZE];
+	__u8 *digest = ep->digest;
 	struct scatterlist sg;
 	unsigned int keylen, len;
 	char *key;
@@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie(
 	struct sk_buff *skb = chunk->skb;
 	struct timeval tv;
 
-	headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
+	/* Header size is static data prior to the actual cookie, including
+	 * any padding.
+	 */
+	headersize = sizeof(sctp_chunkhdr_t) +
+		     (sizeof(struct sctp_signed_cookie) -
+		      sizeof(struct sctp_cookie));
 	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
 	fixed_size = headersize + sizeof(struct sctp_cookie);
 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b8b38aba92b3..8d1dc24bab4c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 				"T1 INIT Timeout adjustment"
 				" init_err_counter: %d"
 				" cycle: %d"
-				" timeout: %d\n",
+				" timeout: %ld\n",
 				asoc->init_err_counter,
 				asoc->init_cycle,
 				asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
@@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			SCTP_DEBUG_PRINTK(
 				"T1 COOKIE Timeout adjustment"
 				" init_err_counter: %d"
-				" timeout: %d\n",
+				" timeout: %ld\n",
 				asoc->init_err_counter,
 				asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 477d7f80dba6..2b9a832b29a7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
-	if (asoc->overall_error_count > asoc->max_retrans) {
+	if (asoc->overall_error_count >= asoc->max_retrans) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	struct sctp_bind_addr *bp;
 	int attempts = asoc->init_err_counter + 1;
 
-	if (attempts >= asoc->max_init_attempts) {
+	if (attempts > asoc->max_init_attempts) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 			break;
 
 		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+		if (ch_end > skb->tail)
+			break;
 
 		if (SCTP_CID_SHUTDOWN_ACK == ch->type)
 			ootb_shut_ack = 1;
@@ -4638,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
 
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
 		repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
 		if (!repl)
@@ -4695,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
 
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		repl = sctp_make_cookie_echo(asoc, NULL);
 		if (!repl)
 			return SCTP_DISPOSITION_NOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c98ee375ba5e..0ea947eb6813 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
| 2995 | sp->hbinterval = jiffies_to_msecs(sctp_hb_interval); | 2995 | sp->hbinterval = jiffies_to_msecs(sctp_hb_interval); |
| 2996 | sp->pathmaxrxt = sctp_max_retrans_path; | 2996 | sp->pathmaxrxt = sctp_max_retrans_path; |
| 2997 | sp->pathmtu = 0; // allow default discovery | 2997 | sp->pathmtu = 0; // allow default discovery |
| 2998 | sp->sackdelay = sctp_sack_timeout; | 2998 | sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout); |
| 2999 | sp->param_flags = SPP_HB_ENABLE | | 2999 | sp->param_flags = SPP_HB_ENABLE | |
| 3000 | SPP_PMTUD_ENABLE | | 3000 | SPP_PMTUD_ENABLE | |
| 3001 | SPP_SACKDELAY_ENABLE; | 3001 | SPP_SACKDELAY_ENABLE; |
| @@ -5426,7 +5426,7 @@ out: | |||
| 5426 | return err; | 5426 | return err; |
| 5427 | 5427 | ||
| 5428 | do_error: | 5428 | do_error: |
| 5429 | if (asoc->init_err_counter + 1 >= asoc->max_init_attempts) | 5429 | if (asoc->init_err_counter + 1 > asoc->max_init_attempts) |
| 5430 | err = -ETIMEDOUT; | 5430 | err = -ETIMEDOUT; |
| 5431 | else | 5431 | else |
| 5432 | err = -ECONNREFUSED; | 5432 | err = -ECONNREFUSED; |
| @@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5602 | */ | 5602 | */ |
| 5603 | newsp->type = type; | 5603 | newsp->type = type; |
| 5604 | 5604 | ||
| 5605 | spin_lock_bh(&oldsk->sk_lock.slock); | ||
| 5606 | /* Migrate the backlog from oldsk to newsk. */ | ||
| 5607 | sctp_backlog_migrate(assoc, oldsk, newsk); | ||
| 5605 | /* Migrate the association to the new socket. */ | 5608 | /* Migrate the association to the new socket. */ |
| 5606 | sctp_assoc_migrate(assoc, newsk); | 5609 | sctp_assoc_migrate(assoc, newsk); |
| 5610 | spin_unlock_bh(&oldsk->sk_lock.slock); | ||
| 5607 | 5611 | ||
| 5608 | /* If the association on the newsk is already closed before accept() | 5612 | /* If the association on the newsk is already closed before accept() |
| 5609 | * is called, set RCV_SHUTDOWN flag. | 5613 | * is called, set RCV_SHUTDOWN flag. |
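
In the socket.c hunks, sp->sackdelay joins sp->hbinterval in being stored in milliseconds, so the jiffies-based sysctl default is converted with jiffies_to_msecs(); the migrate path additionally moves the backlog transfer under the old socket's lock. A rough userspace model of the unit conversion, assuming HZ=250 purely for the example (the real kernel helper also copes with tick rates that do not divide 1000 evenly):

    #include <stdio.h>

    #define HZ 250   /* assumed tick rate, example only */

    static unsigned int jiffies_to_msecs_sketch(unsigned long j)
    {
        return (unsigned int)(j * 1000 / HZ);
    }

    int main(void)
    {
        unsigned long sctp_sack_timeout = HZ / 5;   /* 200 ms default, kept in jiffies */

        /* storing the raw jiffies value, as the old code did, would report 50 "ms" */
        printf("sackdelay = %u ms\n", jiffies_to_msecs_sketch(sctp_sack_timeout));
        return 0;
    }
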
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index fcd7096c953d..dc6f3ff32358 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
| @@ -159,12 +159,9 @@ static ctl_table sctp_table[] = { | |||
| 159 | .ctl_name = NET_SCTP_PRESERVE_ENABLE, | 159 | .ctl_name = NET_SCTP_PRESERVE_ENABLE, |
| 160 | .procname = "cookie_preserve_enable", | 160 | .procname = "cookie_preserve_enable", |
| 161 | .data = &sctp_cookie_preserve_enable, | 161 | .data = &sctp_cookie_preserve_enable, |
| 162 | .maxlen = sizeof(long), | 162 | .maxlen = sizeof(int), |
| 163 | .mode = 0644, | 163 | .mode = 0644, |
| 164 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | 164 | .proc_handler = &proc_dointvec |
| 165 | .strategy = &sctp_sysctl_jiffies_ms, | ||
| 166 | .extra1 = &rto_timer_min, | ||
| 167 | .extra2 = &rto_timer_max | ||
| 168 | }, | 165 | }, |
| 169 | { | 166 | { |
| 170 | .ctl_name = NET_SCTP_RTO_ALPHA, | 167 | .ctl_name = NET_SCTP_RTO_ALPHA, |
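
cookie_preserve_enable is a plain int used as a boolean, not a time value, so the entry above drops the jiffies/ms handler and its min/max clamps; .maxlen and .proc_handler now match the variable's actual type. A trivial standalone illustration of the size mismatch the old entry described (assumes an LP64 build where long is 8 bytes):

    #include <stdio.h>

    int main(void)
    {
        int cookie_preserve_enable = 1;

        printf("variable size      : %zu bytes\n", sizeof(cookie_preserve_enable));
        printf("old .maxlen claimed: %zu bytes\n", sizeof(long));
        printf("new .maxlen claims : %zu bytes\n", sizeof(int));
        return 0;
    }
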
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 68d73e2dd155..160f62ad1cc5 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
| @@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) | |||
| 350 | tp->rto_pending = 0; | 350 | tp->rto_pending = 0; |
| 351 | 351 | ||
| 352 | SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " | 352 | SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " |
| 353 | "rttvar: %d, rto: %d\n", __FUNCTION__, | 353 | "rttvar: %d, rto: %ld\n", __FUNCTION__, |
| 354 | tp, rtt, tp->srtt, tp->rttvar, tp->rto); | 354 | tp, rtt, tp->srtt, tp->rttvar, tp->rto); |
| 355 | } | 355 | } |
| 356 | 356 | ||
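
tp->rto is an unsigned long in this tree, so the debug format gains a long length modifier; passing a long where printf expects an int is undefined behaviour even when the value happens to fit. A two-line standalone reminder (mirroring the patch's %ld rather than %lu):

    #include <stdio.h>

    int main(void)
    {
        unsigned long rto = 3000;      /* stand-in for tp->rto, in jiffies */

        /* printf("rto: %d\n", rto);      mismatched: %d expects an int */
        printf("rto: %ld\n", rto);     /* length modifier matches the argument */
        return 0;
    }
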
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 9ac1b8c26c01..8d6f1a176b15 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -184,7 +184,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | |||
| 184 | */ | 184 | */ |
| 185 | struct rpc_cred * | 185 | struct rpc_cred * |
| 186 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | 186 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, |
| 187 | int taskflags) | 187 | int flags) |
| 188 | { | 188 | { |
| 189 | struct rpc_cred_cache *cache = auth->au_credcache; | 189 | struct rpc_cred_cache *cache = auth->au_credcache; |
| 190 | HLIST_HEAD(free); | 190 | HLIST_HEAD(free); |
| @@ -193,7 +193,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
| 193 | *cred = NULL; | 193 | *cred = NULL; |
| 194 | int nr = 0; | 194 | int nr = 0; |
| 195 | 195 | ||
| 196 | if (!(taskflags & RPC_TASK_ROOTCREDS)) | 196 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) |
| 197 | nr = acred->uid & RPC_CREDCACHE_MASK; | 197 | nr = acred->uid & RPC_CREDCACHE_MASK; |
| 198 | retry: | 198 | retry: |
| 199 | spin_lock(&rpc_credcache_lock); | 199 | spin_lock(&rpc_credcache_lock); |
| @@ -202,7 +202,7 @@ retry: | |||
| 202 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { | 202 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { |
| 203 | struct rpc_cred *entry; | 203 | struct rpc_cred *entry; |
| 204 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); | 204 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); |
| 205 | if (entry->cr_ops->crmatch(acred, entry, taskflags)) { | 205 | if (entry->cr_ops->crmatch(acred, entry, flags)) { |
| 206 | hlist_del(&entry->cr_hash); | 206 | hlist_del(&entry->cr_hash); |
| 207 | cred = entry; | 207 | cred = entry; |
| 208 | break; | 208 | break; |
| @@ -224,7 +224,7 @@ retry: | |||
| 224 | rpcauth_destroy_credlist(&free); | 224 | rpcauth_destroy_credlist(&free); |
| 225 | 225 | ||
| 226 | if (!cred) { | 226 | if (!cred) { |
| 227 | new = auth->au_ops->crcreate(auth, acred, taskflags); | 227 | new = auth->au_ops->crcreate(auth, acred, flags); |
| 228 | if (!IS_ERR(new)) { | 228 | if (!IS_ERR(new)) { |
| 229 | #ifdef RPC_DEBUG | 229 | #ifdef RPC_DEBUG |
| 230 | new->cr_magic = RPCAUTH_CRED_MAGIC; | 230 | new->cr_magic = RPCAUTH_CRED_MAGIC; |
| @@ -232,13 +232,21 @@ retry: | |||
| 232 | goto retry; | 232 | goto retry; |
| 233 | } else | 233 | } else |
| 234 | cred = new; | 234 | cred = new; |
| 235 | } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) | ||
| 236 | && cred->cr_ops->cr_init != NULL | ||
| 237 | && !(flags & RPCAUTH_LOOKUP_NEW)) { | ||
| 238 | int res = cred->cr_ops->cr_init(auth, cred); | ||
| 239 | if (res < 0) { | ||
| 240 | put_rpccred(cred); | ||
| 241 | cred = ERR_PTR(res); | ||
| 242 | } | ||
| 235 | } | 243 | } |
| 236 | 244 | ||
| 237 | return (struct rpc_cred *) cred; | 245 | return (struct rpc_cred *) cred; |
| 238 | } | 246 | } |
| 239 | 247 | ||
| 240 | struct rpc_cred * | 248 | struct rpc_cred * |
| 241 | rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) | 249 | rpcauth_lookupcred(struct rpc_auth *auth, int flags) |
| 242 | { | 250 | { |
| 243 | struct auth_cred acred = { | 251 | struct auth_cred acred = { |
| 244 | .uid = current->fsuid, | 252 | .uid = current->fsuid, |
| @@ -250,7 +258,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) | |||
| 250 | dprintk("RPC: looking up %s cred\n", | 258 | dprintk("RPC: looking up %s cred\n", |
| 251 | auth->au_ops->au_name); | 259 | auth->au_ops->au_name); |
| 252 | get_group_info(acred.group_info); | 260 | get_group_info(acred.group_info); |
| 253 | ret = auth->au_ops->lookup_cred(auth, &acred, taskflags); | 261 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); |
| 254 | put_group_info(acred.group_info); | 262 | put_group_info(acred.group_info); |
| 255 | return ret; | 263 | return ret; |
| 256 | } | 264 | } |
| @@ -265,11 +273,14 @@ rpcauth_bindcred(struct rpc_task *task) | |||
| 265 | .group_info = current->group_info, | 273 | .group_info = current->group_info, |
| 266 | }; | 274 | }; |
| 267 | struct rpc_cred *ret; | 275 | struct rpc_cred *ret; |
| 276 | int flags = 0; | ||
| 268 | 277 | ||
| 269 | dprintk("RPC: %4d looking up %s cred\n", | 278 | dprintk("RPC: %4d looking up %s cred\n", |
| 270 | task->tk_pid, task->tk_auth->au_ops->au_name); | 279 | task->tk_pid, task->tk_auth->au_ops->au_name); |
| 271 | get_group_info(acred.group_info); | 280 | get_group_info(acred.group_info); |
| 272 | ret = auth->au_ops->lookup_cred(auth, &acred, task->tk_flags); | 281 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
| 282 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; | ||
| 283 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); | ||
| 273 | if (!IS_ERR(ret)) | 284 | if (!IS_ERR(ret)) |
| 274 | task->tk_msg.rpc_cred = ret; | 285 | task->tk_msg.rpc_cred = ret; |
| 275 | else | 286 | else |
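
The auth.c hunks replace the overloaded RPC task flags with dedicated RPCAUTH_LOOKUP_* lookup flags, and teach the credential cache to finish off entries still marked RPCAUTH_CRED_NEW via the new cr_init callback, unless the caller explicitly asked for raw new credentials. A self-contained sketch of that lookup behaviour with made-up names (not the sunrpc API):

    #include <stdio.h>

    #define LOOKUP_ROOTCREDS  0x01   /* stands in for RPCAUTH_LOOKUP_ROOTCREDS */
    #define LOOKUP_NEW        0x02   /* stands in for RPCAUTH_LOOKUP_NEW */
    #define CRED_NEW          0x01   /* stands in for RPCAUTH_CRED_NEW */

    struct cred {
        unsigned int flags;
        int uid;
    };

    static int cred_init(struct cred *c)
    {
        /* the expensive setup (the GSS upcall in the real code) happens here */
        c->flags &= ~CRED_NEW;
        return 0;
    }

    static struct cred *lookup_cred(struct cred *c, int flags)
    {
        /* a cache hit that is still CRED_NEW is completed lazily, unless the
         * caller wants uninitialised creds (the pipe downcall path) */
        if ((c->flags & CRED_NEW) && !(flags & LOOKUP_NEW)) {
            if (cred_init(c) < 0)
                return NULL;
        }
        return c;
    }

    int main(void)
    {
        struct cred c = { .flags = CRED_NEW, .uid = 1000 };

        lookup_cred(&c, LOOKUP_NEW);   /* downcall lookup: leave it uninitialised */
        printf("after LOOKUP_NEW lookup: flags %#x\n", c.flags);

        lookup_cred(&c, 0);            /* ordinary lookup: triggers cred_init() */
        printf("after normal lookup:     flags %#x\n", c.flags);
        return 0;
    }
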
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8d782282ec19..bb46efd92e57 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -158,6 +158,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) | |||
| 158 | old = gss_cred->gc_ctx; | 158 | old = gss_cred->gc_ctx; |
| 159 | gss_cred->gc_ctx = ctx; | 159 | gss_cred->gc_ctx = ctx; |
| 160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; |
| 161 | cred->cr_flags &= ~RPCAUTH_CRED_NEW; | ||
| 161 | write_unlock(&gss_ctx_lock); | 162 | write_unlock(&gss_ctx_lock); |
| 162 | if (old) | 163 | if (old) |
| 163 | gss_put_ctx(old); | 164 | gss_put_ctx(old); |
| @@ -580,7 +581,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
| 580 | } else { | 581 | } else { |
| 581 | struct auth_cred acred = { .uid = uid }; | 582 | struct auth_cred acred = { .uid = uid }; |
| 582 | spin_unlock(&gss_auth->lock); | 583 | spin_unlock(&gss_auth->lock); |
| 583 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, 0); | 584 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); |
| 584 | if (IS_ERR(cred)) { | 585 | if (IS_ERR(cred)) { |
| 585 | err = PTR_ERR(cred); | 586 | err = PTR_ERR(cred); |
| 586 | goto err_put_ctx; | 587 | goto err_put_ctx; |
| @@ -758,13 +759,13 @@ gss_destroy_cred(struct rpc_cred *rc) | |||
| 758 | * Lookup RPCSEC_GSS cred for the current process | 759 | * Lookup RPCSEC_GSS cred for the current process |
| 759 | */ | 760 | */ |
| 760 | static struct rpc_cred * | 761 | static struct rpc_cred * |
| 761 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 762 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
| 762 | { | 763 | { |
| 763 | return rpcauth_lookup_credcache(auth, acred, taskflags); | 764 | return rpcauth_lookup_credcache(auth, acred, flags); |
| 764 | } | 765 | } |
| 765 | 766 | ||
| 766 | static struct rpc_cred * | 767 | static struct rpc_cred * |
| 767 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 768 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
| 768 | { | 769 | { |
| 769 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 770 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
| 770 | struct gss_cred *cred = NULL; | 771 | struct gss_cred *cred = NULL; |
| @@ -785,13 +786,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | |||
| 785 | */ | 786 | */ |
| 786 | cred->gc_flags = 0; | 787 | cred->gc_flags = 0; |
| 787 | cred->gc_base.cr_ops = &gss_credops; | 788 | cred->gc_base.cr_ops = &gss_credops; |
| 789 | cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; | ||
| 788 | cred->gc_service = gss_auth->service; | 790 | cred->gc_service = gss_auth->service; |
| 789 | do { | ||
| 790 | err = gss_create_upcall(gss_auth, cred); | ||
| 791 | } while (err == -EAGAIN); | ||
| 792 | if (err < 0) | ||
| 793 | goto out_err; | ||
| 794 | |||
| 795 | return &cred->gc_base; | 791 | return &cred->gc_base; |
| 796 | 792 | ||
| 797 | out_err: | 793 | out_err: |
| @@ -801,13 +797,34 @@ out_err: | |||
| 801 | } | 797 | } |
| 802 | 798 | ||
| 803 | static int | 799 | static int |
| 804 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int taskflags) | 800 | gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred) |
| 801 | { | ||
| 802 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | ||
| 803 | struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base); | ||
| 804 | int err; | ||
| 805 | |||
| 806 | do { | ||
| 807 | err = gss_create_upcall(gss_auth, gss_cred); | ||
| 808 | } while (err == -EAGAIN); | ||
| 809 | return err; | ||
| 810 | } | ||
| 811 | |||
| 812 | static int | ||
| 813 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) | ||
| 805 | { | 814 | { |
| 806 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); | 815 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); |
| 807 | 816 | ||
| 817 | /* | ||
| 818 | * If the searchflags have set RPCAUTH_LOOKUP_NEW, then | ||
| 819 | * we don't really care if the credential has expired or not, | ||
| 820 | * since the caller should be prepared to reinitialise it. | ||
| 821 | */ | ||
| 822 | if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) | ||
| 823 | goto out; | ||
| 808 | /* Don't match with creds that have expired. */ | 824 | /* Don't match with creds that have expired. */ |
| 809 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) | 825 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) |
| 810 | return 0; | 826 | return 0; |
| 827 | out: | ||
| 811 | return (rc->cr_uid == acred->uid); | 828 | return (rc->cr_uid == acred->uid); |
| 812 | } | 829 | } |
| 813 | 830 | ||
| @@ -1241,6 +1258,7 @@ static struct rpc_authops authgss_ops = { | |||
| 1241 | static struct rpc_credops gss_credops = { | 1258 | static struct rpc_credops gss_credops = { |
| 1242 | .cr_name = "AUTH_GSS", | 1259 | .cr_name = "AUTH_GSS", |
| 1243 | .crdestroy = gss_destroy_cred, | 1260 | .crdestroy = gss_destroy_cred, |
| 1261 | .cr_init = gss_cred_init, | ||
| 1244 | .crmatch = gss_match, | 1262 | .crmatch = gss_match, |
| 1245 | .crmarshal = gss_marshal, | 1263 | .crmarshal = gss_marshal, |
| 1246 | .crrefresh = gss_refresh, | 1264 | .crrefresh = gss_refresh, |
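
With the upcall removed from gss_create_cred(), the -EAGAIN retry loop now lives in gss_cred_init() and only runs when a cached new credential is actually used. A tiny standalone sketch of that retry shape, with the upcall faked:

    #include <errno.h>
    #include <stdio.h>

    static int fake_upcall(int *tries_left)
    {
        if (--*tries_left > 0)
            return -EAGAIN;        /* e.g. the userspace daemon restarted the pipe */
        return 0;                  /* context established */
    }

    int main(void)
    {
        int tries_left = 3;
        int err;

        do {
            err = fake_upcall(&tries_left);
        } while (err == -EAGAIN);  /* keep retrying only the transient failure */

        printf("cr_init result: %d\n", err);
        return err ? 1 : 0;
    }
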
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 1b3ed4fd1987..df14b6bfbf10 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
| @@ -75,7 +75,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
| 75 | 75 | ||
| 76 | atomic_set(&cred->uc_count, 1); | 76 | atomic_set(&cred->uc_count, 1); |
| 77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; | 77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; |
| 78 | if (flags & RPC_TASK_ROOTCREDS) { | 78 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { |
| 79 | cred->uc_uid = 0; | 79 | cred->uc_uid = 0; |
| 80 | cred->uc_gid = 0; | 80 | cred->uc_gid = 0; |
| 81 | cred->uc_gids[0] = NOGROUP; | 81 | cred->uc_gids[0] = NOGROUP; |
| @@ -108,12 +108,12 @@ unx_destroy_cred(struct rpc_cred *cred) | |||
| 108 | * request root creds (e.g. for NFS swapping). | 108 | * request root creds (e.g. for NFS swapping). |
| 109 | */ | 109 | */ |
| 110 | static int | 110 | static int |
| 111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int taskflags) | 111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) |
| 112 | { | 112 | { |
| 113 | struct unx_cred *cred = (struct unx_cred *) rcred; | 113 | struct unx_cred *cred = (struct unx_cred *) rcred; |
| 114 | int i; | 114 | int i; |
| 115 | 115 | ||
| 116 | if (!(taskflags & RPC_TASK_ROOTCREDS)) { | 116 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { |
| 117 | int groups; | 117 | int groups; |
| 118 | 118 | ||
| 119 | if (cred->uc_uid != acred->uid | 119 | if (cred->uc_uid != acred->uid |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9764c80ab0b2..a5c0c7b6e151 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -38,44 +38,42 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly; | |||
| 38 | 38 | ||
| 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
| 40 | 40 | ||
| 41 | static void | 41 | static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, |
| 42 | __rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) | 42 | void (*destroy_msg)(struct rpc_pipe_msg *), int err) |
| 43 | { | 43 | { |
| 44 | struct rpc_pipe_msg *msg; | 44 | struct rpc_pipe_msg *msg; |
| 45 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
| 46 | 45 | ||
| 47 | destroy_msg = rpci->ops->destroy_msg; | 46 | if (list_empty(head)) |
| 48 | while (!list_empty(head)) { | 47 | return; |
| 48 | do { | ||
| 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); | 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); |
| 50 | list_del_init(&msg->list); | 50 | list_del(&msg->list); |
| 51 | msg->errno = err; | 51 | msg->errno = err; |
| 52 | destroy_msg(msg); | 52 | destroy_msg(msg); |
| 53 | } | 53 | } while (!list_empty(head)); |
| 54 | } | ||
| 55 | |||
| 56 | static void | ||
| 57 | __rpc_purge_upcall(struct inode *inode, int err) | ||
| 58 | { | ||
| 59 | struct rpc_inode *rpci = RPC_I(inode); | ||
| 60 | |||
| 61 | __rpc_purge_list(rpci, &rpci->pipe, err); | ||
| 62 | rpci->pipelen = 0; | ||
| 63 | wake_up(&rpci->waitq); | 54 | wake_up(&rpci->waitq); |
| 64 | } | 55 | } |
| 65 | 56 | ||
| 66 | static void | 57 | static void |
| 67 | rpc_timeout_upcall_queue(void *data) | 58 | rpc_timeout_upcall_queue(void *data) |
| 68 | { | 59 | { |
| 60 | LIST_HEAD(free_list); | ||
| 69 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 61 | struct rpc_inode *rpci = (struct rpc_inode *)data; |
| 70 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
| 63 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
| 71 | 64 | ||
| 72 | mutex_lock(&inode->i_mutex); | 65 | spin_lock(&inode->i_lock); |
| 73 | if (rpci->ops == NULL) | 66 | if (rpci->ops == NULL) { |
| 74 | goto out; | 67 | spin_unlock(&inode->i_lock); |
| 75 | if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) | 68 | return; |
| 76 | __rpc_purge_upcall(inode, -ETIMEDOUT); | 69 | } |
| 77 | out: | 70 | destroy_msg = rpci->ops->destroy_msg; |
| 78 | mutex_unlock(&inode->i_mutex); | 71 | if (rpci->nreaders == 0) { |
| 72 | list_splice_init(&rpci->pipe, &free_list); | ||
| 73 | rpci->pipelen = 0; | ||
| 74 | } | ||
| 75 | spin_unlock(&inode->i_lock); | ||
| 76 | rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); | ||
| 79 | } | 77 | } |
| 80 | 78 | ||
| 81 | int | 79 | int |
| @@ -84,7 +82,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
| 84 | struct rpc_inode *rpci = RPC_I(inode); | 82 | struct rpc_inode *rpci = RPC_I(inode); |
| 85 | int res = -EPIPE; | 83 | int res = -EPIPE; |
| 86 | 84 | ||
| 87 | mutex_lock(&inode->i_mutex); | 85 | spin_lock(&inode->i_lock); |
| 88 | if (rpci->ops == NULL) | 86 | if (rpci->ops == NULL) |
| 89 | goto out; | 87 | goto out; |
| 90 | if (rpci->nreaders) { | 88 | if (rpci->nreaders) { |
| @@ -100,7 +98,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
| 100 | res = 0; | 98 | res = 0; |
| 101 | } | 99 | } |
| 102 | out: | 100 | out: |
| 103 | mutex_unlock(&inode->i_mutex); | 101 | spin_unlock(&inode->i_lock); |
| 104 | wake_up(&rpci->waitq); | 102 | wake_up(&rpci->waitq); |
| 105 | return res; | 103 | return res; |
| 106 | } | 104 | } |
| @@ -115,21 +113,29 @@ static void | |||
| 115 | rpc_close_pipes(struct inode *inode) | 113 | rpc_close_pipes(struct inode *inode) |
| 116 | { | 114 | { |
| 117 | struct rpc_inode *rpci = RPC_I(inode); | 115 | struct rpc_inode *rpci = RPC_I(inode); |
| 116 | struct rpc_pipe_ops *ops; | ||
| 118 | 117 | ||
| 119 | mutex_lock(&inode->i_mutex); | 118 | mutex_lock(&inode->i_mutex); |
| 120 | if (rpci->ops != NULL) { | 119 | ops = rpci->ops; |
| 120 | if (ops != NULL) { | ||
| 121 | LIST_HEAD(free_list); | ||
| 122 | |||
| 123 | spin_lock(&inode->i_lock); | ||
| 121 | rpci->nreaders = 0; | 124 | rpci->nreaders = 0; |
| 122 | __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); | 125 | list_splice_init(&rpci->in_upcall, &free_list); |
| 123 | __rpc_purge_upcall(inode, -EPIPE); | 126 | list_splice_init(&rpci->pipe, &free_list); |
| 124 | rpci->nwriters = 0; | 127 | rpci->pipelen = 0; |
| 125 | if (rpci->ops->release_pipe) | ||
| 126 | rpci->ops->release_pipe(inode); | ||
| 127 | rpci->ops = NULL; | 128 | rpci->ops = NULL; |
| 129 | spin_unlock(&inode->i_lock); | ||
| 130 | rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); | ||
| 131 | rpci->nwriters = 0; | ||
| 132 | if (ops->release_pipe) | ||
| 133 | ops->release_pipe(inode); | ||
| 134 | cancel_delayed_work(&rpci->queue_timeout); | ||
| 135 | flush_scheduled_work(); | ||
| 128 | } | 136 | } |
| 129 | rpc_inode_setowner(inode, NULL); | 137 | rpc_inode_setowner(inode, NULL); |
| 130 | mutex_unlock(&inode->i_mutex); | 138 | mutex_unlock(&inode->i_mutex); |
| 131 | cancel_delayed_work(&rpci->queue_timeout); | ||
| 132 | flush_scheduled_work(); | ||
| 133 | } | 139 | } |
| 134 | 140 | ||
| 135 | static struct inode * | 141 | static struct inode * |
| @@ -177,16 +183,26 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
| 177 | goto out; | 183 | goto out; |
| 178 | msg = (struct rpc_pipe_msg *)filp->private_data; | 184 | msg = (struct rpc_pipe_msg *)filp->private_data; |
| 179 | if (msg != NULL) { | 185 | if (msg != NULL) { |
| 186 | spin_lock(&inode->i_lock); | ||
| 180 | msg->errno = -EAGAIN; | 187 | msg->errno = -EAGAIN; |
| 181 | list_del_init(&msg->list); | 188 | list_del(&msg->list); |
| 189 | spin_unlock(&inode->i_lock); | ||
| 182 | rpci->ops->destroy_msg(msg); | 190 | rpci->ops->destroy_msg(msg); |
| 183 | } | 191 | } |
| 184 | if (filp->f_mode & FMODE_WRITE) | 192 | if (filp->f_mode & FMODE_WRITE) |
| 185 | rpci->nwriters --; | 193 | rpci->nwriters --; |
| 186 | if (filp->f_mode & FMODE_READ) | 194 | if (filp->f_mode & FMODE_READ) { |
| 187 | rpci->nreaders --; | 195 | rpci->nreaders --; |
| 188 | if (!rpci->nreaders) | 196 | if (rpci->nreaders == 0) { |
| 189 | __rpc_purge_upcall(inode, -EAGAIN); | 197 | LIST_HEAD(free_list); |
| 198 | spin_lock(&inode->i_lock); | ||
| 199 | list_splice_init(&rpci->pipe, &free_list); | ||
| 200 | rpci->pipelen = 0; | ||
| 201 | spin_unlock(&inode->i_lock); | ||
| 202 | rpc_purge_list(rpci, &free_list, | ||
| 203 | rpci->ops->destroy_msg, -EAGAIN); | ||
| 204 | } | ||
| 205 | } | ||
| 190 | if (rpci->ops->release_pipe) | 206 | if (rpci->ops->release_pipe) |
| 191 | rpci->ops->release_pipe(inode); | 207 | rpci->ops->release_pipe(inode); |
| 192 | out: | 208 | out: |
| @@ -209,6 +225,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
| 209 | } | 225 | } |
| 210 | msg = filp->private_data; | 226 | msg = filp->private_data; |
| 211 | if (msg == NULL) { | 227 | if (msg == NULL) { |
| 228 | spin_lock(&inode->i_lock); | ||
| 212 | if (!list_empty(&rpci->pipe)) { | 229 | if (!list_empty(&rpci->pipe)) { |
| 213 | msg = list_entry(rpci->pipe.next, | 230 | msg = list_entry(rpci->pipe.next, |
| 214 | struct rpc_pipe_msg, | 231 | struct rpc_pipe_msg, |
| @@ -218,6 +235,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
| 218 | filp->private_data = msg; | 235 | filp->private_data = msg; |
| 219 | msg->copied = 0; | 236 | msg->copied = 0; |
| 220 | } | 237 | } |
| 238 | spin_unlock(&inode->i_lock); | ||
| 221 | if (msg == NULL) | 239 | if (msg == NULL) |
| 222 | goto out_unlock; | 240 | goto out_unlock; |
| 223 | } | 241 | } |
| @@ -225,7 +243,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
| 225 | res = rpci->ops->upcall(filp, msg, buf, len); | 243 | res = rpci->ops->upcall(filp, msg, buf, len); |
| 226 | if (res < 0 || msg->len == msg->copied) { | 244 | if (res < 0 || msg->len == msg->copied) { |
| 227 | filp->private_data = NULL; | 245 | filp->private_data = NULL; |
| 228 | list_del_init(&msg->list); | 246 | spin_lock(&inode->i_lock); |
| 247 | list_del(&msg->list); | ||
| 248 | spin_unlock(&inode->i_lock); | ||
| 229 | rpci->ops->destroy_msg(msg); | 249 | rpci->ops->destroy_msg(msg); |
| 230 | } | 250 | } |
| 231 | out_unlock: | 251 | out_unlock: |
| @@ -610,7 +630,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
| 610 | return ERR_PTR(error); | 630 | return ERR_PTR(error); |
| 611 | dir = nd->dentry->d_inode; | 631 | dir = nd->dentry->d_inode; |
| 612 | mutex_lock(&dir->i_mutex); | 632 | mutex_lock(&dir->i_mutex); |
| 613 | dentry = lookup_hash(nd); | 633 | dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); |
| 614 | if (IS_ERR(dentry)) | 634 | if (IS_ERR(dentry)) |
| 615 | goto out_err; | 635 | goto out_err; |
| 616 | if (dentry->d_inode) { | 636 | if (dentry->d_inode) { |
| @@ -672,7 +692,7 @@ rpc_rmdir(char *path) | |||
| 672 | return error; | 692 | return error; |
| 673 | dir = nd.dentry->d_inode; | 693 | dir = nd.dentry->d_inode; |
| 674 | mutex_lock(&dir->i_mutex); | 694 | mutex_lock(&dir->i_mutex); |
| 675 | dentry = lookup_hash(&nd); | 695 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
| 676 | if (IS_ERR(dentry)) { | 696 | if (IS_ERR(dentry)) { |
| 677 | error = PTR_ERR(dentry); | 697 | error = PTR_ERR(dentry); |
| 678 | goto out_release; | 698 | goto out_release; |
| @@ -733,7 +753,7 @@ rpc_unlink(char *path) | |||
| 733 | return error; | 753 | return error; |
| 734 | dir = nd.dentry->d_inode; | 754 | dir = nd.dentry->d_inode; |
| 735 | mutex_lock(&dir->i_mutex); | 755 | mutex_lock(&dir->i_mutex); |
| 736 | dentry = lookup_hash(&nd); | 756 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
| 737 | if (IS_ERR(dentry)) { | 757 | if (IS_ERR(dentry)) { |
| 738 | error = PTR_ERR(dentry); | 758 | error = PTR_ERR(dentry); |
| 739 | goto out_release; | 759 | goto out_release; |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 7415406aa1ae..802d4fe0f55c 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -908,10 +908,10 @@ void rpc_release_task(struct rpc_task *task) | |||
| 908 | 908 | ||
| 909 | /** | 909 | /** |
| 910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | 910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it |
| 911 | * @clnt - pointer to RPC client | 911 | * @clnt: pointer to RPC client |
| 912 | * @flags - RPC flags | 912 | * @flags: RPC flags |
| 913 | * @ops - RPC call ops | 913 | * @ops: RPC call ops |
| 914 | * @data - user call data | 914 | * @data: user call data |
| 915 | */ | 915 | */ |
| 916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | 916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, |
| 917 | const struct rpc_call_ops *ops, | 917 | const struct rpc_call_ops *ops, |
| @@ -930,6 +930,7 @@ EXPORT_SYMBOL(rpc_run_task); | |||
| 930 | /** | 930 | /** |
| 931 | * rpc_find_parent - find the parent of a child task. | 931 | * rpc_find_parent - find the parent of a child task. |
| 932 | * @child: child task | 932 | * @child: child task |
| 933 | * @parent: parent task | ||
| 933 | * | 934 | * |
| 934 | * Checks that the parent task is still sleeping on the | 935 | * Checks that the parent task is still sleeping on the |
| 935 | * queue 'childq'. If so returns a pointer to the parent. | 936 | * queue 'childq'. If so returns a pointer to the parent. |
