Diffstat (limited to 'net')
34 files changed, 230 insertions, 175 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d2932394..6da5daeebab7 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
| @@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
| 1392 | ax25_cb *ax25; | 1392 | ax25_cb *ax25; |
| 1393 | int err = 0; | 1393 | int err = 0; |
| 1394 | 1394 | ||
| 1395 | memset(fsa, 0, sizeof(fsa)); | 1395 | memset(fsa, 0, sizeof(*fsa)); |
| 1396 | lock_sock(sk); | 1396 | lock_sock(sk); |
| 1397 | ax25 = ax25_sk(sk); | 1397 | ax25 = ax25_sk(sk); |
| 1398 | 1398 | ||
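Note: in ax25_getname() the fsa argument is a pointer to the sockaddr returned to userspace, so sizeof(fsa) measures only the pointer and the old memset left most of struct full_sockaddr_ax25 (i.e. kernel stack contents) uncleared. A minimal sketch of the difference, using an illustrative structure rather than the real one:

    #include <string.h>

    struct full_sockaddr { char buf[72]; };     /* illustrative size, not the real layout */

    static void clear_addr(struct full_sockaddr *fsa)
    {
            memset(fsa, 0, sizeof(fsa));        /* wrong: clears only sizeof(void *) bytes */
            memset(fsa, 0, sizeof(*fsa));       /* right: clears the whole structure */
    }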
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d4d9926c2201..65106fb61b8f 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
| @@ -151,9 +151,9 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...); | |||
| 151 | } \ | 151 | } \ |
| 152 | while (0) | 152 | while (0) |
| 153 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ | 153 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ |
| 154 | static inline void bat_dbg(char type __attribute__((unused)), | 154 | static inline void bat_dbg(char type __always_unused, |
| 155 | struct bat_priv *bat_priv __attribute__((unused)), | 155 | struct bat_priv *bat_priv __always_unused, |
| 156 | char *fmt __attribute__((unused)), ...) | 156 | char *fmt __always_unused, ...) |
| 157 | { | 157 | { |
| 158 | } | 158 | } |
| 159 | #endif | 159 | #endif |
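Note: this hunk is purely stylistic, switching the stub's parameters to the kernel's shorthand annotation instead of spelling out the GCC attribute. Outside the tree the shorthand could be approximated as below (the real definition lives in linux/compiler*.h; this is an approximation, not the in-tree macro):

    #ifndef __always_unused
    #define __always_unused __attribute__((unused))
    #endif

    static inline void bat_dbg(char type __always_unused, ...)
    {
    }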
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b49fdf70a6d5..2284e8129cb2 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
| @@ -63,7 +63,7 @@ struct batman_packet { | |||
| 63 | uint8_t num_hna; | 63 | uint8_t num_hna; |
| 64 | uint8_t gw_flags; /* flags related to gateway class */ | 64 | uint8_t gw_flags; /* flags related to gateway class */ |
| 65 | uint8_t align; | 65 | uint8_t align; |
| 66 | } __attribute__((packed)); | 66 | } __packed; |
| 67 | 67 | ||
| 68 | #define BAT_PACKET_LEN sizeof(struct batman_packet) | 68 | #define BAT_PACKET_LEN sizeof(struct batman_packet) |
| 69 | 69 | ||
| @@ -76,7 +76,7 @@ struct icmp_packet { | |||
| 76 | uint8_t orig[6]; | 76 | uint8_t orig[6]; |
| 77 | uint16_t seqno; | 77 | uint16_t seqno; |
| 78 | uint8_t uid; | 78 | uint8_t uid; |
| 79 | } __attribute__((packed)); | 79 | } __packed; |
| 80 | 80 | ||
| 81 | #define BAT_RR_LEN 16 | 81 | #define BAT_RR_LEN 16 |
| 82 | 82 | ||
| @@ -93,14 +93,14 @@ struct icmp_packet_rr { | |||
| 93 | uint8_t uid; | 93 | uint8_t uid; |
| 94 | uint8_t rr_cur; | 94 | uint8_t rr_cur; |
| 95 | uint8_t rr[BAT_RR_LEN][ETH_ALEN]; | 95 | uint8_t rr[BAT_RR_LEN][ETH_ALEN]; |
| 96 | } __attribute__((packed)); | 96 | } __packed; |
| 97 | 97 | ||
| 98 | struct unicast_packet { | 98 | struct unicast_packet { |
| 99 | uint8_t packet_type; | 99 | uint8_t packet_type; |
| 100 | uint8_t version; /* batman version field */ | 100 | uint8_t version; /* batman version field */ |
| 101 | uint8_t dest[6]; | 101 | uint8_t dest[6]; |
| 102 | uint8_t ttl; | 102 | uint8_t ttl; |
| 103 | } __attribute__((packed)); | 103 | } __packed; |
| 104 | 104 | ||
| 105 | struct unicast_frag_packet { | 105 | struct unicast_frag_packet { |
| 106 | uint8_t packet_type; | 106 | uint8_t packet_type; |
| @@ -110,7 +110,7 @@ struct unicast_frag_packet { | |||
| 110 | uint8_t flags; | 110 | uint8_t flags; |
| 111 | uint8_t orig[6]; | 111 | uint8_t orig[6]; |
| 112 | uint16_t seqno; | 112 | uint16_t seqno; |
| 113 | } __attribute__((packed)); | 113 | } __packed; |
| 114 | 114 | ||
| 115 | struct bcast_packet { | 115 | struct bcast_packet { |
| 116 | uint8_t packet_type; | 116 | uint8_t packet_type; |
| @@ -118,7 +118,7 @@ struct bcast_packet { | |||
| 118 | uint8_t orig[6]; | 118 | uint8_t orig[6]; |
| 119 | uint8_t ttl; | 119 | uint8_t ttl; |
| 120 | uint32_t seqno; | 120 | uint32_t seqno; |
| 121 | } __attribute__((packed)); | 121 | } __packed; |
| 122 | 122 | ||
| 123 | struct vis_packet { | 123 | struct vis_packet { |
| 124 | uint8_t packet_type; | 124 | uint8_t packet_type; |
| @@ -131,6 +131,6 @@ struct vis_packet { | |||
| 131 | * neighbors */ | 131 | * neighbors */ |
| 132 | uint8_t target_orig[6]; /* who should receive this packet */ | 132 | uint8_t target_orig[6]; /* who should receive this packet */ |
| 133 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ | 133 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ |
| 134 | } __attribute__((packed)); | 134 | } __packed; |
| 135 | 135 | ||
| 136 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ | 136 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ |
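Note: as with __always_unused above, __packed is just the preferred spelling of __attribute__((packed)). The attribute itself still matters here because these structures describe batman-adv's on-wire packet formats, where the compiler must not insert padding between fields. A reduced sketch with an illustrative header:

    #include <stdint.h>

    #define __packed __attribute__((packed))    /* mirrors the kernel shorthand */

    struct example_hdr {
            uint8_t  packet_type;
            uint8_t  version;
            uint32_t seqno;     /* 2 bytes of padding would appear here without __packed */
    } __packed;                 /* sizeof == 6 when packed, typically 8 otherwise */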
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 97cb23dd3e69..bf3f6f5a12c4 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
| @@ -246,13 +246,13 @@ struct vis_info { | |||
| 246 | /* this packet might be part of the vis send queue. */ | 246 | /* this packet might be part of the vis send queue. */ |
| 247 | struct sk_buff *skb_packet; | 247 | struct sk_buff *skb_packet; |
| 248 | /* vis_info may follow here*/ | 248 | /* vis_info may follow here*/ |
| 249 | } __attribute__((packed)); | 249 | } __packed; |
| 250 | 250 | ||
| 251 | struct vis_info_entry { | 251 | struct vis_info_entry { |
| 252 | uint8_t src[ETH_ALEN]; | 252 | uint8_t src[ETH_ALEN]; |
| 253 | uint8_t dest[ETH_ALEN]; | 253 | uint8_t dest[ETH_ALEN]; |
| 254 | uint8_t quality; /* quality = 0 means HNA */ | 254 | uint8_t quality; /* quality = 0 means HNA */ |
| 255 | } __attribute__((packed)); | 255 | } __packed; |
| 256 | 256 | ||
| 257 | struct recvlist_node { | 257 | struct recvlist_node { |
| 258 | struct list_head list; | 258 | struct list_head list; |
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc2e28bed844..ee41fef04b21 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
| @@ -229,10 +229,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
| 229 | if (!bat_priv->primary_if) | 229 | if (!bat_priv->primary_if) |
| 230 | goto dropped; | 230 | goto dropped; |
| 231 | 231 | ||
| 232 | unicast_packet = (struct unicast_packet *) skb->data; | 232 | frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); |
| 233 | if (!frag_skb) | ||
| 234 | goto dropped; | ||
| 233 | 235 | ||
| 236 | unicast_packet = (struct unicast_packet *) skb->data; | ||
| 234 | memcpy(&tmp_uc, unicast_packet, uc_hdr_len); | 237 | memcpy(&tmp_uc, unicast_packet, uc_hdr_len); |
| 235 | frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); | ||
| 236 | skb_split(skb, frag_skb, data_len / 2); | 238 | skb_split(skb, frag_skb, data_len / 2); |
| 237 | 239 | ||
| 238 | if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || | 240 | if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || |
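Note: the reordering in frag_send_skb() makes sure the second skb actually exists before any of the original data is copied or split; previously a failed dev_alloc_skb() would be handed straight to skb_split(). The resulting shape of the code (names as in the hunk above; dropped: is the existing error label):

    frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
    if (!frag_skb)
            goto dropped;                       /* bail out before touching skb->data */

    unicast_packet = (struct unicast_packet *)skb->data;
    memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
    skb_split(skb, frag_skb, data_len / 2);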
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede141018a..c665de778b60 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
| @@ -191,6 +191,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | |||
| 191 | struct cflayer *servl = NULL; | 191 | struct cflayer *servl = NULL; |
| 192 | struct cfcnfg_phyinfo *phyinfo = NULL; | 192 | struct cfcnfg_phyinfo *phyinfo = NULL; |
| 193 | u8 phyid = 0; | 193 | u8 phyid = 0; |
| 194 | |||
| 194 | caif_assert(adap_layer != NULL); | 195 | caif_assert(adap_layer != NULL); |
| 195 | channel_id = adap_layer->id; | 196 | channel_id = adap_layer->id; |
| 196 | if (adap_layer->dn == NULL || channel_id == 0) { | 197 | if (adap_layer->dn == NULL || channel_id == 0) { |
| @@ -199,16 +200,16 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | |||
| 199 | goto end; | 200 | goto end; |
| 200 | } | 201 | } |
| 201 | servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); | 202 | servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); |
| 202 | if (servl == NULL) | ||
| 203 | goto end; | ||
| 204 | layer_set_up(servl, NULL); | ||
| 205 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
| 206 | if (servl == NULL) { | 203 | if (servl == NULL) { |
| 207 | pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", | 204 | pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", |
| 208 | channel_id); | 205 | channel_id); |
| 209 | ret = -EINVAL; | 206 | ret = -EINVAL; |
| 210 | goto end; | 207 | goto end; |
| 211 | } | 208 | } |
| 209 | layer_set_up(servl, NULL); | ||
| 210 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
| 211 | if (ret) | ||
| 212 | goto end; | ||
| 212 | caif_assert(channel_id == servl->id); | 213 | caif_assert(channel_id == servl->id); |
| 213 | if (adap_layer->dn != NULL) { | 214 | if (adap_layer->dn != NULL) { |
| 214 | phyid = cfsrvl_getphyid(adap_layer->dn); | 215 | phyid = cfsrvl_getphyid(adap_layer->dn); |
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9d5e8accfab1..092dc88a7c64 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
| @@ -1256,6 +1256,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1256 | struct sockaddr_can *addr = | 1256 | struct sockaddr_can *addr = |
| 1257 | (struct sockaddr_can *)msg->msg_name; | 1257 | (struct sockaddr_can *)msg->msg_name; |
| 1258 | 1258 | ||
| 1259 | if (msg->msg_namelen < sizeof(*addr)) | ||
| 1260 | return -EINVAL; | ||
| 1261 | |||
| 1259 | if (addr->can_family != AF_CAN) | 1262 | if (addr->can_family != AF_CAN) |
| 1260 | return -EINVAL; | 1263 | return -EINVAL; |
| 1261 | 1264 | ||
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610fdb7b..883e9d74fddf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
| @@ -649,6 +649,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 649 | struct sockaddr_can *addr = | 649 | struct sockaddr_can *addr = |
| 650 | (struct sockaddr_can *)msg->msg_name; | 650 | (struct sockaddr_can *)msg->msg_name; |
| 651 | 651 | ||
| 652 | if (msg->msg_namelen < sizeof(*addr)) | ||
| 653 | return -EINVAL; | ||
| 654 | |||
| 652 | if (addr->can_family != AF_CAN) | 655 | if (addr->can_family != AF_CAN) |
| 653 | return -EINVAL; | 656 | return -EINVAL; |
| 654 | 657 | ||
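Note: both CAN sendmsg() paths previously read fields out of msg->msg_name without confirming that the caller supplied a full struct sockaddr_can, so a short msg_namelen let the kernel read past the user-provided address. The guard is the usual sockaddr-length check, roughly:

    struct sockaddr_can *addr = (struct sockaddr_can *)msg->msg_name;

    if (msg->msg_namelen < sizeof(*addr))
            return -EINVAL;         /* refuse truncated addresses before touching them */
    if (addr->can_family != AF_CAN)
            return -EINVAL;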
diff --git a/net/core/dev.c b/net/core/dev.c
index 06d0e7b25385..7c6a46f80372 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
| @@ -2001,7 +2001,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol) | |||
| 2001 | 2001 | ||
| 2002 | static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) | 2002 | static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) |
| 2003 | { | 2003 | { |
| 2004 | if (!can_checksum_protocol(protocol, features)) { | 2004 | if (!can_checksum_protocol(features, protocol)) { |
| 2005 | features &= ~NETIF_F_ALL_CSUM; | 2005 | features &= ~NETIF_F_ALL_CSUM; |
| 2006 | features &= ~NETIF_F_SG; | 2006 | features &= ~NETIF_F_SG; |
| 2007 | } else if (illegal_highdma(skb->dev, skb)) { | 2007 | } else if (illegal_highdma(skb->dev, skb)) { |
| @@ -2023,13 +2023,13 @@ int netif_skb_features(struct sk_buff *skb) | |||
| 2023 | return harmonize_features(skb, protocol, features); | 2023 | return harmonize_features(skb, protocol, features); |
| 2024 | } | 2024 | } |
| 2025 | 2025 | ||
| 2026 | features &= skb->dev->vlan_features; | 2026 | features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); |
| 2027 | 2027 | ||
| 2028 | if (protocol != htons(ETH_P_8021Q)) { | 2028 | if (protocol != htons(ETH_P_8021Q)) { |
| 2029 | return harmonize_features(skb, protocol, features); | 2029 | return harmonize_features(skb, protocol, features); |
| 2030 | } else { | 2030 | } else { |
| 2031 | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | | 2031 | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | |
| 2032 | NETIF_F_GEN_CSUM; | 2032 | NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; |
| 2033 | return harmonize_features(skb, protocol, features); | 2033 | return harmonize_features(skb, protocol, features); |
| 2034 | } | 2034 | } |
| 2035 | } | 2035 | } |
| @@ -5523,34 +5523,6 @@ void netdev_run_todo(void) | |||
| 5523 | } | 5523 | } |
| 5524 | } | 5524 | } |
| 5525 | 5525 | ||
| 5526 | /** | ||
| 5527 | * dev_txq_stats_fold - fold tx_queues stats | ||
| 5528 | * @dev: device to get statistics from | ||
| 5529 | * @stats: struct rtnl_link_stats64 to hold results | ||
| 5530 | */ | ||
| 5531 | void dev_txq_stats_fold(const struct net_device *dev, | ||
| 5532 | struct rtnl_link_stats64 *stats) | ||
| 5533 | { | ||
| 5534 | u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0; | ||
| 5535 | unsigned int i; | ||
| 5536 | struct netdev_queue *txq; | ||
| 5537 | |||
| 5538 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
| 5539 | txq = netdev_get_tx_queue(dev, i); | ||
| 5540 | spin_lock_bh(&txq->_xmit_lock); | ||
| 5541 | tx_bytes += txq->tx_bytes; | ||
| 5542 | tx_packets += txq->tx_packets; | ||
| 5543 | tx_dropped += txq->tx_dropped; | ||
| 5544 | spin_unlock_bh(&txq->_xmit_lock); | ||
| 5545 | } | ||
| 5546 | if (tx_bytes || tx_packets || tx_dropped) { | ||
| 5547 | stats->tx_bytes = tx_bytes; | ||
| 5548 | stats->tx_packets = tx_packets; | ||
| 5549 | stats->tx_dropped = tx_dropped; | ||
| 5550 | } | ||
| 5551 | } | ||
| 5552 | EXPORT_SYMBOL(dev_txq_stats_fold); | ||
| 5553 | |||
| 5554 | /* Convert net_device_stats to rtnl_link_stats64. They have the same | 5526 | /* Convert net_device_stats to rtnl_link_stats64. They have the same |
| 5555 | * fields in the same order, with only the type differing. | 5527 | * fields in the same order, with only the type differing. |
| 5556 | */ | 5528 | */ |
| @@ -5594,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |||
| 5594 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); | 5566 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); |
| 5595 | } else { | 5567 | } else { |
| 5596 | netdev_stats_to_stats64(storage, &dev->stats); | 5568 | netdev_stats_to_stats64(storage, &dev->stats); |
| 5597 | dev_txq_stats_fold(dev, storage); | ||
| 5598 | } | 5569 | } |
| 5599 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); | 5570 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); |
| 5600 | return storage; | 5571 | return storage; |
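Note: the harmonize_features() fix is easy to miss. As the hunk context shows, can_checksum_protocol() takes the feature mask first and the protocol second, but the call passed them swapped; because __be16 and the integer feature mask convert implicitly, the compiler accepted it silently and checksum-offload decisions were made on garbage. A reduced illustration (prototype taken from the hunk header, surrounding code simplified):

    static bool can_checksum_protocol(unsigned long features, __be16 protocol);

    bool ok_right = can_checksum_protocol(features, protocol); /* intended order */
    bool ok_wrong = can_checksum_protocol(protocol, features); /* compiles without warning:
                                                                  the types convert silently */

The two netif_skb_features() hunks keep NETIF_F_HW_VLAN_TX in the feature mask for VLAN-tagged traffic so hardware tag insertion is not masked off along with vlan_features, and the dev_txq_stats_fold() removal pairs with the sch_teql changes further down, which stop writing the per-queue tx counters and report their own statistics via ndo_get_stats64() instead.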
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a5f7535aab5b..750db57f3bb3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
| @@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 1820 | if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) | 1820 | if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) |
| 1821 | return -EPERM; | 1821 | return -EPERM; |
| 1822 | 1822 | ||
| 1823 | if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 1823 | if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { |
| 1824 | struct sock *rtnl; | 1824 | struct sock *rtnl; |
| 1825 | rtnl_dumpit_func dumpit; | 1825 | rtnl_dumpit_func dumpit; |
| 1826 | 1826 | ||
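Note: this and the matching hunks in inet_diag, ctnetlink and genetlink below return to testing for any bit of NLM_F_DUMP, presumably because existing userspace does not reliably set the full combination. NLM_F_DUMP is a composite flag, so the two tests differ:

    /* from include/linux/netlink.h */
    #define NLM_F_ROOT   0x100
    #define NLM_F_MATCH  0x200
    #define NLM_F_DUMP   (NLM_F_ROOT | NLM_F_MATCH)

    int loose  = (nlh->nlmsg_flags & NLM_F_DUMP) != 0;           /* any dump bit set */
    int strict = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP;  /* both bits required */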
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21220fd..d31bb36ae0dc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
| @@ -380,6 +380,8 @@ static void skb_release_head_state(struct sk_buff *skb) | |||
| 380 | } | 380 | } |
| 381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
| 382 | nf_conntrack_put(skb->nfct); | 382 | nf_conntrack_put(skb->nfct); |
| 383 | #endif | ||
| 384 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
| 383 | nf_conntrack_put_reasm(skb->nfct_reasm); | 385 | nf_conntrack_put_reasm(skb->nfct_reasm); |
| 384 | #endif | 386 | #endif |
| 385 | #ifdef CONFIG_BRIDGE_NETFILTER | 387 | #ifdef CONFIG_BRIDGE_NETFILTER |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f9d7ac924f15..44d2b42fda56 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
| @@ -351,7 +351,7 @@ EXPORT_SYMBOL(ether_setup); | |||
| 351 | * @sizeof_priv: Size of additional driver-private structure to be allocated | 351 | * @sizeof_priv: Size of additional driver-private structure to be allocated |
| 352 | * for this Ethernet device | 352 | * for this Ethernet device |
| 353 | * @txqs: The number of TX queues this device has. | 353 | * @txqs: The number of TX queues this device has. |
| 354 | * @txqs: The number of RX queues this device has. | 354 | * @rxqs: The number of RX queues this device has. |
| 355 | * | 355 | * |
| 356 | * Fill in the fields of the device structure with Ethernet-generic | 356 | * Fill in the fields of the device structure with Ethernet-generic |
| 357 | * values. Basically does everything except registering the device. | 357 | * values. Basically does everything except registering the device. |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2746c1fa6417..2ada17129fce 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
| @@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 858 | nlmsg_len(nlh) < hdrlen) | 858 | nlmsg_len(nlh) < hdrlen) |
| 859 | return -EINVAL; | 859 | return -EINVAL; |
| 860 | 860 | ||
| 861 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 861 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
| 862 | if (nlmsg_attrlen(nlh, hdrlen)) { | 862 | if (nlmsg_attrlen(nlh, hdrlen)) { |
| 863 | struct nlattr *attr; | 863 | struct nlattr *attr; |
| 864 | 864 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c97c2fc..24a1cf110d80 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
| @@ -420,9 +420,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
| 420 | dev->type == ARPHRD_TUNNEL6 || | 420 | dev->type == ARPHRD_TUNNEL6 || |
| 421 | dev->type == ARPHRD_SIT || | 421 | dev->type == ARPHRD_SIT || |
| 422 | dev->type == ARPHRD_NONE) { | 422 | dev->type == ARPHRD_NONE) { |
| 423 | printk(KERN_INFO | ||
| 424 | "%s: Disabled Privacy Extensions\n", | ||
| 425 | dev->name); | ||
| 426 | ndev->cnf.use_tempaddr = -1; | 423 | ndev->cnf.use_tempaddr = -1; |
| 427 | } else { | 424 | } else { |
| 428 | in6_dev_hold(ndev); | 425 | in6_dev_hold(ndev); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf132b2e..5f8d242be3f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
| @@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb) | |||
| 401 | goto drop; | 401 | goto drop; |
| 402 | } | 402 | } |
| 403 | 403 | ||
| 404 | if (skb->pkt_type != PACKET_HOST) | ||
| 405 | goto drop; | ||
| 406 | |||
| 404 | skb_forward_csum(skb); | 407 | skb_forward_csum(skb); |
| 405 | 408 | ||
| 406 | /* | 409 | /* |
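Note: the new test in ip6_forward() declines to forward anything the interface did not receive as a unicast frame addressed to its own MAC; skb->pkt_type is set on receive (for Ethernet, by eth_type_trans()) from the destination link-layer address:

    /* only frames classified as PACKET_HOST were addressed to this interface;
     * broadcast, multicast and promiscuously captured frames are dropped here */
    if (skb->pkt_type != PACKET_HOST)
            goto drop;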
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb53bab9..97c5b21b9674 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
| @@ -19,13 +19,15 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/netfilter_ipv6.h> | 20 | #include <linux/netfilter_ipv6.h> |
| 21 | #include <linux/netfilter_bridge.h> | 21 | #include <linux/netfilter_bridge.h> |
| 22 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 22 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
| 23 | #include <net/netfilter/nf_conntrack_helper.h> | 24 | #include <net/netfilter/nf_conntrack_helper.h> |
| 24 | #include <net/netfilter/nf_conntrack_l4proto.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
| 25 | #include <net/netfilter/nf_conntrack_l3proto.h> | 26 | #include <net/netfilter/nf_conntrack_l3proto.h> |
| 26 | #include <net/netfilter/nf_conntrack_core.h> | 27 | #include <net/netfilter/nf_conntrack_core.h> |
| 27 | #include <net/netfilter/nf_conntrack_zones.h> | ||
| 28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> | 28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
| 29 | #endif | ||
| 30 | #include <net/netfilter/nf_conntrack_zones.h> | ||
| 29 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | 31 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> |
| 30 | 32 | ||
| 31 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | 33 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, |
| @@ -33,8 +35,10 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | |||
| 33 | { | 35 | { |
| 34 | u16 zone = NF_CT_DEFAULT_ZONE; | 36 | u16 zone = NF_CT_DEFAULT_ZONE; |
| 35 | 37 | ||
| 38 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 36 | if (skb->nfct) | 39 | if (skb->nfct) |
| 37 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); | 40 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); |
| 41 | #endif | ||
| 38 | 42 | ||
| 39 | #ifdef CONFIG_BRIDGE_NETFILTER | 43 | #ifdef CONFIG_BRIDGE_NETFILTER |
| 40 | if (skb->nf_bridge && | 44 | if (skb->nf_bridge && |
| @@ -56,9 +60,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
| 56 | { | 60 | { |
| 57 | struct sk_buff *reasm; | 61 | struct sk_buff *reasm; |
| 58 | 62 | ||
| 63 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 59 | /* Previously seen (loopback)? */ | 64 | /* Previously seen (loopback)? */ |
| 60 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) | 65 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) |
| 61 | return NF_ACCEPT; | 66 | return NF_ACCEPT; |
| 67 | #endif | ||
| 62 | 68 | ||
| 63 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); | 69 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); |
| 64 | /* queued */ | 70 | /* queued */ |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f138b195d657..227ca82eef72 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
| @@ -185,8 +185,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
| 185 | struct ieee80211_mgmt *mgmt, | 185 | struct ieee80211_mgmt *mgmt, |
| 186 | size_t len) | 186 | size_t len) |
| 187 | { | 187 | { |
| 188 | struct ieee80211_hw *hw = &local->hw; | ||
| 189 | struct ieee80211_conf *conf = &hw->conf; | ||
| 190 | struct tid_ampdu_rx *tid_agg_rx; | 188 | struct tid_ampdu_rx *tid_agg_rx; |
| 191 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; | 189 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; |
| 192 | u8 dialog_token; | 190 | u8 dialog_token; |
| @@ -231,13 +229,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
| 231 | goto end_no_lock; | 229 | goto end_no_lock; |
| 232 | } | 230 | } |
| 233 | /* determine default buffer size */ | 231 | /* determine default buffer size */ |
| 234 | if (buf_size == 0) { | 232 | if (buf_size == 0) |
| 235 | struct ieee80211_supported_band *sband; | 233 | buf_size = IEEE80211_MAX_AMPDU_BUF; |
| 236 | |||
| 237 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
| 238 | buf_size = IEEE80211_MIN_AMPDU_BUF; | ||
| 239 | buf_size = buf_size << sband->ht_cap.ampdu_factor; | ||
| 240 | } | ||
| 241 | 234 | ||
| 242 | 235 | ||
| 243 | /* examine state machine */ | 236 | /* examine state machine */ |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 485d36bc9a46..a46ff06d7cb8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
| @@ -39,6 +39,8 @@ module_param(ieee80211_disable_40mhz_24ghz, bool, 0644); | |||
| 39 | MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz, | 39 | MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz, |
| 40 | "Disable 40MHz support in the 2.4GHz band"); | 40 | "Disable 40MHz support in the 2.4GHz band"); |
| 41 | 41 | ||
| 42 | static struct lock_class_key ieee80211_rx_skb_queue_class; | ||
| 43 | |||
| 42 | void ieee80211_configure_filter(struct ieee80211_local *local) | 44 | void ieee80211_configure_filter(struct ieee80211_local *local) |
| 43 | { | 45 | { |
| 44 | u64 mc; | 46 | u64 mc; |
| @@ -569,7 +571,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
| 569 | spin_lock_init(&local->filter_lock); | 571 | spin_lock_init(&local->filter_lock); |
| 570 | spin_lock_init(&local->queue_stop_reason_lock); | 572 | spin_lock_init(&local->queue_stop_reason_lock); |
| 571 | 573 | ||
| 572 | skb_queue_head_init(&local->rx_skb_queue); | 574 | /* |
| 575 | * The rx_skb_queue is only accessed from tasklets, | ||
| 576 | * but other SKB queues are used from within IRQ | ||
| 577 | * context. Therefore, this one needs a different | ||
| 578 | * locking class so our direct, non-irq-safe use of | ||
| 579 | * the queue's lock doesn't throw lockdep warnings. | ||
| 580 | */ | ||
| 581 | skb_queue_head_init_class(&local->rx_skb_queue, | ||
| 582 | &ieee80211_rx_skb_queue_class); | ||
| 573 | 583 | ||
| 574 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); | 584 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); |
| 575 | 585 | ||
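Note: as the added comment explains, the other skb queues here have their locks taken from IRQ context, and lockdep cannot tell this tasklet-only queue apart from them, so taking its lock without disabling interrupts would produce false warnings. Giving the queue its own lock class is the standard escape hatch; the general pattern (names illustrative) is:

    /* one static key per queue that needs its own lockdep class */
    static struct lock_class_key my_queue_class;

    skb_queue_head_init_class(&queue, &my_queue_class);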
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5cb8d3027b18..93297aaceb2b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
| @@ -924,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
| 924 | u16 zone; | 924 | u16 zone; |
| 925 | int err; | 925 | int err; |
| 926 | 926 | ||
| 927 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) | 927 | if (nlh->nlmsg_flags & NLM_F_DUMP) |
| 928 | return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, | 928 | return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, |
| 929 | ctnetlink_done); | 929 | ctnetlink_done); |
| 930 | 930 | ||
| @@ -972,7 +972,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
| 972 | free: | 972 | free: |
| 973 | kfree_skb(skb2); | 973 | kfree_skb(skb2); |
| 974 | out: | 974 | out: |
| 975 | return err; | 975 | /* this avoids a loop in nfnetlink. */ |
| 976 | return err == -EAGAIN ? -ENOBUFS : err; | ||
| 976 | } | 977 | } |
| 977 | 978 | ||
| 978 | #ifdef CONFIG_NF_NAT_NEEDED | 979 | #ifdef CONFIG_NF_NAT_NEEDED |
| @@ -1786,7 +1787,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
| 1786 | u16 zone; | 1787 | u16 zone; |
| 1787 | int err; | 1788 | int err; |
| 1788 | 1789 | ||
| 1789 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 1790 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
| 1790 | return netlink_dump_start(ctnl, skb, nlh, | 1791 | return netlink_dump_start(ctnl, skb, nlh, |
| 1791 | ctnetlink_exp_dump_table, | 1792 | ctnetlink_exp_dump_table, |
| 1792 | ctnetlink_exp_done); | 1793 | ctnetlink_exp_done); |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f83cb370292b..1781d99145e2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
| @@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 519 | security_netlink_recv(skb, CAP_NET_ADMIN)) | 519 | security_netlink_recv(skb, CAP_NET_ADMIN)) |
| 520 | return -EPERM; | 520 | return -EPERM; |
| 521 | 521 | ||
| 522 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 522 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
| 523 | if (ops->dumpit == NULL) | 523 | if (ops->dumpit == NULL) |
| 524 | return -EOPNOTSUPP; | 524 | return -EOPNOTSUPP; |
| 525 | 525 | ||
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb2085ce4..74c064c0dfdd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
| @@ -808,7 +808,7 @@ static int __init af_rxrpc_init(void) | |||
| 808 | goto error_call_jar; | 808 | goto error_call_jar; |
| 809 | } | 809 | } |
| 810 | 810 | ||
| 811 | rxrpc_workqueue = create_workqueue("krxrpcd"); | 811 | rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); |
| 812 | if (!rxrpc_workqueue) { | 812 | if (!rxrpc_workqueue) { |
| 813 | printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); | 813 | printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); |
| 814 | goto error_work_queue; | 814 | goto error_work_queue; |
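Note: alloc_workqueue() takes the queue name, WQ_* flags and a max_active limit; passing 0 and 1 here requests a plain workqueue that runs at most one krxrpcd item at a time per CPU, without the memory-reclaim guarantees that create_workqueue() carries in this kernel (that krxrpcd does not need them is an inference from the chosen flags, not stated in the diff):

    /* name, WQ_* flags, max_active */
    rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
    if (!rxrpc_workqueue) {
            printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
            goto error_work_queue;
    }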
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d1f6eb..84ce48eadff4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
| @@ -59,6 +59,10 @@ struct teql_master | |||
| 59 | struct net_device *dev; | 59 | struct net_device *dev; |
| 60 | struct Qdisc *slaves; | 60 | struct Qdisc *slaves; |
| 61 | struct list_head master_list; | 61 | struct list_head master_list; |
| 62 | unsigned long tx_bytes; | ||
| 63 | unsigned long tx_packets; | ||
| 64 | unsigned long tx_errors; | ||
| 65 | unsigned long tx_dropped; | ||
| 62 | }; | 66 | }; |
| 63 | 67 | ||
| 64 | struct teql_sched_data | 68 | struct teql_sched_data |
| @@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
| 274 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 278 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
| 275 | { | 279 | { |
| 276 | struct teql_master *master = netdev_priv(dev); | 280 | struct teql_master *master = netdev_priv(dev); |
| 277 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
| 278 | struct Qdisc *start, *q; | 281 | struct Qdisc *start, *q; |
| 279 | int busy; | 282 | int busy; |
| 280 | int nores; | 283 | int nores; |
| @@ -314,8 +317,8 @@ restart: | |||
| 314 | __netif_tx_unlock(slave_txq); | 317 | __netif_tx_unlock(slave_txq); |
| 315 | master->slaves = NEXT_SLAVE(q); | 318 | master->slaves = NEXT_SLAVE(q); |
| 316 | netif_wake_queue(dev); | 319 | netif_wake_queue(dev); |
| 317 | txq->tx_packets++; | 320 | master->tx_packets++; |
| 318 | txq->tx_bytes += length; | 321 | master->tx_bytes += length; |
| 319 | return NETDEV_TX_OK; | 322 | return NETDEV_TX_OK; |
| 320 | } | 323 | } |
| 321 | __netif_tx_unlock(slave_txq); | 324 | __netif_tx_unlock(slave_txq); |
| @@ -342,10 +345,10 @@ restart: | |||
| 342 | netif_stop_queue(dev); | 345 | netif_stop_queue(dev); |
| 343 | return NETDEV_TX_BUSY; | 346 | return NETDEV_TX_BUSY; |
| 344 | } | 347 | } |
| 345 | dev->stats.tx_errors++; | 348 | master->tx_errors++; |
| 346 | 349 | ||
| 347 | drop: | 350 | drop: |
| 348 | txq->tx_dropped++; | 351 | master->tx_dropped++; |
| 349 | dev_kfree_skb(skb); | 352 | dev_kfree_skb(skb); |
| 350 | return NETDEV_TX_OK; | 353 | return NETDEV_TX_OK; |
| 351 | } | 354 | } |
| @@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev) | |||
| 398 | return 0; | 401 | return 0; |
| 399 | } | 402 | } |
| 400 | 403 | ||
| 404 | static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, | ||
| 405 | struct rtnl_link_stats64 *stats) | ||
| 406 | { | ||
| 407 | struct teql_master *m = netdev_priv(dev); | ||
| 408 | |||
| 409 | stats->tx_packets = m->tx_packets; | ||
| 410 | stats->tx_bytes = m->tx_bytes; | ||
| 411 | stats->tx_errors = m->tx_errors; | ||
| 412 | stats->tx_dropped = m->tx_dropped; | ||
| 413 | return stats; | ||
| 414 | } | ||
| 415 | |||
| 401 | static int teql_master_mtu(struct net_device *dev, int new_mtu) | 416 | static int teql_master_mtu(struct net_device *dev, int new_mtu) |
| 402 | { | 417 | { |
| 403 | struct teql_master *m = netdev_priv(dev); | 418 | struct teql_master *m = netdev_priv(dev); |
| @@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = { | |||
| 422 | .ndo_open = teql_master_open, | 437 | .ndo_open = teql_master_open, |
| 423 | .ndo_stop = teql_master_close, | 438 | .ndo_stop = teql_master_close, |
| 424 | .ndo_start_xmit = teql_master_xmit, | 439 | .ndo_start_xmit = teql_master_xmit, |
| 440 | .ndo_get_stats64 = teql_master_stats64, | ||
| 425 | .ndo_change_mtu = teql_master_mtu, | 441 | .ndo_change_mtu = teql_master_mtu, |
| 426 | }; | 442 | }; |
| 427 | 443 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd25f50..8e02550ff3e8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
| @@ -3428,7 +3428,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
| 3428 | retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); | 3428 | retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); |
| 3429 | break; | 3429 | break; |
| 3430 | 3430 | ||
| 3431 | case SCTP_DELAYED_ACK: | 3431 | case SCTP_DELAYED_SACK: |
| 3432 | retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); | 3432 | retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); |
| 3433 | break; | 3433 | break; |
| 3434 | case SCTP_PARTIAL_DELIVERY_POINT: | 3434 | case SCTP_PARTIAL_DELIVERY_POINT: |
| @@ -5333,7 +5333,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
| 5333 | retval = sctp_getsockopt_peer_addr_params(sk, len, optval, | 5333 | retval = sctp_getsockopt_peer_addr_params(sk, len, optval, |
| 5334 | optlen); | 5334 | optlen); |
| 5335 | break; | 5335 | break; |
| 5336 | case SCTP_DELAYED_ACK: | 5336 | case SCTP_DELAYED_SACK: |
| 5337 | retval = sctp_getsockopt_delayed_ack(sk, len, optval, | 5337 | retval = sctp_getsockopt_delayed_ack(sk, len, optval, |
| 5338 | optlen); | 5338 | optlen); |
| 5339 | break; | 5339 | break; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993ea057..9576f35ab701 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
| @@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) | |||
| 137 | ms_usage = 13; | 137 | ms_usage = 13; |
| 138 | break; | 138 | break; |
| 139 | default: | 139 | default: |
| 140 | return EINVAL;; | 140 | return -EINVAL; |
| 141 | } | 141 | } |
| 142 | salt[0] = (ms_usage >> 0) & 0xff; | 142 | salt[0] = (ms_usage >> 0) & 0xff; |
| 143 | salt[1] = (ms_usage >> 8) & 0xff; | 143 | salt[1] = (ms_usage >> 8) & 0xff; |
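Note: the stray second semicolon was harmless; the real fix is the sign. Kernel code reports failure as a negative errno, so a positive EINVAL returned from arcfour_hmac_md5_usage_to_salt() could slip past callers that only test for ret < 0. A typical caller shape (illustrative, not the actual call site):

    ret = arcfour_hmac_md5_usage_to_salt(usage, salt);
    if (ret < 0)
            return ret;     /* negative errno is the error convention */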
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6fc7c12..bcdae78fdfc6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
| @@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b) | |||
| 67 | 67 | ||
| 68 | #define RSI_HASHBITS 6 | 68 | #define RSI_HASHBITS 6 |
| 69 | #define RSI_HASHMAX (1<<RSI_HASHBITS) | 69 | #define RSI_HASHMAX (1<<RSI_HASHBITS) |
| 70 | #define RSI_HASHMASK (RSI_HASHMAX-1) | ||
| 71 | 70 | ||
| 72 | struct rsi { | 71 | struct rsi { |
| 73 | struct cache_head h; | 72 | struct cache_head h; |
| @@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old) | |||
| 319 | 318 | ||
| 320 | #define RSC_HASHBITS 10 | 319 | #define RSC_HASHBITS 10 |
| 321 | #define RSC_HASHMAX (1<<RSC_HASHBITS) | 320 | #define RSC_HASHMAX (1<<RSC_HASHBITS) |
| 322 | #define RSC_HASHMASK (RSC_HASHMAX-1) | ||
| 323 | 321 | ||
| 324 | #define GSS_SEQ_WIN 128 | 322 | #define GSS_SEQ_WIN 128 |
| 325 | 323 | ||
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e7580e27..72ad836e4fe0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
| @@ -37,7 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #define RPCDBG_FACILITY RPCDBG_CACHE | 38 | #define RPCDBG_FACILITY RPCDBG_CACHE |
| 39 | 39 | ||
| 40 | static void cache_defer_req(struct cache_req *req, struct cache_head *item); | 40 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item); |
| 41 | static void cache_revisit_request(struct cache_head *item); | 41 | static void cache_revisit_request(struct cache_head *item); |
| 42 | 42 | ||
| 43 | static void cache_init(struct cache_head *h) | 43 | static void cache_init(struct cache_head *h) |
| @@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry) | |||
| 128 | { | 128 | { |
| 129 | head->expiry_time = expiry; | 129 | head->expiry_time = expiry; |
| 130 | head->last_refresh = seconds_since_boot(); | 130 | head->last_refresh = seconds_since_boot(); |
| 131 | smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ | ||
| 131 | set_bit(CACHE_VALID, &head->flags); | 132 | set_bit(CACHE_VALID, &head->flags); |
| 132 | } | 133 | } |
| 133 | 134 | ||
| @@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head | |||
| 208 | /* entry is valid */ | 209 | /* entry is valid */ |
| 209 | if (test_bit(CACHE_NEGATIVE, &h->flags)) | 210 | if (test_bit(CACHE_NEGATIVE, &h->flags)) |
| 210 | return -ENOENT; | 211 | return -ENOENT; |
| 211 | else | 212 | else { |
| 213 | /* | ||
| 214 | * In combination with write barrier in | ||
| 215 | * sunrpc_cache_update, ensures that anyone | ||
| 216 | * using the cache entry after this sees the | ||
| 217 | * updated contents: | ||
| 218 | */ | ||
| 219 | smp_rmb(); | ||
| 212 | return 0; | 220 | return 0; |
| 221 | } | ||
| 213 | } | 222 | } |
| 214 | } | 223 | } |
| 215 | 224 | ||
| 225 | static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) | ||
| 226 | { | ||
| 227 | int rv; | ||
| 228 | |||
| 229 | write_lock(&detail->hash_lock); | ||
| 230 | rv = cache_is_valid(detail, h); | ||
| 231 | if (rv != -EAGAIN) { | ||
| 232 | write_unlock(&detail->hash_lock); | ||
| 233 | return rv; | ||
| 234 | } | ||
| 235 | set_bit(CACHE_NEGATIVE, &h->flags); | ||
| 236 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); | ||
| 237 | write_unlock(&detail->hash_lock); | ||
| 238 | cache_fresh_unlocked(h, detail); | ||
| 239 | return -ENOENT; | ||
| 240 | } | ||
| 241 | |||
| 216 | /* | 242 | /* |
| 217 | * This is the generic cache management routine for all | 243 | * This is the generic cache management routine for all |
| 218 | * the authentication caches. | 244 | * the authentication caches. |
| @@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail, | |||
| 251 | case -EINVAL: | 277 | case -EINVAL: |
| 252 | clear_bit(CACHE_PENDING, &h->flags); | 278 | clear_bit(CACHE_PENDING, &h->flags); |
| 253 | cache_revisit_request(h); | 279 | cache_revisit_request(h); |
| 254 | if (rv == -EAGAIN) { | 280 | rv = try_to_negate_entry(detail, h); |
| 255 | set_bit(CACHE_NEGATIVE, &h->flags); | ||
| 256 | cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); | ||
| 257 | cache_fresh_unlocked(h, detail); | ||
| 258 | rv = -ENOENT; | ||
| 259 | } | ||
| 260 | break; | 281 | break; |
| 261 | |||
| 262 | case -EAGAIN: | 282 | case -EAGAIN: |
| 263 | clear_bit(CACHE_PENDING, &h->flags); | 283 | clear_bit(CACHE_PENDING, &h->flags); |
| 264 | cache_revisit_request(h); | 284 | cache_revisit_request(h); |
| @@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail, | |||
| 268 | } | 288 | } |
| 269 | 289 | ||
| 270 | if (rv == -EAGAIN) { | 290 | if (rv == -EAGAIN) { |
| 271 | cache_defer_req(rqstp, h); | 291 | if (!cache_defer_req(rqstp, h)) { |
| 272 | if (!test_bit(CACHE_PENDING, &h->flags)) { | 292 | /* |
| 273 | /* Request is not deferred */ | 293 | * Request was not deferred; handle it as best |
| 294 | * we can ourselves: | ||
| 295 | */ | ||
| 274 | rv = cache_is_valid(detail, h); | 296 | rv = cache_is_valid(detail, h); |
| 275 | if (rv == -EAGAIN) | 297 | if (rv == -EAGAIN) |
| 276 | rv = -ETIMEDOUT; | 298 | rv = -ETIMEDOUT; |
| @@ -618,18 +640,19 @@ static void cache_limit_defers(void) | |||
| 618 | discard->revisit(discard, 1); | 640 | discard->revisit(discard, 1); |
| 619 | } | 641 | } |
| 620 | 642 | ||
| 621 | static void cache_defer_req(struct cache_req *req, struct cache_head *item) | 643 | /* Return true if and only if a deferred request is queued. */ |
| 644 | static bool cache_defer_req(struct cache_req *req, struct cache_head *item) | ||
| 622 | { | 645 | { |
| 623 | struct cache_deferred_req *dreq; | 646 | struct cache_deferred_req *dreq; |
| 624 | 647 | ||
| 625 | if (req->thread_wait) { | 648 | if (req->thread_wait) { |
| 626 | cache_wait_req(req, item); | 649 | cache_wait_req(req, item); |
| 627 | if (!test_bit(CACHE_PENDING, &item->flags)) | 650 | if (!test_bit(CACHE_PENDING, &item->flags)) |
| 628 | return; | 651 | return false; |
| 629 | } | 652 | } |
| 630 | dreq = req->defer(req); | 653 | dreq = req->defer(req); |
| 631 | if (dreq == NULL) | 654 | if (dreq == NULL) |
| 632 | return; | 655 | return false; |
| 633 | setup_deferral(dreq, item, 1); | 656 | setup_deferral(dreq, item, 1); |
| 634 | if (!test_bit(CACHE_PENDING, &item->flags)) | 657 | if (!test_bit(CACHE_PENDING, &item->flags)) |
| 635 | /* Bit could have been cleared before we managed to | 658 | /* Bit could have been cleared before we managed to |
| @@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
| 638 | cache_revisit_request(item); | 661 | cache_revisit_request(item); |
| 639 | 662 | ||
| 640 | cache_limit_defers(); | 663 | cache_limit_defers(); |
| 664 | return true; | ||
| 641 | } | 665 | } |
| 642 | 666 | ||
| 643 | static void cache_revisit_request(struct cache_head *item) | 667 | static void cache_revisit_request(struct cache_head *item) |
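Note: the barrier pair added to cache.c follows the usual publish/consume pattern: the writer (cache_fresh_locked(), called when sunrpc_cache_update() publishes new contents) fills in the entry, issues smp_wmb(), and only then sets CACHE_VALID; the reader (cache_is_valid()) tests CACHE_VALID and issues smp_rmb() before trusting the contents, so it cannot observe the valid bit without also observing the data it guards. Schematically:

    /* writer side */
    head->expiry_time = expiry;         /* ... populate the entry ... */
    smp_wmb();                          /* publish contents before the flag */
    set_bit(CACHE_VALID, &head->flags);

    /* reader side */
    if (test_bit(CACHE_VALID, &head->flags)) {
            smp_rmb();                  /* see everything written before the flag */
            /* ... safe to use the entry ... */
    }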
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 0e659c665a8d..08e05a8ce025 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
| @@ -1001,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
| 1001 | rqstp->rq_splice_ok = 1; | 1001 | rqstp->rq_splice_ok = 1; |
| 1002 | /* Will be turned off only when NFSv4 Sessions are used */ | 1002 | /* Will be turned off only when NFSv4 Sessions are used */ |
| 1003 | rqstp->rq_usedeferral = 1; | 1003 | rqstp->rq_usedeferral = 1; |
| 1004 | rqstp->rq_dropme = false; | ||
| 1004 | 1005 | ||
| 1005 | /* Setup reply header */ | 1006 | /* Setup reply header */ |
| 1006 | rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); | 1007 | rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); |
| @@ -1102,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) | |||
| 1102 | *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); | 1103 | *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); |
| 1103 | 1104 | ||
| 1104 | /* Encode reply */ | 1105 | /* Encode reply */ |
| 1105 | if (*statp == rpc_drop_reply) { | 1106 | if (rqstp->rq_dropme) { |
| 1106 | if (procp->pc_release) | 1107 | if (procp->pc_release) |
| 1107 | procp->pc_release(rqstp, NULL, rqstp->rq_resp); | 1108 | procp->pc_release(rqstp, NULL, rqstp->rq_resp); |
| 1108 | goto dropit; | 1109 | goto dropit; |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c5559ca1a..ab86b7927f84 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/sunrpc/stats.h> | 13 | #include <linux/sunrpc/stats.h> |
| 14 | #include <linux/sunrpc/svc_xprt.h> | 14 | #include <linux/sunrpc/svc_xprt.h> |
| 15 | #include <linux/sunrpc/svcsock.h> | 15 | #include <linux/sunrpc/svcsock.h> |
| 16 | #include <linux/sunrpc/xprt.h> | ||
| 16 | 17 | ||
| 17 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 18 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
| 18 | 19 | ||
| @@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref) | |||
| 128 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) | 129 | if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) |
| 129 | svcauth_unix_info_release(xprt); | 130 | svcauth_unix_info_release(xprt); |
| 130 | put_net(xprt->xpt_net); | 131 | put_net(xprt->xpt_net); |
| 132 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ | ||
| 133 | if (xprt->xpt_bc_xprt) | ||
| 134 | xprt_put(xprt->xpt_bc_xprt); | ||
| 131 | xprt->xpt_ops->xpo_free(xprt); | 135 | xprt->xpt_ops->xpo_free(xprt); |
| 132 | module_put(owner); | 136 | module_put(owner); |
| 133 | } | 137 | } |
| @@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) | |||
| 303 | list_del(&rqstp->rq_list); | 307 | list_del(&rqstp->rq_list); |
| 304 | } | 308 | } |
| 305 | 309 | ||
| 310 | static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | ||
| 311 | { | ||
| 312 | if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) | ||
| 313 | return true; | ||
| 314 | if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) | ||
| 315 | return xprt->xpt_ops->xpo_has_wspace(xprt); | ||
| 316 | return false; | ||
| 317 | } | ||
| 318 | |||
| 306 | /* | 319 | /* |
| 307 | * Queue up a transport with data pending. If there are idle nfsd | 320 | * Queue up a transport with data pending. If there are idle nfsd |
| 308 | * processes, wake 'em up. | 321 | * processes, wake 'em up. |
| @@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 315 | struct svc_rqst *rqstp; | 328 | struct svc_rqst *rqstp; |
| 316 | int cpu; | 329 | int cpu; |
| 317 | 330 | ||
| 318 | if (!(xprt->xpt_flags & | 331 | if (!svc_xprt_has_something_to_do(xprt)) |
| 319 | ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) | ||
| 320 | return; | 332 | return; |
| 321 | 333 | ||
| 322 | cpu = get_cpu(); | 334 | cpu = get_cpu(); |
| @@ -343,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 343 | dprintk("svc: transport %p busy, not enqueued\n", xprt); | 355 | dprintk("svc: transport %p busy, not enqueued\n", xprt); |
| 344 | goto out_unlock; | 356 | goto out_unlock; |
| 345 | } | 357 | } |
| 346 | BUG_ON(xprt->xpt_pool != NULL); | ||
| 347 | xprt->xpt_pool = pool; | ||
| 348 | |||
| 349 | /* Handle pending connection */ | ||
| 350 | if (test_bit(XPT_CONN, &xprt->xpt_flags)) | ||
| 351 | goto process; | ||
| 352 | |||
| 353 | /* Handle close in-progress */ | ||
| 354 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) | ||
| 355 | goto process; | ||
| 356 | |||
| 357 | /* Check if we have space to reply to a request */ | ||
| 358 | if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { | ||
| 359 | /* Don't enqueue while not enough space for reply */ | ||
| 360 | dprintk("svc: no write space, transport %p not enqueued\n", | ||
| 361 | xprt); | ||
| 362 | xprt->xpt_pool = NULL; | ||
| 363 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 364 | goto out_unlock; | ||
| 365 | } | ||
| 366 | 358 | ||
| 367 | process: | ||
| 368 | if (!list_empty(&pool->sp_threads)) { | 359 | if (!list_empty(&pool->sp_threads)) { |
| 369 | rqstp = list_entry(pool->sp_threads.next, | 360 | rqstp = list_entry(pool->sp_threads.next, |
| 370 | struct svc_rqst, | 361 | struct svc_rqst, |
| @@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 381 | rqstp->rq_reserved = serv->sv_max_mesg; | 372 | rqstp->rq_reserved = serv->sv_max_mesg; |
| 382 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 373 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
| 383 | pool->sp_stats.threads_woken++; | 374 | pool->sp_stats.threads_woken++; |
| 384 | BUG_ON(xprt->xpt_pool != pool); | ||
| 385 | wake_up(&rqstp->rq_wait); | 375 | wake_up(&rqstp->rq_wait); |
| 386 | } else { | 376 | } else { |
| 387 | dprintk("svc: transport %p put into queue\n", xprt); | 377 | dprintk("svc: transport %p put into queue\n", xprt); |
| 388 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); | 378 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); |
| 389 | pool->sp_stats.sockets_queued++; | 379 | pool->sp_stats.sockets_queued++; |
| 390 | BUG_ON(xprt->xpt_pool != pool); | ||
| 391 | } | 380 | } |
| 392 | 381 | ||
| 393 | out_unlock: | 382 | out_unlock: |
| @@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) | |||
| 426 | void svc_xprt_received(struct svc_xprt *xprt) | 415 | void svc_xprt_received(struct svc_xprt *xprt) |
| 427 | { | 416 | { |
| 428 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | 417 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); |
| 429 | xprt->xpt_pool = NULL; | ||
| 430 | /* As soon as we clear busy, the xprt could be closed and | 418 | /* As soon as we clear busy, the xprt could be closed and |
| 431 | * 'put', so we need a reference to call svc_xprt_enqueue with: | 419 | * 'put', so we need a reference to call svc_xprt_enqueue with: |
| 432 | */ | 420 | */ |
| @@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 722 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 710 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
| 723 | dprintk("svc_recv: found XPT_CLOSE\n"); | 711 | dprintk("svc_recv: found XPT_CLOSE\n"); |
| 724 | svc_delete_xprt(xprt); | 712 | svc_delete_xprt(xprt); |
| 725 | } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | 713 | /* Leave XPT_BUSY set on the dead xprt: */ |
| 714 | goto out; | ||
| 715 | } | ||
| 716 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | ||
| 726 | struct svc_xprt *newxpt; | 717 | struct svc_xprt *newxpt; |
| 727 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 718 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
| 728 | if (newxpt) { | 719 | if (newxpt) { |
| @@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 747 | spin_unlock_bh(&serv->sv_lock); | 738 | spin_unlock_bh(&serv->sv_lock); |
| 748 | svc_xprt_received(newxpt); | 739 | svc_xprt_received(newxpt); |
| 749 | } | 740 | } |
| 750 | svc_xprt_received(xprt); | 741 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { |
| 751 | } else { | ||
| 752 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 742 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
| 753 | rqstp, pool->sp_id, xprt, | 743 | rqstp, pool->sp_id, xprt, |
| 754 | atomic_read(&xprt->xpt_ref.refcount)); | 744 | atomic_read(&xprt->xpt_ref.refcount)); |
| 755 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); | 745 | rqstp->rq_deferred = svc_deferred_dequeue(xprt); |
| 756 | if (rqstp->rq_deferred) { | 746 | if (rqstp->rq_deferred) |
| 757 | svc_xprt_received(xprt); | ||
| 758 | len = svc_deferred_recv(rqstp); | 747 | len = svc_deferred_recv(rqstp); |
| 759 | } else { | 748 | else |
| 760 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); | 749 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
| 761 | svc_xprt_received(xprt); | ||
| 762 | } | ||
| 763 | dprintk("svc: got len=%d\n", len); | 750 | dprintk("svc: got len=%d\n", len); |
| 764 | } | 751 | } |
| 752 | svc_xprt_received(xprt); | ||
| 765 | 753 | ||
| 766 | /* No data, incomplete (TCP) read, or accept() */ | 754 | /* No data, incomplete (TCP) read, or accept() */ |
| 767 | if (len == 0 || len == -EAGAIN) { | 755 | if (len == 0 || len == -EAGAIN) |
| 768 | rqstp->rq_res.len = 0; | 756 | goto out; |
| 769 | svc_xprt_release(rqstp); | 757 | |
| 770 | return -EAGAIN; | ||
| 771 | } | ||
| 772 | clear_bit(XPT_OLD, &xprt->xpt_flags); | 758 | clear_bit(XPT_OLD, &xprt->xpt_flags); |
| 773 | 759 | ||
| 774 | rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); | 760 | rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); |
| @@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 777 | if (serv->sv_stats) | 763 | if (serv->sv_stats) |
| 778 | serv->sv_stats->netcnt++; | 764 | serv->sv_stats->netcnt++; |
| 779 | return len; | 765 | return len; |
| 766 | out: | ||
| 767 | rqstp->rq_res.len = 0; | ||
| 768 | svc_xprt_release(rqstp); | ||
| 769 | return -EAGAIN; | ||
| 780 | } | 770 | } |
| 781 | EXPORT_SYMBOL_GPL(svc_recv); | 771 | EXPORT_SYMBOL_GPL(svc_recv); |
| 782 | 772 | ||
| @@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
| 935 | if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) | 925 | if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) |
| 936 | /* someone else will have to effect the close */ | 926 | /* someone else will have to effect the close */ |
| 937 | return; | 927 | return; |
| 938 | 928 | /* | |
| 929 | * We expect svc_close_xprt() to work even when no threads are | ||
| 930 | * running (e.g., while configuring the server before starting | ||
| 931 | * any threads), so if the transport isn't busy, we delete | ||
| 932 | * it ourself: | ||
| 933 | */ | ||
| 939 | svc_delete_xprt(xprt); | 934 | svc_delete_xprt(xprt); |
| 940 | } | 935 | } |
| 941 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 936 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
| @@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list) | |||
| 945 | struct svc_xprt *xprt; | 940 | struct svc_xprt *xprt; |
| 946 | struct svc_xprt *tmp; | 941 | struct svc_xprt *tmp; |
| 947 | 942 | ||
| 943 | /* | ||
| 944 | * The server is shutting down, and no more threads are running. | ||
| 945 | * svc_xprt_enqueue() might still be running, but at worst it | ||
| 946 | * will re-add the xprt to sp_sockets, which will soon get | ||
| 947 | * freed. So we don't bother with any more locking, and don't | ||
| 948 | * leave the close to the (nonexistent) server threads: | ||
| 949 | */ | ||
| 948 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 950 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { |
| 949 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 951 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
| 950 | if (test_bit(XPT_BUSY, &xprt->xpt_flags)) { | 952 | svc_delete_xprt(xprt); |
| 951 | /* Waiting to be processed, but no threads left, | ||
| 952 | * So just remove it from the waiting list | ||
| 953 | */ | ||
| 954 | list_del_init(&xprt->xpt_ready); | ||
| 955 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 956 | } | ||
| 957 | svc_close_xprt(xprt); | ||
| 958 | } | 953 | } |
| 959 | } | 954 | } |
| 960 | 955 | ||
| @@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) | |||
| 1028 | } | 1023 | } |
| 1029 | svc_xprt_get(rqstp->rq_xprt); | 1024 | svc_xprt_get(rqstp->rq_xprt); |
| 1030 | dr->xprt = rqstp->rq_xprt; | 1025 | dr->xprt = rqstp->rq_xprt; |
| 1026 | rqstp->rq_dropme = true; | ||
| 1031 | 1027 | ||
| 1032 | dr->handle.revisit = svc_revisit; | 1028 | dr->handle.revisit = svc_revisit; |
| 1033 | return &dr->handle; | 1029 | return &dr->handle; |
| @@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) | |||
| 1065 | if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) | 1061 | if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) |
| 1066 | return NULL; | 1062 | return NULL; |
| 1067 | spin_lock(&xprt->xpt_lock); | 1063 | spin_lock(&xprt->xpt_lock); |
| 1068 | clear_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
| 1069 | if (!list_empty(&xprt->xpt_deferred)) { | 1064 | if (!list_empty(&xprt->xpt_deferred)) { |
| 1070 | dr = list_entry(xprt->xpt_deferred.next, | 1065 | dr = list_entry(xprt->xpt_deferred.next, |
| 1071 | struct svc_deferred_req, | 1066 | struct svc_deferred_req, |
| 1072 | handle.recent); | 1067 | handle.recent); |
| 1073 | list_del_init(&dr->handle.recent); | 1068 | list_del_init(&dr->handle.recent); |
| 1074 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | 1069 | } else |
| 1075 | } | 1070 | clear_bit(XPT_DEFERRED, &xprt->xpt_flags); |
| 1076 | spin_unlock(&xprt->xpt_lock); | 1071 | spin_unlock(&xprt->xpt_lock); |
| 1077 | return dr; | 1072 | return dr; |
| 1078 | } | 1073 | } |
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c24687..7963569fc04f 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
| @@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister); | |||
| 118 | 118 | ||
| 119 | #define DN_HASHBITS 6 | 119 | #define DN_HASHBITS 6 |
| 120 | #define DN_HASHMAX (1<<DN_HASHBITS) | 120 | #define DN_HASHMAX (1<<DN_HASHBITS) |
| 121 | #define DN_HASHMASK (DN_HASHMAX-1) | ||
| 122 | 121 | ||
| 123 | static struct hlist_head auth_domain_table[DN_HASHMAX]; | 122 | static struct hlist_head auth_domain_table[DN_HASHMAX]; |
| 124 | static spinlock_t auth_domain_lock = | 123 | static spinlock_t auth_domain_lock = |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d187f1..30916b06c12b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
| @@ -30,7 +30,9 @@ | |||
| 30 | 30 | ||
| 31 | struct unix_domain { | 31 | struct unix_domain { |
| 32 | struct auth_domain h; | 32 | struct auth_domain h; |
| 33 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 33 | int addr_changes; | 34 | int addr_changes; |
| 35 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 34 | /* other stuff later */ | 36 | /* other stuff later */ |
| 35 | }; | 37 | }; |
| 36 | 38 | ||
| @@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name) | |||
| 64 | return NULL; | 66 | return NULL; |
| 65 | } | 67 | } |
| 66 | new->h.flavour = &svcauth_unix; | 68 | new->h.flavour = &svcauth_unix; |
| 69 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 67 | new->addr_changes = 0; | 70 | new->addr_changes = 0; |
| 71 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 68 | rv = auth_domain_lookup(name, &new->h); | 72 | rv = auth_domain_lookup(name, &new->h); |
| 69 | } | 73 | } |
| 70 | } | 74 | } |
| @@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom) | |||
| 85 | */ | 89 | */ |
| 86 | #define IP_HASHBITS 8 | 90 | #define IP_HASHBITS 8 |
| 87 | #define IP_HASHMAX (1<<IP_HASHBITS) | 91 | #define IP_HASHMAX (1<<IP_HASHBITS) |
| 88 | #define IP_HASHMASK (IP_HASHMAX-1) | ||
| 89 | 92 | ||
| 90 | struct ip_map { | 93 | struct ip_map { |
| 91 | struct cache_head h; | 94 | struct cache_head h; |
| 92 | char m_class[8]; /* e.g. "nfsd" */ | 95 | char m_class[8]; /* e.g. "nfsd" */ |
| 93 | struct in6_addr m_addr; | 96 | struct in6_addr m_addr; |
| 94 | struct unix_domain *m_client; | 97 | struct unix_domain *m_client; |
| 98 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 95 | int m_add_change; | 99 | int m_add_change; |
| 100 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 96 | }; | 101 | }; |
| 97 | 102 | ||
| 98 | static void ip_map_put(struct kref *kref) | 103 | static void ip_map_put(struct kref *kref) |
| @@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem) | |||
| 146 | 151 | ||
| 147 | kref_get(&item->m_client->h.ref); | 152 | kref_get(&item->m_client->h.ref); |
| 148 | new->m_client = item->m_client; | 153 | new->m_client = item->m_client; |
| 154 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 149 | new->m_add_change = item->m_add_change; | 155 | new->m_add_change = item->m_add_change; |
| 156 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 150 | } | 157 | } |
| 151 | static struct cache_head *ip_map_alloc(void) | 158 | static struct cache_head *ip_map_alloc(void) |
| 152 | { | 159 | { |
| @@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, | |||
| 331 | ip.h.flags = 0; | 338 | ip.h.flags = 0; |
| 332 | if (!udom) | 339 | if (!udom) |
| 333 | set_bit(CACHE_NEGATIVE, &ip.h.flags); | 340 | set_bit(CACHE_NEGATIVE, &ip.h.flags); |
| 341 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 334 | else { | 342 | else { |
| 335 | ip.m_add_change = udom->addr_changes; | 343 | ip.m_add_change = udom->addr_changes; |
| 336 | /* if this is from the legacy set_client system call, | 344 | /* if this is from the legacy set_client system call, |
| @@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, | |||
| 339 | if (expiry == NEVER) | 347 | if (expiry == NEVER) |
| 340 | ip.m_add_change++; | 348 | ip.m_add_change++; |
| 341 | } | 349 | } |
| 350 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 342 | ip.h.expiry_time = expiry; | 351 | ip.h.expiry_time = expiry; |
| 343 | ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, | 352 | ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, |
| 344 | hash_str(ipm->m_class, IP_HASHBITS) ^ | 353 | hash_str(ipm->m_class, IP_HASHBITS) ^ |
| @@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm, | |||
| 358 | return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); | 367 | return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); |
| 359 | } | 368 | } |
| 360 | 369 | ||
| 370 | #ifdef CONFIG_NFSD_DEPRECATED | ||
| 361 | int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) | 371 | int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom) |
| 362 | { | 372 | { |
| 363 | struct unix_domain *udom; | 373 | struct unix_domain *udom; |
| @@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) | |||
| 402 | return NULL; | 412 | return NULL; |
| 403 | 413 | ||
| 404 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { | 414 | if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) { |
| 405 | if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0) | 415 | sunrpc_invalidate(&ipm->h, sn->ip_map_cache); |
| 406 | auth_domain_put(&ipm->m_client->h); | ||
| 407 | rv = NULL; | 416 | rv = NULL; |
| 408 | } else { | 417 | } else { |
| 409 | rv = &ipm->m_client->h; | 418 | rv = &ipm->m_client->h; |
| @@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr) | |||
| 413 | return rv; | 422 | return rv; |
| 414 | } | 423 | } |
| 415 | EXPORT_SYMBOL_GPL(auth_unix_lookup); | 424 | EXPORT_SYMBOL_GPL(auth_unix_lookup); |
| 425 | #endif /* CONFIG_NFSD_DEPRECATED */ | ||
| 416 | 426 | ||
| 417 | void svcauth_unix_purge(void) | 427 | void svcauth_unix_purge(void) |
| 418 | { | 428 | { |
| @@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt) | |||
| 497 | */ | 507 | */ |
| 498 | #define GID_HASHBITS 8 | 508 | #define GID_HASHBITS 8 |
| 499 | #define GID_HASHMAX (1<<GID_HASHBITS) | 509 | #define GID_HASHMAX (1<<GID_HASHBITS) |
| 500 | #define GID_HASHMASK (GID_HASHMAX - 1) | ||
| 501 | 510 | ||
| 502 | struct unix_gid { | 511 | struct unix_gid { |
| 503 | struct cache_head h; | 512 | struct cache_head h; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index d265aa700bb3..7bd3bbba4710 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -331,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, | |||
| 331 | len = onelen; | 331 | len = onelen; |
| 332 | break; | 332 | break; |
| 333 | } | 333 | } |
| 334 | if (toclose && strcmp(toclose, buf + len) == 0) | 334 | if (toclose && strcmp(toclose, buf + len) == 0) { |
| 335 | closesk = svsk; | 335 | closesk = svsk; |
| 336 | else | 336 | svc_xprt_get(&closesk->sk_xprt); |
| 337 | } else | ||
| 337 | len += onelen; | 338 | len += onelen; |
| 338 | } | 339 | } |
| 339 | spin_unlock_bh(&serv->sv_lock); | 340 | spin_unlock_bh(&serv->sv_lock); |
| 340 | 341 | ||
| 341 | if (closesk) | 342 | if (closesk) { |
| 342 | /* Should unregister with portmap, but you cannot | 343 | /* Should unregister with portmap, but you cannot |
| 343 | * unregister just one protocol... | 344 | * unregister just one protocol... |
| 344 | */ | 345 | */ |
| 345 | svc_close_xprt(&closesk->sk_xprt); | 346 | svc_close_xprt(&closesk->sk_xprt); |
| 346 | else if (toclose) | 347 | svc_xprt_put(&closesk->sk_xprt); |
| 348 | } else if (toclose) | ||
| 347 | return -ENOENT; | 349 | return -ENOENT; |
| 348 | return len; | 350 | return len; |
| 349 | } | 351 | } |
| @@ -992,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, | |||
| 992 | vec[0] = rqstp->rq_arg.head[0]; | 994 | vec[0] = rqstp->rq_arg.head[0]; |
| 993 | } else { | 995 | } else { |
| 994 | /* REPLY */ | 996 | /* REPLY */ |
| 995 | if (svsk->sk_bc_xprt) | 997 | struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; |
| 996 | req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); | 998 | |
| 999 | if (bc_xprt) | ||
| 1000 | req = xprt_lookup_rqst(bc_xprt, xid); | ||
| 997 | 1001 | ||
| 998 | if (!req) { | 1002 | if (!req) { |
| 999 | printk(KERN_NOTICE | 1003 | printk(KERN_NOTICE |
| 1000 | "%s: Got unrecognized reply: " | 1004 | "%s: Got unrecognized reply: " |
| 1001 | "calldir 0x%x sk_bc_xprt %p xid %08x\n", | 1005 | "calldir 0x%x xpt_bc_xprt %p xid %08x\n", |
| 1002 | __func__, ntohl(calldir), | 1006 | __func__, ntohl(calldir), |
| 1003 | svsk->sk_bc_xprt, xid); | 1007 | bc_xprt, xid); |
| 1004 | vec[0] = rqstp->rq_arg.head[0]; | 1008 | vec[0] = rqstp->rq_arg.head[0]; |
| 1005 | goto out; | 1009 | goto out; |
| 1006 | } | 1010 | } |
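Two independent fixes in svcsock.c: svc_sock_names() now pins the socket it intends to close while still holding sv_lock, so the transport cannot disappear between dropping the lock and svc_close_xprt(); and svc_process_calldir() looks the backchannel transport up via svsk->sk_xprt.xpt_bc_xprt instead of the removed sk_bc_xprt field. The pinning pattern, excerpted as a simplified sketch:

    	spin_lock_bh(&serv->sv_lock);
    	/* ... walk the permanent sockets, building the name list ... */
    	if (toclose && strcmp(toclose, buf + len) == 0) {
    		closesk = svsk;
    		svc_xprt_get(&closesk->sk_xprt);	/* pin while locked */
    	} else
    		len += onelen;
    	spin_unlock_bh(&serv->sv_lock);

    	if (closesk) {
    		svc_close_xprt(&closesk->sk_xprt);
    		svc_xprt_put(&closesk->sk_xprt);	/* drop the pin */
    	} else if (toclose)
    		return -ENOENT;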
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4c8f18aff7c3..856274d7e85c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req) | |||
| 965 | xprt = kzalloc(size, GFP_KERNEL); | 965 | xprt = kzalloc(size, GFP_KERNEL); |
| 966 | if (xprt == NULL) | 966 | if (xprt == NULL) |
| 967 | goto out; | 967 | goto out; |
| 968 | kref_init(&xprt->kref); | ||
| 968 | 969 | ||
| 969 | xprt->max_reqs = max_req; | 970 | xprt->max_reqs = max_req; |
| 970 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); | 971 | xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL); |
| @@ -1101,8 +1102,10 @@ found: | |||
| 1101 | -PTR_ERR(xprt)); | 1102 | -PTR_ERR(xprt)); |
| 1102 | return xprt; | 1103 | return xprt; |
| 1103 | } | 1104 | } |
| 1105 | if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state)) | ||
| 1106 | /* ->setup returned a pre-initialized xprt: */ | ||
| 1107 | return xprt; | ||
| 1104 | 1108 | ||
| 1105 | kref_init(&xprt->kref); | ||
| 1106 | spin_lock_init(&xprt->transport_lock); | 1109 | spin_lock_init(&xprt->transport_lock); |
| 1107 | spin_lock_init(&xprt->reserve_lock); | 1110 | spin_lock_init(&xprt->reserve_lock); |
| 1108 | 1111 | ||
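In xprt.c, kref_init() moves from xprt_create_transport() into xprt_alloc(), so every transport carries a valid refcount from the moment it is allocated, and the create path skips the generic initialisation when ->setup() hands back a transport that is already set up (as the reworked xs_setup_bc_tcp() below may do). A sketch of the two call sites after the change, assembled from the hunks above:

    struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
    {
    	struct rpc_xprt *xprt = kzalloc(size, GFP_KERNEL);

    	if (xprt == NULL)
    		goto out;
    	kref_init(&xprt->kref);		/* refcount valid from allocation */
    	/* ... slot table allocation etc. as before ... */
    }

    	/* in xprt_create_transport(), after calling ->setup(): */
    	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
    		/* ->setup returned a pre-initialized xprt: */
    		return xprt;
    	spin_lock_init(&xprt->transport_lock);
    	spin_lock_init(&xprt->reserve_lock);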
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 96549df836ee..c431f5a57960 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2359 | struct svc_sock *bc_sock; | 2359 | struct svc_sock *bc_sock; |
| 2360 | struct rpc_xprt *ret; | 2360 | struct rpc_xprt *ret; |
| 2361 | 2361 | ||
| 2362 | if (args->bc_xprt->xpt_bc_xprt) { | ||
| 2363 | /* | ||
| 2364 | * This server connection already has a backchannel | ||
| 2365 | * export; we can't create a new one, as we wouldn't be | ||
| 2366 | * able to match replies based on xid any more. So, | ||
| 2367 | * reuse the already-existing one: | ||
| 2368 | */ | ||
| 2369 | return args->bc_xprt->xpt_bc_xprt; | ||
| 2370 | } | ||
| 2362 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2371 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
| 2363 | if (IS_ERR(xprt)) | 2372 | if (IS_ERR(xprt)) |
| 2364 | return xprt; | 2373 | return xprt; |
| @@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2375 | xprt->reestablish_timeout = 0; | 2384 | xprt->reestablish_timeout = 0; |
| 2376 | xprt->idle_timeout = 0; | 2385 | xprt->idle_timeout = 0; |
| 2377 | 2386 | ||
| 2378 | /* | ||
| 2379 | * The backchannel uses the same socket connection as the | ||
| 2380 | * forechannel | ||
| 2381 | */ | ||
| 2382 | xprt->bc_xprt = args->bc_xprt; | ||
| 2383 | bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); | ||
| 2384 | bc_sock->sk_bc_xprt = xprt; | ||
| 2385 | transport->sock = bc_sock->sk_sock; | ||
| 2386 | transport->inet = bc_sock->sk_sk; | ||
| 2387 | |||
| 2388 | xprt->ops = &bc_tcp_ops; | 2387 | xprt->ops = &bc_tcp_ops; |
| 2389 | 2388 | ||
| 2390 | switch (addr->sa_family) { | 2389 | switch (addr->sa_family) { |
| @@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2407 | xprt->address_strings[RPC_DISPLAY_PROTO]); | 2406 | xprt->address_strings[RPC_DISPLAY_PROTO]); |
| 2408 | 2407 | ||
| 2409 | /* | 2408 | /* |
| 2409 | * Once we've associated a backchannel xprt with a connection, | ||
| 2410 | * we want to keep it around as long as the connection | ||
| 2411 | * lasts, in case we need to start using it for a backchannel | ||
| 2412 | * again; this reference won't be dropped until bc_xprt is | ||
| 2413 | * destroyed. | ||
| 2414 | */ | ||
| 2415 | xprt_get(xprt); | ||
| 2416 | args->bc_xprt->xpt_bc_xprt = xprt; | ||
| 2417 | xprt->bc_xprt = args->bc_xprt; | ||
| 2418 | bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); | ||
| 2419 | transport->sock = bc_sock->sk_sock; | ||
| 2420 | transport->inet = bc_sock->sk_sk; | ||
| 2421 | |||
| 2422 | /* | ||
| 2410 | * Since we don't want connections for the backchannel, we set | 2423 | * Since we don't want connections for the backchannel, we set |
| 2411 | * the xprt status to connected | 2424 | * the xprt status to connected |
| 2412 | */ | 2425 | */ |
| @@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2415 | 2428 | ||
| 2416 | if (try_module_get(THIS_MODULE)) | 2429 | if (try_module_get(THIS_MODULE)) |
| 2417 | return xprt; | 2430 | return xprt; |
| 2431 | xprt_put(xprt); | ||
| 2418 | ret = ERR_PTR(-EINVAL); | 2432 | ret = ERR_PTR(-EINVAL); |
| 2419 | out_err: | 2433 | out_err: |
| 2420 | xprt_free(xprt); | 2434 | xprt_free(xprt); |
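The xs_setup_bc_tcp() rework has three parts: reuse an existing backchannel export when the server connection already has one (replies are matched by xid, so a second backchannel on the same connection could not be disambiguated); take an extra reference on the new xprt and publish it via xpt_bc_xprt only after the transport is otherwise set up; and drop that reference on the module_get failure path. Condensed sketch with the unchanged middle of the function elided (the sock_xprt 'transport' pointer is assumed to wrap the xprt returned by xs_setup_xprt(), as in the original code):

    static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
    {
    	struct rpc_xprt *xprt;
    	struct sock_xprt *transport;
    	struct svc_sock *bc_sock;

    	if (args->bc_xprt->xpt_bc_xprt)
    		/* this connection already exports a backchannel: reuse it */
    		return args->bc_xprt->xpt_bc_xprt;

    	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
    	transport = container_of(xprt, struct sock_xprt, xprt);
    	/* ... timeouts, bc_tcp_ops, address strings as in the hunks above ... */

    	/* keep the backchannel xprt alive for the connection's lifetime */
    	xprt_get(xprt);
    	args->bc_xprt->xpt_bc_xprt = xprt;
    	xprt->bc_xprt = args->bc_xprt;
    	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
    	transport->sock = bc_sock->sk_sock;
    	transport->inet = bc_sock->sk_sk;

    	if (try_module_get(THIS_MODULE))
    		return xprt;
    	xprt_put(xprt);			/* undo the pin before freeing */
    	return ERR_PTR(-EINVAL);	/* error handling simplified */
    }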
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index d5e1e0b08890..61291965c5f6 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -2189,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 2189 | 2189 | ||
| 2190 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || | 2190 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || |
| 2191 | type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && | 2191 | type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && |
| 2192 | (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 2192 | (nlh->nlmsg_flags & NLM_F_DUMP)) { |
| 2193 | if (link->dump == NULL) | 2193 | if (link->dump == NULL) |
| 2194 | return -EINVAL; | 2194 | return -EINVAL; |
| 2195 | 2195 | ||
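On the xfrm_user.c fix: NLM_F_DUMP is not a single bit but the combination NLM_F_ROOT | NLM_F_MATCH, so the old test only took the dump path when both bits were present, while the new one treats a GETSA/GETPOLICY request carrying either bit as a dump. The flag values below are the standard netlink definitions:

    /* from include/linux/netlink.h */
    #define NLM_F_ROOT	0x100	/* specify tree root */
    #define NLM_F_MATCH	0x200	/* return all matching */
    #define NLM_F_DUMP	(NLM_F_ROOT | NLM_F_MATCH)

    	/* before: only flags containing both bits (0x300) counted as a dump */
    	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { /* ... */ }

    	/* after: NLM_F_ROOT, NLM_F_MATCH or both take the dump path */
    	if (nlh->nlmsg_flags & NLM_F_DUMP) { /* ... */ }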
