Diffstat (limited to 'net')
87 files changed, 1407 insertions, 687 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3d59c9bf8feb..3bccdd12a264 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
 if (vlan->flags & VLAN_FLAG_GVRP)
 vlan_gvrp_request_join(dev);

-netif_carrier_on(dev);
+if (netif_carrier_ok(real_dev))
+netif_carrier_on(dev);
 return 0;

 clear_allmulti:
diff --git a/net/Kconfig b/net/Kconfig
index e24fa0873f32..e926884c1675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -213,10 +213,11 @@ source "net/phonet/Kconfig"
 source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
+source "net/dns_resolver/Kconfig"

 config RPS
 boolean
-depends on SMP && SYSFS
+depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 default y

 menu "Network testing"
diff --git a/net/Makefile b/net/Makefile
index 41d420070a38..ea60fbce9b1b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -67,3 +67,4 @@ ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
 endif
 obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 2ce79df00680..c7d81436213d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
 if (sk) {
 sock_hold(sk);
 ax25_destroy_socket(ax25);
-sock_put(sk);
 bh_unlock_sock(sk);
+sock_put(sk);
 } else
 ax25_destroy_socket(ax25);
 return;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2c911c0759c2..137f23259a93 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
 if (tmp) {
 memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
 atomic_set(&tmp->use, 1);
-nf_bridge_put(nf_bridge);
 }
+nf_bridge_put(nf_bridge);
 nf_bridge = tmp;
 }
 return nf_bridge;
@@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
-!skb_is_gso(skb))
+!skb_is_gso(skb)) {
+/* BUG: Should really parse the IP options here. */
+memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 return ip_fragment(skb, br_dev_queue_push_xmit);
-else
+} else
 return br_dev_queue_push_xmit(skb);
 }
 #else
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 01f238ff2346..c49a6695793a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,7 +9,7 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>

-#define PKT_PREFIX 16
+#define PKT_PREFIX 48
 #define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do { \
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index eb1602022ac0..9a699242d104 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,7 +7,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9c65e9deb9c3..08ffe9e4be20 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -60,6 +60,13 @@
 #include <net/sock.h>
 #include <net/net_namespace.h>

+/*
+ * To send multiple CAN frame content within TX_SETUP or to filter
+ * CAN messages with multiplex index within RX_SETUP, the number of
+ * different filters is limited to 256 due to the one byte index value.
+ */
+#define MAX_NFRAMES 256
+
 /* use of last_frames[index].can_dlc */
 #define RX_RECV 0x40 /* received data for this element */
 #define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
 struct list_head list;
 int ifindex;
 canid_t can_id;
-int flags;
+u32 flags;
 unsigned long frames_abs, frames_filtered;
 struct timeval ival1, ival2;
 struct hrtimer timer, thrtimer;
 struct tasklet_struct tsklet, thrtsklet;
 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
 int rx_ifindex;
-int count;
-int nframes;
-int currframe;
+u32 count;
+u32 nframes;
+u32 currframe;
 struct can_frame *frames;
 struct can_frame *last_frames;
 struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)

 seq_printf(m, "rx_op: %03X %-5s ",
 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
-seq_printf(m, "[%d]%c ", op->nframes,
+seq_printf(m, "[%u]%c ", op->nframes,
 (op->flags & RX_CHECK_DLC)?'d':' ');
 if (op->kt_ival1.tv64)
 seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)

 list_for_each_entry(op, &bo->tx_ops, list) {

-seq_printf(m, "tx_op: %03X %s [%d] ",
+seq_printf(m, "tx_op: %03X %s [%u] ",
 op->can_id,
 bcm_proc_getifname(ifname, op->ifindex),
 op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 struct can_frame *firstframe;
 struct sockaddr_can *addr;
 struct sock *sk = op->sk;
-int datalen = head->nframes * CFSIZ;
+unsigned int datalen = head->nframes * CFSIZ;
 int err;

 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 * received data stored in op->last_frames[]
 */
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
 const struct can_frame *rxdata)
 {
 /*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 /*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+unsigned int index)
 {
 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
 if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
 int updated = 0;

 if (op->nframes > 1) {
-int i;
+unsigned int i;

 /* for MUX filter we start at index 1 */
 for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
 struct bcm_op *op = (struct bcm_op *)data;
 const struct can_frame *rxframe = (struct can_frame *)skb->data;
-int i;
+unsigned int i;

 /* disable timeout */
 hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 {
 struct bcm_sock *bo = bcm_sk(sk);
 struct bcm_op *op;
-int i, err;
+unsigned int i;
+int err;

 /* we need a real device to send frames */
 if (!ifindex)
 return -ENODEV;

-/* we need at least one can_frame */
-if (msg_head->nframes < 1)
+/* check nframes boundaries - we need at least one can_frame */
+if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
 return -EINVAL;

 /* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 msg_head->nframes = 0;
 }

+/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
+if (msg_head->nframes > MAX_NFRAMES + 1)
+return -EINVAL;
+
 if ((msg_head->flags & RX_RTR_FRAME) &&
 ((msg_head->nframes != 1) ||
 (!(msg_head->can_id & CAN_RTR_FLAG))))
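For illustration only (not part of the diff above): the new MAX_NFRAMES bound applies to the nframes value userspace passes in struct bcm_msg_head, so a TX_SETUP request on a CAN_BCM socket is now rejected with EINVAL if it asks for more than 256 frames. A minimal userspace sketch of such a request follows; the CAN ID, payload and cycle time are made up, and it assumes the socket was already created with socket(PF_CAN, SOCK_DGRAM, CAN_BCM) and connect()ed to an interface.

/* Illustrative userspace sketch under the assumptions above, not from the patch. */
#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

struct tx_msg {
	struct bcm_msg_head head;
	struct can_frame frames[2];	/* 1..256 (MAX_NFRAMES) frames allowed */
};

static int bcm_tx_setup_example(int s)
{
	struct tx_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode = TX_SETUP;
	msg.head.flags = SETTIMER | STARTTIMER;
	msg.head.count = 0;			/* cycle forever at ival2 */
	msg.head.ival2.tv_usec = 100000;	/* 100 ms period */
	msg.head.can_id = 0x123;
	msg.head.nframes = 2;			/* values above 256 now fail with EINVAL */

	msg.frames[0].can_id = 0x123;
	msg.frames[0].can_dlc = 2;
	msg.frames[1].can_id = 0x123;
	msg.frames[1].can_dlc = 2;

	return write(s, &msg, sizeof(msg)) == (ssize_t)sizeof(msg) ? 0 : -1;
}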
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ae654391442..660dd41aaaa6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 struct sk_buff *skb)
 {
 int queue_index;
-struct sock *sk = skb->sk;
+const struct net_device_ops *ops = dev->netdev_ops;

-queue_index = sk_tx_queue_get(sk);
-if (queue_index < 0) {
-const struct net_device_ops *ops = dev->netdev_ops;
+if (ops->ndo_select_queue) {
+queue_index = ops->ndo_select_queue(dev, skb);
+queue_index = dev_cap_txqueue(dev, queue_index);
+} else {
+struct sock *sk = skb->sk;
+queue_index = sk_tx_queue_get(sk);
+if (queue_index < 0) {

-if (ops->ndo_select_queue) {
-queue_index = ops->ndo_select_queue(dev, skb);
-queue_index = dev_cap_txqueue(dev, queue_index);
-} else {
 queue_index = 0;
 if (dev->real_num_tx_queues > 1)
 queue_index = skb_tx_hash(dev, skb);
@@ -3143,7 +3143,7 @@ pull:
 put_page(skb_shinfo(skb)->frags[0].page);
 memmove(skb_shinfo(skb)->frags,
 skb_shinfo(skb)->frags + 1,
---skb_shinfo(skb)->nr_frags);
+--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 }
 }

@@ -4845,7 +4845,7 @@ static void rollback_registered_many(struct list_head *head)
 dev = list_first_entry(head, struct net_device, unreg_list);
 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

-synchronize_net();
+rcu_barrier();

 list_for_each_entry(dev, head, unreg_list)
 dev_put(dev);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7429b0..6743146e4d6b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 est->last_packets = bstats->packets;
 est->avpps = rate_est->pps<<10;

-spin_lock(&est_tree_lock);
+spin_lock_bh(&est_tree_lock);
 if (!elist[idx].timer.function) {
 INIT_LIST_HEAD(&elist[idx].list);
 setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,

 list_add_rcu(&est->list, &elist[idx].list);
 gen_add_node(est);
-spin_unlock(&est_tree_lock);
+spin_unlock_bh(&est_tree_lock);

 return 0;
 }
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 {
 struct gen_estimator *e;

-spin_lock(&est_tree_lock);
+spin_lock_bh(&est_tree_lock);
 while ((e = gen_find_node(bstats, rate_est))) {
 rb_erase(&e->node, &est_root);

@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 list_del_rcu(&e->list);
 call_rcu(&e->e_rcu, __gen_kill_estimator);
 }
-spin_unlock(&est_tree_lock);
+spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);

@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,

 ASSERT_RTNL();

-spin_lock(&est_tree_lock);
+spin_lock_bh(&est_tree_lock);
 res = gen_find_node(bstats, rate_est) != NULL;
-spin_unlock(&est_tree_lock);
+spin_unlock_bh(&est_tree_lock);

 return res;
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3a2513f0d0c3..c83b421341c0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 __copy_skb_header(nskb, skb);
 nskb->mac_len = skb->mac_len;

+/* nskb and skb might have different headroom */
+if (nskb->ip_summed == CHECKSUM_PARTIAL)
+nskb->csum_start += skb_headroom(nskb) - headroom;
+
 skb_reset_mac_header(nskb);
 skb_set_network_header(nskb, skb->mac_len);
 nskb->transport_header = (nskb->network_header +
@@ -2703,7 +2707,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 return -E2BIG;

 headroom = skb_headroom(p);
-nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
 if (unlikely(!nskb))
 return -ENOMEM;

diff --git a/net/dns_resolver/Kconfig b/net/dns_resolver/Kconfig
new file mode 100644
index 000000000000..50d49f7e0472
--- /dev/null
+++ b/net/dns_resolver/Kconfig
@@ -0,0 +1,27 @@
+#
+# Configuration for DNS Resolver
+#
+config DNS_RESOLVER
+tristate "DNS Resolver support"
+depends on NET && KEYS
+help
+Saying Y here will include support for the DNS Resolver key type
+which can be used to make upcalls to perform DNS lookups in
+userspace.
+
+DNS Resolver is used to query DNS server for information. Examples
+being resolving a UNC hostname element to an IP address for CIFS or
+performing a DNS query for AFSDB records so that AFS can locate a
+cell's volume location database servers.
+
+DNS Resolver is used by the CIFS and AFS modules, and would support
+SMB2 later. DNS Resolver is supported by the userspace upcall
+helper "/sbin/dns.resolver" via /etc/request-key.conf.
+
+See <file:Documentation/networking/dns_resolver.txt> for further
+information.
+
+To compile this as a module, choose M here: the module will be called
+dnsresolver.
+
+If unsure, say N.
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
new file mode 100644
index 000000000000..c0ef4e71dc49
--- /dev/null
+++ b/net/dns_resolver/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux DNS Resolver.
+#
+
+obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
+
+dns_resolver-objs := dns_key.o dns_query.o
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
new file mode 100644
index 000000000000..739435a6af39
--- /dev/null
+++ b/net/dns_resolver/dns_key.c
@@ -0,0 +1,293 @@
1 | /* Key type used to cache DNS lookups made by the kernel | ||
2 | * | ||
3 | * See Documentation/networking/dns_resolver.txt | ||
4 | * | ||
5 | * Copyright (c) 2007 Igor Mammedov | ||
6 | * Author(s): Igor Mammedov (niallain@gmail.com) | ||
7 | * Steve French (sfrench@us.ibm.com) | ||
8 | * Wang Lei (wang840925@gmail.com) | ||
9 | * David Howells (dhowells@redhat.com) | ||
10 | * | ||
11 | * This library is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU Lesser General Public License as published | ||
13 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This library is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
19 | * the GNU Lesser General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU Lesser General Public License | ||
22 | * along with this library; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/keyctl.h> | ||
31 | #include <linux/err.h> | ||
32 | #include <linux/seq_file.h> | ||
33 | #include <keys/dns_resolver-type.h> | ||
34 | #include <keys/user-type.h> | ||
35 | #include "internal.h" | ||
36 | |||
37 | MODULE_DESCRIPTION("DNS Resolver"); | ||
38 | MODULE_AUTHOR("Wang Lei"); | ||
39 | MODULE_LICENSE("GPL"); | ||
40 | |||
41 | unsigned dns_resolver_debug; | ||
42 | module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); | ||
43 | MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); | ||
44 | |||
45 | const struct cred *dns_resolver_cache; | ||
46 | |||
47 | #define DNS_ERRORNO_OPTION "dnserror" | ||
48 | |||
49 | /* | ||
50 | * Instantiate a user defined key for dns_resolver. | ||
51 | * | ||
52 | * The data must be a NUL-terminated string, with the NUL char accounted in | ||
53 | * datalen. | ||
54 | * | ||
55 | * If the data contains a '#' characters, then we take the clause after each | ||
56 | * one to be an option of the form 'key=value'. The actual data of interest is | ||
57 | * the string leading up to the first '#'. For instance: | ||
58 | * | ||
59 | * "ip1,ip2,...#foo=bar" | ||
60 | */ | ||
61 | static int | ||
62 | dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) | ||
63 | { | ||
64 | struct user_key_payload *upayload; | ||
65 | unsigned long derrno; | ||
66 | int ret; | ||
67 | size_t result_len = 0; | ||
68 | const char *data = _data, *end, *opt; | ||
69 | |||
70 | kenter("%%%d,%s,'%s',%zu", | ||
71 | key->serial, key->description, data, datalen); | ||
72 | |||
73 | if (datalen <= 1 || !data || data[datalen - 1] != '\0') | ||
74 | return -EINVAL; | ||
75 | datalen--; | ||
76 | |||
77 | /* deal with any options embedded in the data */ | ||
78 | end = data + datalen; | ||
79 | opt = memchr(data, '#', datalen); | ||
80 | if (!opt) { | ||
81 | /* no options: the entire data is the result */ | ||
82 | kdebug("no options"); | ||
83 | result_len = datalen; | ||
84 | } else { | ||
85 | const char *next_opt; | ||
86 | |||
87 | result_len = opt - data; | ||
88 | opt++; | ||
89 | kdebug("options: '%s'", opt); | ||
90 | do { | ||
91 | const char *eq; | ||
92 | int opt_len, opt_nlen, opt_vlen, tmp; | ||
93 | |||
94 | next_opt = memchr(opt, '#', end - opt) ?: end; | ||
95 | opt_len = next_opt - opt; | ||
96 | if (!opt_len) { | ||
97 | printk(KERN_WARNING | ||
98 | "Empty option to dns_resolver key %d\n", | ||
99 | key->serial); | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | |||
103 | eq = memchr(opt, '=', opt_len) ?: end; | ||
104 | opt_nlen = eq - opt; | ||
105 | eq++; | ||
106 | opt_vlen = next_opt - eq; /* will be -1 if no value */ | ||
107 | |||
108 | tmp = opt_vlen >= 0 ? opt_vlen : 0; | ||
109 | kdebug("option '%*.*s' val '%*.*s'", | ||
110 | opt_nlen, opt_nlen, opt, tmp, tmp, eq); | ||
111 | |||
112 | /* see if it's an error number representing a DNS error | ||
113 | * that's to be recorded as the result in this key */ | ||
114 | if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && | ||
115 | memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { | ||
116 | kdebug("dns error number option"); | ||
117 | if (opt_vlen <= 0) | ||
118 | goto bad_option_value; | ||
119 | |||
120 | ret = strict_strtoul(eq, 10, &derrno); | ||
121 | if (ret < 0) | ||
122 | goto bad_option_value; | ||
123 | |||
124 | if (derrno < 1 || derrno > 511) | ||
125 | goto bad_option_value; | ||
126 | |||
127 | kdebug("dns error no. = %lu", derrno); | ||
128 | key->type_data.x[0] = -derrno; | ||
129 | continue; | ||
130 | } | ||
131 | |||
132 | bad_option_value: | ||
133 | printk(KERN_WARNING | ||
134 | "Option '%*.*s' to dns_resolver key %d:" | ||
135 | " bad/missing value\n", | ||
136 | opt_nlen, opt_nlen, opt, key->serial); | ||
137 | return -EINVAL; | ||
138 | } while (opt = next_opt + 1, opt < end); | ||
139 | } | ||
140 | |||
141 | /* don't cache the result if we're caching an error saying there's no | ||
142 | * result */ | ||
143 | if (key->type_data.x[0]) { | ||
144 | kleave(" = 0 [h_error %ld]", key->type_data.x[0]); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | kdebug("store result"); | ||
149 | ret = key_payload_reserve(key, result_len); | ||
150 | if (ret < 0) | ||
151 | return -EINVAL; | ||
152 | |||
153 | upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL); | ||
154 | if (!upayload) { | ||
155 | kleave(" = -ENOMEM"); | ||
156 | return -ENOMEM; | ||
157 | } | ||
158 | |||
159 | upayload->datalen = result_len; | ||
160 | memcpy(upayload->data, data, result_len); | ||
161 | upayload->data[result_len] = '\0'; | ||
162 | rcu_assign_pointer(key->payload.data, upayload); | ||
163 | |||
164 | kleave(" = 0"); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * The description is of the form "[<type>:]<domain_name>" | ||
170 | * | ||
171 | * The domain name may be a simple name or an absolute domain name (which | ||
172 | * should end with a period). The domain name is case-independent. | ||
173 | */ | ||
174 | static int | ||
175 | dns_resolver_match(const struct key *key, const void *description) | ||
176 | { | ||
177 | int slen, dlen, ret = 0; | ||
178 | const char *src = key->description, *dsp = description; | ||
179 | |||
180 | kenter("%s,%s", src, dsp); | ||
181 | |||
182 | if (!src || !dsp) | ||
183 | goto no_match; | ||
184 | |||
185 | if (strcasecmp(src, dsp) == 0) | ||
186 | goto matched; | ||
187 | |||
188 | slen = strlen(src); | ||
189 | dlen = strlen(dsp); | ||
190 | if (slen <= 0 || dlen <= 0) | ||
191 | goto no_match; | ||
192 | if (src[slen - 1] == '.') | ||
193 | slen--; | ||
194 | if (dsp[dlen - 1] == '.') | ||
195 | dlen--; | ||
196 | if (slen != dlen || strncasecmp(src, dsp, slen) != 0) | ||
197 | goto no_match; | ||
198 | |||
199 | matched: | ||
200 | ret = 1; | ||
201 | no_match: | ||
202 | kleave(" = %d", ret); | ||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * Describe a DNS key | ||
208 | */ | ||
209 | static void dns_resolver_describe(const struct key *key, struct seq_file *m) | ||
210 | { | ||
211 | int err = key->type_data.x[0]; | ||
212 | |||
213 | seq_puts(m, key->description); | ||
214 | if (err) | ||
215 | seq_printf(m, ": %d", err); | ||
216 | else | ||
217 | seq_printf(m, ": %u", key->datalen); | ||
218 | } | ||
219 | |||
220 | struct key_type key_type_dns_resolver = { | ||
221 | .name = "dns_resolver", | ||
222 | .instantiate = dns_resolver_instantiate, | ||
223 | .match = dns_resolver_match, | ||
224 | .revoke = user_revoke, | ||
225 | .destroy = user_destroy, | ||
226 | .describe = dns_resolver_describe, | ||
227 | .read = user_read, | ||
228 | }; | ||
229 | |||
230 | static int __init init_dns_resolver(void) | ||
231 | { | ||
232 | struct cred *cred; | ||
233 | struct key *keyring; | ||
234 | int ret; | ||
235 | |||
236 | printk(KERN_NOTICE "Registering the %s key type\n", | ||
237 | key_type_dns_resolver.name); | ||
238 | |||
239 | /* create an override credential set with a special thread keyring in | ||
240 | * which DNS requests are cached | ||
241 | * | ||
242 | * this is used to prevent malicious redirections from being installed | ||
243 | * with add_key(). | ||
244 | */ | ||
245 | cred = prepare_kernel_cred(NULL); | ||
246 | if (!cred) | ||
247 | return -ENOMEM; | ||
248 | |||
249 | keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, | ||
250 | (KEY_POS_ALL & ~KEY_POS_SETATTR) | | ||
251 | KEY_USR_VIEW | KEY_USR_READ, | ||
252 | KEY_ALLOC_NOT_IN_QUOTA); | ||
253 | if (IS_ERR(keyring)) { | ||
254 | ret = PTR_ERR(keyring); | ||
255 | goto failed_put_cred; | ||
256 | } | ||
257 | |||
258 | ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); | ||
259 | if (ret < 0) | ||
260 | goto failed_put_key; | ||
261 | |||
262 | ret = register_key_type(&key_type_dns_resolver); | ||
263 | if (ret < 0) | ||
264 | goto failed_put_key; | ||
265 | |||
266 | /* instruct request_key() to use this special keyring as a cache for | ||
267 | * the results it looks up */ | ||
268 | cred->thread_keyring = keyring; | ||
269 | cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; | ||
270 | dns_resolver_cache = cred; | ||
271 | |||
272 | kdebug("DNS resolver keyring: %d\n", key_serial(keyring)); | ||
273 | return 0; | ||
274 | |||
275 | failed_put_key: | ||
276 | key_put(keyring); | ||
277 | failed_put_cred: | ||
278 | put_cred(cred); | ||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | static void __exit exit_dns_resolver(void) | ||
283 | { | ||
284 | key_revoke(dns_resolver_cache->thread_keyring); | ||
285 | unregister_key_type(&key_type_dns_resolver); | ||
286 | put_cred(dns_resolver_cache); | ||
287 | printk(KERN_NOTICE "Unregistered %s key type\n", | ||
288 | key_type_dns_resolver.name); | ||
289 | } | ||
290 | |||
291 | module_init(init_dns_resolver) | ||
292 | module_exit(exit_dns_resolver) | ||
293 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
new file mode 100644
index 000000000000..c32be292c7e3
--- /dev/null
+++ b/net/dns_resolver/dns_query.c
@@ -0,0 +1,165 @@
1 | /* Upcall routine, designed to work as a key type and working through | ||
2 | * /sbin/request-key to contact userspace when handling DNS queries. | ||
3 | * | ||
4 | * See Documentation/networking/dns_resolver.txt | ||
5 | * | ||
6 | * Copyright (c) 2007 Igor Mammedov | ||
7 | * Author(s): Igor Mammedov (niallain@gmail.com) | ||
8 | * Steve French (sfrench@us.ibm.com) | ||
9 | * Wang Lei (wang840925@gmail.com) | ||
10 | * David Howells (dhowells@redhat.com) | ||
11 | * | ||
12 | * The upcall wrapper used to make an arbitrary DNS query. | ||
13 | * | ||
14 | * This function requires the appropriate userspace tool dns.upcall to be | ||
15 | * installed and something like the following lines should be added to the | ||
16 | * /etc/request-key.conf file: | ||
17 | * | ||
18 | * create dns_resolver * * /sbin/dns.upcall %k | ||
19 | * | ||
20 | * For example to use this module to query AFSDB RR: | ||
21 | * | ||
22 | * create dns_resolver afsdb:* * /sbin/dns.afsdb %k | ||
23 | * | ||
24 | * This library is free software; you can redistribute it and/or modify | ||
25 | * it under the terms of the GNU Lesser General Public License as published | ||
26 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
27 | * (at your option) any later version. | ||
28 | * | ||
29 | * This library is distributed in the hope that it will be useful, | ||
30 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
32 | * the GNU Lesser General Public License for more details. | ||
33 | * | ||
34 | * You should have received a copy of the GNU Lesser General Public License | ||
35 | * along with this library; if not, write to the Free Software | ||
36 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
37 | */ | ||
38 | |||
39 | #include <linux/module.h> | ||
40 | #include <linux/slab.h> | ||
41 | #include <linux/dns_resolver.h> | ||
42 | #include <linux/err.h> | ||
43 | #include <keys/dns_resolver-type.h> | ||
44 | #include <keys/user-type.h> | ||
45 | |||
46 | #include "internal.h" | ||
47 | |||
48 | /** | ||
49 | * dns_query - Query the DNS | ||
50 | * @type: Query type (or NULL for straight host->IP lookup) | ||
51 | * @name: Name to look up | ||
52 | * @namelen: Length of name | ||
53 | * @options: Request options (or NULL if no options) | ||
54 | * @_result: Where to place the returned data. | ||
55 | * @_expiry: Where to store the result expiry time (or NULL) | ||
56 | * | ||
57 | * The data will be returned in the pointer at *result, and the caller is | ||
58 | * responsible for freeing it. | ||
59 | * | ||
60 | * The description should be of the form "[<query_type>:]<domain_name>", and | ||
61 | * the options need to be appropriate for the query type requested. If no | ||
62 | * query_type is given, then the query is a straight hostname to IP address | ||
63 | * lookup. | ||
64 | * | ||
65 | * The DNS resolution lookup is performed by upcalling to userspace by way of | ||
66 | * requesting a key of type dns_resolver. | ||
67 | * | ||
68 | * Returns the size of the result on success, -ve error code otherwise. | ||
69 | */ | ||
70 | int dns_query(const char *type, const char *name, size_t namelen, | ||
71 | const char *options, char **_result, time_t *_expiry) | ||
72 | { | ||
73 | struct key *rkey; | ||
74 | struct user_key_payload *upayload; | ||
75 | const struct cred *saved_cred; | ||
76 | size_t typelen, desclen; | ||
77 | char *desc, *cp; | ||
78 | int ret, len; | ||
79 | |||
80 | kenter("%s,%*.*s,%zu,%s", | ||
81 | type, (int)namelen, (int)namelen, name, namelen, options); | ||
82 | |||
83 | if (!name || namelen == 0 || !_result) | ||
84 | return -EINVAL; | ||
85 | |||
86 | /* construct the query key description as "[<type>:]<name>" */ | ||
87 | typelen = 0; | ||
88 | desclen = 0; | ||
89 | if (type) { | ||
90 | typelen = strlen(type); | ||
91 | if (typelen < 1) | ||
92 | return -EINVAL; | ||
93 | desclen += typelen + 1; | ||
94 | } | ||
95 | |||
96 | if (!namelen) | ||
97 | namelen = strlen(name); | ||
98 | if (namelen < 3) | ||
99 | return -EINVAL; | ||
100 | desclen += namelen + 1; | ||
101 | |||
102 | desc = kmalloc(desclen, GFP_KERNEL); | ||
103 | if (!desc) | ||
104 | return -ENOMEM; | ||
105 | |||
106 | cp = desc; | ||
107 | if (type) { | ||
108 | memcpy(cp, type, typelen); | ||
109 | cp += typelen; | ||
110 | *cp++ = ':'; | ||
111 | } | ||
112 | memcpy(cp, name, namelen); | ||
113 | cp += namelen; | ||
114 | *cp = '\0'; | ||
115 | |||
116 | if (!options) | ||
117 | options = ""; | ||
118 | kdebug("call request_key(,%s,%s)", desc, options); | ||
119 | |||
120 | /* make the upcall, using special credentials to prevent the use of | ||
121 | * add_key() to preinstall malicious redirections | ||
122 | */ | ||
123 | saved_cred = override_creds(dns_resolver_cache); | ||
124 | rkey = request_key(&key_type_dns_resolver, desc, options); | ||
125 | revert_creds(saved_cred); | ||
126 | kfree(desc); | ||
127 | if (IS_ERR(rkey)) { | ||
128 | ret = PTR_ERR(rkey); | ||
129 | goto out; | ||
130 | } | ||
131 | |||
132 | down_read(&rkey->sem); | ||
133 | rkey->perm |= KEY_USR_VIEW; | ||
134 | |||
135 | ret = key_validate(rkey); | ||
136 | if (ret < 0) | ||
137 | goto put; | ||
138 | |||
139 | /* If the DNS server gave an error, return that to the caller */ | ||
140 | ret = rkey->type_data.x[0]; | ||
141 | if (ret) | ||
142 | goto put; | ||
143 | |||
144 | upayload = rcu_dereference_protected(rkey->payload.data, | ||
145 | lockdep_is_held(&rkey->sem)); | ||
146 | len = upayload->datalen; | ||
147 | |||
148 | ret = -ENOMEM; | ||
149 | *_result = kmalloc(len + 1, GFP_KERNEL); | ||
150 | if (!*_result) | ||
151 | goto put; | ||
152 | |||
153 | memcpy(*_result, upayload->data, len + 1); | ||
154 | if (_expiry) | ||
155 | *_expiry = rkey->expiry; | ||
156 | |||
157 | ret = len; | ||
158 | put: | ||
159 | up_read(&rkey->sem); | ||
160 | key_put(rkey); | ||
161 | out: | ||
162 | kleave(" = %d", ret); | ||
163 | return ret; | ||
164 | } | ||
165 | EXPORT_SYMBOL(dns_query); | ||
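An illustrative sketch of how an in-kernel user might call the dns_query() interface added above (not code from this commit): the only facts taken from the patch are the dns_query() prototype, the NUL-terminated result buffer, and the caller's duty to kfree() it; the calling context and host name below are hypothetical.

/* Hypothetical in-kernel caller of dns_query(), for illustration only. */
#include <linux/dns_resolver.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_resolve(const char *hostname)
{
	char *result = NULL;
	time_t expiry;
	int len;

	/* NULL type => plain host-to-IP lookup, no options */
	len = dns_query(NULL, hostname, strlen(hostname), NULL, &result, &expiry);
	if (len < 0)
		return len;	/* negative errno from the upcall */

	pr_debug("%s resolved to %s (expires %ld)\n",
		 hostname, result, (long)expiry);
	kfree(result);	/* caller owns the returned buffer */
	return 0;
}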
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
new file mode 100644
index 000000000000..189ca9e9b785
--- /dev/null
+++ b/net/dns_resolver/internal.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2010 Wang Lei
+ * Author(s): Wang Lei (wang840925@gmail.com). All Rights Reserved.
+ *
+ * Internal DNS Rsolver stuff
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+/*
+ * dns_key.c
+ */
+extern const struct cred *dns_resolver_cache;
+
+/*
+ * debug tracing
+ */
+extern unsigned dns_resolver_debug;
+
+#define kdebug(FMT, ...) \
+do { \
+if (unlikely(dns_resolver_debug)) \
+printk(KERN_DEBUG "[%-6.6s] "FMT"\n", \
+current->comm, ##__VA_ARGS__); \
+} while (0)
+
+#define kenter(FMT, ...) kdebug("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) kdebug("<== %s()"FMT"", __func__, ##__VA_ARGS__)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 11201784d29a..87bb5f4de0e8 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
 bool "Distributed Switch Architecture support"
 default n
-depends on EXPERIMENTAL && NET_ETHERNET && !S390
+depends on EXPERIMENTAL && NETDEVICES && !S390
 select PHYLIB
 ---help---
 This allows you to use hardware switch chips that use
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7c3a7d191249..571f8950ed06 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER
 rp_filter on use:

 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
-and
+or
 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter

 Note that some distributions enable it in startup scripts.
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f0550941df7b..721a8a37b45c 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 }
 if (!inet->inet_saddr)
 inet->inet_saddr = rt->rt_src; /* Update source address */
-if (!inet->inet_rcv_saddr)
+if (!inet->inet_rcv_saddr) {
 inet->inet_rcv_saddr = rt->rt_src;
+if (sk->sk_prot->rehash)
+sk->sk_prot->rehash(sk);
+}
 inet->inet_daddr = rt->rt_dst;
 inet->inet_dport = usin->sin_port;
 sk->sk_state = TCP_ESTABLISHED;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a43968918350..7d02a9f999fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,

 struct fib_result res;
 int no_addr, rpf, accept_local;
+bool dev_match;
 int ret;
 struct net *net;

@@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 }
 *spec_dst = FIB_RES_PREFSRC(res);
 fib_combine_itag(itag, &res);
+dev_match = false;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+if (nh->nh_dev == dev) {
+dev_match = true;
+break;
+}
+}
 #else
 if (FIB_RES_DEV(res) == dev)
+dev_match = true;
 #endif
-{
+if (dev_match) {
 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
 fib_res_put(&res);
 return ret;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 79d057a939ba..4a8e370862bc 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 {
 struct tnode *ret = node_parent(node);

-return rcu_dereference(ret);
+return rcu_dereference_check(ret,
+rcu_read_lock_held() ||
+lockdep_rtnl_is_held());
 }

 /* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)

 static struct leaf *trie_firstleaf(struct trie *t)
 {
-struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
+rcu_read_lock_held() ||
+lockdep_rtnl_is_held());

 if (!n)
 return NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a1ad0e7180d2..1fdcacd36ce7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 int mark = 0;


-if (len == 8) {
+if (len == 8 || IGMP_V2_SEEN(in_dev)) {
 if (ih->code == 0) {
 /* Alas, old v1 router presents here. */

diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad50..35c93e8b6a46 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>

-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 if ((dst = rt->rt_gateway) == 0)
 goto tx_error_icmp;
 }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 else if (skb->protocol == htons(ETH_P_IPV6)) {
 struct in6_addr *addr6;
 int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 goto tx_error;
 }
 }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 else if (skb->protocol == htons(ETH_P_IPV6)) {
 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 if ((iph->ttl = tiph->ttl) == 0) {
 if (skb->protocol == htons(ETH_P_IP))
 iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 else if (skb->protocol == htons(ETH_P_IPV6))
 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6c40a8c46e79..64b70ad162e3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 case IP_HDRINCL:
 val = inet->hdrincl;
 break;
+case IP_NODEFRAG:
+val = inet->nodefrag;
+break;
 case IP_MTU_DISCOVER:
 val = inet->pmtudisc;
 break;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6bccba31d132..e8f4f9a57f12 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -735,6 +735,7 @@ static void get_counters(const struct xt_table_info *t,
 if (cpu == curcpu)
 continue;
 i = 0;
+local_bh_disable();
 xt_info_wrlock(cpu);
 xt_entry_foreach(iter, t->entries[cpu], t->size) {
 ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -742,6 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 ++i;
 }
 xt_info_wrunlock(cpu);
+local_bh_enable();
 }
 put_cpu();
 }
@@ -1418,6 +1420,9 @@ static int translate_compat_table(const char *name,
 if (ret != 0)
 break;
 ++i;
+if (strcmp(arpt_get_target(iter1)->u.user.name,
+XT_ERROR_TARGET) == 0)
+++newinfo->stacksize;
 }
 if (ret) {
 /*
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c439721b165a..d163f2e3b2e9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -909,6 +909,7 @@ get_counters(const struct xt_table_info *t,
 if (cpu == curcpu)
 continue;
 i = 0;
+local_bh_disable();
 xt_info_wrlock(cpu);
 xt_entry_foreach(iter, t->entries[cpu], t->size) {
 ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -916,6 +917,7 @@ get_counters(const struct xt_table_info *t,
 ++i; /* macro does multi eval of i */
 }
 xt_info_wrunlock(cpu);
+local_bh_enable();
 }
 put_cpu();
 }
@@ -1749,6 +1751,9 @@ translate_compat_table(struct net *net,
 if (ret != 0)
 break;
 ++i;
+if (strcmp(ipt_get_target(iter1)->u.user.name,
+XT_ERROR_TARGET) == 0)
+++newinfo->stacksize;
 }
 if (ret) {
 /*
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3f56b6e6c6aa..6298f75d5e93 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2738,6 +2738,11 @@ slow_output:
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);

+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+return NULL;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 .family = AF_INET,
 .protocol = cpu_to_be16(ETH_P_IP),
 .destroy = ipv4_dst_destroy,
-.check = ipv4_dst_check,
+.check = ipv4_blackhole_dst_check,
 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
 .entries = ATOMIC_INIT(0),
 };
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 176e11aaea77..95d75d443927 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 */

 mask = 0;
-if (sk->sk_err)
-mask = POLLERR;

 /*
 * POLLHUP is certainly not done right. But poll() doesn't
@@ -451,11 +449,17 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 mask |= POLLOUT | POLLWRNORM;
 }
-}
+} else
+mask |= POLLOUT | POLLWRNORM;

 if (tp->urg_data & TCP_URG_VALID)
 mask |= POLLPRI;
 }
+/* This barrier is coupled with smp_wmb() in tcp_reset() */
+smp_rmb();
+if (sk->sk_err)
+mask |= POLLERR;
+
 return mask;
 }
 EXPORT_SYMBOL(tcp_poll);
@@ -2011,11 +2015,8 @@ adjudge_to_death:
 }
 }
 if (sk->sk_state != TCP_CLOSE) {
-int orphan_count = percpu_counter_read_positive(
-sk->sk_prot->orphan_count);
-
 sk_mem_reclaim(sk);
-if (tcp_too_many_orphans(sk, orphan_count)) {
+if (tcp_too_many_orphans(sk, 0)) {
 if (net_ratelimit())
 printk(KERN_INFO "TCP: too many of orphaned "
 "sockets\n");
@@ -3212,7 +3213,7 @@ void __init tcp_init(void)
 {
 struct sk_buff *skb = NULL;
 unsigned long nr_pages, limit;
-int order, i, max_share;
+int i, max_share, cnt;
 unsigned long jiffy = jiffies;

 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3262,12 @@ void __init tcp_init(void)
 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 }

-/* Try to be a bit smarter and adjust defaults depending
-* on available memory.
-*/
-for (order = 0; ((1 << order) << PAGE_SHIFT) <
-(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-order++)
-;
-if (order >= 4) {
-tcp_death_row.sysctl_max_tw_buckets = 180000;
-sysctl_tcp_max_orphans = 4096 << (order - 4);
-sysctl_max_syn_backlog = 1024;
-} else if (order < 3) {
-tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-sysctl_tcp_max_orphans >>= (3 - order);
-sysctl_max_syn_backlog = 128;
-}
+
+cnt = tcp_hashinfo.ehash_mask + 1;
+
+tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+sysctl_tcp_max_orphans = cnt / 2;
+sysctl_max_syn_backlog = max(128, cnt / 256);

 /* Set the pressure threshold to be a fraction of global memory that
 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
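A worked example of the new sizing in tcp_init() (illustrative numbers, not from the commit itself): with an established-hash table of 65536 buckets, cnt = 65536, so tcp_death_row.sysctl_max_tw_buckets and sysctl_tcp_max_orphans both default to 32768, and sysctl_max_syn_backlog becomes max(128, 65536 / 256) = 256; the defaults now track the ehash size rather than the old page-order heuristic.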
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 0ec9bd0ae94f..850c737e08e2 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) | |||
196 | int tcp_set_allowed_congestion_control(char *val) | 196 | int tcp_set_allowed_congestion_control(char *val) |
197 | { | 197 | { |
198 | struct tcp_congestion_ops *ca; | 198 | struct tcp_congestion_ops *ca; |
199 | char *clone, *name; | 199 | char *saved_clone, *clone, *name; |
200 | int ret = 0; | 200 | int ret = 0; |
201 | 201 | ||
202 | clone = kstrdup(val, GFP_USER); | 202 | saved_clone = clone = kstrdup(val, GFP_USER); |
203 | if (!clone) | 203 | if (!clone) |
204 | return -ENOMEM; | 204 | return -ENOMEM; |
205 | 205 | ||
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val) | |||
226 | } | 226 | } |
227 | out: | 227 | out: |
228 | spin_unlock(&tcp_cong_list_lock); | 228 | spin_unlock(&tcp_cong_list_lock); |
229 | kfree(saved_clone); | ||
229 | 230 | ||
230 | return ret; | 231 | return ret; |
231 | } | 232 | } |
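The tcp_cong.c fix above plugs a leak: strsep() advances the working pointer as it walks the list, so only the saved original returned by kstrdup() can be passed to kfree(). A small userspace analog of the same pattern using strdup()/strsep() from libc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* strsep() consumes 'clone'; keep the original pointer so the
     * allocation can be freed afterwards. */
    int main(void)
    {
        char *saved_clone, *clone, *name;

        saved_clone = clone = strdup("reno cubic vegas");
        if (!clone)
            return 1;

        while ((name = strsep(&clone, " ")) != NULL)
            printf("allowed: %s\n", name);

        free(saved_clone);    /* not 'clone': it is NULL/advanced by now */
        return 0;
    }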
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e663b78a2ef6..149e79ac2891 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4048,6 +4048,8 @@ static void tcp_reset(struct sock *sk) | |||
4048 | default: | 4048 | default: |
4049 | sk->sk_err = ECONNRESET; | 4049 | sk->sk_err = ECONNRESET; |
4050 | } | 4050 | } |
4051 | /* This barrier is coupled with smp_rmb() in tcp_poll() */ | ||
4052 | smp_wmb(); | ||
4051 | 4053 | ||
4052 | if (!sock_flag(sk, SOCK_DEAD)) | 4054 | if (!sock_flag(sk, SOCK_DEAD)) |
4053 | sk->sk_error_report(sk); | 4055 | sk->sk_error_report(sk); |
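The smp_wmb() added above pairs with the smp_rmb() in the tcp_poll() hunk earlier: the write of sk->sk_err must be visible before the state change that makes a poller look at it. A single-threaded userspace analog of that publish/observe contract using C11 release/acquire atomics in place of the kernel barriers; the names are illustrative, not the kernel code:

    #include <stdatomic.h>
    #include <stdio.h>

    static int sk_err;                 /* payload published by the writer */
    static atomic_int err_ready;       /* stands in for the state change */

    static void writer(void)
    {
        sk_err = 104;                  /* ECONNRESET, written first */
        atomic_store_explicit(&err_ready, 1, memory_order_release);
    }

    static void reader(void)
    {
        /* acquire pairs with the release above, like smp_rmb()/smp_wmb() */
        if (atomic_load_explicit(&err_ready, memory_order_acquire))
            printf("POLLERR, sk_err=%d\n", sk_err);
    }

    int main(void)
    {
        writer();
        reader();
        return 0;
    }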
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 808bb920c9f5..c35b469e851c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk) | |||
66 | static int tcp_out_of_resources(struct sock *sk, int do_reset) | 66 | static int tcp_out_of_resources(struct sock *sk, int do_reset) |
67 | { | 67 | { |
68 | struct tcp_sock *tp = tcp_sk(sk); | 68 | struct tcp_sock *tp = tcp_sk(sk); |
69 | int orphans = percpu_counter_read_positive(&tcp_orphan_count); | 69 | int shift = 0; |
70 | 70 | ||
71 | /* If peer does not open window for long time, or did not transmit | 71 | /* If peer does not open window for long time, or did not transmit |
72 | * anything for long time, penalize it. */ | 72 | * anything for long time, penalize it. */ |
73 | if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) | 73 | if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) |
74 | orphans <<= 1; | 74 | shift++; |
75 | 75 | ||
76 | /* If some dubious ICMP arrived, penalize even more. */ | 76 | /* If some dubious ICMP arrived, penalize even more. */ |
77 | if (sk->sk_err_soft) | 77 | if (sk->sk_err_soft) |
78 | orphans <<= 1; | 78 | shift++; |
79 | 79 | ||
80 | if (tcp_too_many_orphans(sk, orphans)) { | 80 | if (tcp_too_many_orphans(sk, shift)) { |
81 | if (net_ratelimit()) | 81 | if (net_ratelimit()) |
82 | printk(KERN_INFO "Out of socket memory\n"); | 82 | printk(KERN_INFO "Out of socket memory\n"); |
83 | 83 | ||
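The tcp_timer.c hunk above stops pre-shifting a snapshot of the orphan counter and instead passes a penalty shift into tcp_too_many_orphans(), which applies it to a fresh read of the counter. A sketch of the shift form of the check, with stand-in numbers:

    #include <stdio.h>

    /* The counter value and the limit are stand-ins for the per-cpu
     * orphan count and sysctl_tcp_max_orphans. */
    static int too_many_orphans(int orphans, int shift, int limit)
    {
        return (orphans << shift) > limit;
    }

    int main(void)
    {
        int orphans = 3000, limit = 4096;
        int shift = 0;

        shift++;    /* peer idle too long: penalize */
        shift++;    /* dubious ICMP seen: penalize again */

        printf("%s\n", too_many_orphans(orphans, shift, limit) ?
               "out of socket memory" : "ok");
        return 0;
    }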
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 32e0bef60d0a..fb23c2e63b52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) | |||
1260 | } | 1260 | } |
1261 | EXPORT_SYMBOL(udp_lib_unhash); | 1261 | EXPORT_SYMBOL(udp_lib_unhash); |
1262 | 1262 | ||
1263 | /* | ||
1264 | * inet_rcv_saddr was changed, we must rehash secondary hash | ||
1265 | */ | ||
1266 | void udp_lib_rehash(struct sock *sk, u16 newhash) | ||
1267 | { | ||
1268 | if (sk_hashed(sk)) { | ||
1269 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | ||
1270 | struct udp_hslot *hslot, *hslot2, *nhslot2; | ||
1271 | |||
1272 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | ||
1273 | nhslot2 = udp_hashslot2(udptable, newhash); | ||
1274 | udp_sk(sk)->udp_portaddr_hash = newhash; | ||
1275 | if (hslot2 != nhslot2) { | ||
1276 | hslot = udp_hashslot(udptable, sock_net(sk), | ||
1277 | udp_sk(sk)->udp_port_hash); | ||
1278 | /* we must lock primary chain too */ | ||
1279 | spin_lock_bh(&hslot->lock); | ||
1280 | |||
1281 | spin_lock(&hslot2->lock); | ||
1282 | hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); | ||
1283 | hslot2->count--; | ||
1284 | spin_unlock(&hslot2->lock); | ||
1285 | |||
1286 | spin_lock(&nhslot2->lock); | ||
1287 | hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, | ||
1288 | &nhslot2->head); | ||
1289 | nhslot2->count++; | ||
1290 | spin_unlock(&nhslot2->lock); | ||
1291 | |||
1292 | spin_unlock_bh(&hslot->lock); | ||
1293 | } | ||
1294 | } | ||
1295 | } | ||
1296 | EXPORT_SYMBOL(udp_lib_rehash); | ||
1297 | |||
1298 | static void udp_v4_rehash(struct sock *sk) | ||
1299 | { | ||
1300 | u16 new_hash = udp4_portaddr_hash(sock_net(sk), | ||
1301 | inet_sk(sk)->inet_rcv_saddr, | ||
1302 | inet_sk(sk)->inet_num); | ||
1303 | udp_lib_rehash(sk, new_hash); | ||
1304 | } | ||
1305 | |||
1263 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1306 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1264 | { | 1307 | { |
1265 | int rc; | 1308 | int rc; |
@@ -1843,6 +1886,7 @@ struct proto udp_prot = { | |||
1843 | .backlog_rcv = __udp_queue_rcv_skb, | 1886 | .backlog_rcv = __udp_queue_rcv_skb, |
1844 | .hash = udp_lib_hash, | 1887 | .hash = udp_lib_hash, |
1845 | .unhash = udp_lib_unhash, | 1888 | .unhash = udp_lib_unhash, |
1889 | .rehash = udp_v4_rehash, | ||
1846 | .get_port = udp_v4_get_port, | 1890 | .get_port = udp_v4_get_port, |
1847 | .memory_allocated = &udp_memory_allocated, | 1891 | .memory_allocated = &udp_memory_allocated, |
1848 | .sysctl_mem = sysctl_udp_mem, | 1892 | .sysctl_mem = sysctl_udp_mem, |
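udp_lib_rehash() above moves a socket between secondary hash slots while holding the primary-chain lock around both slot locks, so concurrent lookups never observe the socket missing from both chains; the new ->rehash hook is then wired up for the connect path. A userspace analog of that lock ordering with pthread mutexes; the slot structure is a stand-in, and the real code additionally runs with bottom halves disabled:

    #include <pthread.h>
    #include <stdio.h>

    struct slot { pthread_mutex_t lock; int count; };

    static void move_entry(pthread_mutex_t *primary,
                           struct slot *old_slot, struct slot *new_slot)
    {
        pthread_mutex_lock(primary);            /* primary chain first */
        pthread_mutex_lock(&old_slot->lock);
        old_slot->count--;                      /* unlink from old chain */
        pthread_mutex_unlock(&old_slot->lock);

        pthread_mutex_lock(&new_slot->lock);
        new_slot->count++;                      /* link into new chain */
        pthread_mutex_unlock(&new_slot->lock);
        pthread_mutex_unlock(primary);
    }

    int main(void)
    {
        pthread_mutex_t primary;
        struct slot a = { .count = 1 }, b = { .count = 0 };

        pthread_mutex_init(&primary, NULL);
        pthread_mutex_init(&a.lock, NULL);
        pthread_mutex_init(&b.lock, NULL);

        move_entry(&primary, &a, &b);
        printf("old=%d new=%d\n", a.count, b.count);
        return 0;
    }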
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 1ef1366a0a03..47947624eccc 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x) | |||
21 | } | 21 | } |
22 | 22 | ||
23 | static void | 23 | static void |
24 | __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 24 | __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) |
25 | struct xfrm_tmpl *tmpl, | 25 | { |
26 | xfrm_address_t *daddr, xfrm_address_t *saddr) | 26 | sel->daddr.a4 = fl->fl4_dst; |
27 | sel->saddr.a4 = fl->fl4_src; | ||
28 | sel->dport = xfrm_flowi_dport(fl); | ||
29 | sel->dport_mask = htons(0xffff); | ||
30 | sel->sport = xfrm_flowi_sport(fl); | ||
31 | sel->sport_mask = htons(0xffff); | ||
32 | sel->family = AF_INET; | ||
33 | sel->prefixlen_d = 32; | ||
34 | sel->prefixlen_s = 32; | ||
35 | sel->proto = fl->proto; | ||
36 | sel->ifindex = fl->oif; | ||
37 | } | ||
38 | |||
39 | static void | ||
40 | xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, | ||
41 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
27 | { | 42 | { |
28 | x->sel.daddr.a4 = fl->fl4_dst; | ||
29 | x->sel.saddr.a4 = fl->fl4_src; | ||
30 | x->sel.dport = xfrm_flowi_dport(fl); | ||
31 | x->sel.dport_mask = htons(0xffff); | ||
32 | x->sel.sport = xfrm_flowi_sport(fl); | ||
33 | x->sel.sport_mask = htons(0xffff); | ||
34 | x->sel.family = AF_INET; | ||
35 | x->sel.prefixlen_d = 32; | ||
36 | x->sel.prefixlen_s = 32; | ||
37 | x->sel.proto = fl->proto; | ||
38 | x->sel.ifindex = fl->oif; | ||
39 | x->id = tmpl->id; | 43 | x->id = tmpl->id; |
40 | if (x->id.daddr.a4 == 0) | 44 | if (x->id.daddr.a4 == 0) |
41 | x->id.daddr.a4 = daddr->a4; | 45 | x->id.daddr.a4 = daddr->a4; |
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
70 | .owner = THIS_MODULE, | 74 | .owner = THIS_MODULE, |
71 | .init_flags = xfrm4_init_flags, | 75 | .init_flags = xfrm4_init_flags, |
72 | .init_tempsel = __xfrm4_init_tempsel, | 76 | .init_tempsel = __xfrm4_init_tempsel, |
77 | .init_temprop = xfrm4_init_temprop, | ||
73 | .output = xfrm4_output, | 78 | .output = xfrm4_output, |
74 | .extract_input = xfrm4_extract_input, | 79 | .extract_input = xfrm4_extract_input, |
75 | .extract_output = xfrm4_extract_output, | 80 | .extract_output = xfrm4_extract_output, |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7d929a22cbc2..ef371aa01ac5 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -105,9 +105,12 @@ ipv4_connected: | |||
105 | if (ipv6_addr_any(&np->saddr)) | 105 | if (ipv6_addr_any(&np->saddr)) |
106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); | 106 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
107 | 107 | ||
108 | if (ipv6_addr_any(&np->rcv_saddr)) | 108 | if (ipv6_addr_any(&np->rcv_saddr)) { |
109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, | 109 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, |
110 | &np->rcv_saddr); | 110 | &np->rcv_saddr); |
111 | if (sk->sk_prot->rehash) | ||
112 | sk->sk_prot->rehash(sk); | ||
113 | } | ||
111 | 114 | ||
112 | goto out; | 115 | goto out; |
113 | } | 116 | } |
@@ -181,6 +184,8 @@ ipv4_connected: | |||
181 | if (ipv6_addr_any(&np->rcv_saddr)) { | 184 | if (ipv6_addr_any(&np->rcv_saddr)) { |
182 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); | 185 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); |
183 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 186 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
187 | if (sk->sk_prot->rehash) | ||
188 | sk->sk_prot->rehash(sk); | ||
184 | } | 189 | } |
185 | 190 | ||
186 | ip6_dst_store(sk, dst, | 191 | ip6_dst_store(sk, dst, |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 5359ef4daac5..8e754be92c24 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -922,6 +922,7 @@ get_counters(const struct xt_table_info *t, | |||
922 | if (cpu == curcpu) | 922 | if (cpu == curcpu) |
923 | continue; | 923 | continue; |
924 | i = 0; | 924 | i = 0; |
925 | local_bh_disable(); | ||
925 | xt_info_wrlock(cpu); | 926 | xt_info_wrlock(cpu); |
926 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 927 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
927 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 928 | ADD_COUNTER(counters[i], iter->counters.bcnt, |
@@ -929,6 +930,7 @@ get_counters(const struct xt_table_info *t, | |||
929 | ++i; | 930 | ++i; |
930 | } | 931 | } |
931 | xt_info_wrunlock(cpu); | 932 | xt_info_wrunlock(cpu); |
933 | local_bh_enable(); | ||
932 | } | 934 | } |
933 | put_cpu(); | 935 | put_cpu(); |
934 | } | 936 | } |
@@ -1764,6 +1766,9 @@ translate_compat_table(struct net *net, | |||
1764 | if (ret != 0) | 1766 | if (ret != 0) |
1765 | break; | 1767 | break; |
1766 | ++i; | 1768 | ++i; |
1769 | if (strcmp(ip6t_get_target(iter1)->u.user.name, | ||
1770 | XT_ERROR_TARGET) == 0) | ||
1771 | ++newinfo->stacksize; | ||
1767 | } | 1772 | } |
1768 | if (ret) { | 1773 | if (ret) { |
1769 | /* | 1774 | /* |
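Besides wrapping the per-cpu counter lock in local_bh_disable()/local_bh_enable(), the translate_compat_table() hunk above sizes the jump stack by counting ERROR targets while walking the translated entries (each user-defined chain ends in one). A sketch of that accounting over simplified stand-in entries:

    #include <stdio.h>
    #include <string.h>

    #define XT_ERROR_TARGET "ERROR"

    struct entry { const char *target; };    /* stand-in, not the xtables layout */

    int main(void)
    {
        struct entry entries[] = {
            { "ACCEPT" }, { "ERROR" }, { "DROP" }, { "ERROR" },
        };
        unsigned int i, stacksize = 0;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            if (strcmp(entries[i].target, XT_ERROR_TARGET) == 0)
                ++stacksize;            /* one per chain-terminating ERROR */

        printf("stacksize=%u\n", stacksize);
        return 0;
    }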
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 13ef5bc05cf5..578f3c1a16db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb) | |||
113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | 113 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Memory Tracking Functions. */ | ||
117 | static void frag_kfree_skb(struct sk_buff *skb) | ||
118 | { | ||
119 | atomic_sub(skb->truesize, &nf_init_frags.mem); | ||
120 | nf_skb_free(skb); | ||
121 | kfree_skb(skb); | ||
122 | } | ||
123 | |||
124 | /* Destruction primitives. */ | 116 | /* Destruction primitives. */ |
125 | 117 | ||
126 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) | 118 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) |
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
282 | } | 274 | } |
283 | 275 | ||
284 | found: | 276 | found: |
285 | /* We found where to put this one. Check for overlap with | 277 | /* RFC5722, Section 4: |
286 | * preceding fragment, and, if needed, align things so that | 278 | * When reassembling an IPv6 datagram, if |
287 | * any overlaps are eliminated. | 279 | * one or more its constituent fragments is determined to be an |
288 | */ | 280 | * overlapping fragment, the entire datagram (and any constituent |
289 | if (prev) { | 281 | * fragments, including those not yet received) MUST be silently |
290 | int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; | 282 | * discarded. |
291 | |||
292 | if (i > 0) { | ||
293 | offset += i; | ||
294 | if (end <= offset) { | ||
295 | pr_debug("overlap\n"); | ||
296 | goto err; | ||
297 | } | ||
298 | if (!pskb_pull(skb, i)) { | ||
299 | pr_debug("Can't pull\n"); | ||
300 | goto err; | ||
301 | } | ||
302 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
303 | skb->ip_summed = CHECKSUM_NONE; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | /* Look for overlap with succeeding segments. | ||
308 | * If we can merge fragments, do it. | ||
309 | */ | 283 | */ |
310 | while (next && NFCT_FRAG6_CB(next)->offset < end) { | ||
311 | /* overlap is 'i' bytes */ | ||
312 | int i = end - NFCT_FRAG6_CB(next)->offset; | ||
313 | |||
314 | if (i < next->len) { | ||
315 | /* Eat head of the next overlapped fragment | ||
316 | * and leave the loop. The next ones cannot overlap. | ||
317 | */ | ||
318 | pr_debug("Eat head of the overlapped parts.: %d", i); | ||
319 | if (!pskb_pull(next, i)) | ||
320 | goto err; | ||
321 | 284 | ||
322 | /* next fragment */ | 285 | /* Check for overlap with preceding fragment. */ |
323 | NFCT_FRAG6_CB(next)->offset += i; | 286 | if (prev && |
324 | fq->q.meat -= i; | 287 | (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
325 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 288 | goto discard_fq; |
326 | next->ip_summed = CHECKSUM_NONE; | ||
327 | break; | ||
328 | } else { | ||
329 | struct sk_buff *free_it = next; | ||
330 | |||
331 | /* Old fragmnet is completely overridden with | ||
332 | * new one drop it. | ||
333 | */ | ||
334 | next = next->next; | ||
335 | 289 | ||
336 | if (prev) | 290 | /* Look for overlap with succeeding segment. */ |
337 | prev->next = next; | 291 | if (next && NFCT_FRAG6_CB(next)->offset < end) |
338 | else | 292 | goto discard_fq; |
339 | fq->q.fragments = next; | ||
340 | |||
341 | fq->q.meat -= free_it->len; | ||
342 | frag_kfree_skb(free_it); | ||
343 | } | ||
344 | } | ||
345 | 293 | ||
346 | NFCT_FRAG6_CB(skb)->offset = offset; | 294 | NFCT_FRAG6_CB(skb)->offset = offset; |
347 | 295 | ||
@@ -371,6 +319,8 @@ found: | |||
371 | write_unlock(&nf_frags.lock); | 319 | write_unlock(&nf_frags.lock); |
372 | return 0; | 320 | return 0; |
373 | 321 | ||
322 | discard_fq: | ||
323 | fq_kill(fq); | ||
374 | err: | 324 | err: |
375 | return -1; | 325 | return -1; |
376 | } | 326 | } |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 545c4141b755..64cfef1b0a4c 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) | |||
149 | } | 149 | } |
150 | EXPORT_SYMBOL(ip6_frag_match); | 150 | EXPORT_SYMBOL(ip6_frag_match); |
151 | 151 | ||
152 | /* Memory Tracking Functions. */ | ||
153 | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) | ||
154 | { | ||
155 | atomic_sub(skb->truesize, &nf->mem); | ||
156 | kfree_skb(skb); | ||
157 | } | ||
158 | |||
159 | void ip6_frag_init(struct inet_frag_queue *q, void *a) | 152 | void ip6_frag_init(struct inet_frag_queue *q, void *a) |
160 | { | 153 | { |
161 | struct frag_queue *fq = container_of(q, struct frag_queue, q); | 154 | struct frag_queue *fq = container_of(q, struct frag_queue, q); |
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
346 | } | 339 | } |
347 | 340 | ||
348 | found: | 341 | found: |
349 | /* We found where to put this one. Check for overlap with | 342 | /* RFC5722, Section 4: |
350 | * preceding fragment, and, if needed, align things so that | 343 | * When reassembling an IPv6 datagram, if |
351 | * any overlaps are eliminated. | 344 | * one or more its constituent fragments is determined to be an |
345 | * overlapping fragment, the entire datagram (and any constituent | ||
346 | * fragments, including those not yet received) MUST be silently | ||
347 | * discarded. | ||
352 | */ | 348 | */ |
353 | if (prev) { | ||
354 | int i = (FRAG6_CB(prev)->offset + prev->len) - offset; | ||
355 | 349 | ||
356 | if (i > 0) { | 350 | /* Check for overlap with preceding fragment. */ |
357 | offset += i; | 351 | if (prev && |
358 | if (end <= offset) | 352 | (FRAG6_CB(prev)->offset + prev->len) - offset > 0) |
359 | goto err; | 353 | goto discard_fq; |
360 | if (!pskb_pull(skb, i)) | ||
361 | goto err; | ||
362 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
363 | skb->ip_summed = CHECKSUM_NONE; | ||
364 | } | ||
365 | } | ||
366 | 354 | ||
367 | /* Look for overlap with succeeding segments. | 355 | /* Look for overlap with succeeding segment. */ |
368 | * If we can merge fragments, do it. | 356 | if (next && FRAG6_CB(next)->offset < end) |
369 | */ | 357 | goto discard_fq; |
370 | while (next && FRAG6_CB(next)->offset < end) { | ||
371 | int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ | ||
372 | |||
373 | if (i < next->len) { | ||
374 | /* Eat head of the next overlapped fragment | ||
375 | * and leave the loop. The next ones cannot overlap. | ||
376 | */ | ||
377 | if (!pskb_pull(next, i)) | ||
378 | goto err; | ||
379 | FRAG6_CB(next)->offset += i; /* next fragment */ | ||
380 | fq->q.meat -= i; | ||
381 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | ||
382 | next->ip_summed = CHECKSUM_NONE; | ||
383 | break; | ||
384 | } else { | ||
385 | struct sk_buff *free_it = next; | ||
386 | |||
387 | /* Old fragment is completely overridden with | ||
388 | * new one drop it. | ||
389 | */ | ||
390 | next = next->next; | ||
391 | |||
392 | if (prev) | ||
393 | prev->next = next; | ||
394 | else | ||
395 | fq->q.fragments = next; | ||
396 | |||
397 | fq->q.meat -= free_it->len; | ||
398 | frag_kfree_skb(fq->q.net, free_it); | ||
399 | } | ||
400 | } | ||
401 | 358 | ||
402 | FRAG6_CB(skb)->offset = offset; | 359 | FRAG6_CB(skb)->offset = offset; |
403 | 360 | ||
@@ -436,6 +393,8 @@ found: | |||
436 | write_unlock(&ip6_frags.lock); | 393 | write_unlock(&ip6_frags.lock); |
437 | return -1; | 394 | return -1; |
438 | 395 | ||
396 | discard_fq: | ||
397 | fq_kill(fq); | ||
439 | err: | 398 | err: |
440 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 399 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
441 | IPSTATS_MIB_REASMFAILS); | 400 | IPSTATS_MIB_REASMFAILS); |
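Both reassembly hunks above (nf_conntrack_reasm.c and reassembly.c) replace fragment trimming with the RFC 5722 rule: any byte of overlap with the preceding or succeeding fragment discards the whole queue instead of being merged. A sketch of the two overlap tests with illustrative offsets:

    #include <stdio.h>

    struct frag { int offset, len; };    /* stand-in for the queued skbs */

    static int overlaps(const struct frag *prev, const struct frag *next,
                        int offset, int end)
    {
        if (prev && prev->offset + prev->len - offset > 0)
            return 1;    /* tail of the previous fragment overlaps us */
        if (next && next->offset < end)
            return 1;    /* we overlap the head of the next fragment */
        return 0;
    }

    int main(void)
    {
        struct frag prev = { 0, 1200 }, next = { 2400, 1200 };

        /* new fragment [1200, 2400): fits exactly, no overlap */
        printf("%s\n", overlaps(&prev, &next, 1200, 2400) ?
               "discard datagram" : "queue fragment");
        return 0;
    }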
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8f2d0400cf8a..d126365ac046 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2580,7 +2580,7 @@ ctl_table ipv6_route_table_template[] = { | |||
2580 | .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, | 2580 | .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, |
2581 | .maxlen = sizeof(int), | 2581 | .maxlen = sizeof(int), |
2582 | .mode = 0644, | 2582 | .mode = 0644, |
2583 | .proc_handler = proc_dointvec_jiffies, | 2583 | .proc_handler = proc_dointvec, |
2584 | }, | 2584 | }, |
2585 | { | 2585 | { |
2586 | .procname = "mtu_expires", | 2586 | .procname = "mtu_expires", |
@@ -2594,7 +2594,7 @@ ctl_table ipv6_route_table_template[] = { | |||
2594 | .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, | 2594 | .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, |
2595 | .maxlen = sizeof(int), | 2595 | .maxlen = sizeof(int), |
2596 | .mode = 0644, | 2596 | .mode = 0644, |
2597 | .proc_handler = proc_dointvec_jiffies, | 2597 | .proc_handler = proc_dointvec, |
2598 | }, | 2598 | }, |
2599 | { | 2599 | { |
2600 | .procname = "gc_min_interval_ms", | 2600 | .procname = "gc_min_interval_ms", |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1dd1affdead2..5acb3560ff15 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); | 111 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); |
112 | } | 112 | } |
113 | 113 | ||
114 | static void udp_v6_rehash(struct sock *sk) | ||
115 | { | ||
116 | u16 new_hash = udp6_portaddr_hash(sock_net(sk), | ||
117 | &inet6_sk(sk)->rcv_saddr, | ||
118 | inet_sk(sk)->inet_num); | ||
119 | |||
120 | udp_lib_rehash(sk, new_hash); | ||
121 | } | ||
122 | |||
114 | static inline int compute_score(struct sock *sk, struct net *net, | 123 | static inline int compute_score(struct sock *sk, struct net *net, |
115 | unsigned short hnum, | 124 | unsigned short hnum, |
116 | struct in6_addr *saddr, __be16 sport, | 125 | struct in6_addr *saddr, __be16 sport, |
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = { | |||
1447 | .backlog_rcv = udpv6_queue_rcv_skb, | 1456 | .backlog_rcv = udpv6_queue_rcv_skb, |
1448 | .hash = udp_lib_hash, | 1457 | .hash = udp_lib_hash, |
1449 | .unhash = udp_lib_unhash, | 1458 | .unhash = udp_lib_unhash, |
1459 | .rehash = udp_v6_rehash, | ||
1450 | .get_port = udp_v6_get_port, | 1460 | .get_port = udp_v6_get_port, |
1451 | .memory_allocated = &udp_memory_allocated, | 1461 | .memory_allocated = &udp_memory_allocated, |
1452 | .sysctl_mem = sysctl_udp_mem, | 1462 | .sysctl_mem = sysctl_udp_mem, |
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index f417b77fa0e1..a67575d472a3 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -20,23 +20,27 @@ | |||
20 | #include <net/addrconf.h> | 20 | #include <net/addrconf.h> |
21 | 21 | ||
22 | static void | 22 | static void |
23 | __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 23 | __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) |
24 | struct xfrm_tmpl *tmpl, | ||
25 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
26 | { | 24 | { |
27 | /* Initialize temporary selector matching only | 25 | /* Initialize temporary selector matching only |
28 | * to current session. */ | 26 | * to current session. */ |
29 | ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); | 27 | ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst); |
30 | ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); | 28 | ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src); |
31 | x->sel.dport = xfrm_flowi_dport(fl); | 29 | sel->dport = xfrm_flowi_dport(fl); |
32 | x->sel.dport_mask = htons(0xffff); | 30 | sel->dport_mask = htons(0xffff); |
33 | x->sel.sport = xfrm_flowi_sport(fl); | 31 | sel->sport = xfrm_flowi_sport(fl); |
34 | x->sel.sport_mask = htons(0xffff); | 32 | sel->sport_mask = htons(0xffff); |
35 | x->sel.family = AF_INET6; | 33 | sel->family = AF_INET6; |
36 | x->sel.prefixlen_d = 128; | 34 | sel->prefixlen_d = 128; |
37 | x->sel.prefixlen_s = 128; | 35 | sel->prefixlen_s = 128; |
38 | x->sel.proto = fl->proto; | 36 | sel->proto = fl->proto; |
39 | x->sel.ifindex = fl->oif; | 37 | sel->ifindex = fl->oif; |
38 | } | ||
39 | |||
40 | static void | ||
41 | xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, | ||
42 | xfrm_address_t *daddr, xfrm_address_t *saddr) | ||
43 | { | ||
40 | x->id = tmpl->id; | 44 | x->id = tmpl->id; |
41 | if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) | 45 | if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) |
42 | memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); | 46 | memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); |
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
168 | .eth_proto = htons(ETH_P_IPV6), | 172 | .eth_proto = htons(ETH_P_IPV6), |
169 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
170 | .init_tempsel = __xfrm6_init_tempsel, | 174 | .init_tempsel = __xfrm6_init_tempsel, |
175 | .init_temprop = xfrm6_init_temprop, | ||
171 | .tmpl_sort = __xfrm6_tmpl_sort, | 176 | .tmpl_sort = __xfrm6_tmpl_sort, |
172 | .state_sort = __xfrm6_state_sort, | 177 | .state_sort = __xfrm6_state_sort, |
173 | .output = xfrm6_output, | 178 | .output = xfrm6_output, |
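As in the IPv4 hunk earlier, the xfrm6 change splits one callback into init_tempsel() (selector filled from the flow) and init_temprop() (properties filled from the template), so family-independent code can invoke them at different points. A simplified sketch of that ops split; all structures here are stand-ins, not the xfrm types:

    #include <stdio.h>

    struct flow     { int daddr, saddr; };
    struct tmpl     { int id; };
    struct selector { int daddr, saddr; };
    struct state    { struct selector sel; int id; };

    struct af_ops {
        void (*init_tempsel)(struct selector *sel, const struct flow *fl);
        void (*init_temprop)(struct state *x, const struct tmpl *t);
    };

    static void v4_init_tempsel(struct selector *sel, const struct flow *fl)
    {
        sel->daddr = fl->daddr;
        sel->saddr = fl->saddr;
    }

    static void v4_init_temprop(struct state *x, const struct tmpl *t)
    {
        x->id = t->id;
    }

    static const struct af_ops ops = {
        .init_tempsel = v4_init_tempsel,
        .init_temprop = v4_init_temprop,
    };

    int main(void)
    {
        struct flow fl = { 1, 2 };
        struct tmpl t = { 7 };
        struct state x = { { 0, 0 }, 0 };

        ops.init_tempsel(&x.sel, &fl);    /* selector from the flow */
        ops.init_temprop(&x, &t);         /* properties from the template */
        printf("daddr=%d id=%d\n", x.sel.daddr, x.id);
        return 0;
    }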
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 79986a674f6e..fd55b5135de5 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
824 | 824 | ||
825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); | 825 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); |
826 | if (err < 0) { | 826 | if (err < 0) { |
827 | kfree(self->ias_obj->name); | 827 | irias_delete_object(self->ias_obj); |
828 | kfree(self->ias_obj); | 828 | self->ias_obj = NULL; |
829 | goto out; | 829 | goto out; |
830 | } | 830 | } |
831 | 831 | ||
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e9427d..6130f9d9dbe1 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) | |||
1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ | 1102 | memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ |
1103 | le16_to_cpus(&val_len); n+=2; | 1103 | le16_to_cpus(&val_len); n+=2; |
1104 | 1104 | ||
1105 | if (val_len > 1016) { | 1105 | if (val_len >= 1016) { |
1106 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); | 1106 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); |
1107 | return -RSP_INVALID_COMMAND_FORMAT; | 1107 | return -RSP_INVALID_COMMAND_FORMAT; |
1108 | } | 1108 | } |
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 9616c32d1076..5bb8353105cc 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c | |||
@@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
169 | { | 169 | { |
170 | struct irlan_cb *self = netdev_priv(dev); | 170 | struct irlan_cb *self = netdev_priv(dev); |
171 | int ret; | 171 | int ret; |
172 | unsigned int len; | ||
172 | 173 | ||
173 | /* skb headroom large enough to contain all IrDA-headers? */ | 174 | /* skb headroom large enough to contain all IrDA-headers? */ |
174 | if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { | 175 | if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { |
@@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
188 | 189 | ||
189 | dev->trans_start = jiffies; | 190 | dev->trans_start = jiffies; |
190 | 191 | ||
192 | len = skb->len; | ||
191 | /* Now queue the packet in the transport layer */ | 193 | /* Now queue the packet in the transport layer */ |
192 | if (self->use_udata) | 194 | if (self->use_udata) |
193 | ret = irttp_udata_request(self->tsap_data, skb); | 195 | ret = irttp_udata_request(self->tsap_data, skb); |
@@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
209 | self->stats.tx_dropped++; | 211 | self->stats.tx_dropped++; |
210 | } else { | 212 | } else { |
211 | self->stats.tx_packets++; | 213 | self->stats.tx_packets++; |
212 | self->stats.tx_bytes += skb->len; | 214 | self->stats.tx_bytes += len; |
213 | } | 215 | } |
214 | 216 | ||
215 | return NETDEV_TX_OK; | 217 | return NETDEV_TX_OK; |
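The irlan_eth_xmit() change above copies skb->len into a local before the skb is handed to the transport layer, which may free it; the byte counter is then updated from the copy. A userspace analog of the same use-after-free avoidance; the packet type and transmit() are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct packet { unsigned int len; char *data; };

    static int transmit(struct packet *p)
    {
        free(p->data);
        free(p);            /* consumes the packet, like the transport call */
        return 0;
    }

    int main(void)
    {
        struct packet *p = malloc(sizeof(*p));
        unsigned long tx_bytes = 0;
        unsigned int len;

        if (!p)
            return 1;
        p->len = 1400;
        p->data = malloc(p->len);

        len = p->len;        /* copy before the packet is handed off */
        if (transmit(p) == 0)
            tx_bytes += len; /* p->len here would be a use-after-free */

        printf("tx_bytes=%lu\n", tx_bytes);
        return 0;
    }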
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 58c6c4cda73b..1ae697681bc7 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, | |||
132 | printk("\n"); | 132 | printk("\n"); |
133 | } | 133 | } |
134 | 134 | ||
135 | if (data_len < ETH_HLEN) | 135 | if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) |
136 | goto error; | 136 | goto error; |
137 | 137 | ||
138 | secpath_reset(skb); | 138 | secpath_reset(skb); |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 023ba820236f..582612998211 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, | |||
1024 | { | 1024 | { |
1025 | struct sock *sk = sock->sk; | 1025 | struct sock *sk = sock->sk; |
1026 | struct llc_sock *llc = llc_sk(sk); | 1026 | struct llc_sock *llc = llc_sk(sk); |
1027 | int rc = -EINVAL, opt; | 1027 | unsigned int opt; |
1028 | int rc = -EINVAL; | ||
1028 | 1029 | ||
1029 | lock_sock(sk); | 1030 | lock_sock(sk); |
1030 | if (unlikely(level != SOL_LLC || optlen != sizeof(int))) | 1031 | if (unlikely(level != SOL_LLC || optlen != sizeof(int))) |
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index e4dae0244d76..cf4aea3ba30f 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c | |||
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb) | |||
689 | 689 | ||
690 | int __init llc_station_init(void) | 690 | int __init llc_station_init(void) |
691 | { | 691 | { |
692 | u16 rc = -ENOBUFS; | 692 | int rc = -ENOBUFS; |
693 | struct sk_buff *skb; | 693 | struct sk_buff *skb; |
694 | struct llc_station_state_ev *ev; | 694 | struct llc_station_state_ev *ev; |
695 | 695 | ||
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 6d0bd198af19..be04d46110fe 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -103,6 +103,7 @@ ieee80211_rate_control_ops_get(const char *name) | |||
103 | struct rate_control_ops *ops; | 103 | struct rate_control_ops *ops; |
104 | const char *alg_name; | 104 | const char *alg_name; |
105 | 105 | ||
106 | kparam_block_sysfs_write(ieee80211_default_rc_algo); | ||
106 | if (!name) | 107 | if (!name) |
107 | alg_name = ieee80211_default_rc_algo; | 108 | alg_name = ieee80211_default_rc_algo; |
108 | else | 109 | else |
@@ -120,6 +121,7 @@ ieee80211_rate_control_ops_get(const char *name) | |||
120 | /* try built-in one if specific alg requested but not found */ | 121 | /* try built-in one if specific alg requested but not found */ |
121 | if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT)) | 122 | if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT)) |
122 | ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT); | 123 | ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT); |
124 | kparam_unblock_sysfs_write(ieee80211_default_rc_algo); | ||
123 | 125 | ||
124 | return ops; | 126 | return ops; |
125 | } | 127 | } |
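The rate.c hunk brackets the read of the ieee80211_default_rc_algo module parameter with kparam_block_sysfs_write()/kparam_unblock_sysfs_write(), so a concurrent sysfs write cannot change or free the string mid-read. A rough userspace analog of that guard using a reader lock; this is not the kernel API, and the default name is illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t param_lock = PTHREAD_RWLOCK_INITIALIZER;
    static char default_algo[32] = "minstrel_ht";    /* illustrative default */

    static void pick_algo(const char *name, char *out, size_t outlen)
    {
        pthread_rwlock_rdlock(&param_lock);   /* block concurrent writers */
        snprintf(out, outlen, "%s", name ? name : default_algo);
        pthread_rwlock_unlock(&param_lock);
    }

    int main(void)
    {
        char buf[32];

        pick_algo(NULL, buf, sizeof(buf));
        printf("rc algorithm: %s\n", buf);
        return 0;
    }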
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f8ddba48011..4c2f89df5cce 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
924 | 924 | ||
925 | ip_vs_out_stats(cp, skb); | 925 | ip_vs_out_stats(cp, skb); |
926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | 926 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); |
927 | ip_vs_update_conntrack(skb, cp, 0); | ||
927 | ip_vs_conn_put(cp); | 928 | ip_vs_conn_put(cp); |
928 | 929 | ||
929 | skb->ipvs_property = 1; | 930 | skb->ipvs_property = 1; |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index f228a17ec649..7e9af5b76d9e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/netfilter.h> | 45 | #include <linux/netfilter.h> |
46 | #include <net/netfilter/nf_conntrack.h> | 46 | #include <net/netfilter/nf_conntrack.h> |
47 | #include <net/netfilter/nf_conntrack_expect.h> | 47 | #include <net/netfilter/nf_conntrack_expect.h> |
48 | #include <net/netfilter/nf_nat.h> | ||
48 | #include <net/netfilter/nf_nat_helper.h> | 49 | #include <net/netfilter/nf_nat_helper.h> |
49 | #include <linux/gfp.h> | 50 | #include <linux/gfp.h> |
50 | #include <net/protocol.h> | 51 | #include <net/protocol.h> |
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
359 | buf_len = strlen(buf); | 360 | buf_len = strlen(buf); |
360 | 361 | ||
361 | ct = nf_ct_get(skb, &ctinfo); | 362 | ct = nf_ct_get(skb, &ctinfo); |
362 | if (ct && !nf_ct_is_untracked(ct)) { | 363 | if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { |
363 | /* If mangling fails this function will return 0 | 364 | /* If mangling fails this function will return 0 |
364 | * which will cause the packet to be dropped. | 365 | * which will cause the packet to be dropped. |
365 | * Mangling can only fail under memory pressure, | 366 | * Mangling can only fail under memory pressure, |
@@ -409,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
409 | union nf_inet_addr to; | 410 | union nf_inet_addr to; |
410 | __be16 port; | 411 | __be16 port; |
411 | struct ip_vs_conn *n_cp; | 412 | struct ip_vs_conn *n_cp; |
412 | struct nf_conn *ct; | ||
413 | 413 | ||
414 | #ifdef CONFIG_IP_VS_IPV6 | 414 | #ifdef CONFIG_IP_VS_IPV6 |
415 | /* This application helper doesn't work with IPv6 yet, | 415 | /* This application helper doesn't work with IPv6 yet, |
@@ -496,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
496 | ip_vs_control_add(n_cp, cp); | 496 | ip_vs_control_add(n_cp, cp); |
497 | } | 497 | } |
498 | 498 | ||
499 | ct = (struct nf_conn *)skb->nfct; | ||
500 | if (ct && ct != &nf_conntrack_untracked) | ||
501 | ip_vs_expect_related(skb, ct, n_cp, | ||
502 | IPPROTO_TCP, &n_cp->dport, 1); | ||
503 | |||
504 | /* | 499 | /* |
505 | * Move tunnel to listen state | 500 | * Move tunnel to listen state |
506 | */ | 501 | */ |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 21e1a5e9b9d3..49df6bea6a2d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
349 | } | 349 | } |
350 | #endif | 350 | #endif |
351 | 351 | ||
352 | static void | 352 | void |
353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | 353 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) |
354 | { | 354 | { |
355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; | 355 | struct nf_conn *ct = (struct nf_conn *)skb->nfct; |
356 | struct nf_conntrack_tuple new_tuple; | 356 | struct nf_conntrack_tuple new_tuple; |
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) | |||
365 | * real-server we will see RIP->DIP. | 365 | * real-server we will see RIP->DIP. |
366 | */ | 366 | */ |
367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 367 | new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
368 | new_tuple.src.u3 = cp->daddr; | 368 | if (outin) |
369 | new_tuple.src.u3 = cp->daddr; | ||
370 | else | ||
371 | new_tuple.dst.u3 = cp->vaddr; | ||
369 | /* | 372 | /* |
370 | * This will also take care of UDP and other protocols. | 373 | * This will also take care of UDP and other protocols. |
371 | */ | 374 | */ |
372 | new_tuple.src.u.tcp.port = cp->dport; | 375 | if (outin) |
376 | new_tuple.src.u.tcp.port = cp->dport; | ||
377 | else | ||
378 | new_tuple.dst.u.tcp.port = cp->vport; | ||
373 | nf_conntrack_alter_reply(ct, &new_tuple); | 379 | nf_conntrack_alter_reply(ct, &new_tuple); |
374 | } | 380 | } |
375 | 381 | ||
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
428 | 434 | ||
429 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 435 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
430 | 436 | ||
431 | ip_vs_update_conntrack(skb, cp); | 437 | ip_vs_update_conntrack(skb, cp, 1); |
432 | 438 | ||
433 | /* FIXME: when application helper enlarges the packet and the length | 439 | /* FIXME: when application helper enlarges the packet and the length |
434 | is larger than the MTU of outgoing device, there will be still | 440 | is larger than the MTU of outgoing device, there will be still |
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
506 | 512 | ||
507 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 513 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
508 | 514 | ||
509 | ip_vs_update_conntrack(skb, cp); | 515 | ip_vs_update_conntrack(skb, cp, 1); |
510 | 516 | ||
511 | /* FIXME: when application helper enlarges the packet and the length | 517 | /* FIXME: when application helper enlarges the packet and the length |
512 | is larger than the MTU of outgoing device, there will be still | 518 | is larger than the MTU of outgoing device, there will be still |
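ip_vs_update_conntrack() above gains an outin flag: for out->in (DNAT toward the real server) the reply tuple's source becomes cp->daddr/dport, while for in->out it is the reply tuple's destination that becomes cp->vaddr/vport. A sketch of that direction-dependent rewrite over stand-in tuples:

    #include <stdio.h>

    struct tuple { int src_addr, src_port, dst_addr, dst_port; };

    static void update_reply(struct tuple *reply, int daddr, int dport,
                             int vaddr, int vport, int outin)
    {
        if (outin) {                    /* out->in: reply comes from the RS */
            reply->src_addr = daddr;
            reply->src_port = dport;
        } else {                        /* in->out: reply goes to the VIP */
            reply->dst_addr = vaddr;
            reply->dst_port = vport;
        }
    }

    int main(void)
    {
        struct tuple reply = { 0, 0, 0, 0 };

        update_reply(&reply, 10, 8080, 20, 80, 1);    /* out->in case */
        printf("reply src=%d:%d\n", reply.src_addr, reply.src_port);
        return 0;
    }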
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2cbf380377d5..cd96ed3ccee4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1406 | struct netlink_sock *nlk = nlk_sk(sk); | 1406 | struct netlink_sock *nlk = nlk_sk(sk); |
1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
1408 | size_t copied; | 1408 | size_t copied; |
1409 | struct sk_buff *skb; | 1409 | struct sk_buff *skb, *data_skb; |
1410 | int err; | 1410 | int err; |
1411 | 1411 | ||
1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
@@ -1418,59 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1418 | if (skb == NULL) | 1418 | if (skb == NULL) |
1419 | goto out; | 1419 | goto out; |
1420 | 1420 | ||
1421 | data_skb = skb; | ||
1422 | |||
1421 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | 1423 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES |
1422 | if (unlikely(skb_shinfo(skb)->frag_list)) { | 1424 | if (unlikely(skb_shinfo(skb)->frag_list)) { |
1423 | bool need_compat = !!(flags & MSG_CMSG_COMPAT); | ||
1424 | |||
1425 | /* | 1425 | /* |
1426 | * If this skb has a frag_list, then here that means that | 1426 | * If this skb has a frag_list, then here that means that we |
1427 | * we will have to use the frag_list skb for compat tasks | 1427 | * will have to use the frag_list skb's data for compat tasks |
1428 | * and the regular skb for non-compat tasks. | 1428 | * and the regular skb's data for normal (non-compat) tasks. |
1429 | * | 1429 | * |
1430 | * The skb might (and likely will) be cloned, so we can't | 1430 | * If we need to send the compat skb, assign it to the |
1431 | * just reset frag_list and go on with things -- we need to | 1431 | * 'data_skb' variable so that it will be used below for data |
1432 | * keep that. For the compat case that's easy -- simply get | 1432 | * copying. We keep 'skb' for everything else, including |
1433 | * a reference to the compat skb and free the regular one | 1433 | * freeing both later. |
1434 | * including the frag. For the non-compat case, we need to | ||
1435 | * avoid sending the frag to the user -- so assign NULL but | ||
1436 | * restore it below before freeing the skb. | ||
1437 | */ | 1434 | */ |
1438 | if (need_compat) { | 1435 | if (flags & MSG_CMSG_COMPAT) |
1439 | struct sk_buff *compskb = skb_shinfo(skb)->frag_list; | 1436 | data_skb = skb_shinfo(skb)->frag_list; |
1440 | skb_get(compskb); | ||
1441 | kfree_skb(skb); | ||
1442 | skb = compskb; | ||
1443 | } else { | ||
1444 | /* | ||
1445 | * Before setting frag_list to NULL, we must get a | ||
1446 | * private copy of skb if shared (because of MSG_PEEK) | ||
1447 | */ | ||
1448 | if (skb_shared(skb)) { | ||
1449 | struct sk_buff *nskb; | ||
1450 | |||
1451 | nskb = pskb_copy(skb, GFP_KERNEL); | ||
1452 | kfree_skb(skb); | ||
1453 | skb = nskb; | ||
1454 | err = -ENOMEM; | ||
1455 | if (!skb) | ||
1456 | goto out; | ||
1457 | } | ||
1458 | kfree_skb(skb_shinfo(skb)->frag_list); | ||
1459 | skb_shinfo(skb)->frag_list = NULL; | ||
1460 | } | ||
1461 | } | 1437 | } |
1462 | #endif | 1438 | #endif |
1463 | 1439 | ||
1464 | msg->msg_namelen = 0; | 1440 | msg->msg_namelen = 0; |
1465 | 1441 | ||
1466 | copied = skb->len; | 1442 | copied = data_skb->len; |
1467 | if (len < copied) { | 1443 | if (len < copied) { |
1468 | msg->msg_flags |= MSG_TRUNC; | 1444 | msg->msg_flags |= MSG_TRUNC; |
1469 | copied = len; | 1445 | copied = len; |
1470 | } | 1446 | } |
1471 | 1447 | ||
1472 | skb_reset_transport_header(skb); | 1448 | skb_reset_transport_header(data_skb); |
1473 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1449 | err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); |
1474 | 1450 | ||
1475 | if (msg->msg_name) { | 1451 | if (msg->msg_name) { |
1476 | struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; | 1452 | struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; |
@@ -1490,7 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1490 | } | 1466 | } |
1491 | siocb->scm->creds = *NETLINK_CREDS(skb); | 1467 | siocb->scm->creds = *NETLINK_CREDS(skb); |
1492 | if (flags & MSG_TRUNC) | 1468 | if (flags & MSG_TRUNC) |
1493 | copied = skb->len; | 1469 | copied = data_skb->len; |
1494 | 1470 | ||
1495 | skb_free_datagram(sk, skb); | 1471 | skb_free_datagram(sk, skb); |
1496 | 1472 | ||
@@ -2126,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
2126 | #endif | 2102 | #endif |
2127 | } | 2103 | } |
2128 | 2104 | ||
2105 | static void __init netlink_add_usersock_entry(void) | ||
2106 | { | ||
2107 | unsigned long *listeners; | ||
2108 | int groups = 32; | ||
2109 | |||
2110 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), | ||
2111 | GFP_KERNEL); | ||
2112 | if (!listeners) | ||
2113 | panic("netlink_add_usersock_entry: Cannot allocate listneres\n"); | ||
2114 | |||
2115 | netlink_table_grab(); | ||
2116 | |||
2117 | nl_table[NETLINK_USERSOCK].groups = groups; | ||
2118 | nl_table[NETLINK_USERSOCK].listeners = listeners; | ||
2119 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | ||
2120 | nl_table[NETLINK_USERSOCK].registered = 1; | ||
2121 | |||
2122 | netlink_table_ungrab(); | ||
2123 | } | ||
2124 | |||
2129 | static struct pernet_operations __net_initdata netlink_net_ops = { | 2125 | static struct pernet_operations __net_initdata netlink_net_ops = { |
2130 | .init = netlink_net_init, | 2126 | .init = netlink_net_init, |
2131 | .exit = netlink_net_exit, | 2127 | .exit = netlink_net_exit, |
@@ -2174,6 +2170,8 @@ static int __init netlink_proto_init(void) | |||
2174 | hash->rehash_time = jiffies; | 2170 | hash->rehash_time = jiffies; |
2175 | } | 2171 | } |
2176 | 2172 | ||
2173 | netlink_add_usersock_entry(); | ||
2174 | |||
2177 | sock_register(&netlink_family_ops); | 2175 | sock_register(&netlink_family_ops); |
2178 | register_pernet_subsys(&netlink_net_ops); | 2176 | register_pernet_subsys(&netlink_net_ops); |
2179 | /* The netlink device handler may be needed early. */ | 2177 | /* The netlink device handler may be needed early. */ |
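The netlink_recvmsg() rewrite above keeps the received skb for accounting and freeing but copies user data from data_skb, which points at the compat frag_list skb only when MSG_CMSG_COMPAT is set. A sketch of that pointer selection with simplified buffers in place of sk_buffs:

    #include <stdio.h>

    struct buf { int len; struct buf *frag_list; };

    int main(void)
    {
        struct buf compat = { 64, NULL };
        struct buf skb = { 128, &compat };
        struct buf *data_skb = &skb;
        int msg_cmsg_compat = 1;

        if (skb.frag_list && msg_cmsg_compat)
            data_skb = skb.frag_list;    /* copy the compat payload */

        printf("copy %d bytes, then free the original buffer\n",
               data_skb->len);
        return 0;
    }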
diff --git a/net/rds/recv.c b/net/rds/recv.c index 795a00b7f2cb..c93588c2d553 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
@@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, | |||
297 | int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) | 297 | int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) |
298 | { | 298 | { |
299 | struct rds_notifier *notifier; | 299 | struct rds_notifier *notifier; |
300 | struct rds_rdma_notify cmsg; | 300 | struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ |
301 | unsigned int count = 0, max_messages = ~0U; | 301 | unsigned int count = 0, max_messages = ~0U; |
302 | unsigned long flags; | 302 | unsigned long flags; |
303 | LIST_HEAD(copy); | 303 | LIST_HEAD(copy); |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 8e45e76a95f5..d952e7eac188 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
679 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) | 679 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) |
680 | return -EINVAL; | 680 | return -EINVAL; |
681 | 681 | ||
682 | if (addr->srose_ndigis > ROSE_MAX_DIGIS) | 682 | if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) |
683 | return -EINVAL; | 683 | return -EINVAL; |
684 | 684 | ||
685 | if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { | 685 | if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { |
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le | |||
739 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) | 739 | if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) |
740 | return -EINVAL; | 740 | return -EINVAL; |
741 | 741 | ||
742 | if (addr->srose_ndigis > ROSE_MAX_DIGIS) | 742 | if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) |
743 | return -EINVAL; | 743 | return -EINVAL; |
744 | 744 | ||
745 | /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ | 745 | /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ |
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7043b294bb67..8e22bd345e71 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -597,12 +597,6 @@ extern unsigned rxrpc_debug; | |||
597 | #define dbgprintk(FMT,...) \ | 597 | #define dbgprintk(FMT,...) \ |
598 | printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) | 598 | printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) |
599 | 599 | ||
600 | /* make sure we maintain the format strings, even when debugging is disabled */ | ||
601 | static inline __attribute__((format(printf,1,2))) | ||
602 | void _dbprintk(const char *fmt, ...) | ||
603 | { | ||
604 | } | ||
605 | |||
606 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) | 600 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) |
607 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) | 601 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) |
608 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) | 602 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) |
@@ -655,11 +649,11 @@ do { \ | |||
655 | } while (0) | 649 | } while (0) |
656 | 650 | ||
657 | #else | 651 | #else |
658 | #define _enter(FMT,...) _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) | 652 | #define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__) |
659 | #define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) | 653 | #define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) |
660 | #define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) | 654 | #define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__) |
661 | #define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) | 655 | #define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__) |
662 | #define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) | 656 | #define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__) |
663 | #endif | 657 | #endif |
664 | 658 | ||
665 | /* | 659 | /* |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 8406c6654990..c2ed90a4c0b4 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
152 | static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | 152 | static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) |
153 | { | 153 | { |
154 | unsigned char *b = skb_tail_pointer(skb); | 154 | unsigned char *b = skb_tail_pointer(skb); |
155 | struct tc_gact opt; | ||
156 | struct tcf_gact *gact = a->priv; | 155 | struct tcf_gact *gact = a->priv; |
156 | struct tc_gact opt = { | ||
157 | .index = gact->tcf_index, | ||
158 | .refcnt = gact->tcf_refcnt - ref, | ||
159 | .bindcnt = gact->tcf_bindcnt - bind, | ||
160 | .action = gact->tcf_action, | ||
161 | }; | ||
157 | struct tcf_t t; | 162 | struct tcf_t t; |
158 | 163 | ||
159 | opt.index = gact->tcf_index; | ||
160 | opt.refcnt = gact->tcf_refcnt - ref; | ||
161 | opt.bindcnt = gact->tcf_bindcnt - bind; | ||
162 | opt.action = gact->tcf_action; | ||
163 | NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); | 164 | NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); |
164 | #ifdef CONFIG_GACT_PROB | 165 | #ifdef CONFIG_GACT_PROB |
165 | if (gact->tcfg_ptype) { | 166 | if (gact->tcfg_ptype) { |
166 | struct tc_gact_p p_opt; | 167 | struct tc_gact_p p_opt = { |
167 | p_opt.paction = gact->tcfg_paction; | 168 | .paction = gact->tcfg_paction, |
168 | p_opt.pval = gact->tcfg_pval; | 169 | .pval = gact->tcfg_pval, |
169 | p_opt.ptype = gact->tcfg_ptype; | 170 | .ptype = gact->tcfg_ptype, |
171 | }; | ||
172 | |||
170 | NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); | 173 | NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); |
171 | } | 174 | } |
172 | #endif | 175 | #endif |
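This and the following act_* hunks (act_mirred.c, act_nat.c, act_police.c, act_simple.c, act_skbedit.c) convert the dump options to designated initializers, so every member not named is zero-initialized and the netlink payload is built in one expression instead of field-by-field assignment. A sketch with a stand-in structure, not the real tc_gact layout:

    #include <stdio.h>

    struct tc_gact_like {
        unsigned int index, refcnt, bindcnt;
        int action;
        int pad;
    };

    int main(void)
    {
        struct tc_gact_like opt = {
            .index   = 42,
            .refcnt  = 2,
            .bindcnt = 1,
            .action  = 0,
            /* .pad is not named, so it is zero-initialized */
        };

        printf("index=%u pad=%d\n", opt.index, opt.pad);
        return 0;
    }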
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 11f195af2da0..0c311be92827 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -219,15 +219,16 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i | |||
219 | { | 219 | { |
220 | unsigned char *b = skb_tail_pointer(skb); | 220 | unsigned char *b = skb_tail_pointer(skb); |
221 | struct tcf_mirred *m = a->priv; | 221 | struct tcf_mirred *m = a->priv; |
222 | struct tc_mirred opt; | 222 | struct tc_mirred opt = { |
223 | .index = m->tcf_index, | ||
224 | .action = m->tcf_action, | ||
225 | .refcnt = m->tcf_refcnt - ref, | ||
226 | .bindcnt = m->tcf_bindcnt - bind, | ||
227 | .eaction = m->tcfm_eaction, | ||
228 | .ifindex = m->tcfm_ifindex, | ||
229 | }; | ||
223 | struct tcf_t t; | 230 | struct tcf_t t; |
224 | 231 | ||
225 | opt.index = m->tcf_index; | ||
226 | opt.action = m->tcf_action; | ||
227 | opt.refcnt = m->tcf_refcnt - ref; | ||
228 | opt.bindcnt = m->tcf_bindcnt - bind; | ||
229 | opt.eaction = m->tcfm_eaction; | ||
230 | opt.ifindex = m->tcfm_ifindex; | ||
231 | NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); | 232 | NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); |
232 | t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); | 233 | t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); |
233 | t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); | 234 | t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 509a2d53a99d..186eb837e600 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -272,19 +272,19 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, | |||
272 | { | 272 | { |
273 | unsigned char *b = skb_tail_pointer(skb); | 273 | unsigned char *b = skb_tail_pointer(skb); |
274 | struct tcf_nat *p = a->priv; | 274 | struct tcf_nat *p = a->priv; |
275 | struct tc_nat opt; | 275 | struct tc_nat opt = { |
276 | .old_addr = p->old_addr, | ||
277 | .new_addr = p->new_addr, | ||
278 | .mask = p->mask, | ||
279 | .flags = p->flags, | ||
280 | |||
281 | .index = p->tcf_index, | ||
282 | .action = p->tcf_action, | ||
283 | .refcnt = p->tcf_refcnt - ref, | ||
284 | .bindcnt = p->tcf_bindcnt - bind, | ||
285 | }; | ||
276 | struct tcf_t t; | 286 | struct tcf_t t; |
277 | 287 | ||
278 | opt.old_addr = p->old_addr; | ||
279 | opt.new_addr = p->new_addr; | ||
280 | opt.mask = p->mask; | ||
281 | opt.flags = p->flags; | ||
282 | |||
283 | opt.index = p->tcf_index; | ||
284 | opt.action = p->tcf_action; | ||
285 | opt.refcnt = p->tcf_refcnt - ref; | ||
286 | opt.bindcnt = p->tcf_bindcnt - bind; | ||
287 | |||
288 | NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); | 288 | NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); |
289 | t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); | 289 | t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); |
290 | t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); | 290 | t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 537a48732e9e..7ebf7439b478 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
350 | { | 350 | { |
351 | unsigned char *b = skb_tail_pointer(skb); | 351 | unsigned char *b = skb_tail_pointer(skb); |
352 | struct tcf_police *police = a->priv; | 352 | struct tcf_police *police = a->priv; |
353 | struct tc_police opt; | 353 | struct tc_police opt = { |
354 | 354 | .index = police->tcf_index, | |
355 | opt.index = police->tcf_index; | 355 | .action = police->tcf_action, |
356 | opt.action = police->tcf_action; | 356 | .mtu = police->tcfp_mtu, |
357 | opt.mtu = police->tcfp_mtu; | 357 | .burst = police->tcfp_burst, |
358 | opt.burst = police->tcfp_burst; | 358 | .refcnt = police->tcf_refcnt - ref, |
359 | opt.refcnt = police->tcf_refcnt - ref; | 359 | .bindcnt = police->tcf_bindcnt - bind, |
360 | opt.bindcnt = police->tcf_bindcnt - bind; | 360 | }; |
361 | |||
361 | if (police->tcfp_R_tab) | 362 | if (police->tcfp_R_tab) |
362 | opt.rate = police->tcfp_R_tab->rate; | 363 | opt.rate = police->tcfp_R_tab->rate; |
363 | else | ||
364 | memset(&opt.rate, 0, sizeof(opt.rate)); | ||
365 | if (police->tcfp_P_tab) | 364 | if (police->tcfp_P_tab) |
366 | opt.peakrate = police->tcfp_P_tab->rate; | 365 | opt.peakrate = police->tcfp_P_tab->rate; |
367 | else | ||
368 | memset(&opt.peakrate, 0, sizeof(opt.peakrate)); | ||
369 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); | 366 | NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); |
370 | if (police->tcfp_result) | 367 | if (police->tcfp_result) |
371 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); | 368 | NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 4a1d640b0cf1..97e84f3ee775 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, | |||
164 | { | 164 | { |
165 | unsigned char *b = skb_tail_pointer(skb); | 165 | unsigned char *b = skb_tail_pointer(skb); |
166 | struct tcf_defact *d = a->priv; | 166 | struct tcf_defact *d = a->priv; |
167 | struct tc_defact opt; | 167 | struct tc_defact opt = { |
168 | .index = d->tcf_index, | ||
169 | .refcnt = d->tcf_refcnt - ref, | ||
170 | .bindcnt = d->tcf_bindcnt - bind, | ||
171 | .action = d->tcf_action, | ||
172 | }; | ||
168 | struct tcf_t t; | 173 | struct tcf_t t; |
169 | 174 | ||
170 | opt.index = d->tcf_index; | ||
171 | opt.refcnt = d->tcf_refcnt - ref; | ||
172 | opt.bindcnt = d->tcf_bindcnt - bind; | ||
173 | opt.action = d->tcf_action; | ||
174 | NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); | 175 | NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); |
175 | NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); | 176 | NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); |
176 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); | 177 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index e9607fe55b58..66cbf4eb8855 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
159 | { | 159 | { |
160 | unsigned char *b = skb_tail_pointer(skb); | 160 | unsigned char *b = skb_tail_pointer(skb); |
161 | struct tcf_skbedit *d = a->priv; | 161 | struct tcf_skbedit *d = a->priv; |
162 | struct tc_skbedit opt; | 162 | struct tc_skbedit opt = { |
163 | .index = d->tcf_index, | ||
164 | .refcnt = d->tcf_refcnt - ref, | ||
165 | .bindcnt = d->tcf_bindcnt - bind, | ||
166 | .action = d->tcf_action, | ||
167 | }; | ||
163 | struct tcf_t t; | 168 | struct tcf_t t; |
164 | 169 | ||
165 | opt.index = d->tcf_index; | ||
166 | opt.refcnt = d->tcf_refcnt - ref; | ||
167 | opt.bindcnt = d->tcf_bindcnt - bind; | ||
168 | opt.action = d->tcf_action; | ||
169 | NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); | 170 | NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); |
170 | if (d->flags & SKBEDIT_F_PRIORITY) | 171 | if (d->flags & SKBEDIT_F_PRIORITY) |
171 | NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), | 172 | NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index b9e8c3b7d406..408eea7086aa 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops) | |||
150 | if (qops->enqueue == NULL) | 150 | if (qops->enqueue == NULL) |
151 | qops->enqueue = noop_qdisc_ops.enqueue; | 151 | qops->enqueue = noop_qdisc_ops.enqueue; |
152 | if (qops->peek == NULL) { | 152 | if (qops->peek == NULL) { |
153 | if (qops->dequeue == NULL) { | 153 | if (qops->dequeue == NULL) |
154 | qops->peek = noop_qdisc_ops.peek; | 154 | qops->peek = noop_qdisc_ops.peek; |
155 | } else { | 155 | else |
156 | rc = -EINVAL; | 156 | goto out_einval; |
157 | goto out; | ||
158 | } | ||
159 | } | 157 | } |
160 | if (qops->dequeue == NULL) | 158 | if (qops->dequeue == NULL) |
161 | qops->dequeue = noop_qdisc_ops.dequeue; | 159 | qops->dequeue = noop_qdisc_ops.dequeue; |
162 | 160 | ||
161 | if (qops->cl_ops) { | ||
162 | const struct Qdisc_class_ops *cops = qops->cl_ops; | ||
163 | |||
164 | if (!(cops->get && cops->put && cops->walk && cops->leaf)) | ||
165 | goto out_einval; | ||
166 | |||
167 | if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf)) | ||
168 | goto out_einval; | ||
169 | } | ||
170 | |||
163 | qops->next = NULL; | 171 | qops->next = NULL; |
164 | *qp = qops; | 172 | *qp = qops; |
165 | rc = 0; | 173 | rc = 0; |
166 | out: | 174 | out: |
167 | write_unlock(&qdisc_mod_lock); | 175 | write_unlock(&qdisc_mod_lock); |
168 | return rc; | 176 | return rc; |
177 | |||
178 | out_einval: | ||
179 | rc = -EINVAL; | ||
180 | goto out; | ||
169 | } | 181 | } |
170 | EXPORT_SYMBOL(register_qdisc); | 182 | EXPORT_SYMBOL(register_qdisc); |
171 | 183 | ||
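The register_qdisc() change above does more than restructure the error path: a queueing discipline that advertises class operations must now supply get, put, walk and leaf, and a tcf_chain hook must come paired with bind_tcf/unbind_tcf, otherwise registration fails with -EINVAL (the sch_sfq.c hunks further down add exactly those stubs so SFQ stays registerable). A rough user-space sketch of the same validate-the-ops-table idea, with illustrative names rather than the kernel API:

#include <errno.h>
#include <stdio.h>

struct class_ops {                      /* illustrative, not Qdisc_class_ops */
        int  (*get)(void);
        void (*put)(void);
        int  (*walk)(void);
        int  (*leaf)(void);
        int  (*tcf_chain)(void);
        int  (*bind_tcf)(void);
        void (*unbind_tcf)(void);
};

static int validate_class_ops(const struct class_ops *cops)
{
        if (!cops)
                return 0;               /* classless qdisc: nothing to check */
        if (!(cops->get && cops->put && cops->walk && cops->leaf))
                return -EINVAL;         /* mandatory callbacks missing */
        if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
                return -EINVAL;         /* filter hook without (un)bind */
        return 0;
}

int main(void)
{
        struct class_ops incomplete = { 0 };    /* every callback NULL */

        printf("%d\n", validate_class_ops(&incomplete));  /* prints: -22 */
        return 0;
}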
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index e114f23d5eae..6318e1136b83 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
255 | error = -EINVAL; | 255 | error = -EINVAL; |
256 | goto err_out; | 256 | goto err_out; |
257 | } | 257 | } |
258 | if (!list_empty(&flow->list)) { | ||
259 | error = -EEXIST; | ||
260 | goto err_out; | ||
261 | } | ||
262 | } else { | 258 | } else { |
263 | int i; | 259 | int i; |
264 | unsigned long cl; | 260 | unsigned long cl; |
@@ -418,7 +414,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
418 | } | 414 | } |
419 | 415 | ||
420 | ret = qdisc_enqueue(skb, flow->q); | 416 | ret = qdisc_enqueue(skb, flow->q); |
421 | if (ret != 0) { | 417 | if (ret != NET_XMIT_SUCCESS) { |
422 | drop: __maybe_unused | 418 | drop: __maybe_unused |
423 | if (net_xmit_drop_count(ret)) { | 419 | if (net_xmit_drop_count(ret)) { |
424 | sch->qstats.drops++; | 420 | sch->qstats.drops++; |
@@ -442,7 +438,7 @@ drop: __maybe_unused | |||
442 | */ | 438 | */ |
443 | if (flow == &p->link) { | 439 | if (flow == &p->link) { |
444 | sch->q.qlen++; | 440 | sch->q.qlen++; |
445 | return 0; | 441 | return NET_XMIT_SUCCESS; |
446 | } | 442 | } |
447 | tasklet_schedule(&p->task); | 443 | tasklet_schedule(&p->task); |
448 | return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | 444 | return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index abd904be4287..47496098d35c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len) | |||
761 | if (f != cl->cl_f) { | 761 | if (f != cl->cl_f) { |
762 | cl->cl_f = f; | 762 | cl->cl_f = f; |
763 | cftree_update(cl); | 763 | cftree_update(cl); |
764 | update_cfmin(cl->cl_parent); | ||
765 | } | 764 | } |
765 | update_cfmin(cl->cl_parent); | ||
766 | } | 766 | } |
767 | } | 767 | } |
768 | 768 | ||
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 534f33231c17..201cbac2b32c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
334 | if (++sch->q.qlen <= q->limit) { | 334 | if (++sch->q.qlen <= q->limit) { |
335 | sch->bstats.bytes += qdisc_pkt_len(skb); | 335 | sch->bstats.bytes += qdisc_pkt_len(skb); |
336 | sch->bstats.packets++; | 336 | sch->bstats.packets++; |
337 | return 0; | 337 | return NET_XMIT_SUCCESS; |
338 | } | 338 | } |
339 | 339 | ||
340 | sfq_drop(sch); | 340 | sfq_drop(sch); |
@@ -508,6 +508,11 @@ nla_put_failure: | |||
508 | return -1; | 508 | return -1; |
509 | } | 509 | } |
510 | 510 | ||
511 | static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg) | ||
512 | { | ||
513 | return NULL; | ||
514 | } | ||
515 | |||
511 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) | 516 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) |
512 | { | 517 | { |
513 | return 0; | 518 | return 0; |
@@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, | |||
519 | return 0; | 524 | return 0; |
520 | } | 525 | } |
521 | 526 | ||
527 | static void sfq_put(struct Qdisc *q, unsigned long cl) | ||
528 | { | ||
529 | } | ||
530 | |||
522 | static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) | 531 | static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) |
523 | { | 532 | { |
524 | struct sfq_sched_data *q = qdisc_priv(sch); | 533 | struct sfq_sched_data *q = qdisc_priv(sch); |
@@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
571 | } | 580 | } |
572 | 581 | ||
573 | static const struct Qdisc_class_ops sfq_class_ops = { | 582 | static const struct Qdisc_class_ops sfq_class_ops = { |
583 | .leaf = sfq_leaf, | ||
574 | .get = sfq_get, | 584 | .get = sfq_get, |
585 | .put = sfq_put, | ||
575 | .tcf_chain = sfq_find_tcf, | 586 | .tcf_chain = sfq_find_tcf, |
576 | .bind_tcf = sfq_bind, | 587 | .bind_tcf = sfq_bind, |
588 | .unbind_tcf = sfq_put, | ||
577 | .dump = sfq_dump_class, | 589 | .dump = sfq_dump_class, |
578 | .dump_stats = sfq_dump_class_stats, | 590 | .dump_stats = sfq_dump_class_stats, |
579 | .walk = sfq_walk, | 591 | .walk = sfq_walk, |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 0991c640cd3e..641a30d64635 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
127 | return qdisc_reshape_fail(skb, sch); | 127 | return qdisc_reshape_fail(skb, sch); |
128 | 128 | ||
129 | ret = qdisc_enqueue(skb, q->qdisc); | 129 | ret = qdisc_enqueue(skb, q->qdisc); |
130 | if (ret != 0) { | 130 | if (ret != NET_XMIT_SUCCESS) { |
131 | if (net_xmit_drop_count(ret)) | 131 | if (net_xmit_drop_count(ret)) |
132 | sch->qstats.drops++; | 132 | sch->qstats.drops++; |
133 | return ret; | 133 | return ret; |
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
136 | sch->q.qlen++; | 136 | sch->q.qlen++; |
137 | sch->bstats.bytes += qdisc_pkt_len(skb); | 137 | sch->bstats.bytes += qdisc_pkt_len(skb); |
138 | sch->bstats.packets++; | 138 | sch->bstats.packets++; |
139 | return 0; | 139 | return NET_XMIT_SUCCESS; |
140 | } | 140 | } |
141 | 141 | ||
142 | static unsigned int tbf_drop(struct Qdisc* sch) | 142 | static unsigned int tbf_drop(struct Qdisc* sch) |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 807643bdcbac..feaabc103ce6 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
85 | __skb_queue_tail(&q->q, skb); | 85 | __skb_queue_tail(&q->q, skb); |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | 86 | sch->bstats.bytes += qdisc_pkt_len(skb); |
87 | sch->bstats.packets++; | 87 | sch->bstats.packets++; |
88 | return 0; | 88 | return NET_XMIT_SUCCESS; |
89 | } | 89 | } |
90 | 90 | ||
91 | kfree_skb(skb); | 91 | kfree_skb(skb); |
diff --git a/net/sctp/output.c b/net/sctp/output.c index a646681f5acd..bcc4590ccaf2 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | |||
92 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, | 92 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, |
93 | packet, vtag); | 93 | packet, vtag); |
94 | 94 | ||
95 | sctp_packet_reset(packet); | ||
96 | packet->vtag = vtag; | 95 | packet->vtag = vtag; |
97 | 96 | ||
98 | if (ecn_capable && sctp_packet_empty(packet)) { | 97 | if (ecn_capable && sctp_packet_empty(packet)) { |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24b2cd555637..d344dc481ccc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -1232,6 +1232,18 @@ out: | |||
1232 | return 0; | 1232 | return 0; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | static bool list_has_sctp_addr(const struct list_head *list, | ||
1236 | union sctp_addr *ipaddr) | ||
1237 | { | ||
1238 | struct sctp_transport *addr; | ||
1239 | |||
1240 | list_for_each_entry(addr, list, transports) { | ||
1241 | if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) | ||
1242 | return true; | ||
1243 | } | ||
1244 | |||
1245 | return false; | ||
1246 | } | ||
1235 | /* A restart is occurring, check to make sure no new addresses | 1247 | /* A restart is occurring, check to make sure no new addresses |
1236 | * are being added as we may be under a takeover attack. | 1248 | * are being added as we may be under a takeover attack. |
1237 | */ | 1249 | */ |
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
1240 | struct sctp_chunk *init, | 1252 | struct sctp_chunk *init, |
1241 | sctp_cmd_seq_t *commands) | 1253 | sctp_cmd_seq_t *commands) |
1242 | { | 1254 | { |
1243 | struct sctp_transport *new_addr, *addr; | 1255 | struct sctp_transport *new_addr; |
1244 | int found; | 1256 | int ret = 1; |
1245 | 1257 | ||
1246 | /* Implementor's Guide - Sectin 5.2.2 | 1258 | /* Implementor's Guide - Section 5.2.2 |
1247 | * ... | 1259 | * ... |
1248 | * Before responding the endpoint MUST check to see if the | 1260 | * Before responding the endpoint MUST check to see if the |
1249 | * unexpected INIT adds new addresses to the association. If new | 1261 | * unexpected INIT adds new addresses to the association. If new |
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | |||
1254 | /* Search through all current addresses and make sure | 1266 | /* Search through all current addresses and make sure |
1255 | * we aren't adding any new ones. | 1267 | * we aren't adding any new ones. |
1256 | */ | 1268 | */ |
1257 | new_addr = NULL; | ||
1258 | found = 0; | ||
1259 | |||
1260 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, | 1269 | list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, |
1261 | transports) { | 1270 | transports) { |
1262 | found = 0; | 1271 | if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, |
1263 | list_for_each_entry(addr, &asoc->peer.transport_addr_list, | 1272 | &new_addr->ipaddr)) { |
1264 | transports) { | 1273 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, |
1265 | if (sctp_cmp_addr_exact(&new_addr->ipaddr, | 1274 | commands); |
1266 | &addr->ipaddr)) { | 1275 | ret = 0; |
1267 | found = 1; | ||
1268 | break; | ||
1269 | } | ||
1270 | } | ||
1271 | if (!found) | ||
1272 | break; | 1276 | break; |
1273 | } | 1277 | } |
1274 | |||
1275 | /* If a new address was added, ABORT the sender. */ | ||
1276 | if (!found && new_addr) { | ||
1277 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); | ||
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | /* Return success if all addresses were found. */ | 1280 | /* Return success if all addresses were found. */ |
1281 | return found; | 1281 | return ret; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | /* Populate the verification/tie tags based on overlapping INIT | 1284 | /* Populate the verification/tie tags based on overlapping INIT |
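The sm_statefuns.c hunk factors the inner transport walk into list_has_sctp_addr() and lets the restart check bail out, returning 0, as soon as the incoming INIT carries an address the existing association does not know about. A stand-alone sketch of that shape, with integer arrays standing in for the transport lists:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool contains(const int *set, size_t n, int v)
{
        for (size_t i = 0; i < n; i++)
                if (set[i] == v)
                        return true;
        return false;
}

/* returns 1 if every new entry already exists, 0 (reject) otherwise */
static int check_restart(const int *old, size_t n_old,
                         const int *new_, size_t n_new)
{
        int ret = 1;

        for (size_t i = 0; i < n_new; i++) {
                if (!contains(old, n_old, new_[i])) {
                        ret = 0;        /* the real code sends the restart ABORT here */
                        break;
                }
        }
        return ret;
}

int main(void)
{
        int old[]  = { 1, 2, 3 };
        int new_[] = { 2, 3, 4 };       /* 4 is a new address */

        printf("%d\n", check_restart(old, 3, new_, 3));  /* prints: 0 */
        return 0;
}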
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 443c161eb8bd..3376d7657185 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
@@ -18,10 +18,11 @@ config SUNRPC_XPRT_RDMA | |||
18 | If unsure, say N. | 18 | If unsure, say N. |
19 | 19 | ||
20 | config RPCSEC_GSS_KRB5 | 20 | config RPCSEC_GSS_KRB5 |
21 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" | 21 | tristate |
22 | depends on SUNRPC && EXPERIMENTAL | 22 | depends on SUNRPC && CRYPTO |
23 | prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4) | ||
24 | default y | ||
23 | select SUNRPC_GSS | 25 | select SUNRPC_GSS |
24 | select CRYPTO | ||
25 | select CRYPTO_MD5 | 26 | select CRYPTO_MD5 |
26 | select CRYPTO_DES | 27 | select CRYPTO_DES |
27 | select CRYPTO_CBC | 28 | select CRYPTO_CBC |
@@ -34,7 +35,7 @@ config RPCSEC_GSS_KRB5 | |||
34 | available from http://linux-nfs.org/. In addition, user-space | 35 | available from http://linux-nfs.org/. In addition, user-space |
35 | Kerberos support should be installed. | 36 | Kerberos support should be installed. |
36 | 37 | ||
37 | If unsure, say N. | 38 | If unsure, say Y. |
38 | 39 | ||
39 | config RPCSEC_GSS_SPKM3 | 40 | config RPCSEC_GSS_SPKM3 |
40 | tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" | 41 | tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 8dc47f1d0001..36cb66022a27 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -19,6 +19,15 @@ | |||
19 | # define RPCDBG_FACILITY RPCDBG_AUTH | 19 | # define RPCDBG_FACILITY RPCDBG_AUTH |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #define RPC_CREDCACHE_DEFAULT_HASHBITS (4) | ||
23 | struct rpc_cred_cache { | ||
24 | struct hlist_head *hashtable; | ||
25 | unsigned int hashbits; | ||
26 | spinlock_t lock; | ||
27 | }; | ||
28 | |||
29 | static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS; | ||
30 | |||
22 | static DEFINE_SPINLOCK(rpc_authflavor_lock); | 31 | static DEFINE_SPINLOCK(rpc_authflavor_lock); |
23 | static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { | 32 | static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { |
24 | &authnull_ops, /* AUTH_NULL */ | 33 | &authnull_ops, /* AUTH_NULL */ |
@@ -29,6 +38,47 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { | |||
29 | static LIST_HEAD(cred_unused); | 38 | static LIST_HEAD(cred_unused); |
30 | static unsigned long number_cred_unused; | 39 | static unsigned long number_cred_unused; |
31 | 40 | ||
41 | #define MAX_HASHTABLE_BITS (10) | ||
42 | static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) | ||
43 | { | ||
44 | unsigned long num; | ||
45 | unsigned int nbits; | ||
46 | int ret; | ||
47 | |||
48 | if (!val) | ||
49 | goto out_inval; | ||
50 | ret = strict_strtoul(val, 0, &num); | ||
51 | if (ret == -EINVAL) | ||
52 | goto out_inval; | ||
53 | nbits = fls(num); | ||
54 | if (num > (1U << nbits)) | ||
55 | nbits++; | ||
56 | if (nbits > MAX_HASHTABLE_BITS || nbits < 2) | ||
57 | goto out_inval; | ||
58 | *(unsigned int *)kp->arg = nbits; | ||
59 | return 0; | ||
60 | out_inval: | ||
61 | return -EINVAL; | ||
62 | } | ||
63 | |||
64 | static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp) | ||
65 | { | ||
66 | unsigned int nbits; | ||
67 | |||
68 | nbits = *(unsigned int *)kp->arg; | ||
69 | return sprintf(buffer, "%u", 1U << nbits); | ||
70 | } | ||
71 | |||
72 | #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int); | ||
73 | |||
74 | static struct kernel_param_ops param_ops_hashtbl_sz = { | ||
75 | .set = param_set_hashtbl_sz, | ||
76 | .get = param_get_hashtbl_sz, | ||
77 | }; | ||
78 | |||
79 | module_param_named(auth_hashtable_size, auth_hashbits, hashtbl_sz, 0644); | ||
80 | MODULE_PARM_DESC(auth_hashtable_size, "RPC credential cache hashtable size"); | ||
81 | |||
32 | static u32 | 82 | static u32 |
33 | pseudoflavor_to_flavor(u32 flavor) { | 83 | pseudoflavor_to_flavor(u32 flavor) { |
34 | if (flavor >= RPC_AUTH_MAXFLAVOR) | 84 | if (flavor >= RPC_AUTH_MAXFLAVOR) |
@@ -145,16 +195,23 @@ int | |||
145 | rpcauth_init_credcache(struct rpc_auth *auth) | 195 | rpcauth_init_credcache(struct rpc_auth *auth) |
146 | { | 196 | { |
147 | struct rpc_cred_cache *new; | 197 | struct rpc_cred_cache *new; |
148 | int i; | 198 | unsigned int hashsize; |
149 | 199 | ||
150 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 200 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
151 | if (!new) | 201 | if (!new) |
152 | return -ENOMEM; | 202 | goto out_nocache; |
153 | for (i = 0; i < RPC_CREDCACHE_NR; i++) | 203 | new->hashbits = auth_hashbits; |
154 | INIT_HLIST_HEAD(&new->hashtable[i]); | 204 | hashsize = 1U << new->hashbits; |
205 | new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL); | ||
206 | if (!new->hashtable) | ||
207 | goto out_nohashtbl; | ||
155 | spin_lock_init(&new->lock); | 208 | spin_lock_init(&new->lock); |
156 | auth->au_credcache = new; | 209 | auth->au_credcache = new; |
157 | return 0; | 210 | return 0; |
211 | out_nohashtbl: | ||
212 | kfree(new); | ||
213 | out_nocache: | ||
214 | return -ENOMEM; | ||
158 | } | 215 | } |
159 | EXPORT_SYMBOL_GPL(rpcauth_init_credcache); | 216 | EXPORT_SYMBOL_GPL(rpcauth_init_credcache); |
160 | 217 | ||
@@ -183,11 +240,12 @@ rpcauth_clear_credcache(struct rpc_cred_cache *cache) | |||
183 | LIST_HEAD(free); | 240 | LIST_HEAD(free); |
184 | struct hlist_head *head; | 241 | struct hlist_head *head; |
185 | struct rpc_cred *cred; | 242 | struct rpc_cred *cred; |
243 | unsigned int hashsize = 1U << cache->hashbits; | ||
186 | int i; | 244 | int i; |
187 | 245 | ||
188 | spin_lock(&rpc_credcache_lock); | 246 | spin_lock(&rpc_credcache_lock); |
189 | spin_lock(&cache->lock); | 247 | spin_lock(&cache->lock); |
190 | for (i = 0; i < RPC_CREDCACHE_NR; i++) { | 248 | for (i = 0; i < hashsize; i++) { |
191 | head = &cache->hashtable[i]; | 249 | head = &cache->hashtable[i]; |
192 | while (!hlist_empty(head)) { | 250 | while (!hlist_empty(head)) { |
193 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); | 251 | cred = hlist_entry(head->first, struct rpc_cred, cr_hash); |
@@ -216,6 +274,7 @@ rpcauth_destroy_credcache(struct rpc_auth *auth) | |||
216 | if (cache) { | 274 | if (cache) { |
217 | auth->au_credcache = NULL; | 275 | auth->au_credcache = NULL; |
218 | rpcauth_clear_credcache(cache); | 276 | rpcauth_clear_credcache(cache); |
277 | kfree(cache->hashtable); | ||
219 | kfree(cache); | 278 | kfree(cache); |
220 | } | 279 | } |
221 | } | 280 | } |
@@ -297,7 +356,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
297 | *entry, *new; | 356 | *entry, *new; |
298 | unsigned int nr; | 357 | unsigned int nr; |
299 | 358 | ||
300 | nr = hash_long(acred->uid, RPC_CREDCACHE_HASHBITS); | 359 | nr = hash_long(acred->uid, cache->hashbits); |
301 | 360 | ||
302 | rcu_read_lock(); | 361 | rcu_read_lock(); |
303 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { | 362 | hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { |
@@ -390,16 +449,16 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, | |||
390 | } | 449 | } |
391 | EXPORT_SYMBOL_GPL(rpcauth_init_cred); | 450 | EXPORT_SYMBOL_GPL(rpcauth_init_cred); |
392 | 451 | ||
393 | void | 452 | struct rpc_cred * |
394 | rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) | 453 | rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) |
395 | { | 454 | { |
396 | task->tk_msg.rpc_cred = get_rpccred(cred); | ||
397 | dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, | 455 | dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, |
398 | cred->cr_auth->au_ops->au_name, cred); | 456 | cred->cr_auth->au_ops->au_name, cred); |
457 | return get_rpccred(cred); | ||
399 | } | 458 | } |
400 | EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred); | 459 | EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred); |
401 | 460 | ||
402 | static void | 461 | static struct rpc_cred * |
403 | rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) | 462 | rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) |
404 | { | 463 | { |
405 | struct rpc_auth *auth = task->tk_client->cl_auth; | 464 | struct rpc_auth *auth = task->tk_client->cl_auth; |
@@ -407,45 +466,43 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) | |||
407 | .uid = 0, | 466 | .uid = 0, |
408 | .gid = 0, | 467 | .gid = 0, |
409 | }; | 468 | }; |
410 | struct rpc_cred *ret; | ||
411 | 469 | ||
412 | dprintk("RPC: %5u looking up %s cred\n", | 470 | dprintk("RPC: %5u looking up %s cred\n", |
413 | task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); | 471 | task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); |
414 | ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags); | 472 | return auth->au_ops->lookup_cred(auth, &acred, lookupflags); |
415 | if (!IS_ERR(ret)) | ||
416 | task->tk_msg.rpc_cred = ret; | ||
417 | else | ||
418 | task->tk_status = PTR_ERR(ret); | ||
419 | } | 473 | } |
420 | 474 | ||
421 | static void | 475 | static struct rpc_cred * |
422 | rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) | 476 | rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags) |
423 | { | 477 | { |
424 | struct rpc_auth *auth = task->tk_client->cl_auth; | 478 | struct rpc_auth *auth = task->tk_client->cl_auth; |
425 | struct rpc_cred *ret; | ||
426 | 479 | ||
427 | dprintk("RPC: %5u looking up %s cred\n", | 480 | dprintk("RPC: %5u looking up %s cred\n", |
428 | task->tk_pid, auth->au_ops->au_name); | 481 | task->tk_pid, auth->au_ops->au_name); |
429 | ret = rpcauth_lookupcred(auth, lookupflags); | 482 | return rpcauth_lookupcred(auth, lookupflags); |
430 | if (!IS_ERR(ret)) | ||
431 | task->tk_msg.rpc_cred = ret; | ||
432 | else | ||
433 | task->tk_status = PTR_ERR(ret); | ||
434 | } | 483 | } |
435 | 484 | ||
436 | void | 485 | static int |
437 | rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags) | 486 | rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags) |
438 | { | 487 | { |
488 | struct rpc_rqst *req = task->tk_rqstp; | ||
489 | struct rpc_cred *new; | ||
439 | int lookupflags = 0; | 490 | int lookupflags = 0; |
440 | 491 | ||
441 | if (flags & RPC_TASK_ASYNC) | 492 | if (flags & RPC_TASK_ASYNC) |
442 | lookupflags |= RPCAUTH_LOOKUP_NEW; | 493 | lookupflags |= RPCAUTH_LOOKUP_NEW; |
443 | if (cred != NULL) | 494 | if (cred != NULL) |
444 | cred->cr_ops->crbind(task, cred, lookupflags); | 495 | new = cred->cr_ops->crbind(task, cred, lookupflags); |
445 | else if (flags & RPC_TASK_ROOTCREDS) | 496 | else if (flags & RPC_TASK_ROOTCREDS) |
446 | rpcauth_bind_root_cred(task, lookupflags); | 497 | new = rpcauth_bind_root_cred(task, lookupflags); |
447 | else | 498 | else |
448 | rpcauth_bind_new_cred(task, lookupflags); | 499 | new = rpcauth_bind_new_cred(task, lookupflags); |
500 | if (IS_ERR(new)) | ||
501 | return PTR_ERR(new); | ||
502 | if (req->rq_cred != NULL) | ||
503 | put_rpccred(req->rq_cred); | ||
504 | req->rq_cred = new; | ||
505 | return 0; | ||
449 | } | 506 | } |
450 | 507 | ||
451 | void | 508 | void |
@@ -484,22 +541,10 @@ out_nodestroy: | |||
484 | } | 541 | } |
485 | EXPORT_SYMBOL_GPL(put_rpccred); | 542 | EXPORT_SYMBOL_GPL(put_rpccred); |
486 | 543 | ||
487 | void | ||
488 | rpcauth_unbindcred(struct rpc_task *task) | ||
489 | { | ||
490 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | ||
491 | |||
492 | dprintk("RPC: %5u releasing %s cred %p\n", | ||
493 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); | ||
494 | |||
495 | put_rpccred(cred); | ||
496 | task->tk_msg.rpc_cred = NULL; | ||
497 | } | ||
498 | |||
499 | __be32 * | 544 | __be32 * |
500 | rpcauth_marshcred(struct rpc_task *task, __be32 *p) | 545 | rpcauth_marshcred(struct rpc_task *task, __be32 *p) |
501 | { | 546 | { |
502 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 547 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
503 | 548 | ||
504 | dprintk("RPC: %5u marshaling %s cred %p\n", | 549 | dprintk("RPC: %5u marshaling %s cred %p\n", |
505 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); | 550 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
@@ -510,7 +555,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p) | |||
510 | __be32 * | 555 | __be32 * |
511 | rpcauth_checkverf(struct rpc_task *task, __be32 *p) | 556 | rpcauth_checkverf(struct rpc_task *task, __be32 *p) |
512 | { | 557 | { |
513 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 558 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
514 | 559 | ||
515 | dprintk("RPC: %5u validating %s cred %p\n", | 560 | dprintk("RPC: %5u validating %s cred %p\n", |
516 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); | 561 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
@@ -522,7 +567,7 @@ int | |||
522 | rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, | 567 | rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, |
523 | __be32 *data, void *obj) | 568 | __be32 *data, void *obj) |
524 | { | 569 | { |
525 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 570 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
526 | 571 | ||
527 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", | 572 | dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", |
528 | task->tk_pid, cred->cr_ops->cr_name, cred); | 573 | task->tk_pid, cred->cr_ops->cr_name, cred); |
@@ -536,7 +581,7 @@ int | |||
536 | rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | 581 | rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, |
537 | __be32 *data, void *obj) | 582 | __be32 *data, void *obj) |
538 | { | 583 | { |
539 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 584 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
540 | 585 | ||
541 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", | 586 | dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", |
542 | task->tk_pid, cred->cr_ops->cr_name, cred); | 587 | task->tk_pid, cred->cr_ops->cr_name, cred); |
@@ -550,13 +595,21 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, | |||
550 | int | 595 | int |
551 | rpcauth_refreshcred(struct rpc_task *task) | 596 | rpcauth_refreshcred(struct rpc_task *task) |
552 | { | 597 | { |
553 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 598 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
554 | int err; | 599 | int err; |
555 | 600 | ||
601 | cred = task->tk_rqstp->rq_cred; | ||
602 | if (cred == NULL) { | ||
603 | err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags); | ||
604 | if (err < 0) | ||
605 | goto out; | ||
606 | cred = task->tk_rqstp->rq_cred; | ||
607 | }; | ||
556 | dprintk("RPC: %5u refreshing %s cred %p\n", | 608 | dprintk("RPC: %5u refreshing %s cred %p\n", |
557 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); | 609 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
558 | 610 | ||
559 | err = cred->cr_ops->crrefresh(task); | 611 | err = cred->cr_ops->crrefresh(task); |
612 | out: | ||
560 | if (err < 0) | 613 | if (err < 0) |
561 | task->tk_status = err; | 614 | task->tk_status = err; |
562 | return err; | 615 | return err; |
@@ -565,7 +618,7 @@ rpcauth_refreshcred(struct rpc_task *task) | |||
565 | void | 618 | void |
566 | rpcauth_invalcred(struct rpc_task *task) | 619 | rpcauth_invalcred(struct rpc_task *task) |
567 | { | 620 | { |
568 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 621 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
569 | 622 | ||
570 | dprintk("RPC: %5u invalidating %s cred %p\n", | 623 | dprintk("RPC: %5u invalidating %s cred %p\n", |
571 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); | 624 | task->tk_pid, cred->cr_auth->au_ops->au_name, cred); |
@@ -576,7 +629,7 @@ rpcauth_invalcred(struct rpc_task *task) | |||
576 | int | 629 | int |
577 | rpcauth_uptodatecred(struct rpc_task *task) | 630 | rpcauth_uptodatecred(struct rpc_task *task) |
578 | { | 631 | { |
579 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 632 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
580 | 633 | ||
581 | return cred == NULL || | 634 | return cred == NULL || |
582 | test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; | 635 | test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; |
@@ -587,14 +640,27 @@ static struct shrinker rpc_cred_shrinker = { | |||
587 | .seeks = DEFAULT_SEEKS, | 640 | .seeks = DEFAULT_SEEKS, |
588 | }; | 641 | }; |
589 | 642 | ||
590 | void __init rpcauth_init_module(void) | 643 | int __init rpcauth_init_module(void) |
591 | { | 644 | { |
592 | rpc_init_authunix(); | 645 | int err; |
593 | rpc_init_generic_auth(); | 646 | |
647 | err = rpc_init_authunix(); | ||
648 | if (err < 0) | ||
649 | goto out1; | ||
650 | err = rpc_init_generic_auth(); | ||
651 | if (err < 0) | ||
652 | goto out2; | ||
594 | register_shrinker(&rpc_cred_shrinker); | 653 | register_shrinker(&rpc_cred_shrinker); |
654 | return 0; | ||
655 | out2: | ||
656 | rpc_destroy_authunix(); | ||
657 | out1: | ||
658 | return err; | ||
595 | } | 659 | } |
596 | 660 | ||
597 | void __exit rpcauth_remove_module(void) | 661 | void __exit rpcauth_remove_module(void) |
598 | { | 662 | { |
663 | rpc_destroy_authunix(); | ||
664 | rpc_destroy_generic_auth(); | ||
599 | unregister_shrinker(&rpc_cred_shrinker); | 665 | unregister_shrinker(&rpc_cred_shrinker); |
600 | } | 666 | } |
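rpcauth_init_module() now propagates failures and tears down already initialised pieces in reverse order instead of returning void. That is the usual staged-init/goto-unwind idiom; a self-contained sketch with made-up stage names standing in for the authunix and generic-auth caches:

#include <errno.h>
#include <stdio.h>

static int stage_a_up;                          /* stands in for the authunix cache */

static int  init_stage_a(void) { stage_a_up = 1; return 0; }
static void exit_stage_a(void) { stage_a_up = 0; }
static int  init_stage_b(void) { return -ENOMEM; }      /* pretend this fails */

static int module_init_sketch(void)
{
        int err;

        err = init_stage_a();
        if (err < 0)
                goto out1;
        err = init_stage_b();
        if (err < 0)
                goto out2;
        return 0;
out2:
        exit_stage_a();                 /* roll back what already succeeded */
out1:
        return err;
}

int main(void)
{
        printf("init: %d, stage_a_up=%d\n", module_init_sketch(), stage_a_up);
        return 0;                       /* prints: init: -12, stage_a_up=0 */
}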
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 8f623b0f03dd..43162bb3b78f 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c | |||
@@ -27,7 +27,6 @@ struct generic_cred { | |||
27 | }; | 27 | }; |
28 | 28 | ||
29 | static struct rpc_auth generic_auth; | 29 | static struct rpc_auth generic_auth; |
30 | static struct rpc_cred_cache generic_cred_cache; | ||
31 | static const struct rpc_credops generic_credops; | 30 | static const struct rpc_credops generic_credops; |
32 | 31 | ||
33 | /* | 32 | /* |
@@ -55,18 +54,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void) | |||
55 | } | 54 | } |
56 | EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred); | 55 | EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred); |
57 | 56 | ||
58 | static void | 57 | static struct rpc_cred *generic_bind_cred(struct rpc_task *task, |
59 | generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags) | 58 | struct rpc_cred *cred, int lookupflags) |
60 | { | 59 | { |
61 | struct rpc_auth *auth = task->tk_client->cl_auth; | 60 | struct rpc_auth *auth = task->tk_client->cl_auth; |
62 | struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred; | 61 | struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred; |
63 | struct rpc_cred *ret; | ||
64 | 62 | ||
65 | ret = auth->au_ops->lookup_cred(auth, acred, lookupflags); | 63 | return auth->au_ops->lookup_cred(auth, acred, lookupflags); |
66 | if (!IS_ERR(ret)) | ||
67 | task->tk_msg.rpc_cred = ret; | ||
68 | else | ||
69 | task->tk_status = PTR_ERR(ret); | ||
70 | } | 64 | } |
71 | 65 | ||
72 | /* | 66 | /* |
@@ -159,20 +153,16 @@ out_nomatch: | |||
159 | return 0; | 153 | return 0; |
160 | } | 154 | } |
161 | 155 | ||
162 | void __init rpc_init_generic_auth(void) | 156 | int __init rpc_init_generic_auth(void) |
163 | { | 157 | { |
164 | spin_lock_init(&generic_cred_cache.lock); | 158 | return rpcauth_init_credcache(&generic_auth); |
165 | } | 159 | } |
166 | 160 | ||
167 | void __exit rpc_destroy_generic_auth(void) | 161 | void __exit rpc_destroy_generic_auth(void) |
168 | { | 162 | { |
169 | rpcauth_clear_credcache(&generic_cred_cache); | 163 | rpcauth_destroy_credcache(&generic_auth); |
170 | } | 164 | } |
171 | 165 | ||
172 | static struct rpc_cred_cache generic_cred_cache = { | ||
173 | {{ NULL, },}, | ||
174 | }; | ||
175 | |||
176 | static const struct rpc_authops generic_auth_ops = { | 166 | static const struct rpc_authops generic_auth_ops = { |
177 | .owner = THIS_MODULE, | 167 | .owner = THIS_MODULE, |
178 | .au_name = "Generic", | 168 | .au_name = "Generic", |
@@ -183,7 +173,6 @@ static const struct rpc_authops generic_auth_ops = { | |||
183 | static struct rpc_auth generic_auth = { | 173 | static struct rpc_auth generic_auth = { |
184 | .au_ops = &generic_auth_ops, | 174 | .au_ops = &generic_auth_ops, |
185 | .au_count = ATOMIC_INIT(0), | 175 | .au_count = ATOMIC_INIT(0), |
186 | .au_credcache = &generic_cred_cache, | ||
187 | }; | 176 | }; |
188 | 177 | ||
189 | static const struct rpc_credops generic_credops = { | 178 | static const struct rpc_credops generic_credops = { |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8da2a0e68574..dcfc66bab2bb 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -373,7 +373,7 @@ gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss | |||
373 | static void | 373 | static void |
374 | gss_upcall_callback(struct rpc_task *task) | 374 | gss_upcall_callback(struct rpc_task *task) |
375 | { | 375 | { |
376 | struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, | 376 | struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred, |
377 | struct gss_cred, gc_base); | 377 | struct gss_cred, gc_base); |
378 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; | 378 | struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; |
379 | struct inode *inode = &gss_msg->inode->vfs_inode; | 379 | struct inode *inode = &gss_msg->inode->vfs_inode; |
@@ -502,7 +502,7 @@ static void warn_gssd(void) | |||
502 | static inline int | 502 | static inline int |
503 | gss_refresh_upcall(struct rpc_task *task) | 503 | gss_refresh_upcall(struct rpc_task *task) |
504 | { | 504 | { |
505 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 505 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
506 | struct gss_auth *gss_auth = container_of(cred->cr_auth, | 506 | struct gss_auth *gss_auth = container_of(cred->cr_auth, |
507 | struct gss_auth, rpc_auth); | 507 | struct gss_auth, rpc_auth); |
508 | struct gss_cred *gss_cred = container_of(cred, | 508 | struct gss_cred *gss_cred = container_of(cred, |
@@ -928,6 +928,7 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx) | |||
928 | { | 928 | { |
929 | dprintk("RPC: gss_free_ctx\n"); | 929 | dprintk("RPC: gss_free_ctx\n"); |
930 | 930 | ||
931 | gss_delete_sec_context(&ctx->gc_gss_ctx); | ||
931 | kfree(ctx->gc_wire_ctx.data); | 932 | kfree(ctx->gc_wire_ctx.data); |
932 | kfree(ctx); | 933 | kfree(ctx); |
933 | } | 934 | } |
@@ -942,13 +943,7 @@ gss_free_ctx_callback(struct rcu_head *head) | |||
942 | static void | 943 | static void |
943 | gss_free_ctx(struct gss_cl_ctx *ctx) | 944 | gss_free_ctx(struct gss_cl_ctx *ctx) |
944 | { | 945 | { |
945 | struct gss_ctx *gc_gss_ctx; | ||
946 | |||
947 | gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx); | ||
948 | rcu_assign_pointer(ctx->gc_gss_ctx, NULL); | ||
949 | call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); | 946 | call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); |
950 | if (gc_gss_ctx) | ||
951 | gss_delete_sec_context(&gc_gss_ctx); | ||
952 | } | 947 | } |
953 | 948 | ||
954 | static void | 949 | static void |
@@ -1064,12 +1059,12 @@ out: | |||
1064 | static __be32 * | 1059 | static __be32 * |
1065 | gss_marshal(struct rpc_task *task, __be32 *p) | 1060 | gss_marshal(struct rpc_task *task, __be32 *p) |
1066 | { | 1061 | { |
1067 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1062 | struct rpc_rqst *req = task->tk_rqstp; |
1063 | struct rpc_cred *cred = req->rq_cred; | ||
1068 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1064 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
1069 | gc_base); | 1065 | gc_base); |
1070 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1066 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
1071 | __be32 *cred_len; | 1067 | __be32 *cred_len; |
1072 | struct rpc_rqst *req = task->tk_rqstp; | ||
1073 | u32 maj_stat = 0; | 1068 | u32 maj_stat = 0; |
1074 | struct xdr_netobj mic; | 1069 | struct xdr_netobj mic; |
1075 | struct kvec iov; | 1070 | struct kvec iov; |
@@ -1119,7 +1114,7 @@ out_put_ctx: | |||
1119 | 1114 | ||
1120 | static int gss_renew_cred(struct rpc_task *task) | 1115 | static int gss_renew_cred(struct rpc_task *task) |
1121 | { | 1116 | { |
1122 | struct rpc_cred *oldcred = task->tk_msg.rpc_cred; | 1117 | struct rpc_cred *oldcred = task->tk_rqstp->rq_cred; |
1123 | struct gss_cred *gss_cred = container_of(oldcred, | 1118 | struct gss_cred *gss_cred = container_of(oldcred, |
1124 | struct gss_cred, | 1119 | struct gss_cred, |
1125 | gc_base); | 1120 | gc_base); |
@@ -1133,7 +1128,7 @@ static int gss_renew_cred(struct rpc_task *task) | |||
1133 | new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); | 1128 | new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); |
1134 | if (IS_ERR(new)) | 1129 | if (IS_ERR(new)) |
1135 | return PTR_ERR(new); | 1130 | return PTR_ERR(new); |
1136 | task->tk_msg.rpc_cred = new; | 1131 | task->tk_rqstp->rq_cred = new; |
1137 | put_rpccred(oldcred); | 1132 | put_rpccred(oldcred); |
1138 | return 0; | 1133 | return 0; |
1139 | } | 1134 | } |
@@ -1161,7 +1156,7 @@ static int gss_cred_is_negative_entry(struct rpc_cred *cred) | |||
1161 | static int | 1156 | static int |
1162 | gss_refresh(struct rpc_task *task) | 1157 | gss_refresh(struct rpc_task *task) |
1163 | { | 1158 | { |
1164 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1159 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1165 | int ret = 0; | 1160 | int ret = 0; |
1166 | 1161 | ||
1167 | if (gss_cred_is_negative_entry(cred)) | 1162 | if (gss_cred_is_negative_entry(cred)) |
@@ -1172,7 +1167,7 @@ gss_refresh(struct rpc_task *task) | |||
1172 | ret = gss_renew_cred(task); | 1167 | ret = gss_renew_cred(task); |
1173 | if (ret < 0) | 1168 | if (ret < 0) |
1174 | goto out; | 1169 | goto out; |
1175 | cred = task->tk_msg.rpc_cred; | 1170 | cred = task->tk_rqstp->rq_cred; |
1176 | } | 1171 | } |
1177 | 1172 | ||
1178 | if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) | 1173 | if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)) |
@@ -1191,7 +1186,7 @@ gss_refresh_null(struct rpc_task *task) | |||
1191 | static __be32 * | 1186 | static __be32 * |
1192 | gss_validate(struct rpc_task *task, __be32 *p) | 1187 | gss_validate(struct rpc_task *task, __be32 *p) |
1193 | { | 1188 | { |
1194 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1189 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1195 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1190 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
1196 | __be32 seq; | 1191 | __be32 seq; |
1197 | struct kvec iov; | 1192 | struct kvec iov; |
@@ -1400,7 +1395,7 @@ static int | |||
1400 | gss_wrap_req(struct rpc_task *task, | 1395 | gss_wrap_req(struct rpc_task *task, |
1401 | kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) | 1396 | kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) |
1402 | { | 1397 | { |
1403 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1398 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1404 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1399 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
1405 | gc_base); | 1400 | gc_base); |
1406 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1401 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
@@ -1503,7 +1498,7 @@ static int | |||
1503 | gss_unwrap_resp(struct rpc_task *task, | 1498 | gss_unwrap_resp(struct rpc_task *task, |
1504 | kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) | 1499 | kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) |
1505 | { | 1500 | { |
1506 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 1501 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
1507 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1502 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
1508 | gc_base); | 1503 | gc_base); |
1509 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1504 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 1db618f56ecb..a5c36c01707b 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c | |||
@@ -75,7 +75,7 @@ nul_marshal(struct rpc_task *task, __be32 *p) | |||
75 | static int | 75 | static int |
76 | nul_refresh(struct rpc_task *task) | 76 | nul_refresh(struct rpc_task *task) |
77 | { | 77 | { |
78 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); | 78 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags); |
79 | return 0; | 79 | return 0; |
80 | } | 80 | } |
81 | 81 | ||
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index aac2f8b4ee21..4cb70dc6e7ad 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -29,7 +29,6 @@ struct unx_cred { | |||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | static struct rpc_auth unix_auth; | 31 | static struct rpc_auth unix_auth; |
32 | static struct rpc_cred_cache unix_cred_cache; | ||
33 | static const struct rpc_credops unix_credops; | 32 | static const struct rpc_credops unix_credops; |
34 | 33 | ||
35 | static struct rpc_auth * | 34 | static struct rpc_auth * |
@@ -141,7 +140,7 @@ static __be32 * | |||
141 | unx_marshal(struct rpc_task *task, __be32 *p) | 140 | unx_marshal(struct rpc_task *task, __be32 *p) |
142 | { | 141 | { |
143 | struct rpc_clnt *clnt = task->tk_client; | 142 | struct rpc_clnt *clnt = task->tk_client; |
144 | struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base); | 143 | struct unx_cred *cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base); |
145 | __be32 *base, *hold; | 144 | __be32 *base, *hold; |
146 | int i; | 145 | int i; |
147 | 146 | ||
@@ -174,7 +173,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) | |||
174 | static int | 173 | static int |
175 | unx_refresh(struct rpc_task *task) | 174 | unx_refresh(struct rpc_task *task) |
176 | { | 175 | { |
177 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); | 176 | set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags); |
178 | return 0; | 177 | return 0; |
179 | } | 178 | } |
180 | 179 | ||
@@ -197,15 +196,20 @@ unx_validate(struct rpc_task *task, __be32 *p) | |||
197 | printk("RPC: giant verf size: %u\n", size); | 196 | printk("RPC: giant verf size: %u\n", size); |
198 | return NULL; | 197 | return NULL; |
199 | } | 198 | } |
200 | task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2; | 199 | task->tk_rqstp->rq_cred->cr_auth->au_rslack = (size >> 2) + 2; |
201 | p += (size >> 2); | 200 | p += (size >> 2); |
202 | 201 | ||
203 | return p; | 202 | return p; |
204 | } | 203 | } |
205 | 204 | ||
206 | void __init rpc_init_authunix(void) | 205 | int __init rpc_init_authunix(void) |
207 | { | 206 | { |
208 | spin_lock_init(&unix_cred_cache.lock); | 207 | return rpcauth_init_credcache(&unix_auth); |
208 | } | ||
209 | |||
210 | void rpc_destroy_authunix(void) | ||
211 | { | ||
212 | rpcauth_destroy_credcache(&unix_auth); | ||
209 | } | 213 | } |
210 | 214 | ||
211 | const struct rpc_authops authunix_ops = { | 215 | const struct rpc_authops authunix_ops = { |
@@ -219,17 +223,12 @@ const struct rpc_authops authunix_ops = { | |||
219 | }; | 223 | }; |
220 | 224 | ||
221 | static | 225 | static |
222 | struct rpc_cred_cache unix_cred_cache = { | ||
223 | }; | ||
224 | |||
225 | static | ||
226 | struct rpc_auth unix_auth = { | 226 | struct rpc_auth unix_auth = { |
227 | .au_cslack = UNX_WRITESLACK, | 227 | .au_cslack = UNX_WRITESLACK, |
228 | .au_rslack = 2, /* assume AUTH_NULL verf */ | 228 | .au_rslack = 2, /* assume AUTH_NULL verf */ |
229 | .au_ops = &authunix_ops, | 229 | .au_ops = &authunix_ops, |
230 | .au_flavor = RPC_AUTH_UNIX, | 230 | .au_flavor = RPC_AUTH_UNIX, |
231 | .au_count = ATOMIC_INIT(0), | 231 | .au_count = ATOMIC_INIT(0), |
232 | .au_credcache = &unix_cred_cache, | ||
233 | }; | 232 | }; |
234 | 233 | ||
235 | static | 234 | static |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 58de76c8540c..2b06410e584e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/sunrpc/cache.h> | 34 | #include <linux/sunrpc/cache.h> |
35 | #include <linux/sunrpc/stats.h> | 35 | #include <linux/sunrpc/stats.h> |
36 | #include <linux/sunrpc/rpc_pipe_fs.h> | 36 | #include <linux/sunrpc/rpc_pipe_fs.h> |
37 | #include <linux/smp_lock.h> | ||
38 | 37 | ||
39 | #define RPCDBG_FACILITY RPCDBG_CACHE | 38 | #define RPCDBG_FACILITY RPCDBG_CACHE |
40 | 39 | ||
@@ -320,7 +319,7 @@ static struct cache_detail *current_detail; | |||
320 | static int current_index; | 319 | static int current_index; |
321 | 320 | ||
322 | static void do_cache_clean(struct work_struct *work); | 321 | static void do_cache_clean(struct work_struct *work); |
323 | static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); | 322 | static struct delayed_work cache_cleaner; |
324 | 323 | ||
325 | static void sunrpc_init_cache_detail(struct cache_detail *cd) | 324 | static void sunrpc_init_cache_detail(struct cache_detail *cd) |
326 | { | 325 | { |
@@ -1504,6 +1503,11 @@ static int create_cache_proc_entries(struct cache_detail *cd) | |||
1504 | } | 1503 | } |
1505 | #endif | 1504 | #endif |
1506 | 1505 | ||
1506 | void __init cache_initialize(void) | ||
1507 | { | ||
1508 | INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); | ||
1509 | } | ||
1510 | |||
1507 | int cache_register(struct cache_detail *cd) | 1511 | int cache_register(struct cache_detail *cd) |
1508 | { | 1512 | { |
1509 | int ret; | 1513 | int ret; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 756fc324db9e..2388d83b68ff 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -414,6 +414,35 @@ out_no_clnt: | |||
414 | EXPORT_SYMBOL_GPL(rpc_clone_client); | 414 | EXPORT_SYMBOL_GPL(rpc_clone_client); |
415 | 415 | ||
416 | /* | 416 | /* |
417 | * Kill all tasks for the given client. | ||
418 | * XXX: kill their descendants as well? | ||
419 | */ | ||
420 | void rpc_killall_tasks(struct rpc_clnt *clnt) | ||
421 | { | ||
422 | struct rpc_task *rovr; | ||
423 | |||
424 | |||
425 | if (list_empty(&clnt->cl_tasks)) | ||
426 | return; | ||
427 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
428 | /* | ||
429 | * Spin lock all_tasks to prevent changes... | ||
430 | */ | ||
431 | spin_lock(&clnt->cl_lock); | ||
432 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { | ||
433 | if (!RPC_IS_ACTIVATED(rovr)) | ||
434 | continue; | ||
435 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { | ||
436 | rovr->tk_flags |= RPC_TASK_KILLED; | ||
437 | rpc_exit(rovr, -EIO); | ||
438 | rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); | ||
439 | } | ||
440 | } | ||
441 | spin_unlock(&clnt->cl_lock); | ||
442 | } | ||
443 | EXPORT_SYMBOL_GPL(rpc_killall_tasks); | ||
444 | |||
445 | /* | ||
417 | * Properly shut down an RPC client, terminating all outstanding | 446 | * Properly shut down an RPC client, terminating all outstanding |
418 | * requests. | 447 | * requests. |
419 | */ | 448 | */ |
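rpc_killall_tasks(), added to clnt.c in the hunk above, walks the client's task list under cl_lock, marks each activated task as killed and forces it to exit with -EIO before waking it. A small pthread sketch of the same walk-under-lock-and-flag shape; the lock, list layout and flag value here are stand-ins, not the kernel's:

#include <pthread.h>
#include <stdio.h>

#define TASK_KILLED 0x1

struct task {
        int flags;
        struct task *next;
};

struct client {
        pthread_mutex_t lock;
        struct task *tasks;
};

static void killall_tasks(struct client *clnt)
{
        pthread_mutex_lock(&clnt->lock);
        for (struct task *t = clnt->tasks; t; t = t->next) {
                if (!(t->flags & TASK_KILLED)) {
                        t->flags |= TASK_KILLED;
                        /* the real code also sets the task's status to -EIO
                         * and wakes it from its wait queue here */
                }
        }
        pthread_mutex_unlock(&clnt->lock);
}

int main(void)
{
        struct task t2 = { 0, NULL }, t1 = { 0, &t2 };
        struct client c = { PTHREAD_MUTEX_INITIALIZER, &t1 };

        killall_tasks(&c);
        printf("%d %d\n", t1.flags, t2.flags);  /* prints: 1 1 */
        return 0;
}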
@@ -538,6 +567,49 @@ out: | |||
538 | } | 567 | } |
539 | EXPORT_SYMBOL_GPL(rpc_bind_new_program); | 568 | EXPORT_SYMBOL_GPL(rpc_bind_new_program); |
540 | 569 | ||
570 | void rpc_task_release_client(struct rpc_task *task) | ||
571 | { | ||
572 | struct rpc_clnt *clnt = task->tk_client; | ||
573 | |||
574 | if (clnt != NULL) { | ||
575 | /* Remove from client task list */ | ||
576 | spin_lock(&clnt->cl_lock); | ||
577 | list_del(&task->tk_task); | ||
578 | spin_unlock(&clnt->cl_lock); | ||
579 | task->tk_client = NULL; | ||
580 | |||
581 | rpc_release_client(clnt); | ||
582 | } | ||
583 | } | ||
584 | |||
585 | static | ||
586 | void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) | ||
587 | { | ||
588 | if (clnt != NULL) { | ||
589 | rpc_task_release_client(task); | ||
590 | task->tk_client = clnt; | ||
591 | kref_get(&clnt->cl_kref); | ||
592 | if (clnt->cl_softrtry) | ||
593 | task->tk_flags |= RPC_TASK_SOFT; | ||
594 | /* Add to the client's list of all tasks */ | ||
595 | spin_lock(&clnt->cl_lock); | ||
596 | list_add_tail(&task->tk_task, &clnt->cl_tasks); | ||
597 | spin_unlock(&clnt->cl_lock); | ||
598 | } | ||
599 | } | ||
600 | |||
601 | static void | ||
602 | rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) | ||
603 | { | ||
604 | if (msg != NULL) { | ||
605 | task->tk_msg.rpc_proc = msg->rpc_proc; | ||
606 | task->tk_msg.rpc_argp = msg->rpc_argp; | ||
607 | task->tk_msg.rpc_resp = msg->rpc_resp; | ||
608 | if (msg->rpc_cred != NULL) | ||
609 | task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred); | ||
610 | } | ||
611 | } | ||
612 | |||
541 | /* | 613 | /* |
542 | * Default callback for async RPC calls | 614 | * Default callback for async RPC calls |
543 | */ | 615 | */ |
@@ -562,6 +634,18 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) | |||
562 | if (IS_ERR(task)) | 634 | if (IS_ERR(task)) |
563 | goto out; | 635 | goto out; |
564 | 636 | ||
637 | rpc_task_set_client(task, task_setup_data->rpc_client); | ||
638 | rpc_task_set_rpc_message(task, task_setup_data->rpc_message); | ||
639 | |||
640 | if (task->tk_status != 0) { | ||
641 | int ret = task->tk_status; | ||
642 | rpc_put_task(task); | ||
643 | return ERR_PTR(ret); | ||
644 | } | ||
645 | |||
646 | if (task->tk_action == NULL) | ||
647 | rpc_call_start(task); | ||
648 | |||
565 | atomic_inc(&task->tk_count); | 649 | atomic_inc(&task->tk_count); |
566 | rpc_execute(task); | 650 | rpc_execute(task); |
567 | out: | 651 | out: |
@@ -756,12 +840,13 @@ EXPORT_SYMBOL_GPL(rpc_force_rebind); | |||
756 | * Restart an (async) RPC call from the call_prepare state. | 840 | * Restart an (async) RPC call from the call_prepare state. |
757 | * Usually called from within the exit handler. | 841 | * Usually called from within the exit handler. |
758 | */ | 842 | */ |
759 | void | 843 | int |
760 | rpc_restart_call_prepare(struct rpc_task *task) | 844 | rpc_restart_call_prepare(struct rpc_task *task) |
761 | { | 845 | { |
762 | if (RPC_ASSASSINATED(task)) | 846 | if (RPC_ASSASSINATED(task)) |
763 | return; | 847 | return 0; |
764 | task->tk_action = rpc_prepare_task; | 848 | task->tk_action = rpc_prepare_task; |
849 | return 1; | ||
765 | } | 850 | } |
766 | EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); | 851 | EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); |
767 | 852 | ||
@@ -769,13 +854,13 @@ EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); | |||
769 | * Restart an (async) RPC call. Usually called from within the | 854 | * Restart an (async) RPC call. Usually called from within the |
770 | * exit handler. | 855 | * exit handler. |
771 | */ | 856 | */ |
772 | void | 857 | int |
773 | rpc_restart_call(struct rpc_task *task) | 858 | rpc_restart_call(struct rpc_task *task) |
774 | { | 859 | { |
775 | if (RPC_ASSASSINATED(task)) | 860 | if (RPC_ASSASSINATED(task)) |
776 | return; | 861 | return 0; |
777 | |||
778 | task->tk_action = call_start; | 862 | task->tk_action = call_start; |
863 | return 1; | ||
779 | } | 864 | } |
780 | EXPORT_SYMBOL_GPL(rpc_restart_call); | 865 | EXPORT_SYMBOL_GPL(rpc_restart_call); |
781 | 866 | ||
@@ -824,11 +909,6 @@ call_reserve(struct rpc_task *task) | |||
824 | { | 909 | { |
825 | dprint_status(task); | 910 | dprint_status(task); |
826 | 911 | ||
827 | if (!rpcauth_uptodatecred(task)) { | ||
828 | task->tk_action = call_refresh; | ||
829 | return; | ||
830 | } | ||
831 | |||
832 | task->tk_status = 0; | 912 | task->tk_status = 0; |
833 | task->tk_action = call_reserveresult; | 913 | task->tk_action = call_reserveresult; |
834 | xprt_reserve(task); | 914 | xprt_reserve(task); |
@@ -892,7 +972,7 @@ call_reserveresult(struct rpc_task *task) | |||
892 | static void | 972 | static void |
893 | call_allocate(struct rpc_task *task) | 973 | call_allocate(struct rpc_task *task) |
894 | { | 974 | { |
895 | unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack; | 975 | unsigned int slack = task->tk_client->cl_auth->au_cslack; |
896 | struct rpc_rqst *req = task->tk_rqstp; | 976 | struct rpc_rqst *req = task->tk_rqstp; |
897 | struct rpc_xprt *xprt = task->tk_xprt; | 977 | struct rpc_xprt *xprt = task->tk_xprt; |
898 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; | 978 | struct rpc_procinfo *proc = task->tk_msg.rpc_proc; |
@@ -900,7 +980,7 @@ call_allocate(struct rpc_task *task) | |||
900 | dprint_status(task); | 980 | dprint_status(task); |
901 | 981 | ||
902 | task->tk_status = 0; | 982 | task->tk_status = 0; |
903 | task->tk_action = call_bind; | 983 | task->tk_action = call_refresh; |
904 | 984 | ||
905 | if (req->rq_buffer) | 985 | if (req->rq_buffer) |
906 | return; | 986 | return; |
@@ -937,6 +1017,47 @@ call_allocate(struct rpc_task *task) | |||
937 | rpc_exit(task, -ERESTARTSYS); | 1017 | rpc_exit(task, -ERESTARTSYS); |
938 | } | 1018 | } |
939 | 1019 | ||
1020 | /* | ||
1021 | * 2a. Bind and/or refresh the credentials | ||
1022 | */ | ||
1023 | static void | ||
1024 | call_refresh(struct rpc_task *task) | ||
1025 | { | ||
1026 | dprint_status(task); | ||
1027 | |||
1028 | task->tk_action = call_refreshresult; | ||
1029 | task->tk_status = 0; | ||
1030 | task->tk_client->cl_stats->rpcauthrefresh++; | ||
1031 | rpcauth_refreshcred(task); | ||
1032 | } | ||
1033 | |||
1034 | /* | ||
1035 | * 2b. Process the results of a credential refresh | ||
1036 | */ | ||
1037 | static void | ||
1038 | call_refreshresult(struct rpc_task *task) | ||
1039 | { | ||
1040 | int status = task->tk_status; | ||
1041 | |||
1042 | dprint_status(task); | ||
1043 | |||
1044 | task->tk_status = 0; | ||
1045 | task->tk_action = call_bind; | ||
1046 | if (status >= 0 && rpcauth_uptodatecred(task)) | ||
1047 | return; | ||
1048 | switch (status) { | ||
1049 | case -EACCES: | ||
1050 | rpc_exit(task, -EACCES); | ||
1051 | return; | ||
1052 | case -ENOMEM: | ||
1053 | rpc_exit(task, -ENOMEM); | ||
1054 | return; | ||
1055 | case -ETIMEDOUT: | ||
1056 | rpc_delay(task, 3*HZ); | ||
1057 | } | ||
1058 | task->tk_action = call_refresh; | ||
1059 | } | ||
1060 | |||
940 | static inline int | 1061 | static inline int |
941 | rpc_task_need_encode(struct rpc_task *task) | 1062 | rpc_task_need_encode(struct rpc_task *task) |
942 | { | 1063 | { |
@@ -1472,43 +1593,6 @@ out_retry: | |||
1472 | } | 1593 | } |
1473 | } | 1594 | } |
1474 | 1595 | ||
1475 | /* | ||
1476 | * 8. Refresh the credentials if rejected by the server | ||
1477 | */ | ||
1478 | static void | ||
1479 | call_refresh(struct rpc_task *task) | ||
1480 | { | ||
1481 | dprint_status(task); | ||
1482 | |||
1483 | task->tk_action = call_refreshresult; | ||
1484 | task->tk_status = 0; | ||
1485 | task->tk_client->cl_stats->rpcauthrefresh++; | ||
1486 | rpcauth_refreshcred(task); | ||
1487 | } | ||
1488 | |||
1489 | /* | ||
1490 | * 8a. Process the results of a credential refresh | ||
1491 | */ | ||
1492 | static void | ||
1493 | call_refreshresult(struct rpc_task *task) | ||
1494 | { | ||
1495 | int status = task->tk_status; | ||
1496 | |||
1497 | dprint_status(task); | ||
1498 | |||
1499 | task->tk_status = 0; | ||
1500 | task->tk_action = call_reserve; | ||
1501 | if (status >= 0 && rpcauth_uptodatecred(task)) | ||
1502 | return; | ||
1503 | if (status == -EACCES) { | ||
1504 | rpc_exit(task, -EACCES); | ||
1505 | return; | ||
1506 | } | ||
1507 | task->tk_action = call_refresh; | ||
1508 | if (status != -ETIMEDOUT) | ||
1509 | rpc_delay(task, 3*HZ); | ||
1510 | } | ||
1511 | |||
1512 | static __be32 * | 1596 | static __be32 * |
1513 | rpc_encode_header(struct rpc_task *task) | 1597 | rpc_encode_header(struct rpc_task *task) |
1514 | { | 1598 | { |
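The clnt.c changes above move credential refresh earlier in the client state machine: call_allocate now hands off to call_refresh, and call_refreshresult continues to call_bind when the credential is usable, gives up on -EACCES or -ENOMEM, and otherwise retries the refresh (after an rpc_delay of 3*HZ in the -ETIMEDOUT case). A condensed sketch of that decision step; the state names and helper signature are illustrative:

#include <errno.h>
#include <stdio.h>

enum next_step { STEP_REFRESH = 1, STEP_BIND, STEP_EXIT };

static enum next_step refresh_result(int status, int cred_uptodate)
{
        if (status >= 0 && cred_uptodate)
                return STEP_BIND;       /* credential usable: keep going */
        switch (status) {
        case -EACCES:
        case -ENOMEM:
                return STEP_EXIT;       /* hard failure: rpc_exit() */
        case -ETIMEDOUT:
                /* the real code sleeps first: rpc_delay(task, 3*HZ) */
        default:
                return STEP_REFRESH;    /* run the refresh again */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               refresh_result(0, 1),            /* STEP_BIND    -> 2 */
               refresh_result(-ETIMEDOUT, 0),   /* STEP_REFRESH -> 1 */
               refresh_result(-EACCES, 0));     /* STEP_EXIT    -> 3 */
        return 0;
}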
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 4a843b883b89..cace6049e4a5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -246,17 +246,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task) | |||
246 | 246 | ||
247 | static void rpc_set_active(struct rpc_task *task) | 247 | static void rpc_set_active(struct rpc_task *task) |
248 | { | 248 | { |
249 | struct rpc_clnt *clnt; | ||
250 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | ||
251 | return; | ||
252 | rpc_task_set_debuginfo(task); | 249 | rpc_task_set_debuginfo(task); |
253 | /* Add to global list of all tasks */ | 250 | set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
254 | clnt = task->tk_client; | ||
255 | if (clnt != NULL) { | ||
256 | spin_lock(&clnt->cl_lock); | ||
257 | list_add_tail(&task->tk_task, &clnt->cl_tasks); | ||
258 | spin_unlock(&clnt->cl_lock); | ||
259 | } | ||
260 | } | 251 | } |
261 | 252 | ||
262 | /* | 253 | /* |
@@ -319,11 +310,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
319 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", | 310 | dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", |
320 | task->tk_pid, rpc_qname(q), jiffies); | 311 | task->tk_pid, rpc_qname(q), jiffies); |
321 | 312 | ||
322 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { | ||
323 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); | ||
324 | return; | ||
325 | } | ||
326 | |||
327 | __rpc_add_wait_queue(q, task); | 313 | __rpc_add_wait_queue(q, task); |
328 | 314 | ||
329 | BUG_ON(task->tk_callback != NULL); | 315 | BUG_ON(task->tk_callback != NULL); |
@@ -334,8 +320,8 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
334 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 320 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
335 | rpc_action action) | 321 | rpc_action action) |
336 | { | 322 | { |
337 | /* Mark the task as being activated if so needed */ | 323 | /* We shouldn't ever put an inactive task to sleep */ |
338 | rpc_set_active(task); | 324 | BUG_ON(!RPC_IS_ACTIVATED(task)); |
339 | 325 | ||
340 | /* | 326 | /* |
341 | * Protect the queue operations. | 327 | * Protect the queue operations. |
@@ -406,14 +392,6 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task | |||
406 | EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task); | 392 | EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task); |
407 | 393 | ||
408 | /* | 394 | /* |
409 | * Wake up the specified task | ||
410 | */ | ||
411 | static void rpc_wake_up_task(struct rpc_task *task) | ||
412 | { | ||
413 | rpc_wake_up_queued_task(task->tk_waitqueue, task); | ||
414 | } | ||
415 | |||
416 | /* | ||
417 | * Wake up the next task on a priority queue. | 395 | * Wake up the next task on a priority queue. |
418 | */ | 396 | */ |
419 | static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) | 397 | static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) |
@@ -600,7 +578,15 @@ void rpc_exit_task(struct rpc_task *task) | |||
600 | } | 578 | } |
601 | } | 579 | } |
602 | } | 580 | } |
603 | EXPORT_SYMBOL_GPL(rpc_exit_task); | 581 | |
582 | void rpc_exit(struct rpc_task *task, int status) | ||
583 | { | ||
584 | task->tk_status = status; | ||
585 | task->tk_action = rpc_exit_task; | ||
586 | if (RPC_IS_QUEUED(task)) | ||
587 | rpc_wake_up_queued_task(task->tk_waitqueue, task); | ||
588 | } | ||
589 | EXPORT_SYMBOL_GPL(rpc_exit); | ||
604 | 590 | ||
605 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) | 591 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) |
606 | { | 592 | { |
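The new rpc_exit() helper records the terminal status, switches the task to its exit action, and wakes the task only if it is currently sitting on a wait queue. A minimal sketch of that "mark finished, wake only if sleeping" shape, with invented userspace types:

/* Sketch of the rpc_exit() pattern above; struct task and the helpers
 * are invented stand-ins, not the kernel API. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct task;
typedef void (*task_action)(struct task *);

struct task {
	int status;
	task_action action;
	bool queued;		/* sleeping on a wait queue? */
};

static void task_exit_action(struct task *t)
{
	printf("task finished, status %d\n", t->status);
}

static void wake_queued_task(struct task *t)
{
	t->queued = false;	/* stand-in for rpc_wake_up_queued_task() */
}

static void task_exit(struct task *t, int status)
{
	t->status = status;
	t->action = task_exit_action;
	if (t->queued)		/* only wake tasks that are actually asleep */
		wake_queued_task(t);
}

int main(void)
{
	struct task t = { .queued = true };

	task_exit(&t, -EIO);
	t.action(&t);
	return 0;
}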
@@ -690,7 +676,6 @@ static void __rpc_execute(struct rpc_task *task) | |||
690 | dprintk("RPC: %5u got signal\n", task->tk_pid); | 676 | dprintk("RPC: %5u got signal\n", task->tk_pid); |
691 | task->tk_flags |= RPC_TASK_KILLED; | 677 | task->tk_flags |= RPC_TASK_KILLED; |
692 | rpc_exit(task, -ERESTARTSYS); | 678 | rpc_exit(task, -ERESTARTSYS); |
693 | rpc_wake_up_task(task); | ||
694 | } | 679 | } |
695 | rpc_set_running(task); | 680 | rpc_set_running(task); |
696 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); | 681 | dprintk("RPC: %5u sync task resuming\n", task->tk_pid); |
@@ -714,8 +699,9 @@ static void __rpc_execute(struct rpc_task *task) | |||
714 | void rpc_execute(struct rpc_task *task) | 699 | void rpc_execute(struct rpc_task *task) |
715 | { | 700 | { |
716 | rpc_set_active(task); | 701 | rpc_set_active(task); |
717 | rpc_set_running(task); | 702 | rpc_make_runnable(task); |
718 | __rpc_execute(task); | 703 | if (!RPC_IS_ASYNC(task)) |
704 | __rpc_execute(task); | ||
719 | } | 705 | } |
720 | 706 | ||
721 | static void rpc_async_schedule(struct work_struct *work) | 707 | static void rpc_async_schedule(struct work_struct *work) |
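rpc_execute() now activates the task, makes it runnable (which hands async tasks to the rpciod workqueue), and only drives the state machine inline for synchronous tasks. A rough sketch of that split, under the assumption of hypothetical run_state_machine()/queue_to_worker() helpers:

/* Sketch of the sync/async split in the reworked rpc_execute(). All
 * names are illustrative; the worker is a stand-in for rpciod. */
#include <stdbool.h>
#include <stdio.h>

struct task { bool is_async; };

static void run_state_machine(struct task *t)
{
	printf("task %p: running state machine inline\n", (void *)t);
}

static void queue_to_worker(struct task *t)
{
	printf("task %p: queued to worker thread\n", (void *)t);
}

static void execute_task(struct task *t)
{
	/* make runnable: async tasks are queued, sync tasks proceed inline */
	if (t->is_async)
		queue_to_worker(t);
	else
		run_state_machine(t);
}

int main(void)
{
	struct task sync_task = { .is_async = false };
	struct task async_task = { .is_async = true };

	execute_task(&sync_task);
	execute_task(&async_task);
	return 0;
}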
@@ -808,26 +794,9 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta | |||
808 | /* Initialize workqueue for async tasks */ | 794 | /* Initialize workqueue for async tasks */ |
809 | task->tk_workqueue = task_setup_data->workqueue; | 795 | task->tk_workqueue = task_setup_data->workqueue; |
810 | 796 | ||
811 | task->tk_client = task_setup_data->rpc_client; | ||
812 | if (task->tk_client != NULL) { | ||
813 | kref_get(&task->tk_client->cl_kref); | ||
814 | if (task->tk_client->cl_softrtry) | ||
815 | task->tk_flags |= RPC_TASK_SOFT; | ||
816 | } | ||
817 | |||
818 | if (task->tk_ops->rpc_call_prepare != NULL) | 797 | if (task->tk_ops->rpc_call_prepare != NULL) |
819 | task->tk_action = rpc_prepare_task; | 798 | task->tk_action = rpc_prepare_task; |
820 | 799 | ||
821 | if (task_setup_data->rpc_message != NULL) { | ||
822 | task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc; | ||
823 | task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp; | ||
824 | task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp; | ||
825 | /* Bind the user cred */ | ||
826 | rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags); | ||
827 | if (task->tk_action == NULL) | ||
828 | rpc_call_start(task); | ||
829 | } | ||
830 | |||
831 | /* starting timestamp */ | 800 | /* starting timestamp */ |
832 | task->tk_start = ktime_get(); | 801 | task->tk_start = ktime_get(); |
833 | 802 | ||
@@ -896,11 +865,8 @@ void rpc_put_task(struct rpc_task *task) | |||
896 | if (task->tk_rqstp) | 865 | if (task->tk_rqstp) |
897 | xprt_release(task); | 866 | xprt_release(task); |
898 | if (task->tk_msg.rpc_cred) | 867 | if (task->tk_msg.rpc_cred) |
899 | rpcauth_unbindcred(task); | 868 | put_rpccred(task->tk_msg.rpc_cred); |
900 | if (task->tk_client) { | 869 | rpc_task_release_client(task); |
901 | rpc_release_client(task->tk_client); | ||
902 | task->tk_client = NULL; | ||
903 | } | ||
904 | if (task->tk_workqueue != NULL) { | 870 | if (task->tk_workqueue != NULL) { |
905 | INIT_WORK(&task->u.tk_work, rpc_async_release); | 871 | INIT_WORK(&task->u.tk_work, rpc_async_release); |
906 | queue_work(task->tk_workqueue, &task->u.tk_work); | 872 | queue_work(task->tk_workqueue, &task->u.tk_work); |
@@ -913,13 +879,6 @@ static void rpc_release_task(struct rpc_task *task) | |||
913 | { | 879 | { |
914 | dprintk("RPC: %5u release task\n", task->tk_pid); | 880 | dprintk("RPC: %5u release task\n", task->tk_pid); |
915 | 881 | ||
916 | if (!list_empty(&task->tk_task)) { | ||
917 | struct rpc_clnt *clnt = task->tk_client; | ||
918 | /* Remove from client task list */ | ||
919 | spin_lock(&clnt->cl_lock); | ||
920 | list_del(&task->tk_task); | ||
921 | spin_unlock(&clnt->cl_lock); | ||
922 | } | ||
923 | BUG_ON (RPC_IS_QUEUED(task)); | 882 | BUG_ON (RPC_IS_QUEUED(task)); |
924 | 883 | ||
925 | /* Wake up anyone who is waiting for task completion */ | 884 | /* Wake up anyone who is waiting for task completion */ |
@@ -928,35 +887,6 @@ static void rpc_release_task(struct rpc_task *task) | |||
928 | rpc_put_task(task); | 887 | rpc_put_task(task); |
929 | } | 888 | } |
930 | 889 | ||
931 | /* | ||
932 | * Kill all tasks for the given client. | ||
933 | * XXX: kill their descendants as well? | ||
934 | */ | ||
935 | void rpc_killall_tasks(struct rpc_clnt *clnt) | ||
936 | { | ||
937 | struct rpc_task *rovr; | ||
938 | |||
939 | |||
940 | if (list_empty(&clnt->cl_tasks)) | ||
941 | return; | ||
942 | dprintk("RPC: killing all tasks for client %p\n", clnt); | ||
943 | /* | ||
944 | * Spin lock all_tasks to prevent changes... | ||
945 | */ | ||
946 | spin_lock(&clnt->cl_lock); | ||
947 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { | ||
948 | if (! RPC_IS_ACTIVATED(rovr)) | ||
949 | continue; | ||
950 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { | ||
951 | rovr->tk_flags |= RPC_TASK_KILLED; | ||
952 | rpc_exit(rovr, -EIO); | ||
953 | rpc_wake_up_task(rovr); | ||
954 | } | ||
955 | } | ||
956 | spin_unlock(&clnt->cl_lock); | ||
957 | } | ||
958 | EXPORT_SYMBOL_GPL(rpc_killall_tasks); | ||
959 | |||
960 | int rpciod_up(void) | 890 | int rpciod_up(void) |
961 | { | 891 | { |
962 | return try_module_get(THIS_MODULE) ? 0 : -EINVAL; | 892 | return try_module_get(THIS_MODULE) ? 0 : -EINVAL; |
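rpc_killall_tasks() and the per-client task-list bookkeeping leave sched.c here; the other half of the move is outside the hunks shown. The pattern itself is a walk of the client's task list, under the client's lock, marking each active task killed and forcing it to exit. A simplified, lock-free userspace walk of the same shape:

/* Illustrative walk of a per-client task list in the spirit of
 * rpc_killall_tasks(). The list and names are simplified stand-ins;
 * the kernel version holds the client's spinlock around the walk. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	struct task *next;
	bool active;
	bool killed;
	int status;
};

static void kill_all_tasks(struct task *head)
{
	for (struct task *t = head; t; t = t->next) {
		if (!t->active || t->killed)
			continue;
		t->killed = true;
		t->status = -EIO;	/* mirrors rpc_exit(rovr, -EIO) above */
	}
}

int main(void)
{
	struct task b = { .next = NULL, .active = false };
	struct task a = { .next = &b, .active = true };

	kill_all_tasks(&a);
	printf("a: killed=%d status=%d, b: killed=%d\n",
	       a.killed, a.status, b.killed);
	return 0;
}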
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index f438347d817b..c0d085013a2b 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -33,21 +33,27 @@ init_sunrpc(void) | |||
33 | if (err) | 33 | if (err) |
34 | goto out; | 34 | goto out; |
35 | err = rpc_init_mempool(); | 35 | err = rpc_init_mempool(); |
36 | if (err) { | 36 | if (err) |
37 | unregister_rpc_pipefs(); | 37 | goto out2; |
38 | goto out; | 38 | err = rpcauth_init_module(); |
39 | } | 39 | if (err) |
40 | goto out3; | ||
40 | #ifdef RPC_DEBUG | 41 | #ifdef RPC_DEBUG |
41 | rpc_register_sysctl(); | 42 | rpc_register_sysctl(); |
42 | #endif | 43 | #endif |
43 | #ifdef CONFIG_PROC_FS | 44 | #ifdef CONFIG_PROC_FS |
44 | rpc_proc_init(); | 45 | rpc_proc_init(); |
45 | #endif | 46 | #endif |
47 | cache_initialize(); | ||
46 | cache_register(&ip_map_cache); | 48 | cache_register(&ip_map_cache); |
47 | cache_register(&unix_gid_cache); | 49 | cache_register(&unix_gid_cache); |
48 | svc_init_xprt_sock(); /* svc sock transport */ | 50 | svc_init_xprt_sock(); /* svc sock transport */ |
49 | init_socket_xprt(); /* clnt sock transport */ | 51 | init_socket_xprt(); /* clnt sock transport */ |
50 | rpcauth_init_module(); | 52 | return 0; |
53 | out3: | ||
54 | rpc_destroy_mempool(); | ||
55 | out2: | ||
56 | unregister_rpc_pipefs(); | ||
51 | out: | 57 | out: |
52 | return err; | 58 | return err; |
53 | } | 59 | } |
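init_sunrpc() is reworked into the usual goto-based unwind: each successful setup step gets a matching teardown label, and a failure jumps to the label that undoes everything initialised so far, in reverse order. A small self-contained example of the idiom (the step names are made up):

/* Self-contained example of goto-based error unwinding, the idiom used
 * in the reworked init_sunrpc() above. step_a/step_b/step_c are invented. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the third step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_undo_a;
	err = step_c();
	if (err)
		goto out_undo_b;
	return 0;

out_undo_b:	/* unwind in reverse order of initialisation */
	undo_b();
out_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}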
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index dcd0132396ba..970fb00f388c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1032,6 +1032,8 @@ void xprt_release(struct rpc_task *task) | |||
1032 | spin_unlock_bh(&xprt->transport_lock); | 1032 | spin_unlock_bh(&xprt->transport_lock); |
1033 | if (req->rq_buffer) | 1033 | if (req->rq_buffer) |
1034 | xprt->ops->buf_free(req->rq_buffer); | 1034 | xprt->ops->buf_free(req->rq_buffer); |
1035 | if (req->rq_cred != NULL) | ||
1036 | put_rpccred(req->rq_cred); | ||
1035 | task->tk_rqstp = NULL; | 1037 | task->tk_rqstp = NULL; |
1036 | if (req->rq_release_snd_buf) | 1038 | if (req->rq_release_snd_buf) |
1037 | req->rq_release_snd_buf(req); | 1039 | req->rq_release_snd_buf(req); |
@@ -1129,6 +1131,7 @@ static void xprt_destroy(struct kref *kref) | |||
1129 | rpc_destroy_wait_queue(&xprt->sending); | 1131 | rpc_destroy_wait_queue(&xprt->sending); |
1130 | rpc_destroy_wait_queue(&xprt->resend); | 1132 | rpc_destroy_wait_queue(&xprt->resend); |
1131 | rpc_destroy_wait_queue(&xprt->backlog); | 1133 | rpc_destroy_wait_queue(&xprt->backlog); |
1134 | cancel_work_sync(&xprt->task_cleanup); | ||
1132 | /* | 1135 | /* |
1133 | * Tear down transport state and free the rpc_xprt | 1136 | * Tear down transport state and free the rpc_xprt |
1134 | */ | 1137 | */ |
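The xprt.c hunks add a put_rpccred() for the request's credential when the request is released, and make xprt_destroy() cancel any outstanding cleanup work before tearing the transport down. A minimal sketch of the "drop the reference only if one is held" part, with invented userspace refcount helpers:

/* Minimal sketch of the conditional reference drop added to
 * xprt_release() above; struct cred and put_cred() are invented. */
#include <stdio.h>
#include <stdlib.h>

struct cred {
	int refcount;
};

static void put_cred(struct cred *c)
{
	if (--c->refcount == 0) {
		printf("freeing cred\n");
		free(c);
	}
}

struct request {
	struct cred *cred;	/* may be NULL if no credential was bound */
};

static void release_request(struct request *req)
{
	if (req->cred != NULL)	/* only put a reference we actually hold */
		put_cred(req->cred);
	req->cred = NULL;
}

int main(void)
{
	struct cred *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	c->refcount = 1;
	struct request req = { .cred = c };
	release_request(&req);
	return 0;
}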
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index e5e28d1946a4..2ac3f6e8adff 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -249,6 +249,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
249 | req->rl_nchunks = nchunks; | 249 | req->rl_nchunks = nchunks; |
250 | 250 | ||
251 | BUG_ON(nchunks == 0); | 251 | BUG_ON(nchunks == 0); |
252 | BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) | ||
253 | && (nchunks > 3)); | ||
252 | 254 | ||
253 | /* | 255 | /* |
254 | * finish off header. If write, marshal discrim and nchunks. | 256 | * finish off header. If write, marshal discrim and nchunks. |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 27015c6d8eb5..5f4c7b3bc711 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -650,10 +650,22 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
650 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; | 650 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; |
651 | switch (ia->ri_memreg_strategy) { | 651 | switch (ia->ri_memreg_strategy) { |
652 | case RPCRDMA_FRMR: | 652 | case RPCRDMA_FRMR: |
653 | /* Add room for frmr register and invalidate WRs */ | 653 | /* Add room for frmr register and invalidate WRs. |
654 | ep->rep_attr.cap.max_send_wr *= 3; | 654 | * 1. FRMR reg WR for head |
655 | if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) | 655 | * 2. FRMR invalidate WR for head |
656 | return -EINVAL; | 656 | * 3. FRMR reg WR for pagelist |
657 | * 4. FRMR invalidate WR for pagelist | ||
658 | * 5. FRMR reg WR for tail | ||
659 | * 6. FRMR invalidate WR for tail | ||
660 | * 7. The RDMA_SEND WR | ||
661 | */ | ||
662 | ep->rep_attr.cap.max_send_wr *= 7; | ||
663 | if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) { | ||
664 | cdata->max_requests = devattr.max_qp_wr / 7; | ||
665 | if (!cdata->max_requests) | ||
666 | return -EINVAL; | ||
667 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7; | ||
668 | } | ||
657 | break; | 669 | break; |
658 | case RPCRDMA_MEMWINDOWS_ASYNC: | 670 | case RPCRDMA_MEMWINDOWS_ASYNC: |
659 | case RPCRDMA_MEMWINDOWS: | 671 | case RPCRDMA_MEMWINDOWS: |
@@ -1490,7 +1502,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, | |||
1490 | memset(&frmr_wr, 0, sizeof frmr_wr); | 1502 | memset(&frmr_wr, 0, sizeof frmr_wr); |
1491 | frmr_wr.opcode = IB_WR_FAST_REG_MR; | 1503 | frmr_wr.opcode = IB_WR_FAST_REG_MR; |
1492 | frmr_wr.send_flags = 0; /* unsignaled */ | 1504 | frmr_wr.send_flags = 0; /* unsignaled */ |
1493 | frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma; | 1505 | frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma; |
1494 | frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; | 1506 | frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; |
1495 | frmr_wr.wr.fast_reg.page_list_len = i; | 1507 | frmr_wr.wr.fast_reg.page_list_len = i; |
1496 | frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 1508 | frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; |
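With FRMR, each RPC can now consume up to seven send work requests (register plus invalidate for head, pagelist and tail, plus the RDMA SEND itself), so the send queue is sized at max_requests * 7 and, rather than failing outright, max_requests is scaled down to fit the device's max_qp_wr. A small sketch of that clamping arithmetic (function and parameter names invented):

/* Sketch of the "scale the request count to fit the device limit" logic
 * from rpcrdma_ep_create() above. WRS_PER_REQ mirrors the factor of 7. */
#include <stdio.h>

#define WRS_PER_REQ 7	/* 3 x (reg + invalidate) + 1 RDMA SEND */

/* Returns the usable request count, or 0 if even one request won't fit. */
static unsigned int clamp_requests(unsigned int wanted, unsigned int max_qp_wr)
{
	unsigned int send_wrs = wanted * WRS_PER_REQ;

	if (send_wrs > max_qp_wr) {
		wanted = max_qp_wr / WRS_PER_REQ;
		if (!wanted)
			return 0;	/* device cannot support FRMR at all */
	}
	return wanted;
}

int main(void)
{
	printf("%u\n", clamp_requests(32, 128));	/* clamped: 128/7 = 18 */
	printf("%u\n", clamp_requests(32, 1024));	/* fits: stays 32 */
	printf("%u\n", clamp_requests(32, 5));		/* too small: 0 */
	return 0;
}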
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7ca65c7005ea..b6309db56226 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1305,10 +1305,11 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1305 | if (!(xprt = xprt_from_sock(sk))) | 1305 | if (!(xprt = xprt_from_sock(sk))) |
1306 | goto out; | 1306 | goto out; |
1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | 1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
1308 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | 1308 | dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", |
1309 | sk->sk_state, xprt_connected(xprt), | 1309 | sk->sk_state, xprt_connected(xprt), |
1310 | sock_flag(sk, SOCK_DEAD), | 1310 | sock_flag(sk, SOCK_DEAD), |
1311 | sock_flag(sk, SOCK_ZAPPED)); | 1311 | sock_flag(sk, SOCK_ZAPPED), |
1312 | sk->sk_shutdown); | ||
1312 | 1313 | ||
1313 | switch (sk->sk_state) { | 1314 | switch (sk->sk_state) { |
1314 | case TCP_ESTABLISHED: | 1315 | case TCP_ESTABLISHED: |
@@ -1779,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra | |||
1779 | { | 1780 | { |
1780 | unsigned int state = transport->inet->sk_state; | 1781 | unsigned int state = transport->inet->sk_state; |
1781 | 1782 | ||
1782 | if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) | 1783 | if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) { |
1783 | return; | 1784 | /* we don't need to abort the connection if the socket |
1784 | if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) | 1785 | * hasn't undergone a shutdown |
1785 | return; | 1786 | */ |
1787 | if (transport->inet->sk_shutdown == 0) | ||
1788 | return; | ||
1789 | dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n", | ||
1790 | __func__, transport->inet->sk_shutdown); | ||
1791 | } | ||
1792 | if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) { | ||
1793 | /* we don't need to abort the connection if the socket | ||
1794 | * hasn't undergone a shutdown | ||
1795 | */ | ||
1796 | if (transport->inet->sk_shutdown == 0) | ||
1797 | return; | ||
1798 | dprintk("RPC: %s: ESTABLISHED/SYN_SENT " | ||
1799 | "sk_shutdown set to %d\n", | ||
1800 | __func__, transport->inet->sk_shutdown); | ||
1801 | } | ||
1786 | xs_abort_connection(xprt, transport); | 1802 | xs_abort_connection(xprt, transport); |
1787 | } | 1803 | } |
1788 | 1804 | ||
@@ -2577,7 +2593,8 @@ void cleanup_socket_xprt(void) | |||
2577 | xprt_unregister_transport(&xs_bc_tcp_transport); | 2593 | xprt_unregister_transport(&xs_bc_tcp_transport); |
2578 | } | 2594 | } |
2579 | 2595 | ||
2580 | static int param_set_uint_minmax(const char *val, struct kernel_param *kp, | 2596 | static int param_set_uint_minmax(const char *val, |
2597 | const struct kernel_param *kp, | ||
2581 | unsigned int min, unsigned int max) | 2598 | unsigned int min, unsigned int max) |
2582 | { | 2599 | { |
2583 | unsigned long num; | 2600 | unsigned long num; |
@@ -2592,34 +2609,37 @@ static int param_set_uint_minmax(const char *val, struct kernel_param *kp, | |||
2592 | return 0; | 2609 | return 0; |
2593 | } | 2610 | } |
2594 | 2611 | ||
2595 | static int param_set_portnr(const char *val, struct kernel_param *kp) | 2612 | static int param_set_portnr(const char *val, const struct kernel_param *kp) |
2596 | { | 2613 | { |
2597 | return param_set_uint_minmax(val, kp, | 2614 | return param_set_uint_minmax(val, kp, |
2598 | RPC_MIN_RESVPORT, | 2615 | RPC_MIN_RESVPORT, |
2599 | RPC_MAX_RESVPORT); | 2616 | RPC_MAX_RESVPORT); |
2600 | } | 2617 | } |
2601 | 2618 | ||
2602 | static int param_get_portnr(char *buffer, struct kernel_param *kp) | 2619 | static struct kernel_param_ops param_ops_portnr = { |
2603 | { | 2620 | .set = param_set_portnr, |
2604 | return param_get_uint(buffer, kp); | 2621 | .get = param_get_uint, |
2605 | } | 2622 | }; |
2623 | |||
2606 | #define param_check_portnr(name, p) \ | 2624 | #define param_check_portnr(name, p) \ |
2607 | __param_check(name, p, unsigned int); | 2625 | __param_check(name, p, unsigned int); |
2608 | 2626 | ||
2609 | module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); | 2627 | module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); |
2610 | module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); | 2628 | module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); |
2611 | 2629 | ||
2612 | static int param_set_slot_table_size(const char *val, struct kernel_param *kp) | 2630 | static int param_set_slot_table_size(const char *val, |
2631 | const struct kernel_param *kp) | ||
2613 | { | 2632 | { |
2614 | return param_set_uint_minmax(val, kp, | 2633 | return param_set_uint_minmax(val, kp, |
2615 | RPC_MIN_SLOT_TABLE, | 2634 | RPC_MIN_SLOT_TABLE, |
2616 | RPC_MAX_SLOT_TABLE); | 2635 | RPC_MAX_SLOT_TABLE); |
2617 | } | 2636 | } |
2618 | 2637 | ||
2619 | static int param_get_slot_table_size(char *buffer, struct kernel_param *kp) | 2638 | static struct kernel_param_ops param_ops_slot_table_size = { |
2620 | { | 2639 | .set = param_set_slot_table_size, |
2621 | return param_get_uint(buffer, kp); | 2640 | .get = param_get_uint, |
2622 | } | 2641 | }; |
2642 | |||
2623 | #define param_check_slot_table_size(name, p) \ | 2643 | #define param_check_slot_table_size(name, p) \ |
2624 | __param_check(name, p, unsigned int); | 2644 | __param_check(name, p, unsigned int); |
2625 | 2645 | ||
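The xprtsock.c module-parameter setters are converted to the const struct kernel_param_ops style; the heart of each setter is still "parse an unsigned integer and reject it unless it falls in [min, max]". A standalone sketch of that range-checked parse, using plain strtoul rather than the kernel's param helpers:

/* Standalone sketch of the range-checked setter used for
 * min_resvport/max_resvport and the slot table size above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int set_uint_minmax(const char *val, unsigned int *out,
			   unsigned int min, unsigned int max)
{
	char *end;
	unsigned long num;

	if (!val)
		return -EINVAL;
	errno = 0;
	num = strtoul(val, &end, 0);
	if (errno || *end != '\0' || num < min || num > max)
		return -EINVAL;
	*out = (unsigned int)num;
	return 0;
}

int main(void)
{
	unsigned int port = 0;

	printf("%d %u\n", set_uint_minmax("665", &port, 665, 1023), port);
	printf("%d\n", set_uint_minmax("70000", &port, 665, 1023));
	return 0;
}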
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4414a18c63b4..0b39b2451ea5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock) | |||
692 | static u32 ordernum = 1; | 692 | static u32 ordernum = 1; |
693 | struct unix_address *addr; | 693 | struct unix_address *addr; |
694 | int err; | 694 | int err; |
695 | unsigned int retries = 0; | ||
695 | 696 | ||
696 | mutex_lock(&u->readlock); | 697 | mutex_lock(&u->readlock); |
697 | 698 | ||
@@ -717,9 +718,17 @@ retry: | |||
717 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, | 718 | if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, |
718 | addr->hash)) { | 719 | addr->hash)) { |
719 | spin_unlock(&unix_table_lock); | 720 | spin_unlock(&unix_table_lock); |
720 | /* Sanity yield. It is unusual case, but yet... */ | 721 | /* |
721 | if (!(ordernum&0xFF)) | 722 | * __unix_find_socket_byname() may take long time if many names |
722 | yield(); | 723 | * are already in use. |
724 | */ | ||
725 | cond_resched(); | ||
726 | /* Give up if all names seems to be in use. */ | ||
727 | if (retries++ == 0xFFFFF) { | ||
728 | err = -ENOSPC; | ||
729 | kfree(addr); | ||
730 | goto out; | ||
731 | } | ||
723 | goto retry; | 732 | goto retry; |
724 | } | 733 | } |
725 | addr->hash ^= sk->sk_type; | 734 | addr->hash ^= sk->sk_type; |
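unix_autobind() replaces the occasional yield() with cond_resched() on every collision and, more importantly, bounds the search: after 0xFFFFF attempts it gives up with -ENOSPC instead of spinning forever once the autobind namespace is exhausted. A toy version of that bounded-retry loop (try_reserve_name() is an invented stand-in for the hash-table lookup):

/* Toy version of the bounded retry loop added to unix_autobind(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 0xFFFFF

static bool try_reserve_name(unsigned int ordernum)
{
	(void)ordernum;
	return false;	/* pretend every candidate name is already taken */
}

static int autobind(void)
{
	static unsigned int ordernum = 1;
	unsigned int retries = 0;

	for (;;) {
		ordernum = (ordernum + 1) & 0xFFFFF;
		if (try_reserve_name(ordernum))
			return 0;
		/* kernel: cond_resched() here, the lookup can take a while */
		if (retries++ == MAX_RETRIES)
			return -ENOSPC;	/* all names appear to be in use */
	}
}

int main(void)
{
	printf("autobind() = %d\n", autobind());
	return 0;
}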
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index a3cca0a94346..64f2ae1fdc15 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -101,7 +101,7 @@ resume: | |||
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set_noref(skb, dst); | 104 | skb_dst_set(skb, dst_clone(dst)); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2b3ed7ad4933..cbab6e1a8c9c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl, | |||
1175 | tmpl->mode == XFRM_MODE_BEET) { | 1175 | tmpl->mode == XFRM_MODE_BEET) { |
1176 | remote = &tmpl->id.daddr; | 1176 | remote = &tmpl->id.daddr; |
1177 | local = &tmpl->saddr; | 1177 | local = &tmpl->saddr; |
1178 | family = tmpl->encap_family; | 1178 | if (xfrm_addr_any(local, tmpl->encap_family)) { |
1179 | if (xfrm_addr_any(local, family)) { | 1179 | error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); |
1180 | error = xfrm_get_saddr(net, &tmp, remote, family); | ||
1181 | if (error) | 1180 | if (error) |
1182 | goto fail; | 1181 | goto fail; |
1183 | local = &tmp; | 1182 | local = &tmp; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5208b12fbfb4..eb96ce52f178 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) | |||
656 | EXPORT_SYMBOL(xfrm_sad_getinfo); | 656 | EXPORT_SYMBOL(xfrm_sad_getinfo); |
657 | 657 | ||
658 | static int | 658 | static int |
659 | xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, | 659 | xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl, |
660 | struct xfrm_tmpl *tmpl, | 660 | struct xfrm_tmpl *tmpl, |
661 | xfrm_address_t *daddr, xfrm_address_t *saddr, | 661 | xfrm_address_t *daddr, xfrm_address_t *saddr, |
662 | unsigned short family) | 662 | unsigned short family) |
663 | { | 663 | { |
664 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 664 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
665 | if (!afinfo) | 665 | if (!afinfo) |
666 | return -1; | 666 | return -1; |
667 | afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); | 667 | afinfo->init_tempsel(&x->sel, fl); |
668 | |||
669 | if (family != tmpl->encap_family) { | ||
670 | xfrm_state_put_afinfo(afinfo); | ||
671 | afinfo = xfrm_state_get_afinfo(tmpl->encap_family); | ||
672 | if (!afinfo) | ||
673 | return -1; | ||
674 | } | ||
675 | afinfo->init_temprop(x, tmpl, daddr, saddr); | ||
668 | xfrm_state_put_afinfo(afinfo); | 676 | xfrm_state_put_afinfo(afinfo); |
669 | return 0; | 677 | return 0; |
670 | } | 678 | } |
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
790 | int error = 0; | 798 | int error = 0; |
791 | struct xfrm_state *best = NULL; | 799 | struct xfrm_state *best = NULL; |
792 | u32 mark = pol->mark.v & pol->mark.m; | 800 | u32 mark = pol->mark.v & pol->mark.m; |
801 | unsigned short encap_family = tmpl->encap_family; | ||
793 | 802 | ||
794 | to_put = NULL; | 803 | to_put = NULL; |
795 | 804 | ||
796 | spin_lock_bh(&xfrm_state_lock); | 805 | spin_lock_bh(&xfrm_state_lock); |
797 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family); | 806 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); |
798 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 807 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { |
799 | if (x->props.family == family && | 808 | if (x->props.family == encap_family && |
800 | x->props.reqid == tmpl->reqid && | 809 | x->props.reqid == tmpl->reqid && |
801 | (mark & x->mark.m) == x->mark.v && | 810 | (mark & x->mark.m) == x->mark.v && |
802 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 811 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
803 | xfrm_state_addr_check(x, daddr, saddr, family) && | 812 | xfrm_state_addr_check(x, daddr, saddr, encap_family) && |
804 | tmpl->mode == x->props.mode && | 813 | tmpl->mode == x->props.mode && |
805 | tmpl->id.proto == x->id.proto && | 814 | tmpl->id.proto == x->id.proto && |
806 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) | 815 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) |
807 | xfrm_state_look_at(pol, x, fl, family, daddr, saddr, | 816 | xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, |
808 | &best, &acquire_in_progress, &error); | 817 | &best, &acquire_in_progress, &error); |
809 | } | 818 | } |
810 | if (best) | 819 | if (best) |
811 | goto found; | 820 | goto found; |
812 | 821 | ||
813 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); | 822 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); |
814 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { | 823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { |
815 | if (x->props.family == family && | 824 | if (x->props.family == encap_family && |
816 | x->props.reqid == tmpl->reqid && | 825 | x->props.reqid == tmpl->reqid && |
817 | (mark & x->mark.m) == x->mark.v && | 826 | (mark & x->mark.m) == x->mark.v && |
818 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 827 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
819 | xfrm_state_addr_check(x, daddr, saddr, family) && | 828 | xfrm_state_addr_check(x, daddr, saddr, encap_family) && |
820 | tmpl->mode == x->props.mode && | 829 | tmpl->mode == x->props.mode && |
821 | tmpl->id.proto == x->id.proto && | 830 | tmpl->id.proto == x->id.proto && |
822 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) | 831 | (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) |
823 | xfrm_state_look_at(pol, x, fl, family, daddr, saddr, | 832 | xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, |
824 | &best, &acquire_in_progress, &error); | 833 | &best, &acquire_in_progress, &error); |
825 | } | 834 | } |
826 | 835 | ||
@@ -829,7 +838,7 @@ found: | |||
829 | if (!x && !error && !acquire_in_progress) { | 838 | if (!x && !error && !acquire_in_progress) { |
830 | if (tmpl->id.spi && | 839 | if (tmpl->id.spi && |
831 | (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, | 840 | (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, |
832 | tmpl->id.proto, family)) != NULL) { | 841 | tmpl->id.proto, encap_family)) != NULL) { |
833 | to_put = x0; | 842 | to_put = x0; |
834 | error = -EEXIST; | 843 | error = -EEXIST; |
835 | goto out; | 844 | goto out; |
@@ -839,9 +848,9 @@ found: | |||
839 | error = -ENOMEM; | 848 | error = -ENOMEM; |
840 | goto out; | 849 | goto out; |
841 | } | 850 | } |
842 | /* Initialize temporary selector matching only | 851 | /* Initialize temporary state matching only |
843 | * to current session. */ | 852 | * to current session. */ |
844 | xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); | 853 | xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); |
845 | memcpy(&x->mark, &pol->mark, sizeof(x->mark)); | 854 | memcpy(&x->mark, &pol->mark, sizeof(x->mark)); |
846 | 855 | ||
847 | error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); | 856 | error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); |
@@ -856,10 +865,10 @@ found: | |||
856 | x->km.state = XFRM_STATE_ACQ; | 865 | x->km.state = XFRM_STATE_ACQ; |
857 | list_add(&x->km.all, &net->xfrm.state_all); | 866 | list_add(&x->km.all, &net->xfrm.state_all); |
858 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); | 867 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); |
859 | h = xfrm_src_hash(net, daddr, saddr, family); | 868 | h = xfrm_src_hash(net, daddr, saddr, encap_family); |
860 | hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); | 869 | hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); |
861 | if (x->id.spi) { | 870 | if (x->id.spi) { |
862 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family); | 871 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); |
863 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); | 872 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); |
864 | } | 873 | } |
865 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; | 874 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; |
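The xfrm_state.c changes key state lookup and the temporary-state setup on the template's encap_family rather than the flow's family, which is what lets inter-family tunnels (for example IPv4 carried over IPv6) hash and match correctly; when the two families differ, the per-family afinfo is looked up again for the encapsulation side. A tiny sketch of that family selection (structs invented, AF_* values from the standard headers):

/* Tiny sketch of the family selection behind the xfrm_state_find()
 * changes above: SA lookups are keyed by the template's encapsulation
 * family, which may differ from the flow's family. */
#include <stdio.h>
#include <sys/socket.h>

struct tmpl { unsigned short encap_family; };

static unsigned short lookup_family(unsigned short flow_family,
				    const struct tmpl *t)
{
	(void)flow_family;	/* the inner family does not key the SA lookup */
	return t->encap_family;
}

int main(void)
{
	struct tmpl t = { .encap_family = AF_INET6 };

	/* IPv4 flow carried over an IPv6 tunnel: lookup uses AF_INET6 */
	printf("flow AF_INET(%d) -> lookup family %d\n",
	       AF_INET, lookup_family(AF_INET, &t));
	return 0;
}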
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ba59983aaffe..8bae6b22c846 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); | 1801 | struct xfrm_user_expire *ue = nlmsg_data(nlh); |
1802 | struct xfrm_usersa_info *p = &ue->state; | 1802 | struct xfrm_usersa_info *p = &ue->state; |
1803 | struct xfrm_mark m; | 1803 | struct xfrm_mark m; |
1804 | u32 mark = xfrm_mark_get(attrs, &m);; | 1804 | u32 mark = xfrm_mark_get(attrs, &m); |
1805 | 1805 | ||
1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); | 1806 | x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); |
1807 | 1807 | ||
@@ -2504,7 +2504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, | |||
2504 | if (p->dir > XFRM_POLICY_OUT) | 2504 | if (p->dir > XFRM_POLICY_OUT) |
2505 | return NULL; | 2505 | return NULL; |
2506 | 2506 | ||
2507 | xp = xfrm_policy_alloc(net, GFP_KERNEL); | 2507 | xp = xfrm_policy_alloc(net, GFP_ATOMIC); |
2508 | if (xp == NULL) { | 2508 | if (xp == NULL) { |
2509 | *dir = -ENOBUFS; | 2509 | *dir = -ENOBUFS; |
2510 | return NULL; | 2510 | return NULL; |