diff options
Diffstat (limited to 'net')
155 files changed, 1483 insertions, 1006 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 73a2a83ee2da..402442402af7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
137 | return rc; | 137 | return rc; |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) | ||
141 | { | ||
142 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
143 | if (vlan->netpoll) | ||
144 | netpoll_send_skb(vlan->netpoll, skb); | ||
145 | #else | ||
146 | BUG(); | ||
147 | #endif | ||
148 | return NETDEV_TX_OK; | ||
149 | } | ||
150 | |||
140 | static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, | 151 | static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, |
141 | struct net_device *dev) | 152 | struct net_device *dev) |
142 | { | 153 | { |
154 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | ||
143 | struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); | 155 | struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); |
144 | unsigned int len; | 156 | unsigned int len; |
145 | int ret; | 157 | int ret; |
@@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, | |||
150 | * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... | 162 | * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... |
151 | */ | 163 | */ |
152 | if (veth->h_vlan_proto != htons(ETH_P_8021Q) || | 164 | if (veth->h_vlan_proto != htons(ETH_P_8021Q) || |
153 | vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { | 165 | vlan->flags & VLAN_FLAG_REORDER_HDR) { |
154 | u16 vlan_tci; | 166 | u16 vlan_tci; |
155 | vlan_tci = vlan_dev_priv(dev)->vlan_id; | 167 | vlan_tci = vlan->vlan_id; |
156 | vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); | 168 | vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); |
157 | skb = __vlan_hwaccel_put_tag(skb, vlan_tci); | 169 | skb = __vlan_hwaccel_put_tag(skb, vlan_tci); |
158 | } | 170 | } |
159 | 171 | ||
160 | skb->dev = vlan_dev_priv(dev)->real_dev; | 172 | skb->dev = vlan->real_dev; |
161 | len = skb->len; | 173 | len = skb->len; |
162 | if (netpoll_tx_running(dev)) | 174 | if (unlikely(netpoll_tx_running(dev))) |
163 | return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); | 175 | return vlan_netpoll_send_skb(vlan, skb); |
176 | |||
164 | ret = dev_queue_xmit(skb); | 177 | ret = dev_queue_xmit(skb); |
165 | 178 | ||
166 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { | 179 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { |
167 | struct vlan_pcpu_stats *stats; | 180 | struct vlan_pcpu_stats *stats; |
168 | 181 | ||
169 | stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); | 182 | stats = this_cpu_ptr(vlan->vlan_pcpu_stats); |
170 | u64_stats_update_begin(&stats->syncp); | 183 | u64_stats_update_begin(&stats->syncp); |
171 | stats->tx_packets++; | 184 | stats->tx_packets++; |
172 | stats->tx_bytes += len; | 185 | stats->tx_bytes += len; |
173 | u64_stats_update_end(&stats->syncp); | 186 | u64_stats_update_end(&stats->syncp); |
174 | } else { | 187 | } else { |
175 | this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); | 188 | this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); |
176 | } | 189 | } |
177 | 190 | ||
178 | return ret; | 191 | return ret; |
@@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev) | |||
669 | return; | 682 | return; |
670 | } | 683 | } |
671 | 684 | ||
672 | static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) | 685 | static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo, |
686 | gfp_t gfp) | ||
673 | { | 687 | { |
674 | struct vlan_dev_priv *info = vlan_dev_priv(dev); | 688 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
675 | struct net_device *real_dev = info->real_dev; | 689 | struct net_device *real_dev = vlan->real_dev; |
676 | struct netpoll *netpoll; | 690 | struct netpoll *netpoll; |
677 | int err = 0; | 691 | int err = 0; |
678 | 692 | ||
679 | netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); | 693 | netpoll = kzalloc(sizeof(*netpoll), gfp); |
680 | err = -ENOMEM; | 694 | err = -ENOMEM; |
681 | if (!netpoll) | 695 | if (!netpoll) |
682 | goto out; | 696 | goto out; |
683 | 697 | ||
684 | err = __netpoll_setup(netpoll, real_dev); | 698 | err = __netpoll_setup(netpoll, real_dev, gfp); |
685 | if (err) { | 699 | if (err) { |
686 | kfree(netpoll); | 700 | kfree(netpoll); |
687 | goto out; | 701 | goto out; |
688 | } | 702 | } |
689 | 703 | ||
690 | info->netpoll = netpoll; | 704 | vlan->netpoll = netpoll; |
691 | 705 | ||
692 | out: | 706 | out: |
693 | return err; | 707 | return err; |
@@ -695,19 +709,15 @@ out: | |||
695 | 709 | ||
696 | static void vlan_dev_netpoll_cleanup(struct net_device *dev) | 710 | static void vlan_dev_netpoll_cleanup(struct net_device *dev) |
697 | { | 711 | { |
698 | struct vlan_dev_priv *info = vlan_dev_priv(dev); | 712 | struct vlan_dev_priv *vlan= vlan_dev_priv(dev); |
699 | struct netpoll *netpoll = info->netpoll; | 713 | struct netpoll *netpoll = vlan->netpoll; |
700 | 714 | ||
701 | if (!netpoll) | 715 | if (!netpoll) |
702 | return; | 716 | return; |
703 | 717 | ||
704 | info->netpoll = NULL; | 718 | vlan->netpoll = NULL; |
705 | |||
706 | /* Wait for transmitting packets to finish before freeing. */ | ||
707 | synchronize_rcu_bh(); | ||
708 | 719 | ||
709 | __netpoll_cleanup(netpoll); | 720 | __netpoll_free_rcu(netpoll); |
710 | kfree(netpoll); | ||
711 | } | 721 | } |
712 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 722 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
713 | 723 | ||
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index c718fd3664b6..4de77ea5fa37 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c | |||
@@ -105,7 +105,7 @@ static const struct file_operations vlandev_fops = { | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | /* | 107 | /* |
108 | * Proc filesystem derectory entries. | 108 | * Proc filesystem directory entries. |
109 | */ | 109 | */ |
110 | 110 | ||
111 | /* Strings */ | 111 | /* Strings */ |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 6449bae15702..505f0ce3f10b 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -1083,7 +1083,7 @@ int p9_trans_fd_init(void) | |||
1083 | 1083 | ||
1084 | void p9_trans_fd_exit(void) | 1084 | void p9_trans_fd_exit(void) |
1085 | { | 1085 | { |
1086 | flush_work_sync(&p9_poll_work); | 1086 | flush_work(&p9_poll_work); |
1087 | v9fs_unregister_trans(&p9_tcp_trans); | 1087 | v9fs_unregister_trans(&p9_tcp_trans); |
1088 | v9fs_unregister_trans(&p9_unix_trans); | 1088 | v9fs_unregister_trans(&p9_unix_trans); |
1089 | v9fs_unregister_trans(&p9_fd_trans); | 1089 | v9fs_unregister_trans(&p9_fd_trans); |
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index b5b1a221c242..c30f3a0717fb 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c | |||
@@ -183,7 +183,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v) | |||
183 | ntohs(at->dest_net), at->dest_node, at->dest_port, | 183 | ntohs(at->dest_net), at->dest_node, at->dest_port, |
184 | sk_wmem_alloc_get(s), | 184 | sk_wmem_alloc_get(s), |
185 | sk_rmem_alloc_get(s), | 185 | sk_rmem_alloc_get(s), |
186 | s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); | 186 | s->sk_state, |
187 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s))); | ||
187 | out: | 188 | out: |
188 | return 0; | 189 | return 0; |
189 | } | 190 | } |
diff --git a/net/atm/common.c b/net/atm/common.c index b4b44dbed645..0c0ad930a632 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, | |||
812 | 812 | ||
813 | if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) | 813 | if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) |
814 | return -ENOTCONN; | 814 | return -ENOTCONN; |
815 | memset(&pvc, 0, sizeof(pvc)); | ||
815 | pvc.sap_family = AF_ATMPVC; | 816 | pvc.sap_family = AF_ATMPVC; |
816 | pvc.sap_addr.itf = vcc->dev->number; | 817 | pvc.sap_addr.itf = vcc->dev->number; |
817 | pvc.sap_addr.vpi = vcc->vpi; | 818 | pvc.sap_addr.vpi = vcc->vpi; |
diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 3a734919c36c..ae0324021407 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c | |||
@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, | |||
95 | return -ENOTCONN; | 95 | return -ENOTCONN; |
96 | *sockaddr_len = sizeof(struct sockaddr_atmpvc); | 96 | *sockaddr_len = sizeof(struct sockaddr_atmpvc); |
97 | addr = (struct sockaddr_atmpvc *)sockaddr; | 97 | addr = (struct sockaddr_atmpvc *)sockaddr; |
98 | memset(addr, 0, sizeof(*addr)); | ||
98 | addr->sap_family = AF_ATMPVC; | 99 | addr->sap_family = AF_ATMPVC; |
99 | addr->sap_addr.itf = vcc->dev->number; | 100 | addr->sap_addr.itf = vcc->dev->number; |
100 | addr->sap_addr.vpi = vcc->vpi; | 101 | addr->sap_addr.vpi = vcc->vpi; |
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c index e3c579ba6325..957999e43ff7 100644 --- a/net/ax25/ax25_uid.c +++ b/net/ax25/ax25_uid.c | |||
@@ -51,14 +51,14 @@ int ax25_uid_policy; | |||
51 | 51 | ||
52 | EXPORT_SYMBOL(ax25_uid_policy); | 52 | EXPORT_SYMBOL(ax25_uid_policy); |
53 | 53 | ||
54 | ax25_uid_assoc *ax25_findbyuid(uid_t uid) | 54 | ax25_uid_assoc *ax25_findbyuid(kuid_t uid) |
55 | { | 55 | { |
56 | ax25_uid_assoc *ax25_uid, *res = NULL; | 56 | ax25_uid_assoc *ax25_uid, *res = NULL; |
57 | struct hlist_node *node; | 57 | struct hlist_node *node; |
58 | 58 | ||
59 | read_lock(&ax25_uid_lock); | 59 | read_lock(&ax25_uid_lock); |
60 | ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { | 60 | ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { |
61 | if (ax25_uid->uid == uid) { | 61 | if (uid_eq(ax25_uid->uid, uid)) { |
62 | ax25_uid_hold(ax25_uid); | 62 | ax25_uid_hold(ax25_uid); |
63 | res = ax25_uid; | 63 | res = ax25_uid; |
64 | break; | 64 | break; |
@@ -84,7 +84,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) | |||
84 | read_lock(&ax25_uid_lock); | 84 | read_lock(&ax25_uid_lock); |
85 | ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { | 85 | ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { |
86 | if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { | 86 | if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { |
87 | res = ax25_uid->uid; | 87 | res = from_kuid_munged(current_user_ns(), ax25_uid->uid); |
88 | break; | 88 | break; |
89 | } | 89 | } |
90 | } | 90 | } |
@@ -93,9 +93,14 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) | |||
93 | return res; | 93 | return res; |
94 | 94 | ||
95 | case SIOCAX25ADDUID: | 95 | case SIOCAX25ADDUID: |
96 | { | ||
97 | kuid_t sax25_kuid; | ||
96 | if (!capable(CAP_NET_ADMIN)) | 98 | if (!capable(CAP_NET_ADMIN)) |
97 | return -EPERM; | 99 | return -EPERM; |
98 | user = ax25_findbyuid(sax->sax25_uid); | 100 | sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid); |
101 | if (!uid_valid(sax25_kuid)) | ||
102 | return -EINVAL; | ||
103 | user = ax25_findbyuid(sax25_kuid); | ||
99 | if (user) { | 104 | if (user) { |
100 | ax25_uid_put(user); | 105 | ax25_uid_put(user); |
101 | return -EEXIST; | 106 | return -EEXIST; |
@@ -106,7 +111,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) | |||
106 | return -ENOMEM; | 111 | return -ENOMEM; |
107 | 112 | ||
108 | atomic_set(&ax25_uid->refcount, 1); | 113 | atomic_set(&ax25_uid->refcount, 1); |
109 | ax25_uid->uid = sax->sax25_uid; | 114 | ax25_uid->uid = sax25_kuid; |
110 | ax25_uid->call = sax->sax25_call; | 115 | ax25_uid->call = sax->sax25_call; |
111 | 116 | ||
112 | write_lock(&ax25_uid_lock); | 117 | write_lock(&ax25_uid_lock); |
@@ -114,7 +119,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) | |||
114 | write_unlock(&ax25_uid_lock); | 119 | write_unlock(&ax25_uid_lock); |
115 | 120 | ||
116 | return 0; | 121 | return 0; |
117 | 122 | } | |
118 | case SIOCAX25DELUID: | 123 | case SIOCAX25DELUID: |
119 | if (!capable(CAP_NET_ADMIN)) | 124 | if (!capable(CAP_NET_ADMIN)) |
120 | return -EPERM; | 125 | return -EPERM; |
@@ -172,7 +177,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v) | |||
172 | struct ax25_uid_assoc *pt; | 177 | struct ax25_uid_assoc *pt; |
173 | 178 | ||
174 | pt = hlist_entry(v, struct ax25_uid_assoc, uid_node); | 179 | pt = hlist_entry(v, struct ax25_uid_assoc, uid_node); |
175 | seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call)); | 180 | seq_printf(seq, "%6d %s\n", |
181 | from_kuid_munged(seq_user_ns(seq), pt->uid), | ||
182 | ax2asc(buf, &pt->call)); | ||
176 | } | 183 | } |
177 | return 0; | 184 | return 0; |
178 | } | 185 | } |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e877af8bdd1e..469daabd90c7 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -642,7 +642,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
642 | struct batadv_neigh_node *router = NULL; | 642 | struct batadv_neigh_node *router = NULL; |
643 | struct batadv_orig_node *orig_node_tmp; | 643 | struct batadv_orig_node *orig_node_tmp; |
644 | struct hlist_node *node; | 644 | struct hlist_node *node; |
645 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; | 645 | int if_num; |
646 | uint8_t sum_orig, sum_neigh; | ||
646 | uint8_t *neigh_addr; | 647 | uint8_t *neigh_addr; |
647 | 648 | ||
648 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 649 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
@@ -727,17 +728,17 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, | |||
727 | if (router && (neigh_node->tq_avg == router->tq_avg)) { | 728 | if (router && (neigh_node->tq_avg == router->tq_avg)) { |
728 | orig_node_tmp = router->orig_node; | 729 | orig_node_tmp = router->orig_node; |
729 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); | 730 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); |
730 | bcast_own_sum_orig = | 731 | if_num = router->if_incoming->if_num; |
731 | orig_node_tmp->bcast_own_sum[if_incoming->if_num]; | 732 | sum_orig = orig_node_tmp->bcast_own_sum[if_num]; |
732 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); | 733 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); |
733 | 734 | ||
734 | orig_node_tmp = neigh_node->orig_node; | 735 | orig_node_tmp = neigh_node->orig_node; |
735 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); | 736 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); |
736 | bcast_own_sum_neigh = | 737 | if_num = neigh_node->if_incoming->if_num; |
737 | orig_node_tmp->bcast_own_sum[if_incoming->if_num]; | 738 | sum_neigh = orig_node_tmp->bcast_own_sum[if_num]; |
738 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); | 739 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); |
739 | 740 | ||
740 | if (bcast_own_sum_orig >= bcast_own_sum_neigh) | 741 | if (sum_orig >= sum_neigh) |
741 | goto update_tt; | 742 | goto update_tt; |
742 | } | 743 | } |
743 | 744 | ||
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index a081ce1c0514..cebaae7e148b 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h | |||
@@ -20,8 +20,8 @@ | |||
20 | #ifndef _NET_BATMAN_ADV_BITARRAY_H_ | 20 | #ifndef _NET_BATMAN_ADV_BITARRAY_H_ |
21 | #define _NET_BATMAN_ADV_BITARRAY_H_ | 21 | #define _NET_BATMAN_ADV_BITARRAY_H_ |
22 | 22 | ||
23 | /* returns true if the corresponding bit in the given seq_bits indicates true | 23 | /* Returns 1 if the corresponding bit in the given seq_bits indicates true |
24 | * and curr_seqno is within range of last_seqno | 24 | * and curr_seqno is within range of last_seqno. Otherwise returns 0. |
25 | */ | 25 | */ |
26 | static inline int batadv_test_bit(const unsigned long *seq_bits, | 26 | static inline int batadv_test_bit(const unsigned long *seq_bits, |
27 | uint32_t last_seqno, uint32_t curr_seqno) | 27 | uint32_t last_seqno, uint32_t curr_seqno) |
@@ -32,7 +32,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits, | |||
32 | if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE) | 32 | if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE) |
33 | return 0; | 33 | return 0; |
34 | else | 34 | else |
35 | return test_bit(diff, seq_bits); | 35 | return test_bit(diff, seq_bits) != 0; |
36 | } | 36 | } |
37 | 37 | ||
38 | /* turn corresponding bit on, so we can remember that we got the packet */ | 38 | /* turn corresponding bit on, so we can remember that we got the packet */ |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 109ea2aae96c..21c53577c8d6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -100,18 +100,21 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) | |||
100 | { | 100 | { |
101 | struct batadv_priv *bat_priv = netdev_priv(dev); | 101 | struct batadv_priv *bat_priv = netdev_priv(dev); |
102 | struct sockaddr *addr = p; | 102 | struct sockaddr *addr = p; |
103 | uint8_t old_addr[ETH_ALEN]; | ||
103 | 104 | ||
104 | if (!is_valid_ether_addr(addr->sa_data)) | 105 | if (!is_valid_ether_addr(addr->sa_data)) |
105 | return -EADDRNOTAVAIL; | 106 | return -EADDRNOTAVAIL; |
106 | 107 | ||
108 | memcpy(old_addr, dev->dev_addr, ETH_ALEN); | ||
109 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | ||
110 | |||
107 | /* only modify transtable if it has been initialized before */ | 111 | /* only modify transtable if it has been initialized before */ |
108 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { | 112 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { |
109 | batadv_tt_local_remove(bat_priv, dev->dev_addr, | 113 | batadv_tt_local_remove(bat_priv, old_addr, |
110 | "mac address changed", false); | 114 | "mac address changed", false); |
111 | batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); | 115 | batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); |
112 | } | 116 | } |
113 | 117 | ||
114 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | ||
115 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; | 118 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; |
116 | return 0; | 119 | return 0; |
117 | } | 120 | } |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 5e5f5b410e0b..1eaacf10d19d 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -58,7 +58,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
58 | switch (cmd) { | 58 | switch (cmd) { |
59 | case BNEPCONNADD: | 59 | case BNEPCONNADD: |
60 | if (!capable(CAP_NET_ADMIN)) | 60 | if (!capable(CAP_NET_ADMIN)) |
61 | return -EACCES; | 61 | return -EPERM; |
62 | 62 | ||
63 | if (copy_from_user(&ca, argp, sizeof(ca))) | 63 | if (copy_from_user(&ca, argp, sizeof(ca))) |
64 | return -EFAULT; | 64 | return -EFAULT; |
@@ -84,7 +84,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
84 | 84 | ||
85 | case BNEPCONNDEL: | 85 | case BNEPCONNDEL: |
86 | if (!capable(CAP_NET_ADMIN)) | 86 | if (!capable(CAP_NET_ADMIN)) |
87 | return -EACCES; | 87 | return -EPERM; |
88 | 88 | ||
89 | if (copy_from_user(&cd, argp, sizeof(cd))) | 89 | if (copy_from_user(&cd, argp, sizeof(cd))) |
90 | return -EFAULT; | 90 | return -EFAULT; |
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index 311668d14571..32dc83dcb6b2 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c | |||
@@ -72,7 +72,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
72 | switch (cmd) { | 72 | switch (cmd) { |
73 | case CMTPCONNADD: | 73 | case CMTPCONNADD: |
74 | if (!capable(CAP_NET_ADMIN)) | 74 | if (!capable(CAP_NET_ADMIN)) |
75 | return -EACCES; | 75 | return -EPERM; |
76 | 76 | ||
77 | if (copy_from_user(&ca, argp, sizeof(ca))) | 77 | if (copy_from_user(&ca, argp, sizeof(ca))) |
78 | return -EFAULT; | 78 | return -EFAULT; |
@@ -97,7 +97,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
97 | 97 | ||
98 | case CMTPCONNDEL: | 98 | case CMTPCONNDEL: |
99 | if (!capable(CAP_NET_ADMIN)) | 99 | if (!capable(CAP_NET_ADMIN)) |
100 | return -EACCES; | 100 | return -EPERM; |
101 | 101 | ||
102 | if (copy_from_user(&cd, argp, sizeof(cd))) | 102 | if (copy_from_user(&cd, argp, sizeof(cd))) |
103 | return -EFAULT; | 103 | return -EFAULT; |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 5ad7da217474..3c094e78dde9 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <net/bluetooth/bluetooth.h> | 29 | #include <net/bluetooth/bluetooth.h> |
30 | #include <net/bluetooth/hci_core.h> | 30 | #include <net/bluetooth/hci_core.h> |
31 | #include <net/bluetooth/a2mp.h> | 31 | #include <net/bluetooth/a2mp.h> |
32 | #include <net/bluetooth/smp.h> | ||
32 | 33 | ||
33 | static void hci_le_connect(struct hci_conn *conn) | 34 | static void hci_le_connect(struct hci_conn *conn) |
34 | { | 35 | { |
@@ -619,6 +620,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | |||
619 | { | 620 | { |
620 | BT_DBG("hcon %p", conn); | 621 | BT_DBG("hcon %p", conn); |
621 | 622 | ||
623 | if (conn->type == LE_LINK) | ||
624 | return smp_conn_security(conn, sec_level); | ||
625 | |||
622 | /* For sdp we don't need the link key. */ | 626 | /* For sdp we don't need the link key. */ |
623 | if (sec_level == BT_SECURITY_SDP) | 627 | if (sec_level == BT_SECURITY_SDP) |
624 | return 1; | 628 | return 1; |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index d4de5db18d5a..0b997c8f9655 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -734,6 +734,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
734 | 734 | ||
735 | cancel_work_sync(&hdev->le_scan); | 735 | cancel_work_sync(&hdev->le_scan); |
736 | 736 | ||
737 | cancel_delayed_work(&hdev->power_off); | ||
738 | |||
737 | hci_req_cancel(hdev, ENODEV); | 739 | hci_req_cancel(hdev, ENODEV); |
738 | hci_req_lock(hdev); | 740 | hci_req_lock(hdev); |
739 | 741 | ||
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 41ff978a33f9..715d7e33fba0 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev) | |||
1365 | return false; | 1365 | return false; |
1366 | 1366 | ||
1367 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); | 1367 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); |
1368 | if (!e) | ||
1369 | return false; | ||
1370 | |||
1368 | if (hci_resolve_name(hdev, e) == 0) { | 1371 | if (hci_resolve_name(hdev, e) == 0) { |
1369 | e->name_state = NAME_PENDING; | 1372 | e->name_state = NAME_PENDING; |
1370 | return true; | 1373 | return true; |
@@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, | |||
1393 | return; | 1396 | return; |
1394 | 1397 | ||
1395 | e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); | 1398 | e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); |
1396 | if (e) { | 1399 | /* If the device was not found in a list of found devices names of which |
1400 | * are pending. there is no need to continue resolving a next name as it | ||
1401 | * will be done upon receiving another Remote Name Request Complete | ||
1402 | * Event */ | ||
1403 | if (!e) | ||
1404 | return; | ||
1405 | |||
1406 | list_del(&e->list); | ||
1407 | if (name) { | ||
1397 | e->name_state = NAME_KNOWN; | 1408 | e->name_state = NAME_KNOWN; |
1398 | list_del(&e->list); | 1409 | mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, |
1399 | if (name) | 1410 | e->data.rssi, name, name_len); |
1400 | mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, | 1411 | } else { |
1401 | e->data.rssi, name, name_len); | 1412 | e->name_state = NAME_NOT_KNOWN; |
1402 | } | 1413 | } |
1403 | 1414 | ||
1404 | if (hci_resolve_next_name(hdev)) | 1415 | if (hci_resolve_next_name(hdev)) |
@@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
1762 | if (conn->type == ACL_LINK) { | 1773 | if (conn->type == ACL_LINK) { |
1763 | conn->state = BT_CONFIG; | 1774 | conn->state = BT_CONFIG; |
1764 | hci_conn_hold(conn); | 1775 | hci_conn_hold(conn); |
1765 | conn->disc_timeout = HCI_DISCONN_TIMEOUT; | 1776 | |
1777 | if (!conn->out && !hci_conn_ssp_enabled(conn) && | ||
1778 | !hci_find_link_key(hdev, &ev->bdaddr)) | ||
1779 | conn->disc_timeout = HCI_PAIRING_TIMEOUT; | ||
1780 | else | ||
1781 | conn->disc_timeout = HCI_DISCONN_TIMEOUT; | ||
1766 | } else | 1782 | } else |
1767 | conn->state = BT_CONNECTED; | 1783 | conn->state = BT_CONNECTED; |
1768 | 1784 | ||
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index a7f04de03d79..d5ace1eda3ed 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -490,7 +490,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, | |||
490 | switch (cmd) { | 490 | switch (cmd) { |
491 | case HCISETRAW: | 491 | case HCISETRAW: |
492 | if (!capable(CAP_NET_ADMIN)) | 492 | if (!capable(CAP_NET_ADMIN)) |
493 | return -EACCES; | 493 | return -EPERM; |
494 | 494 | ||
495 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) | 495 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) |
496 | return -EPERM; | 496 | return -EPERM; |
@@ -510,12 +510,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, | |||
510 | 510 | ||
511 | case HCIBLOCKADDR: | 511 | case HCIBLOCKADDR: |
512 | if (!capable(CAP_NET_ADMIN)) | 512 | if (!capable(CAP_NET_ADMIN)) |
513 | return -EACCES; | 513 | return -EPERM; |
514 | return hci_sock_blacklist_add(hdev, (void __user *) arg); | 514 | return hci_sock_blacklist_add(hdev, (void __user *) arg); |
515 | 515 | ||
516 | case HCIUNBLOCKADDR: | 516 | case HCIUNBLOCKADDR: |
517 | if (!capable(CAP_NET_ADMIN)) | 517 | if (!capable(CAP_NET_ADMIN)) |
518 | return -EACCES; | 518 | return -EPERM; |
519 | return hci_sock_blacklist_del(hdev, (void __user *) arg); | 519 | return hci_sock_blacklist_del(hdev, (void __user *) arg); |
520 | 520 | ||
521 | default: | 521 | default: |
@@ -546,22 +546,22 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, | |||
546 | 546 | ||
547 | case HCIDEVUP: | 547 | case HCIDEVUP: |
548 | if (!capable(CAP_NET_ADMIN)) | 548 | if (!capable(CAP_NET_ADMIN)) |
549 | return -EACCES; | 549 | return -EPERM; |
550 | return hci_dev_open(arg); | 550 | return hci_dev_open(arg); |
551 | 551 | ||
552 | case HCIDEVDOWN: | 552 | case HCIDEVDOWN: |
553 | if (!capable(CAP_NET_ADMIN)) | 553 | if (!capable(CAP_NET_ADMIN)) |
554 | return -EACCES; | 554 | return -EPERM; |
555 | return hci_dev_close(arg); | 555 | return hci_dev_close(arg); |
556 | 556 | ||
557 | case HCIDEVRESET: | 557 | case HCIDEVRESET: |
558 | if (!capable(CAP_NET_ADMIN)) | 558 | if (!capable(CAP_NET_ADMIN)) |
559 | return -EACCES; | 559 | return -EPERM; |
560 | return hci_dev_reset(arg); | 560 | return hci_dev_reset(arg); |
561 | 561 | ||
562 | case HCIDEVRESTAT: | 562 | case HCIDEVRESTAT: |
563 | if (!capable(CAP_NET_ADMIN)) | 563 | if (!capable(CAP_NET_ADMIN)) |
564 | return -EACCES; | 564 | return -EPERM; |
565 | return hci_dev_reset_stat(arg); | 565 | return hci_dev_reset_stat(arg); |
566 | 566 | ||
567 | case HCISETSCAN: | 567 | case HCISETSCAN: |
@@ -573,7 +573,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, | |||
573 | case HCISETACLMTU: | 573 | case HCISETACLMTU: |
574 | case HCISETSCOMTU: | 574 | case HCISETSCOMTU: |
575 | if (!capable(CAP_NET_ADMIN)) | 575 | if (!capable(CAP_NET_ADMIN)) |
576 | return -EACCES; | 576 | return -EPERM; |
577 | return hci_dev_cmd(cmd, argp); | 577 | return hci_dev_cmd(cmd, argp); |
578 | 578 | ||
579 | case HCIINQUIRY: | 579 | case HCIINQUIRY: |
@@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, | |||
694 | *addr_len = sizeof(*haddr); | 694 | *addr_len = sizeof(*haddr); |
695 | haddr->hci_family = AF_BLUETOOTH; | 695 | haddr->hci_family = AF_BLUETOOTH; |
696 | haddr->hci_dev = hdev->id; | 696 | haddr->hci_dev = hdev->id; |
697 | haddr->hci_channel= 0; | ||
697 | 698 | ||
698 | release_sock(sk); | 699 | release_sock(sk); |
699 | return 0; | 700 | return 0; |
@@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, | |||
1009 | { | 1010 | { |
1010 | struct hci_filter *f = &hci_pi(sk)->filter; | 1011 | struct hci_filter *f = &hci_pi(sk)->filter; |
1011 | 1012 | ||
1013 | memset(&uf, 0, sizeof(uf)); | ||
1012 | uf.type_mask = f->type_mask; | 1014 | uf.type_mask = f->type_mask; |
1013 | uf.opcode = f->opcode; | 1015 | uf.opcode = f->opcode; |
1014 | uf.event_mask[0] = *((u32 *) f->event_mask + 0); | 1016 | uf.event_mask[0] = *((u32 *) f->event_mask + 0); |
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 18b3f6892a36..b24fb3bd8625 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
@@ -56,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
56 | switch (cmd) { | 56 | switch (cmd) { |
57 | case HIDPCONNADD: | 57 | case HIDPCONNADD: |
58 | if (!capable(CAP_NET_ADMIN)) | 58 | if (!capable(CAP_NET_ADMIN)) |
59 | return -EACCES; | 59 | return -EPERM; |
60 | 60 | ||
61 | if (copy_from_user(&ca, argp, sizeof(ca))) | 61 | if (copy_from_user(&ca, argp, sizeof(ca))) |
62 | return -EFAULT; | 62 | return -EFAULT; |
@@ -91,7 +91,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
91 | 91 | ||
92 | case HIDPCONNDEL: | 92 | case HIDPCONNDEL: |
93 | if (!capable(CAP_NET_ADMIN)) | 93 | if (!capable(CAP_NET_ADMIN)) |
94 | return -EACCES; | 94 | return -EPERM; |
95 | 95 | ||
96 | if (copy_from_user(&cd, argp, sizeof(cd))) | 96 | if (copy_from_user(&cd, argp, sizeof(cd))) |
97 | return -EFAULT; | 97 | return -EFAULT; |
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a8964db04bfb..38c00f142203 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -1008,7 +1008,7 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c | |||
1008 | if (!conn) | 1008 | if (!conn) |
1009 | return; | 1009 | return; |
1010 | 1010 | ||
1011 | if (chan->mode == L2CAP_MODE_ERTM) { | 1011 | if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { |
1012 | __clear_retrans_timer(chan); | 1012 | __clear_retrans_timer(chan); |
1013 | __clear_monitor_timer(chan); | 1013 | __clear_monitor_timer(chan); |
1014 | __clear_ack_timer(chan); | 1014 | __clear_ack_timer(chan); |
@@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
1181 | sk = chan->sk; | 1181 | sk = chan->sk; |
1182 | 1182 | ||
1183 | hci_conn_hold(conn->hcon); | 1183 | hci_conn_hold(conn->hcon); |
1184 | conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; | ||
1184 | 1185 | ||
1185 | bacpy(&bt_sk(sk)->src, conn->src); | 1186 | bacpy(&bt_sk(sk)->src, conn->src); |
1186 | bacpy(&bt_sk(sk)->dst, conn->dst); | 1187 | bacpy(&bt_sk(sk)->dst, conn->dst); |
@@ -1198,14 +1199,15 @@ clean: | |||
1198 | static void l2cap_conn_ready(struct l2cap_conn *conn) | 1199 | static void l2cap_conn_ready(struct l2cap_conn *conn) |
1199 | { | 1200 | { |
1200 | struct l2cap_chan *chan; | 1201 | struct l2cap_chan *chan; |
1202 | struct hci_conn *hcon = conn->hcon; | ||
1201 | 1203 | ||
1202 | BT_DBG("conn %p", conn); | 1204 | BT_DBG("conn %p", conn); |
1203 | 1205 | ||
1204 | if (!conn->hcon->out && conn->hcon->type == LE_LINK) | 1206 | if (!hcon->out && hcon->type == LE_LINK) |
1205 | l2cap_le_conn_ready(conn); | 1207 | l2cap_le_conn_ready(conn); |
1206 | 1208 | ||
1207 | if (conn->hcon->out && conn->hcon->type == LE_LINK) | 1209 | if (hcon->out && hcon->type == LE_LINK) |
1208 | smp_conn_security(conn, conn->hcon->pending_sec_level); | 1210 | smp_conn_security(hcon, hcon->pending_sec_level); |
1209 | 1211 | ||
1210 | mutex_lock(&conn->chan_lock); | 1212 | mutex_lock(&conn->chan_lock); |
1211 | 1213 | ||
@@ -1218,8 +1220,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) | |||
1218 | continue; | 1220 | continue; |
1219 | } | 1221 | } |
1220 | 1222 | ||
1221 | if (conn->hcon->type == LE_LINK) { | 1223 | if (hcon->type == LE_LINK) { |
1222 | if (smp_conn_security(conn, chan->sec_level)) | 1224 | if (smp_conn_security(hcon, chan->sec_level)) |
1223 | l2cap_chan_ready(chan); | 1225 | l2cap_chan_ready(chan); |
1224 | 1226 | ||
1225 | } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { | 1227 | } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { |
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a4bb27e8427e..34bbe1c5e389 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l | |||
245 | 245 | ||
246 | BT_DBG("sock %p, sk %p", sock, sk); | 246 | BT_DBG("sock %p, sk %p", sock, sk); |
247 | 247 | ||
248 | memset(la, 0, sizeof(struct sockaddr_l2)); | ||
248 | addr->sa_family = AF_BLUETOOTH; | 249 | addr->sa_family = AF_BLUETOOTH; |
249 | *len = sizeof(struct sockaddr_l2); | 250 | *len = sizeof(struct sockaddr_l2); |
250 | 251 | ||
@@ -615,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
615 | break; | 616 | break; |
616 | } | 617 | } |
617 | 618 | ||
618 | if (smp_conn_security(conn, sec.level)) | 619 | if (smp_conn_security(conn->hcon, sec.level)) |
619 | break; | 620 | break; |
620 | sk->sk_state = BT_CONFIG; | 621 | sk->sk_state = BT_CONFIG; |
621 | chan->state = BT_CONFIG; | 622 | chan->state = BT_CONFIG; |
@@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p | |||
1174 | 1175 | ||
1175 | chan = l2cap_chan_create(); | 1176 | chan = l2cap_chan_create(); |
1176 | if (!chan) { | 1177 | if (!chan) { |
1177 | l2cap_sock_kill(sk); | 1178 | sk_free(sk); |
1178 | return NULL; | 1179 | return NULL; |
1179 | } | 1180 | } |
1180 | 1181 | ||
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ad6613d17ca6..eba022de3c20 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -2875,6 +2875,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) | |||
2875 | if (scan) | 2875 | if (scan) |
2876 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 2876 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
2877 | 2877 | ||
2878 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { | ||
2879 | u8 ssp = 1; | ||
2880 | |||
2881 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); | ||
2882 | } | ||
2883 | |||
2884 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | ||
2885 | struct hci_cp_write_le_host_supported cp; | ||
2886 | |||
2887 | cp.le = 1; | ||
2888 | cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); | ||
2889 | |||
2890 | hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, | ||
2891 | sizeof(cp), &cp); | ||
2892 | } | ||
2893 | |||
2878 | update_class(hdev); | 2894 | update_class(hdev); |
2879 | update_name(hdev, hdev->dev_name); | 2895 | update_name(hdev, hdev->dev_name); |
2880 | update_eir(hdev); | 2896 | update_eir(hdev); |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7e1e59645c05..1a17850d093c 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int * | |||
528 | 528 | ||
529 | BT_DBG("sock %p, sk %p", sock, sk); | 529 | BT_DBG("sock %p, sk %p", sock, sk); |
530 | 530 | ||
531 | memset(sa, 0, sizeof(*sa)); | ||
531 | sa->rc_family = AF_BLUETOOTH; | 532 | sa->rc_family = AF_BLUETOOTH; |
532 | sa->rc_channel = rfcomm_pi(sk)->channel; | 533 | sa->rc_channel = rfcomm_pi(sk)->channel; |
533 | if (peer) | 534 | if (peer) |
@@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c | |||
822 | } | 823 | } |
823 | 824 | ||
824 | sec.level = rfcomm_pi(sk)->sec_level; | 825 | sec.level = rfcomm_pi(sk)->sec_level; |
826 | sec.key_size = 0; | ||
825 | 827 | ||
826 | len = min_t(unsigned int, len, sizeof(sec)); | 828 | len = min_t(unsigned int, len, sizeof(sec)); |
827 | if (copy_to_user(optval, (char *) &sec, len)) | 829 | if (copy_to_user(optval, (char *) &sec, len)) |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cb960773c002..ccc248791d50 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -278,8 +278,8 @@ out: | |||
278 | if (err < 0) | 278 | if (err < 0) |
279 | goto free; | 279 | goto free; |
280 | 280 | ||
281 | dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL); | 281 | dev->tty_dev = tty_port_register_device(&dev->port, rfcomm_tty_driver, |
282 | 282 | dev->id, NULL); | |
283 | if (IS_ERR(dev->tty_dev)) { | 283 | if (IS_ERR(dev->tty_dev)) { |
284 | err = PTR_ERR(dev->tty_dev); | 284 | err = PTR_ERR(dev->tty_dev); |
285 | list_del(&dev->list); | 285 | list_del(&dev->list); |
@@ -456,7 +456,7 @@ static int rfcomm_get_dev_list(void __user *arg) | |||
456 | 456 | ||
457 | size = sizeof(*dl) + dev_num * sizeof(*di); | 457 | size = sizeof(*dl) + dev_num * sizeof(*di); |
458 | 458 | ||
459 | dl = kmalloc(size, GFP_KERNEL); | 459 | dl = kzalloc(size, GFP_KERNEL); |
460 | if (!dl) | 460 | if (!dl) |
461 | return -ENOMEM; | 461 | return -ENOMEM; |
462 | 462 | ||
@@ -705,9 +705,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
705 | break; | 705 | break; |
706 | } | 706 | } |
707 | 707 | ||
708 | tty_unlock(); | 708 | tty_unlock(tty); |
709 | schedule(); | 709 | schedule(); |
710 | tty_lock(); | 710 | tty_lock(tty); |
711 | } | 711 | } |
712 | set_current_state(TASK_RUNNING); | 712 | set_current_state(TASK_RUNNING); |
713 | remove_wait_queue(&dev->wait, &wait); | 713 | remove_wait_queue(&dev->wait, &wait); |
@@ -861,7 +861,7 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned l | |||
861 | 861 | ||
862 | static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | 862 | static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) |
863 | { | 863 | { |
864 | struct ktermios *new = tty->termios; | 864 | struct ktermios *new = &tty->termios; |
865 | int old_baud_rate = tty_termios_baud_rate(old); | 865 | int old_baud_rate = tty_termios_baud_rate(old); |
866 | int new_baud_rate = tty_termios_baud_rate(new); | 866 | int new_baud_rate = tty_termios_baud_rate(new); |
867 | 867 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 40bbe25dcff7..3589e21edb09 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err) | |||
131 | sco_sock_clear_timer(sk); | 131 | sco_sock_clear_timer(sk); |
132 | sco_chan_del(sk, err); | 132 | sco_chan_del(sk, err); |
133 | bh_unlock_sock(sk); | 133 | bh_unlock_sock(sk); |
134 | |||
135 | sco_conn_lock(conn); | ||
136 | conn->sk = NULL; | ||
137 | sco_pi(sk)->conn = NULL; | ||
138 | sco_conn_unlock(conn); | ||
139 | |||
140 | if (conn->hcon) | ||
141 | hci_conn_put(conn->hcon); | ||
142 | |||
134 | sco_sock_kill(sk); | 143 | sco_sock_kill(sk); |
135 | } | 144 | } |
136 | 145 | ||
@@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err) | |||
821 | 830 | ||
822 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); | 831 | BT_DBG("sk %p, conn %p, err %d", sk, conn, err); |
823 | 832 | ||
824 | if (conn) { | ||
825 | sco_conn_lock(conn); | ||
826 | conn->sk = NULL; | ||
827 | sco_pi(sk)->conn = NULL; | ||
828 | sco_conn_unlock(conn); | ||
829 | |||
830 | if (conn->hcon) | ||
831 | hci_conn_put(conn->hcon); | ||
832 | } | ||
833 | |||
834 | sk->sk_state = BT_CLOSED; | 833 | sk->sk_state = BT_CLOSED; |
835 | sk->sk_err = err; | 834 | sk->sk_err = err; |
836 | sk->sk_state_change(sk); | 835 | sk->sk_state_change(sk); |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 16ef0dc85a0a..8c225ef349cd 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -267,10 +267,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) | |||
267 | mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, | 267 | mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, |
268 | hcon->dst_type, reason); | 268 | hcon->dst_type, reason); |
269 | 269 | ||
270 | if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { | 270 | cancel_delayed_work_sync(&conn->security_timer); |
271 | cancel_delayed_work_sync(&conn->security_timer); | 271 | |
272 | if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) | ||
272 | smp_chan_destroy(conn); | 273 | smp_chan_destroy(conn); |
273 | } | ||
274 | } | 274 | } |
275 | 275 | ||
276 | #define JUST_WORKS 0x00 | 276 | #define JUST_WORKS 0x00 |
@@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
579 | 579 | ||
580 | if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) | 580 | if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) |
581 | smp = smp_chan_create(conn); | 581 | smp = smp_chan_create(conn); |
582 | else | ||
583 | smp = conn->smp_chan; | ||
582 | 584 | ||
583 | smp = conn->smp_chan; | 585 | if (!smp) |
586 | return SMP_UNSPECIFIED; | ||
584 | 587 | ||
585 | smp->preq[0] = SMP_CMD_PAIRING_REQ; | 588 | smp->preq[0] = SMP_CMD_PAIRING_REQ; |
586 | memcpy(&smp->preq[1], req, sizeof(*req)); | 589 | memcpy(&smp->preq[1], req, sizeof(*req)); |
@@ -757,9 +760,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) | |||
757 | return 0; | 760 | return 0; |
758 | } | 761 | } |
759 | 762 | ||
760 | int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) | 763 | int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) |
761 | { | 764 | { |
762 | struct hci_conn *hcon = conn->hcon; | 765 | struct l2cap_conn *conn = hcon->l2cap_data; |
763 | struct smp_chan *smp = conn->smp_chan; | 766 | struct smp_chan *smp = conn->smp_chan; |
764 | __u8 authreq; | 767 | __u8 authreq; |
765 | 768 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 333484537600..070e8a68cfc6 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
31 | struct net_bridge_mdb_entry *mdst; | 31 | struct net_bridge_mdb_entry *mdst; |
32 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | 32 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); |
33 | 33 | ||
34 | rcu_read_lock(); | ||
34 | #ifdef CONFIG_BRIDGE_NETFILTER | 35 | #ifdef CONFIG_BRIDGE_NETFILTER |
35 | if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { | 36 | if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { |
36 | br_nf_pre_routing_finish_bridge_slow(skb); | 37 | br_nf_pre_routing_finish_bridge_slow(skb); |
38 | rcu_read_unlock(); | ||
37 | return NETDEV_TX_OK; | 39 | return NETDEV_TX_OK; |
38 | } | 40 | } |
39 | #endif | 41 | #endif |
@@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
48 | skb_reset_mac_header(skb); | 50 | skb_reset_mac_header(skb); |
49 | skb_pull(skb, ETH_HLEN); | 51 | skb_pull(skb, ETH_HLEN); |
50 | 52 | ||
51 | rcu_read_lock(); | ||
52 | if (is_broadcast_ether_addr(dest)) | 53 | if (is_broadcast_ether_addr(dest)) |
53 | br_flood_deliver(br, skb); | 54 | br_flood_deliver(br, skb); |
54 | else if (is_multicast_ether_addr(dest)) { | 55 | else if (is_multicast_ether_addr(dest)) { |
@@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev) | |||
206 | static void br_netpoll_cleanup(struct net_device *dev) | 207 | static void br_netpoll_cleanup(struct net_device *dev) |
207 | { | 208 | { |
208 | struct net_bridge *br = netdev_priv(dev); | 209 | struct net_bridge *br = netdev_priv(dev); |
209 | struct net_bridge_port *p, *n; | 210 | struct net_bridge_port *p; |
210 | 211 | ||
211 | list_for_each_entry_safe(p, n, &br->port_list, list) { | 212 | list_for_each_entry(p, &br->port_list, list) |
212 | br_netpoll_disable(p); | 213 | br_netpoll_disable(p); |
213 | } | ||
214 | } | 214 | } |
215 | 215 | ||
216 | static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) | 216 | static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, |
217 | gfp_t gfp) | ||
217 | { | 218 | { |
218 | struct net_bridge *br = netdev_priv(dev); | 219 | struct net_bridge *br = netdev_priv(dev); |
219 | struct net_bridge_port *p, *n; | 220 | struct net_bridge_port *p; |
220 | int err = 0; | 221 | int err = 0; |
221 | 222 | ||
222 | list_for_each_entry_safe(p, n, &br->port_list, list) { | 223 | list_for_each_entry(p, &br->port_list, list) { |
223 | if (!p->dev) | 224 | if (!p->dev) |
224 | continue; | 225 | continue; |
225 | 226 | err = br_netpoll_enable(p, gfp); | |
226 | err = br_netpoll_enable(p); | ||
227 | if (err) | 227 | if (err) |
228 | goto fail; | 228 | goto fail; |
229 | } | 229 | } |
@@ -236,17 +236,17 @@ fail: | |||
236 | goto out; | 236 | goto out; |
237 | } | 237 | } |
238 | 238 | ||
239 | int br_netpoll_enable(struct net_bridge_port *p) | 239 | int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) |
240 | { | 240 | { |
241 | struct netpoll *np; | 241 | struct netpoll *np; |
242 | int err = 0; | 242 | int err = 0; |
243 | 243 | ||
244 | np = kzalloc(sizeof(*p->np), GFP_KERNEL); | 244 | np = kzalloc(sizeof(*p->np), gfp); |
245 | err = -ENOMEM; | 245 | err = -ENOMEM; |
246 | if (!np) | 246 | if (!np) |
247 | goto out; | 247 | goto out; |
248 | 248 | ||
249 | err = __netpoll_setup(np, p->dev); | 249 | err = __netpoll_setup(np, p->dev, gfp); |
250 | if (err) { | 250 | if (err) { |
251 | kfree(np); | 251 | kfree(np); |
252 | goto out; | 252 | goto out; |
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p) | |||
267 | 267 | ||
268 | p->np = NULL; | 268 | p->np = NULL; |
269 | 269 | ||
270 | /* Wait for transmitting packets to finish before freeing. */ | 270 | __netpoll_free_rcu(np); |
271 | synchronize_rcu_bh(); | ||
272 | |||
273 | __netpoll_cleanup(np); | ||
274 | kfree(np); | ||
275 | } | 271 | } |
276 | 272 | ||
277 | #endif | 273 | #endif |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index e9466d412707..02015a505d2a 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
65 | { | 65 | { |
66 | skb->dev = to->dev; | 66 | skb->dev = to->dev; |
67 | 67 | ||
68 | if (unlikely(netpoll_tx_running(to->dev))) { | 68 | if (unlikely(netpoll_tx_running(to->br->dev))) { |
69 | if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) | 69 | if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) |
70 | kfree_skb(skb); | 70 | kfree_skb(skb); |
71 | else { | 71 | else { |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e1144e1617be..1c8fdc3558cd 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
361 | if (err) | 361 | if (err) |
362 | goto err2; | 362 | goto err2; |
363 | 363 | ||
364 | if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) | 364 | if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL)))) |
365 | goto err3; | 365 | goto err3; |
366 | 366 | ||
367 | err = netdev_set_master(dev, br->dev); | 367 | err = netdev_set_master(dev, br->dev); |
@@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
427 | if (!p || p->br != br) | 427 | if (!p || p->br != br) |
428 | return -EINVAL; | 428 | return -EINVAL; |
429 | 429 | ||
430 | /* Since more than one interface can be attached to a bridge, | ||
431 | * there still maybe an alternate path for netconsole to use; | ||
432 | * therefore there is no reason for a NETDEV_RELEASE event. | ||
433 | */ | ||
430 | del_nbp(p); | 434 | del_nbp(p); |
431 | 435 | ||
432 | spin_lock_bh(&br->lock); | 436 | spin_lock_bh(&br->lock); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index a768b2408edf..f507d2af9646 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, | |||
316 | netpoll_send_skb(np, skb); | 316 | netpoll_send_skb(np, skb); |
317 | } | 317 | } |
318 | 318 | ||
319 | extern int br_netpoll_enable(struct net_bridge_port *p); | 319 | extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp); |
320 | extern void br_netpoll_disable(struct net_bridge_port *p); | 320 | extern void br_netpoll_disable(struct net_bridge_port *p); |
321 | #else | 321 | #else |
322 | static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) | 322 | static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) |
@@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, | |||
329 | { | 329 | { |
330 | } | 330 | } |
331 | 331 | ||
332 | static inline int br_netpoll_enable(struct net_bridge_port *p) | 332 | static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) |
333 | { | 333 | { |
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index f88ee537fb2b..92de5e5f9db2 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -80,7 +80,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, | |||
80 | unsigned int bitmask; | 80 | unsigned int bitmask; |
81 | 81 | ||
82 | spin_lock_bh(&ebt_log_lock); | 82 | spin_lock_bh(&ebt_log_lock); |
83 | printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", | 83 | printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", |
84 | '0' + loginfo->u.log.level, prefix, | 84 | '0' + loginfo->u.log.level, prefix, |
85 | in ? in->name : "", out ? out->name : "", | 85 | in ? in->name : "", out ? out->name : "", |
86 | eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, | 86 | eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, |
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index dd485f6128e8..ba217e90765e 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c | |||
@@ -211,9 +211,10 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer, | |||
211 | void (*put)(struct cflayer *lyr)) | 211 | void (*put)(struct cflayer *lyr)) |
212 | { | 212 | { |
213 | struct cfsrvl *service; | 213 | struct cfsrvl *service; |
214 | service = container_of(adapt_layer->dn, struct cfsrvl, layer); | ||
215 | 214 | ||
216 | WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL); | 215 | if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL)) |
216 | return; | ||
217 | service = container_of(adapt_layer->dn, struct cfsrvl, layer); | ||
217 | service->hold = hold; | 218 | service->hold = hold; |
218 | service->put = put; | 219 | service->put = put; |
219 | } | 220 | } |
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 69771c04ba8f..e597733affb8 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
@@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | |||
94 | 94 | ||
95 | /* check the version of IP */ | 95 | /* check the version of IP */ |
96 | ip_version = skb_header_pointer(skb, 0, 1, &buf); | 96 | ip_version = skb_header_pointer(skb, 0, 1, &buf); |
97 | if (!ip_version) { | ||
98 | kfree_skb(skb); | ||
99 | return -EINVAL; | ||
100 | } | ||
97 | 101 | ||
98 | switch (*ip_version >> 4) { | 102 | switch (*ip_version >> 4) { |
99 | case 4: | 103 | case 4: |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 69e38db28e5f..a8020293f342 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) | |||
84 | return -1; | 84 | return -1; |
85 | } | 85 | } |
86 | } else { | 86 | } else { |
87 | pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); | ||
88 | memcpy(&client->fsid, fsid, sizeof(*fsid)); | 87 | memcpy(&client->fsid, fsid, sizeof(*fsid)); |
89 | } | 88 | } |
90 | return 0; | 89 | return 0; |
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 54b531a01121..38b5dc1823d4 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c | |||
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client) | |||
189 | snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, | 189 | snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, |
190 | client->monc.auth->global_id); | 190 | client->monc.auth->global_id); |
191 | 191 | ||
192 | dout("ceph_debugfs_client_init %p %s\n", client, name); | ||
193 | |||
194 | BUG_ON(client->debugfs_dir); | ||
192 | client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); | 195 | client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); |
193 | if (!client->debugfs_dir) | 196 | if (!client->debugfs_dir) |
194 | goto out; | 197 | goto out; |
@@ -234,6 +237,7 @@ out: | |||
234 | 237 | ||
235 | void ceph_debugfs_client_cleanup(struct ceph_client *client) | 238 | void ceph_debugfs_client_cleanup(struct ceph_client *client) |
236 | { | 239 | { |
240 | dout("ceph_debugfs_client_cleanup %p\n", client); | ||
237 | debugfs_remove(client->debugfs_osdmap); | 241 | debugfs_remove(client->debugfs_osdmap); |
238 | debugfs_remove(client->debugfs_monmap); | 242 | debugfs_remove(client->debugfs_monmap); |
239 | debugfs_remove(client->osdc.debugfs_file); | 243 | debugfs_remove(client->osdc.debugfs_file); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b9796750034a..159aa8bef9e7 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con) | |||
915 | con->out_connect.authorizer_len = auth ? | 915 | con->out_connect.authorizer_len = auth ? |
916 | cpu_to_le32(auth->authorizer_buf_len) : 0; | 916 | cpu_to_le32(auth->authorizer_buf_len) : 0; |
917 | 917 | ||
918 | con_out_kvec_reset(con); | ||
919 | con_out_kvec_add(con, sizeof (con->out_connect), | 918 | con_out_kvec_add(con, sizeof (con->out_connect), |
920 | &con->out_connect); | 919 | &con->out_connect); |
921 | if (auth && auth->authorizer_buf_len) | 920 | if (auth && auth->authorizer_buf_len) |
@@ -1074,16 +1073,13 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
1074 | BUG_ON(kaddr == NULL); | 1073 | BUG_ON(kaddr == NULL); |
1075 | base = kaddr + con->out_msg_pos.page_pos + bio_offset; | 1074 | base = kaddr + con->out_msg_pos.page_pos + bio_offset; |
1076 | crc = crc32c(crc, base, len); | 1075 | crc = crc32c(crc, base, len); |
1076 | kunmap(page); | ||
1077 | msg->footer.data_crc = cpu_to_le32(crc); | 1077 | msg->footer.data_crc = cpu_to_le32(crc); |
1078 | con->out_msg_pos.did_page_crc = true; | 1078 | con->out_msg_pos.did_page_crc = true; |
1079 | } | 1079 | } |
1080 | ret = ceph_tcp_sendpage(con->sock, page, | 1080 | ret = ceph_tcp_sendpage(con->sock, page, |
1081 | con->out_msg_pos.page_pos + bio_offset, | 1081 | con->out_msg_pos.page_pos + bio_offset, |
1082 | len, 1); | 1082 | len, 1); |
1083 | |||
1084 | if (do_datacrc) | ||
1085 | kunmap(page); | ||
1086 | |||
1087 | if (ret <= 0) | 1083 | if (ret <= 0) |
1088 | goto out; | 1084 | goto out; |
1089 | 1085 | ||
@@ -1557,6 +1553,7 @@ static int process_connect(struct ceph_connection *con) | |||
1557 | return -1; | 1553 | return -1; |
1558 | } | 1554 | } |
1559 | con->auth_retry = 1; | 1555 | con->auth_retry = 1; |
1556 | con_out_kvec_reset(con); | ||
1560 | ret = prepare_write_connect(con); | 1557 | ret = prepare_write_connect(con); |
1561 | if (ret < 0) | 1558 | if (ret < 0) |
1562 | return ret; | 1559 | return ret; |
@@ -1577,6 +1574,7 @@ static int process_connect(struct ceph_connection *con) | |||
1577 | ENTITY_NAME(con->peer_name), | 1574 | ENTITY_NAME(con->peer_name), |
1578 | ceph_pr_addr(&con->peer_addr.in_addr)); | 1575 | ceph_pr_addr(&con->peer_addr.in_addr)); |
1579 | reset_connection(con); | 1576 | reset_connection(con); |
1577 | con_out_kvec_reset(con); | ||
1580 | ret = prepare_write_connect(con); | 1578 | ret = prepare_write_connect(con); |
1581 | if (ret < 0) | 1579 | if (ret < 0) |
1582 | return ret; | 1580 | return ret; |
@@ -1601,6 +1599,7 @@ static int process_connect(struct ceph_connection *con) | |||
1601 | le32_to_cpu(con->out_connect.connect_seq), | 1599 | le32_to_cpu(con->out_connect.connect_seq), |
1602 | le32_to_cpu(con->in_reply.connect_seq)); | 1600 | le32_to_cpu(con->in_reply.connect_seq)); |
1603 | con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); | 1601 | con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); |
1602 | con_out_kvec_reset(con); | ||
1604 | ret = prepare_write_connect(con); | 1603 | ret = prepare_write_connect(con); |
1605 | if (ret < 0) | 1604 | if (ret < 0) |
1606 | return ret; | 1605 | return ret; |
@@ -1617,6 +1616,7 @@ static int process_connect(struct ceph_connection *con) | |||
1617 | le32_to_cpu(con->in_reply.global_seq)); | 1616 | le32_to_cpu(con->in_reply.global_seq)); |
1618 | get_global_seq(con->msgr, | 1617 | get_global_seq(con->msgr, |
1619 | le32_to_cpu(con->in_reply.global_seq)); | 1618 | le32_to_cpu(con->in_reply.global_seq)); |
1619 | con_out_kvec_reset(con); | ||
1620 | ret = prepare_write_connect(con); | 1620 | ret = prepare_write_connect(con); |
1621 | if (ret < 0) | 1621 | if (ret < 0) |
1622 | return ret; | 1622 | return ret; |
@@ -2135,7 +2135,11 @@ more: | |||
2135 | BUG_ON(con->state != CON_STATE_CONNECTING); | 2135 | BUG_ON(con->state != CON_STATE_CONNECTING); |
2136 | con->state = CON_STATE_NEGOTIATING; | 2136 | con->state = CON_STATE_NEGOTIATING; |
2137 | 2137 | ||
2138 | /* Banner is good, exchange connection info */ | 2138 | /* |
2139 | * Received banner is good, exchange connection info. | ||
2140 | * Do not reset out_kvec, as sending our banner raced | ||
2141 | * with receiving peer banner after connect completed. | ||
2142 | */ | ||
2139 | ret = prepare_write_connect(con); | 2143 | ret = prepare_write_connect(con); |
2140 | if (ret < 0) | 2144 | if (ret < 0) |
2141 | goto out; | 2145 | goto out; |
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 105d533b55f3..900ea0f043fc 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
@@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc) | |||
311 | EXPORT_SYMBOL(ceph_monc_open_session); | 311 | EXPORT_SYMBOL(ceph_monc_open_session); |
312 | 312 | ||
313 | /* | 313 | /* |
314 | * We require the fsid and global_id in order to initialize our | ||
315 | * debugfs dir. | ||
316 | */ | ||
317 | static bool have_debugfs_info(struct ceph_mon_client *monc) | ||
318 | { | ||
319 | dout("have_debugfs_info fsid %d globalid %lld\n", | ||
320 | (int)monc->client->have_fsid, monc->auth->global_id); | ||
321 | return monc->client->have_fsid && monc->auth->global_id > 0; | ||
322 | } | ||
323 | |||
324 | /* | ||
314 | * The monitor responds with mount ack indicate mount success. The | 325 | * The monitor responds with mount ack indicate mount success. The |
315 | * included client ticket allows the client to talk to MDSs and OSDs. | 326 | * included client ticket allows the client to talk to MDSs and OSDs. |
316 | */ | 327 | */ |
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, | |||
320 | struct ceph_client *client = monc->client; | 331 | struct ceph_client *client = monc->client; |
321 | struct ceph_monmap *monmap = NULL, *old = monc->monmap; | 332 | struct ceph_monmap *monmap = NULL, *old = monc->monmap; |
322 | void *p, *end; | 333 | void *p, *end; |
334 | int had_debugfs_info, init_debugfs = 0; | ||
323 | 335 | ||
324 | mutex_lock(&monc->mutex); | 336 | mutex_lock(&monc->mutex); |
325 | 337 | ||
338 | had_debugfs_info = have_debugfs_info(monc); | ||
339 | |||
326 | dout("handle_monmap\n"); | 340 | dout("handle_monmap\n"); |
327 | p = msg->front.iov_base; | 341 | p = msg->front.iov_base; |
328 | end = p + msg->front.iov_len; | 342 | end = p + msg->front.iov_len; |
@@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, | |||
344 | 358 | ||
345 | if (!client->have_fsid) { | 359 | if (!client->have_fsid) { |
346 | client->have_fsid = true; | 360 | client->have_fsid = true; |
361 | if (!had_debugfs_info && have_debugfs_info(monc)) { | ||
362 | pr_info("client%lld fsid %pU\n", | ||
363 | ceph_client_id(monc->client), | ||
364 | &monc->client->fsid); | ||
365 | init_debugfs = 1; | ||
366 | } | ||
347 | mutex_unlock(&monc->mutex); | 367 | mutex_unlock(&monc->mutex); |
348 | /* | 368 | |
349 | * do debugfs initialization without mutex to avoid | 369 | if (init_debugfs) { |
350 | * creating a locking dependency | 370 | /* |
351 | */ | 371 | * do debugfs initialization without mutex to avoid |
352 | ceph_debugfs_client_init(client); | 372 | * creating a locking dependency |
373 | */ | ||
374 | ceph_debugfs_client_init(monc->client); | ||
375 | } | ||
376 | |||
353 | goto out_unlocked; | 377 | goto out_unlocked; |
354 | } | 378 | } |
355 | out: | 379 | out: |
@@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc, | |||
865 | { | 889 | { |
866 | int ret; | 890 | int ret; |
867 | int was_auth = 0; | 891 | int was_auth = 0; |
892 | int had_debugfs_info, init_debugfs = 0; | ||
868 | 893 | ||
869 | mutex_lock(&monc->mutex); | 894 | mutex_lock(&monc->mutex); |
895 | had_debugfs_info = have_debugfs_info(monc); | ||
870 | if (monc->auth->ops) | 896 | if (monc->auth->ops) |
871 | was_auth = monc->auth->ops->is_authenticated(monc->auth); | 897 | was_auth = monc->auth->ops->is_authenticated(monc->auth); |
872 | monc->pending_auth = 0; | 898 | monc->pending_auth = 0; |
@@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc, | |||
889 | __send_subscribe(monc); | 915 | __send_subscribe(monc); |
890 | __resend_generic_request(monc); | 916 | __resend_generic_request(monc); |
891 | } | 917 | } |
918 | |||
919 | if (!had_debugfs_info && have_debugfs_info(monc)) { | ||
920 | pr_info("client%lld fsid %pU\n", | ||
921 | ceph_client_id(monc->client), | ||
922 | &monc->client->fsid); | ||
923 | init_debugfs = 1; | ||
924 | } | ||
892 | mutex_unlock(&monc->mutex); | 925 | mutex_unlock(&monc->mutex); |
926 | |||
927 | if (init_debugfs) { | ||
928 | /* | ||
929 | * do debugfs initialization without mutex to avoid | ||
930 | * creating a locking dependency | ||
931 | */ | ||
932 | ceph_debugfs_client_init(monc->client); | ||
933 | } | ||
893 | } | 934 | } |
894 | 935 | ||
895 | static int __validate_auth(struct ceph_mon_client *monc) | 936 | static int __validate_auth(struct ceph_mon_client *monc) |
diff --git a/net/core/dev.c b/net/core/dev.c index a39354ee1432..17e912f9b711 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1642,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb, | |||
1642 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 1642 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
1643 | } | 1643 | } |
1644 | 1644 | ||
1645 | static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) | ||
1646 | { | ||
1647 | if (ptype->af_packet_priv == NULL) | ||
1648 | return false; | ||
1649 | |||
1650 | if (ptype->id_match) | ||
1651 | return ptype->id_match(ptype, skb->sk); | ||
1652 | else if ((struct sock *)ptype->af_packet_priv == skb->sk) | ||
1653 | return true; | ||
1654 | |||
1655 | return false; | ||
1656 | } | ||
1657 | |||
1645 | /* | 1658 | /* |
1646 | * Support routine. Sends outgoing frames to any network | 1659 | * Support routine. Sends outgoing frames to any network |
1647 | * taps currently in use. | 1660 | * taps currently in use. |
@@ -1659,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1659 | * they originated from - MvS (miquels@drinkel.ow.org) | 1672 | * they originated from - MvS (miquels@drinkel.ow.org) |
1660 | */ | 1673 | */ |
1661 | if ((ptype->dev == dev || !ptype->dev) && | 1674 | if ((ptype->dev == dev || !ptype->dev) && |
1662 | (ptype->af_packet_priv == NULL || | 1675 | (!skb_loop_sk(ptype, skb))) { |
1663 | (struct sock *)ptype->af_packet_priv != skb->sk)) { | ||
1664 | if (pt_prev) { | 1676 | if (pt_prev) { |
1665 | deliver_skb(skb2, pt_prev, skb->dev); | 1677 | deliver_skb(skb2, pt_prev, skb->dev); |
1666 | pt_prev = ptype; | 1678 | pt_prev = ptype; |
@@ -2122,7 +2134,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) | |||
2122 | static netdev_features_t harmonize_features(struct sk_buff *skb, | 2134 | static netdev_features_t harmonize_features(struct sk_buff *skb, |
2123 | __be16 protocol, netdev_features_t features) | 2135 | __be16 protocol, netdev_features_t features) |
2124 | { | 2136 | { |
2125 | if (!can_checksum_protocol(features, protocol)) { | 2137 | if (skb->ip_summed != CHECKSUM_NONE && |
2138 | !can_checksum_protocol(features, protocol)) { | ||
2126 | features &= ~NETIF_F_ALL_CSUM; | 2139 | features &= ~NETIF_F_ALL_CSUM; |
2127 | features &= ~NETIF_F_SG; | 2140 | features &= ~NETIF_F_SG; |
2128 | } else if (illegal_highdma(skb->dev, skb)) { | 2141 | } else if (illegal_highdma(skb->dev, skb)) { |
@@ -2635,15 +2648,16 @@ void __skb_get_rxhash(struct sk_buff *skb) | |||
2635 | if (!skb_flow_dissect(skb, &keys)) | 2648 | if (!skb_flow_dissect(skb, &keys)) |
2636 | return; | 2649 | return; |
2637 | 2650 | ||
2638 | if (keys.ports) { | 2651 | if (keys.ports) |
2639 | if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) | ||
2640 | swap(keys.port16[0], keys.port16[1]); | ||
2641 | skb->l4_rxhash = 1; | 2652 | skb->l4_rxhash = 1; |
2642 | } | ||
2643 | 2653 | ||
2644 | /* get a consistent hash (same value on both flow directions) */ | 2654 | /* get a consistent hash (same value on both flow directions) */ |
2645 | if ((__force u32)keys.dst < (__force u32)keys.src) | 2655 | if (((__force u32)keys.dst < (__force u32)keys.src) || |
2656 | (((__force u32)keys.dst == (__force u32)keys.src) && | ||
2657 | ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) { | ||
2646 | swap(keys.dst, keys.src); | 2658 | swap(keys.dst, keys.src); |
2659 | swap(keys.port16[0], keys.port16[1]); | ||
2660 | } | ||
2647 | 2661 | ||
2648 | hash = jhash_3words((__force u32)keys.dst, | 2662 | hash = jhash_3words((__force u32)keys.dst, |
2649 | (__force u32)keys.src, | 2663 | (__force u32)keys.src, |
@@ -3309,7 +3323,7 @@ ncls: | |||
3309 | 3323 | ||
3310 | if (pt_prev) { | 3324 | if (pt_prev) { |
3311 | if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) | 3325 | if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) |
3312 | ret = -ENOMEM; | 3326 | goto drop; |
3313 | else | 3327 | else |
3314 | ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 3328 | ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
3315 | } else { | 3329 | } else { |
@@ -4498,8 +4512,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags) | |||
4498 | static int __dev_set_promiscuity(struct net_device *dev, int inc) | 4512 | static int __dev_set_promiscuity(struct net_device *dev, int inc) |
4499 | { | 4513 | { |
4500 | unsigned int old_flags = dev->flags; | 4514 | unsigned int old_flags = dev->flags; |
4501 | uid_t uid; | 4515 | kuid_t uid; |
4502 | gid_t gid; | 4516 | kgid_t gid; |
4503 | 4517 | ||
4504 | ASSERT_RTNL(); | 4518 | ASSERT_RTNL(); |
4505 | 4519 | ||
@@ -4530,8 +4544,9 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc) | |||
4530 | "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", | 4544 | "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", |
4531 | dev->name, (dev->flags & IFF_PROMISC), | 4545 | dev->name, (dev->flags & IFF_PROMISC), |
4532 | (old_flags & IFF_PROMISC), | 4546 | (old_flags & IFF_PROMISC), |
4533 | audit_get_loginuid(current), | 4547 | from_kuid(&init_user_ns, audit_get_loginuid(current)), |
4534 | uid, gid, | 4548 | from_kuid(&init_user_ns, uid), |
4549 | from_kgid(&init_user_ns, gid), | ||
4535 | audit_get_sessionid(current)); | 4550 | audit_get_sessionid(current)); |
4536 | } | 4551 | } |
4537 | 4552 | ||
@@ -5732,6 +5747,7 @@ EXPORT_SYMBOL(netdev_refcnt_read); | |||
5732 | 5747 | ||
5733 | /** | 5748 | /** |
5734 | * netdev_wait_allrefs - wait until all references are gone. | 5749 | * netdev_wait_allrefs - wait until all references are gone. |
5750 | * @dev: target net_device | ||
5735 | * | 5751 | * |
5736 | * This is called when unregistering network devices. | 5752 | * This is called when unregistering network devices. |
5737 | * | 5753 | * |
@@ -6409,22 +6425,26 @@ const char *netdev_drivername(const struct net_device *dev) | |||
6409 | return empty; | 6425 | return empty; |
6410 | } | 6426 | } |
6411 | 6427 | ||
6412 | int __netdev_printk(const char *level, const struct net_device *dev, | 6428 | static int __netdev_printk(const char *level, const struct net_device *dev, |
6413 | struct va_format *vaf) | 6429 | struct va_format *vaf) |
6414 | { | 6430 | { |
6415 | int r; | 6431 | int r; |
6416 | 6432 | ||
6417 | if (dev && dev->dev.parent) | 6433 | if (dev && dev->dev.parent) { |
6418 | r = dev_printk(level, dev->dev.parent, "%s: %pV", | 6434 | r = dev_printk_emit(level[1] - '0', |
6419 | netdev_name(dev), vaf); | 6435 | dev->dev.parent, |
6420 | else if (dev) | 6436 | "%s %s %s: %pV", |
6437 | dev_driver_string(dev->dev.parent), | ||
6438 | dev_name(dev->dev.parent), | ||
6439 | netdev_name(dev), vaf); | ||
6440 | } else if (dev) { | ||
6421 | r = printk("%s%s: %pV", level, netdev_name(dev), vaf); | 6441 | r = printk("%s%s: %pV", level, netdev_name(dev), vaf); |
6422 | else | 6442 | } else { |
6423 | r = printk("%s(NULL net_device): %pV", level, vaf); | 6443 | r = printk("%s(NULL net_device): %pV", level, vaf); |
6444 | } | ||
6424 | 6445 | ||
6425 | return r; | 6446 | return r; |
6426 | } | 6447 | } |
6427 | EXPORT_SYMBOL(__netdev_printk); | ||
6428 | 6448 | ||
6429 | int netdev_printk(const char *level, const struct net_device *dev, | 6449 | int netdev_printk(const char *level, const struct net_device *dev, |
6430 | const char *format, ...) | 6450 | const char *format, ...) |
@@ -6439,6 +6459,7 @@ int netdev_printk(const char *level, const struct net_device *dev, | |||
6439 | vaf.va = &args; | 6459 | vaf.va = &args; |
6440 | 6460 | ||
6441 | r = __netdev_printk(level, dev, &vaf); | 6461 | r = __netdev_printk(level, dev, &vaf); |
6462 | |||
6442 | va_end(args); | 6463 | va_end(args); |
6443 | 6464 | ||
6444 | return r; | 6465 | return r; |
@@ -6458,6 +6479,7 @@ int func(const struct net_device *dev, const char *fmt, ...) \ | |||
6458 | vaf.va = &args; \ | 6479 | vaf.va = &args; \ |
6459 | \ | 6480 | \ |
6460 | r = __netdev_printk(level, dev, &vaf); \ | 6481 | r = __netdev_printk(level, dev, &vaf); \ |
6482 | \ | ||
6461 | va_end(args); \ | 6483 | va_end(args); \ |
6462 | \ | 6484 | \ |
6463 | return r; \ | 6485 | return r; \ |
diff --git a/net/core/dst.c b/net/core/dst.c index 56d63612e1e4..b8d7c700541d 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -222,8 +222,8 @@ void __dst_free(struct dst_entry *dst) | |||
222 | if (dst_garbage.timer_inc > DST_GC_INC) { | 222 | if (dst_garbage.timer_inc > DST_GC_INC) { |
223 | dst_garbage.timer_inc = DST_GC_INC; | 223 | dst_garbage.timer_inc = DST_GC_INC; |
224 | dst_garbage.timer_expires = DST_GC_MIN; | 224 | dst_garbage.timer_expires = DST_GC_MIN; |
225 | cancel_delayed_work(&dst_gc_work); | 225 | mod_delayed_work(system_wq, &dst_gc_work, |
226 | schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); | 226 | dst_garbage.timer_expires); |
227 | } | 227 | } |
228 | spin_unlock_bh(&dst_garbage.lock); | 228 | spin_unlock_bh(&dst_garbage.lock); |
229 | } | 229 | } |
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index c3519c6d1b16..8e397a69005a 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent) | |||
120 | delay = 0; | 120 | delay = 0; |
121 | 121 | ||
122 | /* | 122 | /* |
123 | * This is true if we've scheduled it immeditately or if we don't | 123 | * If urgent, schedule immediate execution; otherwise, don't |
124 | * need an immediate execution and it's already pending. | 124 | * override the existing timer. |
125 | */ | 125 | */ |
126 | if (schedule_delayed_work(&linkwatch_work, delay) == !delay) | 126 | if (test_bit(LW_URGENT, &linkwatch_flags)) |
127 | return; | 127 | mod_delayed_work(system_wq, &linkwatch_work, 0); |
128 | 128 | else | |
129 | /* Don't bother if there is nothing urgent. */ | 129 | schedule_delayed_work(&linkwatch_work, delay); |
130 | if (!test_bit(LW_URGENT, &linkwatch_flags)) | ||
131 | return; | ||
132 | |||
133 | /* It's already running which is good enough. */ | ||
134 | if (!__cancel_delayed_work(&linkwatch_work)) | ||
135 | return; | ||
136 | |||
137 | /* Otherwise we reschedule it again for immediate execution. */ | ||
138 | schedule_delayed_work(&linkwatch_work, 0); | ||
139 | } | 130 | } |
140 | 131 | ||
141 | 132 | ||
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 117afaf51268..112c6e2266e9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1545,7 +1545,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl) | |||
1545 | panic("cannot allocate neighbour cache hashes"); | 1545 | panic("cannot allocate neighbour cache hashes"); |
1546 | 1546 | ||
1547 | rwlock_init(&tbl->lock); | 1547 | rwlock_init(&tbl->lock); |
1548 | INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); | 1548 | INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); |
1549 | schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); | 1549 | schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); |
1550 | setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); | 1550 | setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); |
1551 | skb_queue_head_init_class(&tbl->proxy_queue, | 1551 | skb_queue_head_init_class(&tbl->proxy_queue, |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index b4c90e42b443..e4ba3e70c174 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/export.h> | 28 | #include <linux/export.h> |
29 | #include <linux/if_vlan.h> | ||
29 | #include <net/tcp.h> | 30 | #include <net/tcp.h> |
30 | #include <net/udp.h> | 31 | #include <net/udp.h> |
31 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
@@ -54,7 +55,7 @@ static atomic_t trapped; | |||
54 | MAX_UDP_CHUNK) | 55 | MAX_UDP_CHUNK) |
55 | 56 | ||
56 | static void zap_completion_queue(void); | 57 | static void zap_completion_queue(void); |
57 | static void arp_reply(struct sk_buff *skb); | 58 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); |
58 | 59 | ||
59 | static unsigned int carrier_timeout = 4; | 60 | static unsigned int carrier_timeout = 4; |
60 | module_param(carrier_timeout, uint, 0644); | 61 | module_param(carrier_timeout, uint, 0644); |
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev) | |||
170 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | 171 | list_for_each_entry(napi, &dev->napi_list, dev_list) { |
171 | if (napi->poll_owner != smp_processor_id() && | 172 | if (napi->poll_owner != smp_processor_id() && |
172 | spin_trylock(&napi->poll_lock)) { | 173 | spin_trylock(&napi->poll_lock)) { |
173 | budget = poll_one_napi(dev->npinfo, napi, budget); | 174 | budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), |
175 | napi, budget); | ||
174 | spin_unlock(&napi->poll_lock); | 176 | spin_unlock(&napi->poll_lock); |
175 | 177 | ||
176 | if (!budget) | 178 | if (!budget) |
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi) | |||
185 | struct sk_buff *skb; | 187 | struct sk_buff *skb; |
186 | 188 | ||
187 | while ((skb = skb_dequeue(&npi->arp_tx))) | 189 | while ((skb = skb_dequeue(&npi->arp_tx))) |
188 | arp_reply(skb); | 190 | netpoll_arp_reply(skb, npi); |
189 | } | 191 | } |
190 | } | 192 | } |
191 | 193 | ||
192 | static void netpoll_poll_dev(struct net_device *dev) | 194 | static void netpoll_poll_dev(struct net_device *dev) |
193 | { | 195 | { |
194 | const struct net_device_ops *ops; | 196 | const struct net_device_ops *ops; |
197 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); | ||
195 | 198 | ||
196 | if (!dev || !netif_running(dev)) | 199 | if (!dev || !netif_running(dev)) |
197 | return; | 200 | return; |
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
206 | poll_napi(dev); | 209 | poll_napi(dev); |
207 | 210 | ||
208 | if (dev->flags & IFF_SLAVE) { | 211 | if (dev->flags & IFF_SLAVE) { |
209 | if (dev->npinfo) { | 212 | if (ni) { |
210 | struct net_device *bond_dev = dev->master; | 213 | struct net_device *bond_dev = dev->master; |
211 | struct sk_buff *skb; | 214 | struct sk_buff *skb; |
212 | while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) { | 215 | struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); |
216 | while ((skb = skb_dequeue(&ni->arp_tx))) { | ||
213 | skb->dev = bond_dev; | 217 | skb->dev = bond_dev; |
214 | skb_queue_tail(&bond_dev->npinfo->arp_tx, skb); | 218 | skb_queue_tail(&bond_ni->arp_tx, skb); |
215 | } | 219 | } |
216 | } | 220 | } |
217 | } | 221 | } |
218 | 222 | ||
219 | service_arp_queue(dev->npinfo); | 223 | service_arp_queue(ni); |
220 | 224 | ||
221 | zap_completion_queue(); | 225 | zap_completion_queue(); |
222 | } | 226 | } |
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev) | |||
302 | return 0; | 306 | return 0; |
303 | } | 307 | } |
304 | 308 | ||
309 | /* call with IRQ disabled */ | ||
305 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | 310 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, |
306 | struct net_device *dev) | 311 | struct net_device *dev) |
307 | { | 312 | { |
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
309 | unsigned long tries; | 314 | unsigned long tries; |
310 | const struct net_device_ops *ops = dev->netdev_ops; | 315 | const struct net_device_ops *ops = dev->netdev_ops; |
311 | /* It is up to the caller to keep npinfo alive. */ | 316 | /* It is up to the caller to keep npinfo alive. */ |
312 | struct netpoll_info *npinfo = np->dev->npinfo; | 317 | struct netpoll_info *npinfo; |
318 | |||
319 | WARN_ON_ONCE(!irqs_disabled()); | ||
313 | 320 | ||
321 | npinfo = rcu_dereference_bh(np->dev->npinfo); | ||
314 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { | 322 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { |
315 | __kfree_skb(skb); | 323 | __kfree_skb(skb); |
316 | return; | 324 | return; |
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
319 | /* don't get messages out of order, and no recursion */ | 327 | /* don't get messages out of order, and no recursion */ |
320 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 328 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
321 | struct netdev_queue *txq; | 329 | struct netdev_queue *txq; |
322 | unsigned long flags; | ||
323 | 330 | ||
324 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 331 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
325 | 332 | ||
326 | local_irq_save(flags); | ||
327 | /* try until next clock tick */ | 333 | /* try until next clock tick */ |
328 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 334 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
329 | tries > 0; --tries) { | 335 | tries > 0; --tries) { |
330 | if (__netif_tx_trylock(txq)) { | 336 | if (__netif_tx_trylock(txq)) { |
331 | if (!netif_xmit_stopped(txq)) { | 337 | if (!netif_xmit_stopped(txq)) { |
338 | if (vlan_tx_tag_present(skb) && | ||
339 | !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { | ||
340 | skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); | ||
341 | if (unlikely(!skb)) | ||
342 | break; | ||
343 | skb->vlan_tci = 0; | ||
344 | } | ||
345 | |||
332 | status = ops->ndo_start_xmit(skb, dev); | 346 | status = ops->ndo_start_xmit(skb, dev); |
333 | if (status == NETDEV_TX_OK) | 347 | if (status == NETDEV_TX_OK) |
334 | txq_trans_update(txq); | 348 | txq_trans_update(txq); |
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
347 | } | 361 | } |
348 | 362 | ||
349 | WARN_ONCE(!irqs_disabled(), | 363 | WARN_ONCE(!irqs_disabled(), |
350 | "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n", | 364 | "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", |
351 | dev->name, ops->ndo_start_xmit); | 365 | dev->name, ops->ndo_start_xmit); |
352 | 366 | ||
353 | local_irq_restore(flags); | ||
354 | } | 367 | } |
355 | 368 | ||
356 | if (status != NETDEV_TX_OK) { | 369 | if (status != NETDEV_TX_OK) { |
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | |||
423 | } | 436 | } |
424 | EXPORT_SYMBOL(netpoll_send_udp); | 437 | EXPORT_SYMBOL(netpoll_send_udp); |
425 | 438 | ||
426 | static void arp_reply(struct sk_buff *skb) | 439 | static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) |
427 | { | 440 | { |
428 | struct netpoll_info *npinfo = skb->dev->npinfo; | ||
429 | struct arphdr *arp; | 441 | struct arphdr *arp; |
430 | unsigned char *arp_ptr; | 442 | unsigned char *arp_ptr; |
431 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; | 443 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; |
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb) | |||
543 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 555 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
544 | } | 556 | } |
545 | 557 | ||
546 | int __netpoll_rx(struct sk_buff *skb) | 558 | int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) |
547 | { | 559 | { |
548 | int proto, len, ulen; | 560 | int proto, len, ulen; |
549 | int hits = 0; | 561 | int hits = 0; |
550 | const struct iphdr *iph; | 562 | const struct iphdr *iph; |
551 | struct udphdr *uh; | 563 | struct udphdr *uh; |
552 | struct netpoll_info *npinfo = skb->dev->npinfo; | ||
553 | struct netpoll *np, *tmp; | 564 | struct netpoll *np, *tmp; |
554 | 565 | ||
555 | if (list_empty(&npinfo->rx_np)) | 566 | if (list_empty(&npinfo->rx_np)) |
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb) | |||
565 | return 1; | 576 | return 1; |
566 | } | 577 | } |
567 | 578 | ||
579 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { | ||
580 | skb = vlan_untag(skb); | ||
581 | if (unlikely(!skb)) | ||
582 | goto out; | ||
583 | } | ||
584 | |||
568 | proto = ntohs(eth_hdr(skb)->h_proto); | 585 | proto = ntohs(eth_hdr(skb)->h_proto); |
569 | if (proto != ETH_P_IP) | 586 | if (proto != ETH_P_IP) |
570 | goto out; | 587 | goto out; |
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
715 | } | 732 | } |
716 | EXPORT_SYMBOL(netpoll_parse_options); | 733 | EXPORT_SYMBOL(netpoll_parse_options); |
717 | 734 | ||
718 | int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | 735 | int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) |
719 | { | 736 | { |
720 | struct netpoll_info *npinfo; | 737 | struct netpoll_info *npinfo; |
721 | const struct net_device_ops *ops; | 738 | const struct net_device_ops *ops; |
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
734 | } | 751 | } |
735 | 752 | ||
736 | if (!ndev->npinfo) { | 753 | if (!ndev->npinfo) { |
737 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 754 | npinfo = kmalloc(sizeof(*npinfo), gfp); |
738 | if (!npinfo) { | 755 | if (!npinfo) { |
739 | err = -ENOMEM; | 756 | err = -ENOMEM; |
740 | goto out; | 757 | goto out; |
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
752 | 769 | ||
753 | ops = np->dev->netdev_ops; | 770 | ops = np->dev->netdev_ops; |
754 | if (ops->ndo_netpoll_setup) { | 771 | if (ops->ndo_netpoll_setup) { |
755 | err = ops->ndo_netpoll_setup(ndev, npinfo); | 772 | err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); |
756 | if (err) | 773 | if (err) |
757 | goto free_npinfo; | 774 | goto free_npinfo; |
758 | } | 775 | } |
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np) | |||
857 | refill_skbs(); | 874 | refill_skbs(); |
858 | 875 | ||
859 | rtnl_lock(); | 876 | rtnl_lock(); |
860 | err = __netpoll_setup(np, ndev); | 877 | err = __netpoll_setup(np, ndev, GFP_KERNEL); |
861 | rtnl_unlock(); | 878 | rtnl_unlock(); |
862 | 879 | ||
863 | if (err) | 880 | if (err) |
@@ -878,6 +895,24 @@ static int __init netpoll_init(void) | |||
878 | } | 895 | } |
879 | core_initcall(netpoll_init); | 896 | core_initcall(netpoll_init); |
880 | 897 | ||
898 | static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) | ||
899 | { | ||
900 | struct netpoll_info *npinfo = | ||
901 | container_of(rcu_head, struct netpoll_info, rcu); | ||
902 | |||
903 | skb_queue_purge(&npinfo->arp_tx); | ||
904 | skb_queue_purge(&npinfo->txq); | ||
905 | |||
906 | /* we can't call cancel_delayed_work_sync here, as we are in softirq */ | ||
907 | cancel_delayed_work(&npinfo->tx_work); | ||
908 | |||
909 | /* clean after last, unfinished work */ | ||
910 | __skb_queue_purge(&npinfo->txq); | ||
911 | /* now cancel it again */ | ||
912 | cancel_delayed_work(&npinfo->tx_work); | ||
913 | kfree(npinfo); | ||
914 | } | ||
915 | |||
881 | void __netpoll_cleanup(struct netpoll *np) | 916 | void __netpoll_cleanup(struct netpoll *np) |
882 | { | 917 | { |
883 | struct netpoll_info *npinfo; | 918 | struct netpoll_info *npinfo; |
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np) | |||
903 | ops->ndo_netpoll_cleanup(np->dev); | 938 | ops->ndo_netpoll_cleanup(np->dev); |
904 | 939 | ||
905 | RCU_INIT_POINTER(np->dev->npinfo, NULL); | 940 | RCU_INIT_POINTER(np->dev->npinfo, NULL); |
941 | call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); | ||
942 | } | ||
943 | } | ||
944 | EXPORT_SYMBOL_GPL(__netpoll_cleanup); | ||
906 | 945 | ||
907 | /* avoid racing with NAPI reading npinfo */ | 946 | static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) |
908 | synchronize_rcu_bh(); | 947 | { |
948 | struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); | ||
909 | 949 | ||
910 | skb_queue_purge(&npinfo->arp_tx); | 950 | __netpoll_cleanup(np); |
911 | skb_queue_purge(&npinfo->txq); | 951 | kfree(np); |
912 | cancel_delayed_work_sync(&npinfo->tx_work); | 952 | } |
913 | 953 | ||
914 | /* clean after last, unfinished work */ | 954 | void __netpoll_free_rcu(struct netpoll *np) |
915 | __skb_queue_purge(&npinfo->txq); | 955 | { |
916 | kfree(npinfo); | 956 | call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); |
917 | } | ||
918 | } | 957 | } |
919 | EXPORT_SYMBOL_GPL(__netpoll_cleanup); | 958 | EXPORT_SYMBOL_GPL(__netpoll_free_rcu); |
920 | 959 | ||
921 | void netpoll_cleanup(struct netpoll *np) | 960 | void netpoll_cleanup(struct netpoll *np) |
922 | { | 961 | { |
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index ed0c0431fcd8..39e7e4d3cdb4 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c | |||
@@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev) | |||
101 | u32 max_len; | 101 | u32 max_len; |
102 | struct netprio_map *map; | 102 | struct netprio_map *map; |
103 | 103 | ||
104 | rtnl_lock(); | ||
105 | max_len = atomic_read(&max_prioidx) + 1; | 104 | max_len = atomic_read(&max_prioidx) + 1; |
106 | map = rtnl_dereference(dev->priomap); | 105 | map = rtnl_dereference(dev->priomap); |
107 | if (!map || map->priomap_len < max_len) | 106 | if (!map || map->priomap_len < max_len) |
108 | ret = extend_netdev_table(dev, max_len); | 107 | ret = extend_netdev_table(dev, max_len); |
109 | rtnl_unlock(); | ||
110 | 108 | ||
111 | return ret; | 109 | return ret; |
112 | } | 110 | } |
@@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft, | |||
256 | if (!dev) | 254 | if (!dev) |
257 | goto out_free_devname; | 255 | goto out_free_devname; |
258 | 256 | ||
257 | rtnl_lock(); | ||
259 | ret = write_update_netdev_table(dev); | 258 | ret = write_update_netdev_table(dev); |
260 | if (ret < 0) | 259 | if (ret < 0) |
261 | goto out_put_dev; | 260 | goto out_put_dev; |
262 | 261 | ||
263 | rcu_read_lock(); | 262 | map = rtnl_dereference(dev->priomap); |
264 | map = rcu_dereference(dev->priomap); | ||
265 | if (map) | 263 | if (map) |
266 | map->priomap[prioidx] = priority; | 264 | map->priomap[prioidx] = priority; |
267 | rcu_read_unlock(); | ||
268 | 265 | ||
269 | out_put_dev: | 266 | out_put_dev: |
267 | rtnl_unlock(); | ||
270 | dev_put(dev); | 268 | dev_put(dev); |
271 | 269 | ||
272 | out_free_devname: | 270 | out_free_devname: |
@@ -277,12 +275,6 @@ out_free_devname: | |||
277 | void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) | 275 | void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) |
278 | { | 276 | { |
279 | struct task_struct *p; | 277 | struct task_struct *p; |
280 | char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL); | ||
281 | |||
282 | if (!tmp) { | ||
283 | pr_warn("Unable to attach cgrp due to alloc failure!\n"); | ||
284 | return; | ||
285 | } | ||
286 | 278 | ||
287 | cgroup_taskset_for_each(p, cgrp, tset) { | 279 | cgroup_taskset_for_each(p, cgrp, tset) { |
288 | unsigned int fd; | 280 | unsigned int fd; |
@@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) | |||
296 | continue; | 288 | continue; |
297 | } | 289 | } |
298 | 290 | ||
299 | rcu_read_lock(); | 291 | spin_lock(&files->file_lock); |
300 | fdt = files_fdtable(files); | 292 | fdt = files_fdtable(files); |
301 | for (fd = 0; fd < fdt->max_fds; fd++) { | 293 | for (fd = 0; fd < fdt->max_fds; fd++) { |
302 | char *path; | ||
303 | struct file *file; | 294 | struct file *file; |
304 | struct socket *sock; | 295 | struct socket *sock; |
305 | unsigned long s; | 296 | int err; |
306 | int rv, err = 0; | ||
307 | 297 | ||
308 | file = fcheck_files(files, fd); | 298 | file = fcheck_files(files, fd); |
309 | if (!file) | 299 | if (!file) |
310 | continue; | 300 | continue; |
311 | 301 | ||
312 | path = d_path(&file->f_path, tmp, PAGE_SIZE); | ||
313 | rv = sscanf(path, "socket:[%lu]", &s); | ||
314 | if (rv <= 0) | ||
315 | continue; | ||
316 | |||
317 | sock = sock_from_file(file, &err); | 302 | sock = sock_from_file(file, &err); |
318 | if (!err) | 303 | if (sock) |
319 | sock_update_netprioidx(sock->sk, p); | 304 | sock_update_netprioidx(sock->sk, p); |
320 | } | 305 | } |
321 | rcu_read_unlock(); | 306 | spin_unlock(&files->file_lock); |
322 | task_unlock(p); | 307 | task_unlock(p); |
323 | } | 308 | } |
324 | kfree(tmp); | ||
325 | } | 309 | } |
326 | 310 | ||
327 | static struct cftype ss_files[] = { | 311 | static struct cftype ss_files[] = { |
@@ -342,11 +326,19 @@ struct cgroup_subsys net_prio_subsys = { | |||
342 | .create = cgrp_create, | 326 | .create = cgrp_create, |
343 | .destroy = cgrp_destroy, | 327 | .destroy = cgrp_destroy, |
344 | .attach = net_prio_attach, | 328 | .attach = net_prio_attach, |
345 | #ifdef CONFIG_NETPRIO_CGROUP | ||
346 | .subsys_id = net_prio_subsys_id, | 329 | .subsys_id = net_prio_subsys_id, |
347 | #endif | ||
348 | .base_cftypes = ss_files, | 330 | .base_cftypes = ss_files, |
349 | .module = THIS_MODULE | 331 | .module = THIS_MODULE, |
332 | |||
333 | /* | ||
334 | * net_prio has artificial limit on the number of cgroups and | ||
335 | * disallows nesting making it impossible to co-mount it with other | ||
336 | * hierarchical subsystems. Remove the artificially low PRIOIDX_SZ | ||
337 | * limit and properly nest configuration such that children follow | ||
338 | * their parents' configurations by default and are allowed to | ||
339 | * override and remove the following. | ||
340 | */ | ||
341 | .broken_hierarchy = true, | ||
350 | }; | 342 | }; |
351 | 343 | ||
352 | static int netprio_device_event(struct notifier_block *unused, | 344 | static int netprio_device_event(struct notifier_block *unused, |
@@ -382,10 +374,6 @@ static int __init init_cgroup_netprio(void) | |||
382 | ret = cgroup_load_subsys(&net_prio_subsys); | 374 | ret = cgroup_load_subsys(&net_prio_subsys); |
383 | if (ret) | 375 | if (ret) |
384 | goto out; | 376 | goto out; |
385 | #ifndef CONFIG_NETPRIO_CGROUP | ||
386 | smp_wmb(); | ||
387 | net_prio_subsys_id = net_prio_subsys.subsys_id; | ||
388 | #endif | ||
389 | 377 | ||
390 | register_netdevice_notifier(&netprio_device_notifier); | 378 | register_netdevice_notifier(&netprio_device_notifier); |
391 | 379 | ||
@@ -402,11 +390,6 @@ static void __exit exit_cgroup_netprio(void) | |||
402 | 390 | ||
403 | cgroup_unload_subsys(&net_prio_subsys); | 391 | cgroup_unload_subsys(&net_prio_subsys); |
404 | 392 | ||
405 | #ifndef CONFIG_NETPRIO_CGROUP | ||
406 | net_prio_subsys_id = -1; | ||
407 | synchronize_rcu(); | ||
408 | #endif | ||
409 | |||
410 | rtnl_lock(); | 393 | rtnl_lock(); |
411 | for_each_netdev(&init_net, dev) { | 394 | for_each_netdev(&init_net, dev) { |
412 | old = rtnl_dereference(dev->priomap); | 395 | old = rtnl_dereference(dev->priomap); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index cce9e53528b1..148e73d2c451 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2721 | /* Eth + IPh + UDPh + mpls */ | 2721 | /* Eth + IPh + UDPh + mpls */ |
2722 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - | 2722 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - |
2723 | pkt_dev->pkt_overhead; | 2723 | pkt_dev->pkt_overhead; |
2724 | if (datalen < sizeof(struct pktgen_hdr)) | 2724 | if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) |
2725 | datalen = sizeof(struct pktgen_hdr); | 2725 | datalen = sizeof(struct pktgen_hdr); |
2726 | 2726 | ||
2727 | udph->source = htons(pkt_dev->cur_udp_src); | 2727 | udph->source = htons(pkt_dev->cur_udp_src); |
diff --git a/net/core/scm.c b/net/core/scm.c index 8f6ccfd68ef4..6ab491d6c26f 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -45,12 +45,17 @@ | |||
45 | static __inline__ int scm_check_creds(struct ucred *creds) | 45 | static __inline__ int scm_check_creds(struct ucred *creds) |
46 | { | 46 | { |
47 | const struct cred *cred = current_cred(); | 47 | const struct cred *cred = current_cred(); |
48 | kuid_t uid = make_kuid(cred->user_ns, creds->uid); | ||
49 | kgid_t gid = make_kgid(cred->user_ns, creds->gid); | ||
50 | |||
51 | if (!uid_valid(uid) || !gid_valid(gid)) | ||
52 | return -EINVAL; | ||
48 | 53 | ||
49 | if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && | 54 | if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && |
50 | ((creds->uid == cred->uid || creds->uid == cred->euid || | 55 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || |
51 | creds->uid == cred->suid) || capable(CAP_SETUID)) && | 56 | uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) && |
52 | ((creds->gid == cred->gid || creds->gid == cred->egid || | 57 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || |
53 | creds->gid == cred->sgid) || capable(CAP_SETGID))) { | 58 | gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) { |
54 | return 0; | 59 | return 0; |
55 | } | 60 | } |
56 | return -EPERM; | 61 | return -EPERM; |
@@ -149,6 +154,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) | |||
149 | goto error; | 154 | goto error; |
150 | break; | 155 | break; |
151 | case SCM_CREDENTIALS: | 156 | case SCM_CREDENTIALS: |
157 | { | ||
158 | kuid_t uid; | ||
159 | kgid_t gid; | ||
152 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) | 160 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) |
153 | goto error; | 161 | goto error; |
154 | memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); | 162 | memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); |
@@ -166,22 +174,29 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) | |||
166 | p->pid = pid; | 174 | p->pid = pid; |
167 | } | 175 | } |
168 | 176 | ||
177 | err = -EINVAL; | ||
178 | uid = make_kuid(current_user_ns(), p->creds.uid); | ||
179 | gid = make_kgid(current_user_ns(), p->creds.gid); | ||
180 | if (!uid_valid(uid) || !gid_valid(gid)) | ||
181 | goto error; | ||
182 | |||
169 | if (!p->cred || | 183 | if (!p->cred || |
170 | (p->cred->euid != p->creds.uid) || | 184 | !uid_eq(p->cred->euid, uid) || |
171 | (p->cred->egid != p->creds.gid)) { | 185 | !gid_eq(p->cred->egid, gid)) { |
172 | struct cred *cred; | 186 | struct cred *cred; |
173 | err = -ENOMEM; | 187 | err = -ENOMEM; |
174 | cred = prepare_creds(); | 188 | cred = prepare_creds(); |
175 | if (!cred) | 189 | if (!cred) |
176 | goto error; | 190 | goto error; |
177 | 191 | ||
178 | cred->uid = cred->euid = p->creds.uid; | 192 | cred->uid = cred->euid = uid; |
179 | cred->gid = cred->egid = p->creds.gid; | 193 | cred->gid = cred->egid = gid; |
180 | if (p->cred) | 194 | if (p->cred) |
181 | put_cred(p->cred); | 195 | put_cred(p->cred); |
182 | p->cred = cred; | 196 | p->cred = cred; |
183 | } | 197 | } |
184 | break; | 198 | break; |
199 | } | ||
185 | default: | 200 | default: |
186 | goto error; | 201 | goto error; |
187 | } | 202 | } |
@@ -265,6 +280,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) | |||
265 | for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; | 280 | for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; |
266 | i++, cmfptr++) | 281 | i++, cmfptr++) |
267 | { | 282 | { |
283 | struct socket *sock; | ||
268 | int new_fd; | 284 | int new_fd; |
269 | err = security_file_receive(fp[i]); | 285 | err = security_file_receive(fp[i]); |
270 | if (err) | 286 | if (err) |
@@ -281,6 +297,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) | |||
281 | } | 297 | } |
282 | /* Bump the usage count and install the file. */ | 298 | /* Bump the usage count and install the file. */ |
283 | get_file(fp[i]); | 299 | get_file(fp[i]); |
300 | sock = sock_from_file(fp[i], &err); | ||
301 | if (sock) | ||
302 | sock_update_netprioidx(sock->sk, current); | ||
284 | fd_install(new_fd, fp[i]); | 303 | fd_install(new_fd, fp[i]); |
285 | } | 304 | } |
286 | 305 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index fe00d1208167..e33ebae519c8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3502,7 +3502,9 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | |||
3502 | if (!skb_cloned(from)) | 3502 | if (!skb_cloned(from)) |
3503 | skb_shinfo(from)->nr_frags = 0; | 3503 | skb_shinfo(from)->nr_frags = 0; |
3504 | 3504 | ||
3505 | /* if the skb is cloned this does nothing since we set nr_frags to 0 */ | 3505 | /* if the skb is not cloned this does nothing |
3506 | * since we set nr_frags to 0. | ||
3507 | */ | ||
3506 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) | 3508 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) |
3507 | skb_frag_ref(from, i); | 3509 | skb_frag_ref(from, i); |
3508 | 3510 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 8f67ced8d6a8..12cddd037bce 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -326,17 +326,6 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
326 | } | 326 | } |
327 | EXPORT_SYMBOL(__sk_backlog_rcv); | 327 | EXPORT_SYMBOL(__sk_backlog_rcv); |
328 | 328 | ||
329 | #if defined(CONFIG_CGROUPS) | ||
330 | #if !defined(CONFIG_NET_CLS_CGROUP) | ||
331 | int net_cls_subsys_id = -1; | ||
332 | EXPORT_SYMBOL_GPL(net_cls_subsys_id); | ||
333 | #endif | ||
334 | #if !defined(CONFIG_NETPRIO_CGROUP) | ||
335 | int net_prio_subsys_id = -1; | ||
336 | EXPORT_SYMBOL_GPL(net_prio_subsys_id); | ||
337 | #endif | ||
338 | #endif | ||
339 | |||
340 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) | 329 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) |
341 | { | 330 | { |
342 | struct timeval tv; | 331 | struct timeval tv; |
@@ -691,7 +680,8 @@ set_rcvbuf: | |||
691 | 680 | ||
692 | case SO_KEEPALIVE: | 681 | case SO_KEEPALIVE: |
693 | #ifdef CONFIG_INET | 682 | #ifdef CONFIG_INET |
694 | if (sk->sk_protocol == IPPROTO_TCP) | 683 | if (sk->sk_protocol == IPPROTO_TCP && |
684 | sk->sk_type == SOCK_STREAM) | ||
695 | tcp_set_keepalive(sk, valbool); | 685 | tcp_set_keepalive(sk, valbool); |
696 | #endif | 686 | #endif |
697 | sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); | 687 | sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); |
@@ -868,8 +858,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred, | |||
868 | if (cred) { | 858 | if (cred) { |
869 | struct user_namespace *current_ns = current_user_ns(); | 859 | struct user_namespace *current_ns = current_user_ns(); |
870 | 860 | ||
871 | ucred->uid = from_kuid(current_ns, cred->euid); | 861 | ucred->uid = from_kuid_munged(current_ns, cred->euid); |
872 | ucred->gid = from_kgid(current_ns, cred->egid); | 862 | ucred->gid = from_kgid_munged(current_ns, cred->egid); |
873 | } | 863 | } |
874 | } | 864 | } |
875 | EXPORT_SYMBOL_GPL(cred_to_ucred); | 865 | EXPORT_SYMBOL_GPL(cred_to_ucred); |
@@ -1223,6 +1213,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) | |||
1223 | } | 1213 | } |
1224 | 1214 | ||
1225 | #ifdef CONFIG_CGROUPS | 1215 | #ifdef CONFIG_CGROUPS |
1216 | #if IS_ENABLED(CONFIG_NET_CLS_CGROUP) | ||
1226 | void sock_update_classid(struct sock *sk) | 1217 | void sock_update_classid(struct sock *sk) |
1227 | { | 1218 | { |
1228 | u32 classid; | 1219 | u32 classid; |
@@ -1234,7 +1225,9 @@ void sock_update_classid(struct sock *sk) | |||
1234 | sk->sk_classid = classid; | 1225 | sk->sk_classid = classid; |
1235 | } | 1226 | } |
1236 | EXPORT_SYMBOL(sock_update_classid); | 1227 | EXPORT_SYMBOL(sock_update_classid); |
1228 | #endif | ||
1237 | 1229 | ||
1230 | #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) | ||
1238 | void sock_update_netprioidx(struct sock *sk, struct task_struct *task) | 1231 | void sock_update_netprioidx(struct sock *sk, struct task_struct *task) |
1239 | { | 1232 | { |
1240 | if (in_interrupt()) | 1233 | if (in_interrupt()) |
@@ -1244,6 +1237,7 @@ void sock_update_netprioidx(struct sock *sk, struct task_struct *task) | |||
1244 | } | 1237 | } |
1245 | EXPORT_SYMBOL_GPL(sock_update_netprioidx); | 1238 | EXPORT_SYMBOL_GPL(sock_update_netprioidx); |
1246 | #endif | 1239 | #endif |
1240 | #endif | ||
1247 | 1241 | ||
1248 | /** | 1242 | /** |
1249 | * sk_alloc - All socket objects are allocated here | 1243 | * sk_alloc - All socket objects are allocated here |
@@ -1523,16 +1517,23 @@ EXPORT_SYMBOL(sock_rfree); | |||
1523 | 1517 | ||
1524 | void sock_edemux(struct sk_buff *skb) | 1518 | void sock_edemux(struct sk_buff *skb) |
1525 | { | 1519 | { |
1526 | sock_put(skb->sk); | 1520 | struct sock *sk = skb->sk; |
1521 | |||
1522 | #ifdef CONFIG_INET | ||
1523 | if (sk->sk_state == TCP_TIME_WAIT) | ||
1524 | inet_twsk_put(inet_twsk(sk)); | ||
1525 | else | ||
1526 | #endif | ||
1527 | sock_put(sk); | ||
1527 | } | 1528 | } |
1528 | EXPORT_SYMBOL(sock_edemux); | 1529 | EXPORT_SYMBOL(sock_edemux); |
1529 | 1530 | ||
1530 | int sock_i_uid(struct sock *sk) | 1531 | kuid_t sock_i_uid(struct sock *sk) |
1531 | { | 1532 | { |
1532 | int uid; | 1533 | kuid_t uid; |
1533 | 1534 | ||
1534 | read_lock_bh(&sk->sk_callback_lock); | 1535 | read_lock_bh(&sk->sk_callback_lock); |
1535 | uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; | 1536 | uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; |
1536 | read_unlock_bh(&sk->sk_callback_lock); | 1537 | read_unlock_bh(&sk->sk_callback_lock); |
1537 | return uid; | 1538 | return uid; |
1538 | } | 1539 | } |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 75c3582a7678..fb85d371a8de 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, | |||
246 | u32 __user *optval, int __user *optlen) | 246 | u32 __user *optval, int __user *optlen) |
247 | { | 247 | { |
248 | int rc = -ENOPROTOOPT; | 248 | int rc = -ENOPROTOOPT; |
249 | if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) | 249 | if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) |
250 | rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, | 250 | rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, |
251 | optval, optlen); | 251 | optval, optlen); |
252 | return rc; | 252 | return rc; |
@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, | |||
257 | u32 __user *optval, int __user *optlen) | 257 | u32 __user *optval, int __user *optlen) |
258 | { | 258 | { |
259 | int rc = -ENOPROTOOPT; | 259 | int rc = -ENOPROTOOPT; |
260 | if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) | 260 | if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) |
261 | rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, | 261 | rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, |
262 | optval, optlen); | 262 | optval, optlen); |
263 | return rc; | 263 | return rc; |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index d65e98798eca..119c04317d48 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, | |||
535 | case DCCP_SOCKOPT_CCID_TX_INFO: | 535 | case DCCP_SOCKOPT_CCID_TX_INFO: |
536 | if (len < sizeof(tfrc)) | 536 | if (len < sizeof(tfrc)) |
537 | return -EINVAL; | 537 | return -EINVAL; |
538 | memset(&tfrc, 0, sizeof(tfrc)); | ||
538 | tfrc.tfrctx_x = hc->tx_x; | 539 | tfrc.tfrctx_x = hc->tx_x; |
539 | tfrc.tfrctx_x_recv = hc->tx_x_recv; | 540 | tfrc.tfrctx_x_recv = hc->tx_x_recv; |
540 | tfrc.tfrctx_x_calc = hc->tx_x_calc; | 541 | tfrc.tfrctx_x_calc = hc->tx_x_calc; |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index d9507dd05818..9807945a56d9 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
@@ -259,7 +259,8 @@ static int __init init_dns_resolver(void) | |||
259 | if (!cred) | 259 | if (!cred) |
260 | return -ENOMEM; | 260 | return -ENOMEM; |
261 | 261 | ||
262 | keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, | 262 | keyring = key_alloc(&key_type_keyring, ".dns_resolver", |
263 | GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, | ||
263 | (KEY_POS_ALL & ~KEY_POS_SETATTR) | | 264 | (KEY_POS_ALL & ~KEY_POS_SETATTR) | |
264 | KEY_USR_VIEW | KEY_USR_READ, | 265 | KEY_USR_VIEW | KEY_USR_READ, |
265 | KEY_ALLOC_NOT_IN_QUOTA); | 266 | KEY_ALLOC_NOT_IN_QUOTA); |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 88e7c2f3fa0d..45295ca09571 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -370,7 +370,7 @@ static int dsa_remove(struct platform_device *pdev) | |||
370 | if (dst->link_poll_needed) | 370 | if (dst->link_poll_needed) |
371 | del_timer_sync(&dst->link_poll_timer); | 371 | del_timer_sync(&dst->link_poll_timer); |
372 | 372 | ||
373 | flush_work_sync(&dst->link_poll_work); | 373 | flush_work(&dst->link_poll_work); |
374 | 374 | ||
375 | for (i = 0; i < dst->pd->nr_chips; i++) { | 375 | for (i = 0; i < dst->pd->nr_chips; i++) { |
376 | struct dsa_switch *ds = dst->ds[i]; | 376 | struct dsa_switch *ds = dst->ds[i]; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 77e87aff419a..47800459e4cb 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1225,7 +1225,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, | |||
1225 | switch (event) { | 1225 | switch (event) { |
1226 | case NETDEV_CHANGEADDR: | 1226 | case NETDEV_CHANGEADDR: |
1227 | neigh_changeaddr(&arp_tbl, dev); | 1227 | neigh_changeaddr(&arp_tbl, dev); |
1228 | rt_cache_flush(dev_net(dev), 0); | 1228 | rt_cache_flush(dev_net(dev)); |
1229 | break; | 1229 | break; |
1230 | default: | 1230 | default: |
1231 | break; | 1231 | break; |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 44bf82e3aef7..e12fad773852 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -725,7 +725,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
725 | break; | 725 | break; |
726 | 726 | ||
727 | case SIOCSIFFLAGS: | 727 | case SIOCSIFFLAGS: |
728 | ret = -EACCES; | 728 | ret = -EPERM; |
729 | if (!capable(CAP_NET_ADMIN)) | 729 | if (!capable(CAP_NET_ADMIN)) |
730 | goto out; | 730 | goto out; |
731 | break; | 731 | break; |
@@ -733,7 +733,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
733 | case SIOCSIFBRDADDR: /* Set the broadcast address */ | 733 | case SIOCSIFBRDADDR: /* Set the broadcast address */ |
734 | case SIOCSIFDSTADDR: /* Set the destination address */ | 734 | case SIOCSIFDSTADDR: /* Set the destination address */ |
735 | case SIOCSIFNETMASK: /* Set the netmask for the interface */ | 735 | case SIOCSIFNETMASK: /* Set the netmask for the interface */ |
736 | ret = -EACCES; | 736 | ret = -EPERM; |
737 | if (!capable(CAP_NET_ADMIN)) | 737 | if (!capable(CAP_NET_ADMIN)) |
738 | goto out; | 738 | goto out; |
739 | ret = -EINVAL; | 739 | ret = -EINVAL; |
@@ -1503,7 +1503,7 @@ static int devinet_conf_proc(ctl_table *ctl, int write, | |||
1503 | if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 || | 1503 | if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 || |
1504 | i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) | 1504 | i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) |
1505 | if ((new_value == 0) && (old_value != 0)) | 1505 | if ((new_value == 0) && (old_value != 0)) |
1506 | rt_cache_flush(net, 0); | 1506 | rt_cache_flush(net); |
1507 | } | 1507 | } |
1508 | 1508 | ||
1509 | return ret; | 1509 | return ret; |
@@ -1537,7 +1537,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, | |||
1537 | dev_disable_lro(idev->dev); | 1537 | dev_disable_lro(idev->dev); |
1538 | } | 1538 | } |
1539 | rtnl_unlock(); | 1539 | rtnl_unlock(); |
1540 | rt_cache_flush(net, 0); | 1540 | rt_cache_flush(net); |
1541 | } | 1541 | } |
1542 | } | 1542 | } |
1543 | 1543 | ||
@@ -1554,7 +1554,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write, | |||
1554 | struct net *net = ctl->extra2; | 1554 | struct net *net = ctl->extra2; |
1555 | 1555 | ||
1556 | if (write && *valp != val) | 1556 | if (write && *valp != val) |
1557 | rt_cache_flush(net, 0); | 1557 | rt_cache_flush(net); |
1558 | 1558 | ||
1559 | return ret; | 1559 | return ret; |
1560 | } | 1560 | } |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c43ae3fba792..8e2b475da9fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -148,7 +148,7 @@ static void fib_flush(struct net *net) | |||
148 | } | 148 | } |
149 | 149 | ||
150 | if (flushed) | 150 | if (flushed) |
151 | rt_cache_flush(net, -1); | 151 | rt_cache_flush(net); |
152 | } | 152 | } |
153 | 153 | ||
154 | /* | 154 | /* |
@@ -999,11 +999,11 @@ static void nl_fib_lookup_exit(struct net *net) | |||
999 | net->ipv4.fibnl = NULL; | 999 | net->ipv4.fibnl = NULL; |
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | static void fib_disable_ip(struct net_device *dev, int force, int delay) | 1002 | static void fib_disable_ip(struct net_device *dev, int force) |
1003 | { | 1003 | { |
1004 | if (fib_sync_down_dev(dev, force)) | 1004 | if (fib_sync_down_dev(dev, force)) |
1005 | fib_flush(dev_net(dev)); | 1005 | fib_flush(dev_net(dev)); |
1006 | rt_cache_flush(dev_net(dev), delay); | 1006 | rt_cache_flush(dev_net(dev)); |
1007 | arp_ifdown(dev); | 1007 | arp_ifdown(dev); |
1008 | } | 1008 | } |
1009 | 1009 | ||
@@ -1020,7 +1020,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
1020 | fib_sync_up(dev); | 1020 | fib_sync_up(dev); |
1021 | #endif | 1021 | #endif |
1022 | atomic_inc(&net->ipv4.dev_addr_genid); | 1022 | atomic_inc(&net->ipv4.dev_addr_genid); |
1023 | rt_cache_flush(dev_net(dev), -1); | 1023 | rt_cache_flush(dev_net(dev)); |
1024 | break; | 1024 | break; |
1025 | case NETDEV_DOWN: | 1025 | case NETDEV_DOWN: |
1026 | fib_del_ifaddr(ifa, NULL); | 1026 | fib_del_ifaddr(ifa, NULL); |
@@ -1029,9 +1029,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
1029 | /* Last address was deleted from this interface. | 1029 | /* Last address was deleted from this interface. |
1030 | * Disable IP. | 1030 | * Disable IP. |
1031 | */ | 1031 | */ |
1032 | fib_disable_ip(dev, 1, 0); | 1032 | fib_disable_ip(dev, 1); |
1033 | } else { | 1033 | } else { |
1034 | rt_cache_flush(dev_net(dev), -1); | 1034 | rt_cache_flush(dev_net(dev)); |
1035 | } | 1035 | } |
1036 | break; | 1036 | break; |
1037 | } | 1037 | } |
@@ -1045,7 +1045,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
1045 | struct net *net = dev_net(dev); | 1045 | struct net *net = dev_net(dev); |
1046 | 1046 | ||
1047 | if (event == NETDEV_UNREGISTER) { | 1047 | if (event == NETDEV_UNREGISTER) { |
1048 | fib_disable_ip(dev, 2, -1); | 1048 | fib_disable_ip(dev, 2); |
1049 | rt_flush_dev(dev); | 1049 | rt_flush_dev(dev); |
1050 | return NOTIFY_DONE; | 1050 | return NOTIFY_DONE; |
1051 | } | 1051 | } |
@@ -1062,14 +1062,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
1062 | fib_sync_up(dev); | 1062 | fib_sync_up(dev); |
1063 | #endif | 1063 | #endif |
1064 | atomic_inc(&net->ipv4.dev_addr_genid); | 1064 | atomic_inc(&net->ipv4.dev_addr_genid); |
1065 | rt_cache_flush(dev_net(dev), -1); | 1065 | rt_cache_flush(dev_net(dev)); |
1066 | break; | 1066 | break; |
1067 | case NETDEV_DOWN: | 1067 | case NETDEV_DOWN: |
1068 | fib_disable_ip(dev, 0, 0); | 1068 | fib_disable_ip(dev, 0); |
1069 | break; | 1069 | break; |
1070 | case NETDEV_CHANGEMTU: | 1070 | case NETDEV_CHANGEMTU: |
1071 | case NETDEV_CHANGE: | 1071 | case NETDEV_CHANGE: |
1072 | rt_cache_flush(dev_net(dev), 0); | 1072 | rt_cache_flush(dev_net(dev)); |
1073 | break; | 1073 | break; |
1074 | case NETDEV_UNREGISTER_BATCH: | 1074 | case NETDEV_UNREGISTER_BATCH: |
1075 | break; | 1075 | break; |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index a83d74e498d2..274309d3aded 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -259,7 +259,7 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) | |||
259 | 259 | ||
260 | static void fib4_rule_flush_cache(struct fib_rules_ops *ops) | 260 | static void fib4_rule_flush_cache(struct fib_rules_ops *ops) |
261 | { | 261 | { |
262 | rt_cache_flush(ops->fro_net, -1); | 262 | rt_cache_flush(ops->fro_net); |
263 | } | 263 | } |
264 | 264 | ||
265 | static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { | 265 | static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 57bd978483e1..d1b93595b4a7 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1286,7 +1286,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1286 | 1286 | ||
1287 | fib_release_info(fi_drop); | 1287 | fib_release_info(fi_drop); |
1288 | if (state & FA_S_ACCESSED) | 1288 | if (state & FA_S_ACCESSED) |
1289 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); | 1289 | rt_cache_flush(cfg->fc_nlinfo.nl_net); |
1290 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, | 1290 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, |
1291 | tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); | 1291 | tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); |
1292 | 1292 | ||
@@ -1333,7 +1333,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) | |||
1333 | list_add_tail_rcu(&new_fa->fa_list, | 1333 | list_add_tail_rcu(&new_fa->fa_list, |
1334 | (fa ? &fa->fa_list : fa_head)); | 1334 | (fa ? &fa->fa_list : fa_head)); |
1335 | 1335 | ||
1336 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); | 1336 | rt_cache_flush(cfg->fc_nlinfo.nl_net); |
1337 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, | 1337 | rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, |
1338 | &cfg->fc_nlinfo, 0); | 1338 | &cfg->fc_nlinfo, 0); |
1339 | succeeded: | 1339 | succeeded: |
@@ -1708,7 +1708,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) | |||
1708 | trie_leaf_remove(t, l); | 1708 | trie_leaf_remove(t, l); |
1709 | 1709 | ||
1710 | if (fa->fa_state & FA_S_ACCESSED) | 1710 | if (fa->fa_state & FA_S_ACCESSED) |
1711 | rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); | 1711 | rt_cache_flush(cfg->fc_nlinfo.nl_net); |
1712 | 1712 | ||
1713 | fib_release_info(fa->fa_info); | 1713 | fib_release_info(fa->fa_info); |
1714 | alias_free_mem_rcu(fa); | 1714 | alias_free_mem_rcu(fa); |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index db0cf17c00f7..7f75f21d7b83 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, | |||
404 | { | 404 | { |
405 | const struct inet_request_sock *ireq = inet_rsk(req); | 405 | const struct inet_request_sock *ireq = inet_rsk(req); |
406 | struct inet_sock *newinet = inet_sk(newsk); | 406 | struct inet_sock *newinet = inet_sk(newsk); |
407 | struct ip_options_rcu *opt = ireq->opt; | 407 | struct ip_options_rcu *opt; |
408 | struct net *net = sock_net(sk); | 408 | struct net *net = sock_net(sk); |
409 | struct flowi4 *fl4; | 409 | struct flowi4 *fl4; |
410 | struct rtable *rt; | 410 | struct rtable *rt; |
411 | 411 | ||
412 | fl4 = &newinet->cork.fl.u.ip4; | 412 | fl4 = &newinet->cork.fl.u.ip4; |
413 | |||
414 | rcu_read_lock(); | ||
415 | opt = rcu_dereference(newinet->inet_opt); | ||
413 | flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, | 416 | flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, |
414 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, | 417 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, |
415 | sk->sk_protocol, inet_sk_flowi_flags(sk), | 418 | sk->sk_protocol, inet_sk_flowi_flags(sk), |
@@ -421,11 +424,13 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, | |||
421 | goto no_route; | 424 | goto no_route; |
422 | if (opt && opt->opt.is_strictroute && rt->rt_gateway) | 425 | if (opt && opt->opt.is_strictroute && rt->rt_gateway) |
423 | goto route_err; | 426 | goto route_err; |
427 | rcu_read_unlock(); | ||
424 | return &rt->dst; | 428 | return &rt->dst; |
425 | 429 | ||
426 | route_err: | 430 | route_err: |
427 | ip_rt_put(rt); | 431 | ip_rt_put(rt); |
428 | no_route: | 432 | no_route: |
433 | rcu_read_unlock(); | ||
429 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | 434 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); |
430 | return NULL; | 435 | return NULL; |
431 | } | 436 | } |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 570e61f9611f..8bc005b1435f 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -69,6 +69,7 @@ static inline void inet_diag_unlock_handler( | |||
69 | 69 | ||
70 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | 70 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, |
71 | struct sk_buff *skb, struct inet_diag_req_v2 *req, | 71 | struct sk_buff *skb, struct inet_diag_req_v2 *req, |
72 | struct user_namespace *user_ns, | ||
72 | u32 pid, u32 seq, u16 nlmsg_flags, | 73 | u32 pid, u32 seq, u16 nlmsg_flags, |
73 | const struct nlmsghdr *unlh) | 74 | const struct nlmsghdr *unlh) |
74 | { | 75 | { |
@@ -124,7 +125,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
124 | } | 125 | } |
125 | #endif | 126 | #endif |
126 | 127 | ||
127 | r->idiag_uid = sock_i_uid(sk); | 128 | r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); |
128 | r->idiag_inode = sock_i_ino(sk); | 129 | r->idiag_inode = sock_i_ino(sk); |
129 | 130 | ||
130 | if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { | 131 | if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { |
@@ -199,11 +200,12 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill); | |||
199 | 200 | ||
200 | static int inet_csk_diag_fill(struct sock *sk, | 201 | static int inet_csk_diag_fill(struct sock *sk, |
201 | struct sk_buff *skb, struct inet_diag_req_v2 *req, | 202 | struct sk_buff *skb, struct inet_diag_req_v2 *req, |
203 | struct user_namespace *user_ns, | ||
202 | u32 pid, u32 seq, u16 nlmsg_flags, | 204 | u32 pid, u32 seq, u16 nlmsg_flags, |
203 | const struct nlmsghdr *unlh) | 205 | const struct nlmsghdr *unlh) |
204 | { | 206 | { |
205 | return inet_sk_diag_fill(sk, inet_csk(sk), | 207 | return inet_sk_diag_fill(sk, inet_csk(sk), |
206 | skb, req, pid, seq, nlmsg_flags, unlh); | 208 | skb, req, user_ns, pid, seq, nlmsg_flags, unlh); |
207 | } | 209 | } |
208 | 210 | ||
209 | static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | 211 | static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, |
@@ -256,14 +258,16 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | |||
256 | } | 258 | } |
257 | 259 | ||
258 | static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, | 260 | static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, |
259 | struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags, | 261 | struct inet_diag_req_v2 *r, |
262 | struct user_namespace *user_ns, | ||
263 | u32 pid, u32 seq, u16 nlmsg_flags, | ||
260 | const struct nlmsghdr *unlh) | 264 | const struct nlmsghdr *unlh) |
261 | { | 265 | { |
262 | if (sk->sk_state == TCP_TIME_WAIT) | 266 | if (sk->sk_state == TCP_TIME_WAIT) |
263 | return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, | 267 | return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, |
264 | skb, r, pid, seq, nlmsg_flags, | 268 | skb, r, pid, seq, nlmsg_flags, |
265 | unlh); | 269 | unlh); |
266 | return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh); | 270 | return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh); |
267 | } | 271 | } |
268 | 272 | ||
269 | int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, | 273 | int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, |
@@ -311,6 +315,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s | |||
311 | } | 315 | } |
312 | 316 | ||
313 | err = sk_diag_fill(sk, rep, req, | 317 | err = sk_diag_fill(sk, rep, req, |
318 | sk_user_ns(NETLINK_CB(in_skb).ssk), | ||
314 | NETLINK_CB(in_skb).pid, | 319 | NETLINK_CB(in_skb).pid, |
315 | nlh->nlmsg_seq, 0, nlh); | 320 | nlh->nlmsg_seq, 0, nlh); |
316 | if (err < 0) { | 321 | if (err < 0) { |
@@ -551,6 +556,7 @@ static int inet_csk_diag_dump(struct sock *sk, | |||
551 | return 0; | 556 | return 0; |
552 | 557 | ||
553 | return inet_csk_diag_fill(sk, skb, r, | 558 | return inet_csk_diag_fill(sk, skb, r, |
559 | sk_user_ns(NETLINK_CB(cb->skb).ssk), | ||
554 | NETLINK_CB(cb->skb).pid, | 560 | NETLINK_CB(cb->skb).pid, |
555 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); | 561 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); |
556 | } | 562 | } |
@@ -591,7 +597,9 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, | |||
591 | } | 597 | } |
592 | 598 | ||
593 | static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | 599 | static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, |
594 | struct request_sock *req, u32 pid, u32 seq, | 600 | struct request_sock *req, |
601 | struct user_namespace *user_ns, | ||
602 | u32 pid, u32 seq, | ||
595 | const struct nlmsghdr *unlh) | 603 | const struct nlmsghdr *unlh) |
596 | { | 604 | { |
597 | const struct inet_request_sock *ireq = inet_rsk(req); | 605 | const struct inet_request_sock *ireq = inet_rsk(req); |
@@ -625,7 +633,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
625 | r->idiag_expires = jiffies_to_msecs(tmo); | 633 | r->idiag_expires = jiffies_to_msecs(tmo); |
626 | r->idiag_rqueue = 0; | 634 | r->idiag_rqueue = 0; |
627 | r->idiag_wqueue = 0; | 635 | r->idiag_wqueue = 0; |
628 | r->idiag_uid = sock_i_uid(sk); | 636 | r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); |
629 | r->idiag_inode = 0; | 637 | r->idiag_inode = 0; |
630 | #if IS_ENABLED(CONFIG_IPV6) | 638 | #if IS_ENABLED(CONFIG_IPV6) |
631 | if (r->idiag_family == AF_INET6) { | 639 | if (r->idiag_family == AF_INET6) { |
@@ -702,6 +710,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
702 | } | 710 | } |
703 | 711 | ||
704 | err = inet_diag_fill_req(skb, sk, req, | 712 | err = inet_diag_fill_req(skb, sk, req, |
713 | sk_user_ns(NETLINK_CB(cb->skb).ssk), | ||
705 | NETLINK_CB(cb->skb).pid, | 714 | NETLINK_CB(cb->skb).pid, |
706 | cb->nlh->nlmsg_seq, cb->nlh); | 715 | cb->nlh->nlmsg_seq, cb->nlh); |
707 | if (err < 0) { | 716 | if (err < 0) { |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index e1e0a4e8fd34..000e3d239d64 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -194,7 +194,7 @@ void __init inet_initpeers(void) | |||
194 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, | 194 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, |
195 | NULL); | 195 | NULL); |
196 | 196 | ||
197 | INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); | 197 | INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker); |
198 | } | 198 | } |
199 | 199 | ||
200 | static int addr_compare(const struct inetpeer_addr *a, | 200 | static int addr_compare(const struct inetpeer_addr *a, |
@@ -510,7 +510,10 @@ relookup: | |||
510 | secure_ipv6_id(daddr->addr.a6)); | 510 | secure_ipv6_id(daddr->addr.a6)); |
511 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; | 511 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
512 | p->rate_tokens = 0; | 512 | p->rate_tokens = 0; |
513 | p->rate_last = 0; | 513 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
514 | * calculation of tokens is at its maximum. | ||
515 | */ | ||
516 | p->rate_last = jiffies - 60*HZ; | ||
514 | INIT_LIST_HEAD(&p->gc_list); | 517 | INIT_LIST_HEAD(&p->gc_list); |
515 | 518 | ||
516 | /* Link the node. */ | 519 | /* Link the node. */ |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 147ccc3e93db..c196d749daf2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk, | |||
1338 | iph->ihl = 5; | 1338 | iph->ihl = 5; |
1339 | iph->tos = inet->tos; | 1339 | iph->tos = inet->tos; |
1340 | iph->frag_off = df; | 1340 | iph->frag_off = df; |
1341 | ip_select_ident(iph, &rt->dst, sk); | ||
1342 | iph->ttl = ttl; | 1341 | iph->ttl = ttl; |
1343 | iph->protocol = sk->sk_protocol; | 1342 | iph->protocol = sk->sk_protocol; |
1344 | ip_copy_addrs(iph, fl4); | 1343 | ip_copy_addrs(iph, fl4); |
1344 | ip_select_ident(iph, &rt->dst, sk); | ||
1345 | 1345 | ||
1346 | if (opt) { | 1346 | if (opt) { |
1347 | iph->ihl += opt->optlen>>2; | 1347 | iph->ihl += opt->optlen>>2; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8eec8f4a0536..ebdf06f938bf 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
124 | static struct kmem_cache *mrt_cachep __read_mostly; | 124 | static struct kmem_cache *mrt_cachep __read_mostly; |
125 | 125 | ||
126 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); | 126 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); |
127 | static void ipmr_free_table(struct mr_table *mrt); | ||
128 | |||
127 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, | 129 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
128 | struct sk_buff *skb, struct mfc_cache *cache, | 130 | struct sk_buff *skb, struct mfc_cache *cache, |
129 | int local); | 131 | int local); |
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt, | |||
131 | struct sk_buff *pkt, vifi_t vifi, int assert); | 133 | struct sk_buff *pkt, vifi_t vifi, int assert); |
132 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | 134 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
133 | struct mfc_cache *c, struct rtmsg *rtm); | 135 | struct mfc_cache *c, struct rtmsg *rtm); |
136 | static void mroute_clean_tables(struct mr_table *mrt); | ||
134 | static void ipmr_expire_process(unsigned long arg); | 137 | static void ipmr_expire_process(unsigned long arg); |
135 | 138 | ||
136 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | 139 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES |
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
271 | 274 | ||
272 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { | 275 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
273 | list_del(&mrt->list); | 276 | list_del(&mrt->list); |
274 | kfree(mrt); | 277 | ipmr_free_table(mrt); |
275 | } | 278 | } |
276 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 279 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
277 | } | 280 | } |
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
299 | 302 | ||
300 | static void __net_exit ipmr_rules_exit(struct net *net) | 303 | static void __net_exit ipmr_rules_exit(struct net *net) |
301 | { | 304 | { |
302 | kfree(net->ipv4.mrt); | 305 | ipmr_free_table(net->ipv4.mrt); |
303 | } | 306 | } |
304 | #endif | 307 | #endif |
305 | 308 | ||
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
336 | return mrt; | 339 | return mrt; |
337 | } | 340 | } |
338 | 341 | ||
342 | static void ipmr_free_table(struct mr_table *mrt) | ||
343 | { | ||
344 | del_timer_sync(&mrt->ipmr_expire_timer); | ||
345 | mroute_clean_tables(mrt); | ||
346 | kfree(mrt); | ||
347 | } | ||
348 | |||
339 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ | 349 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ |
340 | 350 | ||
341 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) | 351 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) |
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index ea4a23813d26..9c87cde28ff8 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, | |||
148 | if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, | 148 | if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, |
149 | hdr, NULL, &matchoff, &matchlen, | 149 | hdr, NULL, &matchoff, &matchlen, |
150 | &addr, &port) > 0) { | 150 | &addr, &port) > 0) { |
151 | unsigned int matchend, poff, plen, buflen, n; | 151 | unsigned int olen, matchend, poff, plen, buflen, n; |
152 | char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | 152 | char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; |
153 | 153 | ||
154 | /* We're only interested in headers related to this | 154 | /* We're only interested in headers related to this |
@@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, | |||
163 | goto next; | 163 | goto next; |
164 | } | 164 | } |
165 | 165 | ||
166 | olen = *datalen; | ||
166 | if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, | 167 | if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, |
167 | &addr, port)) | 168 | &addr, port)) |
168 | return NF_DROP; | 169 | return NF_DROP; |
169 | 170 | ||
170 | matchend = matchoff + matchlen; | 171 | matchend = matchoff + matchlen + *datalen - olen; |
171 | 172 | ||
172 | /* The maddr= parameter (RFC 2361) specifies where to send | 173 | /* The maddr= parameter (RFC 2361) specifies where to send |
173 | * the reply. */ | 174 | * the reply. */ |
174 | if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, | 175 | if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, |
175 | "maddr=", &poff, &plen, | 176 | "maddr=", &poff, &plen, |
176 | &addr) > 0 && | 177 | &addr, true) > 0 && |
177 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 178 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
178 | addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { | 179 | addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { |
179 | buflen = sprintf(buffer, "%pI4", | 180 | buflen = sprintf(buffer, "%pI4", |
@@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, | |||
187 | * from which the server received the request. */ | 188 | * from which the server received the request. */ |
188 | if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, | 189 | if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, |
189 | "received=", &poff, &plen, | 190 | "received=", &poff, &plen, |
190 | &addr) > 0 && | 191 | &addr, false) > 0 && |
191 | addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | 192 | addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && |
192 | addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { | 193 | addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { |
193 | buflen = sprintf(buffer, "%pI4", | 194 | buflen = sprintf(buffer, "%pI4", |
@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, | |||
501 | ret = nf_ct_expect_related(rtcp_exp); | 502 | ret = nf_ct_expect_related(rtcp_exp); |
502 | if (ret == 0) | 503 | if (ret == 0) |
503 | break; | 504 | break; |
504 | else if (ret != -EBUSY) { | 505 | else if (ret == -EBUSY) { |
506 | nf_ct_unexpect_related(rtp_exp); | ||
507 | continue; | ||
508 | } else if (ret < 0) { | ||
505 | nf_ct_unexpect_related(rtp_exp); | 509 | nf_ct_unexpect_related(rtp_exp); |
506 | port = 0; | 510 | port = 0; |
507 | break; | 511 | break; |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 6232d476f37e..8f3d05424a3e 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -185,10 +185,10 @@ exit: | |||
185 | return sk; | 185 | return sk; |
186 | } | 186 | } |
187 | 187 | ||
188 | static void inet_get_ping_group_range_net(struct net *net, gid_t *low, | 188 | static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, |
189 | gid_t *high) | 189 | kgid_t *high) |
190 | { | 190 | { |
191 | gid_t *data = net->ipv4.sysctl_ping_group_range; | 191 | kgid_t *data = net->ipv4.sysctl_ping_group_range; |
192 | unsigned int seq; | 192 | unsigned int seq; |
193 | 193 | ||
194 | do { | 194 | do { |
@@ -203,19 +203,13 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low, | |||
203 | static int ping_init_sock(struct sock *sk) | 203 | static int ping_init_sock(struct sock *sk) |
204 | { | 204 | { |
205 | struct net *net = sock_net(sk); | 205 | struct net *net = sock_net(sk); |
206 | gid_t group = current_egid(); | 206 | kgid_t group = current_egid(); |
207 | gid_t range[2]; | ||
208 | struct group_info *group_info = get_current_groups(); | 207 | struct group_info *group_info = get_current_groups(); |
209 | int i, j, count = group_info->ngroups; | 208 | int i, j, count = group_info->ngroups; |
210 | kgid_t low, high; | 209 | kgid_t low, high; |
211 | 210 | ||
212 | inet_get_ping_group_range_net(net, range, range+1); | 211 | inet_get_ping_group_range_net(net, &low, &high); |
213 | low = make_kgid(&init_user_ns, range[0]); | 212 | if (gid_lte(low, group) && gid_lte(group, high)) |
214 | high = make_kgid(&init_user_ns, range[1]); | ||
215 | if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low)) | ||
216 | return -EACCES; | ||
217 | |||
218 | if (range[0] <= group && group <= range[1]) | ||
219 | return 0; | 213 | return 0; |
220 | 214 | ||
221 | for (i = 0; i < group_info->nblocks; i++) { | 215 | for (i = 0; i < group_info->nblocks; i++) { |
@@ -845,7 +839,9 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f, | |||
845 | bucket, src, srcp, dest, destp, sp->sk_state, | 839 | bucket, src, srcp, dest, destp, sp->sk_state, |
846 | sk_wmem_alloc_get(sp), | 840 | sk_wmem_alloc_get(sp), |
847 | sk_rmem_alloc_get(sp), | 841 | sk_rmem_alloc_get(sp), |
848 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 842 | 0, 0L, 0, |
843 | from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), | ||
844 | 0, sock_i_ino(sp), | ||
849 | atomic_read(&sp->sk_refcnt), sp, | 845 | atomic_read(&sp->sk_refcnt), sp, |
850 | atomic_read(&sp->sk_drops), len); | 846 | atomic_read(&sp->sk_drops), len); |
851 | } | 847 | } |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ff0f071969ea..73d1e4df4bf6 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -131,18 +131,20 @@ found: | |||
131 | * 0 - deliver | 131 | * 0 - deliver |
132 | * 1 - block | 132 | * 1 - block |
133 | */ | 133 | */ |
134 | static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) | 134 | static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) |
135 | { | 135 | { |
136 | int type; | 136 | struct icmphdr _hdr; |
137 | const struct icmphdr *hdr; | ||
137 | 138 | ||
138 | if (!pskb_may_pull(skb, sizeof(struct icmphdr))) | 139 | hdr = skb_header_pointer(skb, skb_transport_offset(skb), |
140 | sizeof(_hdr), &_hdr); | ||
141 | if (!hdr) | ||
139 | return 1; | 142 | return 1; |
140 | 143 | ||
141 | type = icmp_hdr(skb)->type; | 144 | if (hdr->type < 32) { |
142 | if (type < 32) { | ||
143 | __u32 data = raw_sk(sk)->filter.data; | 145 | __u32 data = raw_sk(sk)->filter.data; |
144 | 146 | ||
145 | return ((1 << type) & data) != 0; | 147 | return ((1U << hdr->type) & data) != 0; |
146 | } | 148 | } |
147 | 149 | ||
148 | /* Do not block unknown ICMP types */ | 150 | /* Do not block unknown ICMP types */ |
@@ -992,7 +994,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | |||
992 | i, src, srcp, dest, destp, sp->sk_state, | 994 | i, src, srcp, dest, destp, sp->sk_state, |
993 | sk_wmem_alloc_get(sp), | 995 | sk_wmem_alloc_get(sp), |
994 | sk_rmem_alloc_get(sp), | 996 | sk_rmem_alloc_get(sp), |
995 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 997 | 0, 0L, 0, |
998 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), | ||
999 | 0, sock_i_ino(sp), | ||
996 | atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); | 1000 | atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); |
997 | } | 1001 | } |
998 | 1002 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e4ba974f143c..fd9af60397b5 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -202,11 +202,6 @@ EXPORT_SYMBOL(ip_tos2prio); | |||
202 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); | 202 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); |
203 | #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) | 203 | #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) |
204 | 204 | ||
205 | static inline int rt_genid(struct net *net) | ||
206 | { | ||
207 | return atomic_read(&net->ipv4.rt_genid); | ||
208 | } | ||
209 | |||
210 | #ifdef CONFIG_PROC_FS | 205 | #ifdef CONFIG_PROC_FS |
211 | static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) | 206 | static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) |
212 | { | 207 | { |
@@ -447,27 +442,9 @@ static inline bool rt_is_expired(const struct rtable *rth) | |||
447 | return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); | 442 | return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); |
448 | } | 443 | } |
449 | 444 | ||
450 | /* | 445 | void rt_cache_flush(struct net *net) |
451 | * Perturbation of rt_genid by a small quantity [1..256] | ||
452 | * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() | ||
453 | * many times (2^24) without giving recent rt_genid. | ||
454 | * Jenkins hash is strong enough that litle changes of rt_genid are OK. | ||
455 | */ | ||
456 | static void rt_cache_invalidate(struct net *net) | ||
457 | { | ||
458 | unsigned char shuffle; | ||
459 | |||
460 | get_random_bytes(&shuffle, sizeof(shuffle)); | ||
461 | atomic_add(shuffle + 1U, &net->ipv4.rt_genid); | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * delay < 0 : invalidate cache (fast : entries will be deleted later) | ||
466 | * delay >= 0 : invalidate & flush cache (can be long) | ||
467 | */ | ||
468 | void rt_cache_flush(struct net *net, int delay) | ||
469 | { | 446 | { |
470 | rt_cache_invalidate(net); | 447 | rt_genid_bump(net); |
471 | } | 448 | } |
472 | 449 | ||
473 | static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, | 450 | static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, |
@@ -934,12 +911,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | |||
934 | if (mtu < ip_rt_min_pmtu) | 911 | if (mtu < ip_rt_min_pmtu) |
935 | mtu = ip_rt_min_pmtu; | 912 | mtu = ip_rt_min_pmtu; |
936 | 913 | ||
914 | rcu_read_lock(); | ||
937 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { | 915 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { |
938 | struct fib_nh *nh = &FIB_RES_NH(res); | 916 | struct fib_nh *nh = &FIB_RES_NH(res); |
939 | 917 | ||
940 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, | 918 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, |
941 | jiffies + ip_rt_mtu_expires); | 919 | jiffies + ip_rt_mtu_expires); |
942 | } | 920 | } |
921 | rcu_read_unlock(); | ||
943 | return mtu; | 922 | return mtu; |
944 | } | 923 | } |
945 | 924 | ||
@@ -956,7 +935,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | |||
956 | dst->obsolete = DST_OBSOLETE_KILL; | 935 | dst->obsolete = DST_OBSOLETE_KILL; |
957 | } else { | 936 | } else { |
958 | rt->rt_pmtu = mtu; | 937 | rt->rt_pmtu = mtu; |
959 | dst_set_expires(&rt->dst, ip_rt_mtu_expires); | 938 | rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); |
960 | } | 939 | } |
961 | } | 940 | } |
962 | 941 | ||
@@ -1263,7 +1242,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst) | |||
1263 | { | 1242 | { |
1264 | struct rtable *rt = (struct rtable *) dst; | 1243 | struct rtable *rt = (struct rtable *) dst; |
1265 | 1244 | ||
1266 | if (dst->flags & DST_NOCACHE) { | 1245 | if (!list_empty(&rt->rt_uncached)) { |
1267 | spin_lock_bh(&rt_uncached_lock); | 1246 | spin_lock_bh(&rt_uncached_lock); |
1268 | list_del(&rt->rt_uncached); | 1247 | list_del(&rt->rt_uncached); |
1269 | spin_unlock_bh(&rt_uncached_lock); | 1248 | spin_unlock_bh(&rt_uncached_lock); |
@@ -2028,7 +2007,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2028 | } | 2007 | } |
2029 | dev_out = net->loopback_dev; | 2008 | dev_out = net->loopback_dev; |
2030 | fl4->flowi4_oif = dev_out->ifindex; | 2009 | fl4->flowi4_oif = dev_out->ifindex; |
2031 | res.fi = NULL; | ||
2032 | flags |= RTCF_LOCAL; | 2010 | flags |= RTCF_LOCAL; |
2033 | goto make_route; | 2011 | goto make_route; |
2034 | } | 2012 | } |
@@ -2344,7 +2322,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2344 | 2322 | ||
2345 | void ip_rt_multicast_event(struct in_device *in_dev) | 2323 | void ip_rt_multicast_event(struct in_device *in_dev) |
2346 | { | 2324 | { |
2347 | rt_cache_flush(dev_net(in_dev->dev), 0); | 2325 | rt_cache_flush(dev_net(in_dev->dev)); |
2348 | } | 2326 | } |
2349 | 2327 | ||
2350 | #ifdef CONFIG_SYSCTL | 2328 | #ifdef CONFIG_SYSCTL |
@@ -2353,16 +2331,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write, | |||
2353 | size_t *lenp, loff_t *ppos) | 2331 | size_t *lenp, loff_t *ppos) |
2354 | { | 2332 | { |
2355 | if (write) { | 2333 | if (write) { |
2356 | int flush_delay; | 2334 | rt_cache_flush((struct net *)__ctl->extra1); |
2357 | ctl_table ctl; | ||
2358 | struct net *net; | ||
2359 | |||
2360 | memcpy(&ctl, __ctl, sizeof(ctl)); | ||
2361 | ctl.data = &flush_delay; | ||
2362 | proc_dointvec(&ctl, write, buffer, lenp, ppos); | ||
2363 | |||
2364 | net = (struct net *)__ctl->extra1; | ||
2365 | rt_cache_flush(net, flush_delay); | ||
2366 | return 0; | 2335 | return 0; |
2367 | } | 2336 | } |
2368 | 2337 | ||
@@ -2532,8 +2501,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { | |||
2532 | 2501 | ||
2533 | static __net_init int rt_genid_init(struct net *net) | 2502 | static __net_init int rt_genid_init(struct net *net) |
2534 | { | 2503 | { |
2535 | get_random_bytes(&net->ipv4.rt_genid, | 2504 | atomic_set(&net->rt_genid, 0); |
2536 | sizeof(net->ipv4.rt_genid)); | ||
2537 | get_random_bytes(&net->ipv4.dev_addr_genid, | 2505 | get_random_bytes(&net->ipv4.dev_addr_genid, |
2538 | sizeof(net->ipv4.dev_addr_genid)); | 2506 | sizeof(net->ipv4.dev_addr_genid)); |
2539 | return 0; | 2507 | return 0; |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1b5ce96707a3..3e78c79b5586 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -76,9 +76,9 @@ static int ipv4_local_port_range(ctl_table *table, int write, | |||
76 | } | 76 | } |
77 | 77 | ||
78 | 78 | ||
79 | static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) | 79 | static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high) |
80 | { | 80 | { |
81 | gid_t *data = table->data; | 81 | kgid_t *data = table->data; |
82 | unsigned int seq; | 82 | unsigned int seq; |
83 | do { | 83 | do { |
84 | seq = read_seqbegin(&sysctl_local_ports.lock); | 84 | seq = read_seqbegin(&sysctl_local_ports.lock); |
@@ -89,12 +89,12 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, | |||
89 | } | 89 | } |
90 | 90 | ||
91 | /* Update system visible IP port range */ | 91 | /* Update system visible IP port range */ |
92 | static void set_ping_group_range(struct ctl_table *table, gid_t range[2]) | 92 | static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high) |
93 | { | 93 | { |
94 | gid_t *data = table->data; | 94 | kgid_t *data = table->data; |
95 | write_seqlock(&sysctl_local_ports.lock); | 95 | write_seqlock(&sysctl_local_ports.lock); |
96 | data[0] = range[0]; | 96 | data[0] = low; |
97 | data[1] = range[1]; | 97 | data[1] = high; |
98 | write_sequnlock(&sysctl_local_ports.lock); | 98 | write_sequnlock(&sysctl_local_ports.lock); |
99 | } | 99 | } |
100 | 100 | ||
@@ -103,21 +103,33 @@ static int ipv4_ping_group_range(ctl_table *table, int write, | |||
103 | void __user *buffer, | 103 | void __user *buffer, |
104 | size_t *lenp, loff_t *ppos) | 104 | size_t *lenp, loff_t *ppos) |
105 | { | 105 | { |
106 | struct user_namespace *user_ns = current_user_ns(); | ||
106 | int ret; | 107 | int ret; |
107 | gid_t range[2]; | 108 | gid_t urange[2]; |
109 | kgid_t low, high; | ||
108 | ctl_table tmp = { | 110 | ctl_table tmp = { |
109 | .data = &range, | 111 | .data = &urange, |
110 | .maxlen = sizeof(range), | 112 | .maxlen = sizeof(urange), |
111 | .mode = table->mode, | 113 | .mode = table->mode, |
112 | .extra1 = &ip_ping_group_range_min, | 114 | .extra1 = &ip_ping_group_range_min, |
113 | .extra2 = &ip_ping_group_range_max, | 115 | .extra2 = &ip_ping_group_range_max, |
114 | }; | 116 | }; |
115 | 117 | ||
116 | inet_get_ping_group_range_table(table, range, range + 1); | 118 | inet_get_ping_group_range_table(table, &low, &high); |
119 | urange[0] = from_kgid_munged(user_ns, low); | ||
120 | urange[1] = from_kgid_munged(user_ns, high); | ||
117 | ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); | 121 | ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); |
118 | 122 | ||
119 | if (write && ret == 0) | 123 | if (write && ret == 0) { |
120 | set_ping_group_range(table, range); | 124 | low = make_kgid(user_ns, urange[0]); |
125 | high = make_kgid(user_ns, urange[1]); | ||
126 | if (!gid_valid(low) || !gid_valid(high) || | ||
127 | (urange[1] < urange[0]) || gid_lt(high, low)) { | ||
128 | low = make_kgid(&init_user_ns, 1); | ||
129 | high = make_kgid(&init_user_ns, 0); | ||
130 | } | ||
131 | set_ping_group_range(table, low, high); | ||
132 | } | ||
121 | 133 | ||
122 | return ret; | 134 | return ret; |
123 | } | 135 | } |
@@ -786,7 +798,7 @@ static struct ctl_table ipv4_net_table[] = { | |||
786 | { | 798 | { |
787 | .procname = "ping_group_range", | 799 | .procname = "ping_group_range", |
788 | .data = &init_net.ipv4.sysctl_ping_group_range, | 800 | .data = &init_net.ipv4.sysctl_ping_group_range, |
789 | .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range), | 801 | .maxlen = sizeof(gid_t)*2, |
790 | .mode = 0644, | 802 | .mode = 0644, |
791 | .proc_handler = ipv4_ping_group_range, | 803 | .proc_handler = ipv4_ping_group_range, |
792 | }, | 804 | }, |
@@ -830,8 +842,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
830 | * Sane defaults - nobody may create ping sockets. | 842 | * Sane defaults - nobody may create ping sockets. |
831 | * Boot scripts should set this to distro-specific group. | 843 | * Boot scripts should set this to distro-specific group. |
832 | */ | 844 | */ |
833 | net->ipv4.sysctl_ping_group_range[0] = 1; | 845 | net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1); |
834 | net->ipv4.sysctl_ping_group_range[1] = 0; | 846 | net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0); |
835 | 847 | ||
836 | tcp_init_mem(net); | 848 | tcp_init_mem(net); |
837 | 849 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2109ff4a1daf..5f6419341821 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1762,8 +1762,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | #ifdef CONFIG_NET_DMA | 1764 | #ifdef CONFIG_NET_DMA |
1765 | if (tp->ucopy.dma_chan) | 1765 | if (tp->ucopy.dma_chan) { |
1766 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1766 | if (tp->rcv_wnd == 0 && |
1767 | !skb_queue_empty(&sk->sk_async_wait_queue)) { | ||
1768 | tcp_service_net_dma(sk, true); | ||
1769 | tcp_cleanup_rbuf(sk, copied); | ||
1770 | } else | ||
1771 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1772 | } | ||
1767 | #endif | 1773 | #endif |
1768 | if (copied >= target) { | 1774 | if (copied >= target) { |
1769 | /* Do not sleep, just process backlog. */ | 1775 | /* Do not sleep, just process backlog. */ |
@@ -2325,10 +2331,17 @@ static int tcp_repair_options_est(struct tcp_sock *tp, | |||
2325 | tp->rx_opt.mss_clamp = opt.opt_val; | 2331 | tp->rx_opt.mss_clamp = opt.opt_val; |
2326 | break; | 2332 | break; |
2327 | case TCPOPT_WINDOW: | 2333 | case TCPOPT_WINDOW: |
2328 | if (opt.opt_val > 14) | 2334 | { |
2329 | return -EFBIG; | 2335 | u16 snd_wscale = opt.opt_val & 0xFFFF; |
2336 | u16 rcv_wscale = opt.opt_val >> 16; | ||
2337 | |||
2338 | if (snd_wscale > 14 || rcv_wscale > 14) | ||
2339 | return -EFBIG; | ||
2330 | 2340 | ||
2331 | tp->rx_opt.snd_wscale = opt.opt_val; | 2341 | tp->rx_opt.snd_wscale = snd_wscale; |
2342 | tp->rx_opt.rcv_wscale = rcv_wscale; | ||
2343 | tp->rx_opt.wscale_ok = 1; | ||
2344 | } | ||
2332 | break; | 2345 | break; |
2333 | case TCPOPT_SACK_PERM: | 2346 | case TCPOPT_SACK_PERM: |
2334 | if (opt.opt_val != 0) | 2347 | if (opt.opt_val != 0) |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 85308b90df80..d377f4854cb8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) | |||
2926 | * tcp_xmit_retransmit_queue(). | 2926 | * tcp_xmit_retransmit_queue(). |
2927 | */ | 2927 | */ |
2928 | static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | 2928 | static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, |
2929 | int newly_acked_sacked, bool is_dupack, | 2929 | int prior_sacked, bool is_dupack, |
2930 | int flag) | 2930 | int flag) |
2931 | { | 2931 | { |
2932 | struct inet_connection_sock *icsk = inet_csk(sk); | 2932 | struct inet_connection_sock *icsk = inet_csk(sk); |
2933 | struct tcp_sock *tp = tcp_sk(sk); | 2933 | struct tcp_sock *tp = tcp_sk(sk); |
2934 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && | 2934 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && |
2935 | (tcp_fackets_out(tp) > tp->reordering)); | 2935 | (tcp_fackets_out(tp) > tp->reordering)); |
2936 | int newly_acked_sacked = 0; | ||
2936 | int fast_rexmit = 0; | 2937 | int fast_rexmit = 0; |
2937 | 2938 | ||
2938 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) | 2939 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) |
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | |||
2992 | tcp_add_reno_sack(sk); | 2993 | tcp_add_reno_sack(sk); |
2993 | } else | 2994 | } else |
2994 | do_lost = tcp_try_undo_partial(sk, pkts_acked); | 2995 | do_lost = tcp_try_undo_partial(sk, pkts_acked); |
2996 | newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; | ||
2995 | break; | 2997 | break; |
2996 | case TCP_CA_Loss: | 2998 | case TCP_CA_Loss: |
2997 | if (flag & FLAG_DATA_ACKED) | 2999 | if (flag & FLAG_DATA_ACKED) |
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | |||
3013 | if (is_dupack) | 3015 | if (is_dupack) |
3014 | tcp_add_reno_sack(sk); | 3016 | tcp_add_reno_sack(sk); |
3015 | } | 3017 | } |
3018 | newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; | ||
3016 | 3019 | ||
3017 | if (icsk->icsk_ca_state <= TCP_CA_Disorder) | 3020 | if (icsk->icsk_ca_state <= TCP_CA_Disorder) |
3018 | tcp_try_undo_dsack(sk); | 3021 | tcp_try_undo_dsack(sk); |
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3590 | int prior_packets; | 3593 | int prior_packets; |
3591 | int prior_sacked = tp->sacked_out; | 3594 | int prior_sacked = tp->sacked_out; |
3592 | int pkts_acked = 0; | 3595 | int pkts_acked = 0; |
3593 | int newly_acked_sacked = 0; | ||
3594 | bool frto_cwnd = false; | 3596 | bool frto_cwnd = false; |
3595 | 3597 | ||
3596 | /* If the ack is older than previous acks | 3598 | /* If the ack is older than previous acks |
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3666 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); | 3668 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); |
3667 | 3669 | ||
3668 | pkts_acked = prior_packets - tp->packets_out; | 3670 | pkts_acked = prior_packets - tp->packets_out; |
3669 | newly_acked_sacked = (prior_packets - prior_sacked) - | ||
3670 | (tp->packets_out - tp->sacked_out); | ||
3671 | 3671 | ||
3672 | if (tp->frto_counter) | 3672 | if (tp->frto_counter) |
3673 | frto_cwnd = tcp_process_frto(sk, flag); | 3673 | frto_cwnd = tcp_process_frto(sk, flag); |
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3681 | tcp_may_raise_cwnd(sk, flag)) | 3681 | tcp_may_raise_cwnd(sk, flag)) |
3682 | tcp_cong_avoid(sk, ack, prior_in_flight); | 3682 | tcp_cong_avoid(sk, ack, prior_in_flight); |
3683 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 3683 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
3684 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | 3684 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3685 | is_dupack, flag); | 3685 | is_dupack, flag); |
3686 | } else { | 3686 | } else { |
3687 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) | 3687 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) |
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3698 | no_queue: | 3698 | no_queue: |
3699 | /* If data was DSACKed, see if we can undo a cwnd reduction. */ | 3699 | /* If data was DSACKed, see if we can undo a cwnd reduction. */ |
3700 | if (flag & FLAG_DSACKING_ACK) | 3700 | if (flag & FLAG_DSACKING_ACK) |
3701 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | 3701 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3702 | is_dupack, flag); | 3702 | is_dupack, flag); |
3703 | /* If this ack opens up a zero window, clear backoff. It was | 3703 | /* If this ack opens up a zero window, clear backoff. It was |
3704 | * being used to time the probes, and is probably far higher than | 3704 | * being used to time the probes, and is probably far higher than |
@@ -3718,8 +3718,7 @@ old_ack: | |||
3718 | */ | 3718 | */ |
3719 | if (TCP_SKB_CB(skb)->sacked) { | 3719 | if (TCP_SKB_CB(skb)->sacked) { |
3720 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); | 3720 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); |
3721 | newly_acked_sacked = tp->sacked_out - prior_sacked; | 3721 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3722 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | ||
3723 | is_dupack, flag); | 3722 | is_dupack, flag); |
3724 | } | 3723 | } |
3725 | 3724 | ||
@@ -4662,7 +4661,7 @@ queue_and_out: | |||
4662 | 4661 | ||
4663 | if (eaten > 0) | 4662 | if (eaten > 0) |
4664 | kfree_skb_partial(skb, fragstolen); | 4663 | kfree_skb_partial(skb, fragstolen); |
4665 | else if (!sock_flag(sk, SOCK_DEAD)) | 4664 | if (!sock_flag(sk, SOCK_DEAD)) |
4666 | sk->sk_data_ready(sk, 0); | 4665 | sk->sk_data_ready(sk, 0); |
4667 | return; | 4666 | return; |
4668 | } | 4667 | } |
@@ -5557,8 +5556,7 @@ no_ack: | |||
5557 | #endif | 5556 | #endif |
5558 | if (eaten) | 5557 | if (eaten) |
5559 | kfree_skb_partial(skb, fragstolen); | 5558 | kfree_skb_partial(skb, fragstolen); |
5560 | else | 5559 | sk->sk_data_ready(sk, 0); |
5561 | sk->sk_data_ready(sk, 0); | ||
5562 | return 0; | 5560 | return 0; |
5563 | } | 5561 | } |
5564 | } | 5562 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 767823764016..be23a0b7b89e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
417 | 417 | ||
418 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ | 418 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ |
419 | tp->mtu_info = info; | 419 | tp->mtu_info = info; |
420 | if (!sock_owned_by_user(sk)) | 420 | if (!sock_owned_by_user(sk)) { |
421 | tcp_v4_mtu_reduced(sk); | 421 | tcp_v4_mtu_reduced(sk); |
422 | else | 422 | } else { |
423 | set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); | 423 | if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) |
424 | sock_hold(sk); | ||
425 | } | ||
424 | goto out; | 426 | goto out; |
425 | } | 427 | } |
426 | 428 | ||
@@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1462 | goto exit_nonewsk; | 1464 | goto exit_nonewsk; |
1463 | 1465 | ||
1464 | newsk->sk_gso_type = SKB_GSO_TCPV4; | 1466 | newsk->sk_gso_type = SKB_GSO_TCPV4; |
1467 | inet_sk_rx_dst_set(newsk, skb); | ||
1465 | 1468 | ||
1466 | newtp = tcp_sk(newsk); | 1469 | newtp = tcp_sk(newsk); |
1467 | newinet = inet_sk(newsk); | 1470 | newinet = inet_sk(newsk); |
@@ -2390,7 +2393,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) | |||
2390 | EXPORT_SYMBOL(tcp_proc_unregister); | 2393 | EXPORT_SYMBOL(tcp_proc_unregister); |
2391 | 2394 | ||
2392 | static void get_openreq4(const struct sock *sk, const struct request_sock *req, | 2395 | static void get_openreq4(const struct sock *sk, const struct request_sock *req, |
2393 | struct seq_file *f, int i, int uid, int *len) | 2396 | struct seq_file *f, int i, kuid_t uid, int *len) |
2394 | { | 2397 | { |
2395 | const struct inet_request_sock *ireq = inet_rsk(req); | 2398 | const struct inet_request_sock *ireq = inet_rsk(req); |
2396 | int ttd = req->expires - jiffies; | 2399 | int ttd = req->expires - jiffies; |
@@ -2407,7 +2410,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req, | |||
2407 | 1, /* timers active (only the expire timer) */ | 2410 | 1, /* timers active (only the expire timer) */ |
2408 | jiffies_to_clock_t(ttd), | 2411 | jiffies_to_clock_t(ttd), |
2409 | req->retrans, | 2412 | req->retrans, |
2410 | uid, | 2413 | from_kuid_munged(seq_user_ns(f), uid), |
2411 | 0, /* non standard timer */ | 2414 | 0, /* non standard timer */ |
2412 | 0, /* open_requests have no inode */ | 2415 | 0, /* open_requests have no inode */ |
2413 | atomic_read(&sk->sk_refcnt), | 2416 | atomic_read(&sk->sk_refcnt), |
@@ -2458,7 +2461,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) | |||
2458 | timer_active, | 2461 | timer_active, |
2459 | jiffies_to_clock_t(timer_expires - jiffies), | 2462 | jiffies_to_clock_t(timer_expires - jiffies), |
2460 | icsk->icsk_retransmits, | 2463 | icsk->icsk_retransmits, |
2461 | sock_i_uid(sk), | 2464 | from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), |
2462 | icsk->icsk_probes_out, | 2465 | icsk->icsk_probes_out, |
2463 | sock_i_ino(sk), | 2466 | sock_i_ino(sk), |
2464 | atomic_read(&sk->sk_refcnt), sk, | 2467 | atomic_read(&sk->sk_refcnt), sk, |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index d9c9dcef2de3..6ff7f10dce9d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
387 | struct tcp_sock *oldtp = tcp_sk(sk); | 387 | struct tcp_sock *oldtp = tcp_sk(sk); |
388 | struct tcp_cookie_values *oldcvp = oldtp->cookie_values; | 388 | struct tcp_cookie_values *oldcvp = oldtp->cookie_values; |
389 | 389 | ||
390 | newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb); | ||
391 | |||
392 | /* TCP Cookie Transactions require space for the cookie pair, | 390 | /* TCP Cookie Transactions require space for the cookie pair, |
393 | * as it differs for each connection. There is no need to | 391 | * as it differs for each connection. There is no need to |
394 | * copy any s_data_payload stored at the original socket. | 392 | * copy any s_data_payload stored at the original socket. |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 20dfd892c86f..d04632673a9e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk) | |||
910 | if (flags & (1UL << TCP_TSQ_DEFERRED)) | 910 | if (flags & (1UL << TCP_TSQ_DEFERRED)) |
911 | tcp_tsq_handler(sk); | 911 | tcp_tsq_handler(sk); |
912 | 912 | ||
913 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) | 913 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { |
914 | tcp_write_timer_handler(sk); | 914 | tcp_write_timer_handler(sk); |
915 | 915 | __sock_put(sk); | |
916 | if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) | 916 | } |
917 | if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { | ||
917 | tcp_delack_timer_handler(sk); | 918 | tcp_delack_timer_handler(sk); |
918 | 919 | __sock_put(sk); | |
919 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) | 920 | } |
921 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { | ||
920 | sk->sk_prot->mtu_reduced(sk); | 922 | sk->sk_prot->mtu_reduced(sk); |
923 | __sock_put(sk); | ||
924 | } | ||
921 | } | 925 | } |
922 | EXPORT_SYMBOL(tcp_release_cb); | 926 | EXPORT_SYMBOL(tcp_release_cb); |
923 | 927 | ||
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6df36ad55a38..b774a03bd1dc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data) | |||
252 | inet_csk(sk)->icsk_ack.blocked = 1; | 252 | inet_csk(sk)->icsk_ack.blocked = 1; |
253 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); | 253 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); |
254 | /* deleguate our work to tcp_release_cb() */ | 254 | /* deleguate our work to tcp_release_cb() */ |
255 | set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); | 255 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) |
256 | sock_hold(sk); | ||
256 | } | 257 | } |
257 | bh_unlock_sock(sk); | 258 | bh_unlock_sock(sk); |
258 | sock_put(sk); | 259 | sock_put(sk); |
@@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data) | |||
481 | tcp_write_timer_handler(sk); | 482 | tcp_write_timer_handler(sk); |
482 | } else { | 483 | } else { |
483 | /* deleguate our work to tcp_release_cb() */ | 484 | /* deleguate our work to tcp_release_cb() */ |
484 | set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); | 485 | if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) |
486 | sock_hold(sk); | ||
485 | } | 487 | } |
486 | bh_unlock_sock(sk); | 488 | bh_unlock_sock(sk); |
487 | sock_put(sk); | 489 | sock_put(sk); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6f6d1aca3c3d..79c8dbe59b54 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1226,6 +1226,11 @@ try_again: | |||
1226 | 1226 | ||
1227 | if (unlikely(err)) { | 1227 | if (unlikely(err)) { |
1228 | trace_kfree_skb(skb, udp_recvmsg); | 1228 | trace_kfree_skb(skb, udp_recvmsg); |
1229 | if (!peeked) { | ||
1230 | atomic_inc(&sk->sk_drops); | ||
1231 | UDP_INC_STATS_USER(sock_net(sk), | ||
1232 | UDP_MIB_INERRORS, is_udplite); | ||
1233 | } | ||
1229 | goto out_free; | 1234 | goto out_free; |
1230 | } | 1235 | } |
1231 | 1236 | ||
@@ -2110,7 +2115,9 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, | |||
2110 | bucket, src, srcp, dest, destp, sp->sk_state, | 2115 | bucket, src, srcp, dest, destp, sp->sk_state, |
2111 | sk_wmem_alloc_get(sp), | 2116 | sk_wmem_alloc_get(sp), |
2112 | sk_rmem_alloc_get(sp), | 2117 | sk_rmem_alloc_get(sp), |
2113 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 2118 | 0, 0L, 0, |
2119 | from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), | ||
2120 | 0, sock_i_ino(sp), | ||
2114 | atomic_read(&sp->sk_refcnt), sp, | 2121 | atomic_read(&sp->sk_refcnt), sp, |
2115 | atomic_read(&sp->sk_drops), len); | 2122 | atomic_read(&sp->sk_drops), len); |
2116 | } | 2123 | } |
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 16d0960062be..d2f336ea82ca 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c | |||
@@ -24,7 +24,9 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, | |||
24 | if (!inet_diag_bc_sk(bc, sk)) | 24 | if (!inet_diag_bc_sk(bc, sk)) |
25 | return 0; | 25 | return 0; |
26 | 26 | ||
27 | return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid, | 27 | return inet_sk_diag_fill(sk, NULL, skb, req, |
28 | sk_user_ns(NETLINK_CB(cb->skb).ssk), | ||
29 | NETLINK_CB(cb->skb).pid, | ||
28 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); | 30 | cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); |
29 | } | 31 | } |
30 | 32 | ||
@@ -69,6 +71,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, | |||
69 | goto out; | 71 | goto out; |
70 | 72 | ||
71 | err = inet_sk_diag_fill(sk, NULL, rep, req, | 73 | err = inet_sk_diag_fill(sk, NULL, rep, req, |
74 | sk_user_ns(NETLINK_CB(in_skb).ssk), | ||
72 | NETLINK_CB(in_skb).pid, | 75 | NETLINK_CB(in_skb).pid, |
73 | nlh->nlmsg_seq, 0, nlh); | 76 | nlh->nlmsg_seq, 0, nlh); |
74 | if (err < 0) { | 77 | if (err < 0) { |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 79181819a24f..6bc85f7c31e3 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf) | |||
494 | struct net_device *dev; | 494 | struct net_device *dev; |
495 | struct inet6_dev *idev; | 495 | struct inet6_dev *idev; |
496 | 496 | ||
497 | rcu_read_lock(); | 497 | for_each_netdev(net, dev) { |
498 | for_each_netdev_rcu(net, dev) { | ||
499 | idev = __in6_dev_get(dev); | 498 | idev = __in6_dev_get(dev); |
500 | if (idev) { | 499 | if (idev) { |
501 | int changed = (!idev->cnf.forwarding) ^ (!newf); | 500 | int changed = (!idev->cnf.forwarding) ^ (!newf); |
@@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf) | |||
504 | dev_forward_change(idev); | 503 | dev_forward_change(idev); |
505 | } | 504 | } |
506 | } | 505 | } |
507 | rcu_read_unlock(); | ||
508 | } | 506 | } |
509 | 507 | ||
510 | static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) | 508 | static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6dc7fd353ef5..282f3723ee19 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
167 | struct esp_data *esp = x->data; | 167 | struct esp_data *esp = x->data; |
168 | 168 | ||
169 | /* skb is pure payload to encrypt */ | 169 | /* skb is pure payload to encrypt */ |
170 | err = -ENOMEM; | ||
171 | |||
172 | aead = esp->aead; | 170 | aead = esp->aead; |
173 | alen = crypto_aead_authsize(aead); | 171 | alen = crypto_aead_authsize(aead); |
174 | 172 | ||
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
203 | } | 201 | } |
204 | 202 | ||
205 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 203 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); |
206 | if (!tmp) | 204 | if (!tmp) { |
205 | err = -ENOMEM; | ||
207 | goto error; | 206 | goto error; |
207 | } | ||
208 | 208 | ||
209 | seqhi = esp_tmp_seqhi(tmp); | 209 | seqhi = esp_tmp_seqhi(tmp); |
210 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 210 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0251a6005be8..c4f934176cab 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -175,33 +175,12 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, | |||
175 | const struct in6_addr *saddr) | 175 | const struct in6_addr *saddr) |
176 | { | 176 | { |
177 | __ip6_dst_store(sk, dst, daddr, saddr); | 177 | __ip6_dst_store(sk, dst, daddr, saddr); |
178 | |||
179 | #ifdef CONFIG_XFRM | ||
180 | { | ||
181 | struct rt6_info *rt = (struct rt6_info *)dst; | ||
182 | rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); | ||
183 | } | ||
184 | #endif | ||
185 | } | 178 | } |
186 | 179 | ||
187 | static inline | 180 | static inline |
188 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | 181 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) |
189 | { | 182 | { |
190 | struct dst_entry *dst; | 183 | return __sk_dst_check(sk, cookie); |
191 | |||
192 | dst = __sk_dst_check(sk, cookie); | ||
193 | |||
194 | #ifdef CONFIG_XFRM | ||
195 | if (dst) { | ||
196 | struct rt6_info *rt = (struct rt6_info *)dst; | ||
197 | if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { | ||
198 | __sk_dst_reset(sk); | ||
199 | dst = NULL; | ||
200 | } | ||
201 | } | ||
202 | #endif | ||
203 | |||
204 | return dst; | ||
205 | } | 184 | } |
206 | 185 | ||
207 | static struct dst_entry *inet6_csk_route_socket(struct sock *sk, | 186 | static struct dst_entry *inet6_csk_route_socket(struct sock *sk, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 13690d650c3e..286acfc21250 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -819,6 +819,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) | |||
819 | offsetof(struct rt6_info, rt6i_src), | 819 | offsetof(struct rt6_info, rt6i_src), |
820 | allow_create, replace_required); | 820 | allow_create, replace_required); |
821 | 821 | ||
822 | if (IS_ERR(sn)) { | ||
823 | err = PTR_ERR(sn); | ||
824 | sn = NULL; | ||
825 | } | ||
822 | if (!sn) { | 826 | if (!sn) { |
823 | /* If it is failed, discard just allocated | 827 | /* If it is failed, discard just allocated |
824 | root, and then (in st_failure) stale node | 828 | root, and then (in st_failure) stale node |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 9772fbd8a3f5..90bbefb57943 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/pid_namespace.h> | ||
25 | 26 | ||
26 | #include <net/net_namespace.h> | 27 | #include <net/net_namespace.h> |
27 | #include <net/sock.h> | 28 | #include <net/sock.h> |
@@ -91,6 +92,8 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label) | |||
91 | static void fl_free(struct ip6_flowlabel *fl) | 92 | static void fl_free(struct ip6_flowlabel *fl) |
92 | { | 93 | { |
93 | if (fl) { | 94 | if (fl) { |
95 | if (fl->share == IPV6_FL_S_PROCESS) | ||
96 | put_pid(fl->owner.pid); | ||
94 | release_net(fl->fl_net); | 97 | release_net(fl->fl_net); |
95 | kfree(fl->opt); | 98 | kfree(fl->opt); |
96 | } | 99 | } |
@@ -394,10 +397,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, | |||
394 | case IPV6_FL_S_ANY: | 397 | case IPV6_FL_S_ANY: |
395 | break; | 398 | break; |
396 | case IPV6_FL_S_PROCESS: | 399 | case IPV6_FL_S_PROCESS: |
397 | fl->owner = current->pid; | 400 | fl->owner.pid = get_task_pid(current, PIDTYPE_PID); |
398 | break; | 401 | break; |
399 | case IPV6_FL_S_USER: | 402 | case IPV6_FL_S_USER: |
400 | fl->owner = current_euid(); | 403 | fl->owner.uid = current_euid(); |
401 | break; | 404 | break; |
402 | default: | 405 | default: |
403 | err = -EINVAL; | 406 | err = -EINVAL; |
@@ -561,7 +564,10 @@ recheck: | |||
561 | err = -EPERM; | 564 | err = -EPERM; |
562 | if (fl1->share == IPV6_FL_S_EXCL || | 565 | if (fl1->share == IPV6_FL_S_EXCL || |
563 | fl1->share != fl->share || | 566 | fl1->share != fl->share || |
564 | fl1->owner != fl->owner) | 567 | ((fl1->share == IPV6_FL_S_PROCESS) && |
568 | (fl1->owner.pid == fl->owner.pid)) || | ||
569 | ((fl1->share == IPV6_FL_S_USER) && | ||
570 | uid_eq(fl1->owner.uid, fl->owner.uid))) | ||
565 | goto release; | 571 | goto release; |
566 | 572 | ||
567 | err = -EINVAL; | 573 | err = -EINVAL; |
@@ -621,6 +627,7 @@ done: | |||
621 | 627 | ||
622 | struct ip6fl_iter_state { | 628 | struct ip6fl_iter_state { |
623 | struct seq_net_private p; | 629 | struct seq_net_private p; |
630 | struct pid_namespace *pid_ns; | ||
624 | int bucket; | 631 | int bucket; |
625 | }; | 632 | }; |
626 | 633 | ||
@@ -699,6 +706,7 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v) | |||
699 | 706 | ||
700 | static int ip6fl_seq_show(struct seq_file *seq, void *v) | 707 | static int ip6fl_seq_show(struct seq_file *seq, void *v) |
701 | { | 708 | { |
709 | struct ip6fl_iter_state *state = ip6fl_seq_private(seq); | ||
702 | if (v == SEQ_START_TOKEN) | 710 | if (v == SEQ_START_TOKEN) |
703 | seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", | 711 | seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", |
704 | "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); | 712 | "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); |
@@ -708,7 +716,11 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v) | |||
708 | "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", | 716 | "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", |
709 | (unsigned int)ntohl(fl->label), | 717 | (unsigned int)ntohl(fl->label), |
710 | fl->share, | 718 | fl->share, |
711 | (int)fl->owner, | 719 | ((fl->share == IPV6_FL_S_PROCESS) ? |
720 | pid_nr_ns(fl->owner.pid, state->pid_ns) : | ||
721 | ((fl->share == IPV6_FL_S_USER) ? | ||
722 | from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : | ||
723 | 0)), | ||
712 | atomic_read(&fl->users), | 724 | atomic_read(&fl->users), |
713 | fl->linger/HZ, | 725 | fl->linger/HZ, |
714 | (long)(fl->expires - jiffies)/HZ, | 726 | (long)(fl->expires - jiffies)/HZ, |
@@ -727,8 +739,29 @@ static const struct seq_operations ip6fl_seq_ops = { | |||
727 | 739 | ||
728 | static int ip6fl_seq_open(struct inode *inode, struct file *file) | 740 | static int ip6fl_seq_open(struct inode *inode, struct file *file) |
729 | { | 741 | { |
730 | return seq_open_net(inode, file, &ip6fl_seq_ops, | 742 | struct seq_file *seq; |
731 | sizeof(struct ip6fl_iter_state)); | 743 | struct ip6fl_iter_state *state; |
744 | int err; | ||
745 | |||
746 | err = seq_open_net(inode, file, &ip6fl_seq_ops, | ||
747 | sizeof(struct ip6fl_iter_state)); | ||
748 | |||
749 | if (!err) { | ||
750 | seq = file->private_data; | ||
751 | state = ip6fl_seq_private(seq); | ||
752 | rcu_read_lock(); | ||
753 | state->pid_ns = get_pid_ns(task_active_pid_ns(current)); | ||
754 | rcu_read_unlock(); | ||
755 | } | ||
756 | return err; | ||
757 | } | ||
758 | |||
759 | static int ip6fl_seq_release(struct inode *inode, struct file *file) | ||
760 | { | ||
761 | struct seq_file *seq = file->private_data; | ||
762 | struct ip6fl_iter_state *state = ip6fl_seq_private(seq); | ||
763 | put_pid_ns(state->pid_ns); | ||
764 | return seq_release_net(inode, file); | ||
732 | } | 765 | } |
733 | 766 | ||
734 | static const struct file_operations ip6fl_seq_fops = { | 767 | static const struct file_operations ip6fl_seq_fops = { |
@@ -736,7 +769,7 @@ static const struct file_operations ip6fl_seq_fops = { | |||
736 | .open = ip6fl_seq_open, | 769 | .open = ip6fl_seq_open, |
737 | .read = seq_read, | 770 | .read = seq_read, |
738 | .llseek = seq_lseek, | 771 | .llseek = seq_lseek, |
739 | .release = seq_release_net, | 772 | .release = ip6fl_seq_release, |
740 | }; | 773 | }; |
741 | 774 | ||
742 | static int __net_init ip6_flowlabel_proc_init(struct net *net) | 775 | static int __net_init ip6_flowlabel_proc_init(struct net *net) |
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 5b087c31d87b..0f9bdc5ee9f3 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c | |||
@@ -86,28 +86,30 @@ static int mip6_mh_len(int type) | |||
86 | 86 | ||
87 | static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) | 87 | static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) |
88 | { | 88 | { |
89 | struct ip6_mh *mh; | 89 | struct ip6_mh _hdr; |
90 | const struct ip6_mh *mh; | ||
90 | 91 | ||
91 | if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || | 92 | mh = skb_header_pointer(skb, skb_transport_offset(skb), |
92 | !pskb_may_pull(skb, (skb_transport_offset(skb) + | 93 | sizeof(_hdr), &_hdr); |
93 | ((skb_transport_header(skb)[1] + 1) << 3)))) | 94 | if (!mh) |
94 | return -1; | 95 | return -1; |
95 | 96 | ||
96 | mh = (struct ip6_mh *)skb_transport_header(skb); | 97 | if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len) |
98 | return -1; | ||
97 | 99 | ||
98 | if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { | 100 | if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { |
99 | LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", | 101 | LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", |
100 | mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); | 102 | mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); |
101 | mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - | 103 | mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + |
102 | skb_network_header(skb))); | 104 | skb_network_header_len(skb)); |
103 | return -1; | 105 | return -1; |
104 | } | 106 | } |
105 | 107 | ||
106 | if (mh->ip6mh_proto != IPPROTO_NONE) { | 108 | if (mh->ip6mh_proto != IPPROTO_NONE) { |
107 | LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", | 109 | LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", |
108 | mh->ip6mh_proto); | 110 | mh->ip6mh_proto); |
109 | mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - | 111 | mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + |
110 | skb_network_header(skb))); | 112 | skb_network_header_len(skb)); |
111 | return -1; | 113 | return -1; |
112 | } | 114 | } |
113 | 115 | ||
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index da2e92d05c15..745a32042950 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net) | |||
307 | goto proc_dev_snmp6_fail; | 307 | goto proc_dev_snmp6_fail; |
308 | return 0; | 308 | return 0; |
309 | 309 | ||
310 | proc_dev_snmp6_fail: | ||
311 | proc_net_remove(net, "snmp6"); | ||
310 | proc_snmp6_fail: | 312 | proc_snmp6_fail: |
311 | proc_net_remove(net, "sockstat6"); | 313 | proc_net_remove(net, "sockstat6"); |
312 | proc_dev_snmp6_fail: | ||
313 | proc_net_remove(net, "dev_snmp6"); | ||
314 | return -ENOMEM; | 314 | return -ENOMEM; |
315 | } | 315 | } |
316 | 316 | ||
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ef0579d5bca6..d8e95c77db99 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -107,21 +107,20 @@ found: | |||
107 | * 0 - deliver | 107 | * 0 - deliver |
108 | * 1 - block | 108 | * 1 - block |
109 | */ | 109 | */ |
110 | static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) | 110 | static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) |
111 | { | 111 | { |
112 | struct icmp6hdr *icmph; | 112 | struct icmp6hdr *_hdr; |
113 | struct raw6_sock *rp = raw6_sk(sk); | 113 | const struct icmp6hdr *hdr; |
114 | |||
115 | if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { | ||
116 | __u32 *data = &rp->filter.data[0]; | ||
117 | int bit_nr; | ||
118 | 114 | ||
119 | icmph = (struct icmp6hdr *) skb->data; | 115 | hdr = skb_header_pointer(skb, skb_transport_offset(skb), |
120 | bit_nr = icmph->icmp6_type; | 116 | sizeof(_hdr), &_hdr); |
117 | if (hdr) { | ||
118 | const __u32 *data = &raw6_sk(sk)->filter.data[0]; | ||
119 | unsigned int type = hdr->icmp6_type; | ||
121 | 120 | ||
122 | return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; | 121 | return (data[type >> 5] & (1U << (type & 31))) != 0; |
123 | } | 122 | } |
124 | return 0; | 123 | return 1; |
125 | } | 124 | } |
126 | 125 | ||
127 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 126 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
@@ -1251,7 +1250,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | |||
1251 | sk_wmem_alloc_get(sp), | 1250 | sk_wmem_alloc_get(sp), |
1252 | sk_rmem_alloc_get(sp), | 1251 | sk_rmem_alloc_get(sp), |
1253 | 0, 0L, 0, | 1252 | 0, 0L, 0, |
1254 | sock_i_uid(sp), 0, | 1253 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), |
1254 | 0, | ||
1255 | sock_i_ino(sp), | 1255 | sock_i_ino(sp), |
1256 | atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); | 1256 | atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); |
1257 | } | 1257 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8e80fd279100..854e4018d205 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -226,7 +226,7 @@ static struct rt6_info ip6_null_entry_template = { | |||
226 | .dst = { | 226 | .dst = { |
227 | .__refcnt = ATOMIC_INIT(1), | 227 | .__refcnt = ATOMIC_INIT(1), |
228 | .__use = 1, | 228 | .__use = 1, |
229 | .obsolete = -1, | 229 | .obsolete = DST_OBSOLETE_FORCE_CHK, |
230 | .error = -ENETUNREACH, | 230 | .error = -ENETUNREACH, |
231 | .input = ip6_pkt_discard, | 231 | .input = ip6_pkt_discard, |
232 | .output = ip6_pkt_discard_out, | 232 | .output = ip6_pkt_discard_out, |
@@ -246,7 +246,7 @@ static struct rt6_info ip6_prohibit_entry_template = { | |||
246 | .dst = { | 246 | .dst = { |
247 | .__refcnt = ATOMIC_INIT(1), | 247 | .__refcnt = ATOMIC_INIT(1), |
248 | .__use = 1, | 248 | .__use = 1, |
249 | .obsolete = -1, | 249 | .obsolete = DST_OBSOLETE_FORCE_CHK, |
250 | .error = -EACCES, | 250 | .error = -EACCES, |
251 | .input = ip6_pkt_prohibit, | 251 | .input = ip6_pkt_prohibit, |
252 | .output = ip6_pkt_prohibit_out, | 252 | .output = ip6_pkt_prohibit_out, |
@@ -261,7 +261,7 @@ static struct rt6_info ip6_blk_hole_entry_template = { | |||
261 | .dst = { | 261 | .dst = { |
262 | .__refcnt = ATOMIC_INIT(1), | 262 | .__refcnt = ATOMIC_INIT(1), |
263 | .__use = 1, | 263 | .__use = 1, |
264 | .obsolete = -1, | 264 | .obsolete = DST_OBSOLETE_FORCE_CHK, |
265 | .error = -EINVAL, | 265 | .error = -EINVAL, |
266 | .input = dst_discard, | 266 | .input = dst_discard, |
267 | .output = dst_discard, | 267 | .output = dst_discard, |
@@ -281,13 +281,14 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, | |||
281 | struct fib6_table *table) | 281 | struct fib6_table *table) |
282 | { | 282 | { |
283 | struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, | 283 | struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, |
284 | 0, DST_OBSOLETE_NONE, flags); | 284 | 0, DST_OBSOLETE_FORCE_CHK, flags); |
285 | 285 | ||
286 | if (rt) { | 286 | if (rt) { |
287 | struct dst_entry *dst = &rt->dst; | 287 | struct dst_entry *dst = &rt->dst; |
288 | 288 | ||
289 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); | 289 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); |
290 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); | 290 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); |
291 | rt->rt6i_genid = rt_genid(net); | ||
291 | } | 292 | } |
292 | return rt; | 293 | return rt; |
293 | } | 294 | } |
@@ -1031,6 +1032,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
1031 | 1032 | ||
1032 | rt = (struct rt6_info *) dst; | 1033 | rt = (struct rt6_info *) dst; |
1033 | 1034 | ||
1035 | /* All IPV6 dsts are created with ->obsolete set to the value | ||
1036 | * DST_OBSOLETE_FORCE_CHK which forces validation calls down | ||
1037 | * into this function always. | ||
1038 | */ | ||
1039 | if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) | ||
1040 | return NULL; | ||
1041 | |||
1034 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { | 1042 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { |
1035 | if (rt->rt6i_peer_genid != rt6_peer_genid()) { | 1043 | if (rt->rt6i_peer_genid != rt6_peer_genid()) { |
1036 | if (!rt6_has_peer(rt)) | 1044 | if (!rt6_has_peer(rt)) |
@@ -1397,8 +1405,6 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1397 | goto out; | 1405 | goto out; |
1398 | } | 1406 | } |
1399 | 1407 | ||
1400 | rt->dst.obsolete = -1; | ||
1401 | |||
1402 | if (cfg->fc_flags & RTF_EXPIRES) | 1408 | if (cfg->fc_flags & RTF_EXPIRES) |
1403 | rt6_set_expires(rt, jiffies + | 1409 | rt6_set_expires(rt, jiffies + |
1404 | clock_t_to_jiffies(cfg->fc_expires)); | 1410 | clock_t_to_jiffies(cfg->fc_expires)); |
@@ -2080,7 +2086,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2080 | rt->dst.input = ip6_input; | 2086 | rt->dst.input = ip6_input; |
2081 | rt->dst.output = ip6_output; | 2087 | rt->dst.output = ip6_output; |
2082 | rt->rt6i_idev = idev; | 2088 | rt->rt6i_idev = idev; |
2083 | rt->dst.obsolete = -1; | ||
2084 | 2089 | ||
2085 | rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; | 2090 | rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; |
2086 | if (anycast) | 2091 | if (anycast) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index bb9ce2b2f377..342ec62cdbde 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, | |||
94 | } | 94 | } |
95 | #endif | 95 | #endif |
96 | 96 | ||
97 | static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | ||
98 | { | ||
99 | struct dst_entry *dst = skb_dst(skb); | ||
100 | const struct rt6_info *rt = (const struct rt6_info *)dst; | ||
101 | |||
102 | dst_hold(dst); | ||
103 | sk->sk_rx_dst = dst; | ||
104 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | ||
105 | if (rt->rt6i_node) | ||
106 | inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; | ||
107 | } | ||
108 | |||
97 | static void tcp_v6_hash(struct sock *sk) | 109 | static void tcp_v6_hash(struct sock *sk) |
98 | { | 110 | { |
99 | if (sk->sk_state != TCP_CLOSE) { | 111 | if (sk->sk_state != TCP_CLOSE) { |
@@ -391,8 +403,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
391 | tp->mtu_info = ntohl(info); | 403 | tp->mtu_info = ntohl(info); |
392 | if (!sock_owned_by_user(sk)) | 404 | if (!sock_owned_by_user(sk)) |
393 | tcp_v6_mtu_reduced(sk); | 405 | tcp_v6_mtu_reduced(sk); |
394 | else | 406 | else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, |
395 | set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); | 407 | &tp->tsq_flags)) |
408 | sock_hold(sk); | ||
396 | goto out; | 409 | goto out; |
397 | } | 410 | } |
398 | 411 | ||
@@ -1270,6 +1283,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1270 | 1283 | ||
1271 | newsk->sk_gso_type = SKB_GSO_TCPV6; | 1284 | newsk->sk_gso_type = SKB_GSO_TCPV6; |
1272 | __ip6_dst_store(newsk, dst, NULL, NULL); | 1285 | __ip6_dst_store(newsk, dst, NULL, NULL); |
1286 | inet6_sk_rx_dst_set(newsk, skb); | ||
1273 | 1287 | ||
1274 | newtcp6sk = (struct tcp6_sock *)newsk; | 1288 | newtcp6sk = (struct tcp6_sock *)newsk; |
1275 | inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; | 1289 | inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; |
@@ -1729,18 +1743,6 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { | |||
1729 | .twsk_destructor= tcp_twsk_destructor, | 1743 | .twsk_destructor= tcp_twsk_destructor, |
1730 | }; | 1744 | }; |
1731 | 1745 | ||
1732 | static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | ||
1733 | { | ||
1734 | struct dst_entry *dst = skb_dst(skb); | ||
1735 | const struct rt6_info *rt = (const struct rt6_info *)dst; | ||
1736 | |||
1737 | dst_hold(dst); | ||
1738 | sk->sk_rx_dst = dst; | ||
1739 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | ||
1740 | if (rt->rt6i_node) | ||
1741 | inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; | ||
1742 | } | ||
1743 | |||
1744 | static const struct inet_connection_sock_af_ops ipv6_specific = { | 1746 | static const struct inet_connection_sock_af_ops ipv6_specific = { |
1745 | .queue_xmit = inet6_csk_xmit, | 1747 | .queue_xmit = inet6_csk_xmit, |
1746 | .send_check = tcp_v6_send_check, | 1748 | .send_check = tcp_v6_send_check, |
@@ -1827,7 +1829,7 @@ static void tcp_v6_destroy_sock(struct sock *sk) | |||
1827 | #ifdef CONFIG_PROC_FS | 1829 | #ifdef CONFIG_PROC_FS |
1828 | /* Proc filesystem TCPv6 sock list dumping. */ | 1830 | /* Proc filesystem TCPv6 sock list dumping. */ |
1829 | static void get_openreq6(struct seq_file *seq, | 1831 | static void get_openreq6(struct seq_file *seq, |
1830 | const struct sock *sk, struct request_sock *req, int i, int uid) | 1832 | const struct sock *sk, struct request_sock *req, int i, kuid_t uid) |
1831 | { | 1833 | { |
1832 | int ttd = req->expires - jiffies; | 1834 | int ttd = req->expires - jiffies; |
1833 | const struct in6_addr *src = &inet6_rsk(req)->loc_addr; | 1835 | const struct in6_addr *src = &inet6_rsk(req)->loc_addr; |
@@ -1851,7 +1853,7 @@ static void get_openreq6(struct seq_file *seq, | |||
1851 | 1, /* timers active (only the expire timer) */ | 1853 | 1, /* timers active (only the expire timer) */ |
1852 | jiffies_to_clock_t(ttd), | 1854 | jiffies_to_clock_t(ttd), |
1853 | req->retrans, | 1855 | req->retrans, |
1854 | uid, | 1856 | from_kuid_munged(seq_user_ns(seq), uid), |
1855 | 0, /* non standard timer */ | 1857 | 0, /* non standard timer */ |
1856 | 0, /* open_requests have no inode */ | 1858 | 0, /* open_requests have no inode */ |
1857 | 0, req); | 1859 | 0, req); |
@@ -1901,7 +1903,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
1901 | timer_active, | 1903 | timer_active, |
1902 | jiffies_to_clock_t(timer_expires - jiffies), | 1904 | jiffies_to_clock_t(timer_expires - jiffies), |
1903 | icsk->icsk_retransmits, | 1905 | icsk->icsk_retransmits, |
1904 | sock_i_uid(sp), | 1906 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), |
1905 | icsk->icsk_probes_out, | 1907 | icsk->icsk_probes_out, |
1906 | sock_i_ino(sp), | 1908 | sock_i_ino(sp), |
1907 | atomic_read(&sp->sk_refcnt), sp, | 1909 | atomic_read(&sp->sk_refcnt), sp, |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 99d0077b56b8..fc9997260a6b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -394,6 +394,17 @@ try_again: | |||
394 | } | 394 | } |
395 | if (unlikely(err)) { | 395 | if (unlikely(err)) { |
396 | trace_kfree_skb(skb, udpv6_recvmsg); | 396 | trace_kfree_skb(skb, udpv6_recvmsg); |
397 | if (!peeked) { | ||
398 | atomic_inc(&sk->sk_drops); | ||
399 | if (is_udp4) | ||
400 | UDP_INC_STATS_USER(sock_net(sk), | ||
401 | UDP_MIB_INERRORS, | ||
402 | is_udplite); | ||
403 | else | ||
404 | UDP6_INC_STATS_USER(sock_net(sk), | ||
405 | UDP_MIB_INERRORS, | ||
406 | is_udplite); | ||
407 | } | ||
397 | goto out_free; | 408 | goto out_free; |
398 | } | 409 | } |
399 | if (!peeked) { | 410 | if (!peeked) { |
@@ -1458,7 +1469,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
1458 | sk_wmem_alloc_get(sp), | 1469 | sk_wmem_alloc_get(sp), |
1459 | sk_rmem_alloc_get(sp), | 1470 | sk_rmem_alloc_get(sp), |
1460 | 0, 0L, 0, | 1471 | 0, 0L, 0, |
1461 | sock_i_uid(sp), 0, | 1472 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), |
1473 | 0, | ||
1462 | sock_i_ino(sp), | 1474 | sock_i_ino(sp), |
1463 | atomic_read(&sp->sk_refcnt), sp, | 1475 | atomic_read(&sp->sk_refcnt), sp, |
1464 | atomic_read(&sp->sk_drops)); | 1476 | atomic_read(&sp->sk_drops)); |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ef39812107b1..f8c4c08ffb60 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl) | |||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst) | ||
77 | { | ||
78 | struct rt6_info *rt = (struct rt6_info *)xdst; | ||
79 | |||
80 | rt6_init_peer(rt, net->ipv6.peers); | ||
81 | } | ||
82 | |||
76 | static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, | 83 | static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, |
77 | int nfheader_len) | 84 | int nfheader_len) |
78 | { | 85 | { |
@@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { | |||
286 | .get_saddr = xfrm6_get_saddr, | 293 | .get_saddr = xfrm6_get_saddr, |
287 | .decode_session = _decode_session6, | 294 | .decode_session = _decode_session6, |
288 | .get_tos = xfrm6_get_tos, | 295 | .get_tos = xfrm6_get_tos, |
296 | .init_dst = xfrm6_init_dst, | ||
289 | .init_path = xfrm6_init_path, | 297 | .init_path = xfrm6_init_path, |
290 | .fill_dst = xfrm6_fill_dst, | 298 | .fill_dst = xfrm6_fill_dst, |
291 | .blackhole_route = ip6_blackhole_route, | 299 | .blackhole_route = ip6_blackhole_route, |
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index f8ba30dfecae..02ff7f2f60d4 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c | |||
@@ -217,7 +217,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) | |||
217 | seq_printf(seq, "%08X %08X %02X %03d\n", | 217 | seq_printf(seq, "%08X %08X %02X %03d\n", |
218 | sk_wmem_alloc_get(s), | 218 | sk_wmem_alloc_get(s), |
219 | sk_rmem_alloc_get(s), | 219 | sk_rmem_alloc_get(s), |
220 | s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); | 220 | s->sk_state, |
221 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s))); | ||
221 | out: | 222 | out: |
222 | return 0; | 223 | return 0; |
223 | } | 224 | } |
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index 8b915f3ac3b9..308939128359 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c | |||
@@ -99,7 +99,6 @@ pi_param_info_t ircomm_param_info = { pi_major_call_table, 3, 0x0f, 4 }; | |||
99 | */ | 99 | */ |
100 | int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) | 100 | int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) |
101 | { | 101 | { |
102 | struct tty_struct *tty; | ||
103 | unsigned long flags; | 102 | unsigned long flags; |
104 | struct sk_buff *skb; | 103 | struct sk_buff *skb; |
105 | int count; | 104 | int count; |
@@ -109,10 +108,6 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) | |||
109 | IRDA_ASSERT(self != NULL, return -1;); | 108 | IRDA_ASSERT(self != NULL, return -1;); |
110 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 109 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
111 | 110 | ||
112 | tty = self->tty; | ||
113 | if (!tty) | ||
114 | return 0; | ||
115 | |||
116 | /* Make sure we don't send parameters for raw mode */ | 111 | /* Make sure we don't send parameters for raw mode */ |
117 | if (self->service_type == IRCOMM_3_WIRE_RAW) | 112 | if (self->service_type == IRCOMM_3_WIRE_RAW) |
118 | return 0; | 113 | return 0; |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 6b9d5a0e42f9..95a3a7a336ba 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -52,6 +52,8 @@ | |||
52 | #include <net/irda/ircomm_tty_attach.h> | 52 | #include <net/irda/ircomm_tty_attach.h> |
53 | #include <net/irda/ircomm_tty.h> | 53 | #include <net/irda/ircomm_tty.h> |
54 | 54 | ||
55 | static int ircomm_tty_install(struct tty_driver *driver, | ||
56 | struct tty_struct *tty); | ||
55 | static int ircomm_tty_open(struct tty_struct *tty, struct file *filp); | 57 | static int ircomm_tty_open(struct tty_struct *tty, struct file *filp); |
56 | static void ircomm_tty_close(struct tty_struct * tty, struct file *filp); | 58 | static void ircomm_tty_close(struct tty_struct * tty, struct file *filp); |
57 | static int ircomm_tty_write(struct tty_struct * tty, | 59 | static int ircomm_tty_write(struct tty_struct * tty, |
@@ -82,6 +84,7 @@ static struct tty_driver *driver; | |||
82 | static hashbin_t *ircomm_tty = NULL; | 84 | static hashbin_t *ircomm_tty = NULL; |
83 | 85 | ||
84 | static const struct tty_operations ops = { | 86 | static const struct tty_operations ops = { |
87 | .install = ircomm_tty_install, | ||
85 | .open = ircomm_tty_open, | 88 | .open = ircomm_tty_open, |
86 | .close = ircomm_tty_close, | 89 | .close = ircomm_tty_close, |
87 | .write = ircomm_tty_write, | 90 | .write = ircomm_tty_write, |
@@ -104,6 +107,35 @@ static const struct tty_operations ops = { | |||
104 | #endif /* CONFIG_PROC_FS */ | 107 | #endif /* CONFIG_PROC_FS */ |
105 | }; | 108 | }; |
106 | 109 | ||
110 | static void ircomm_port_raise_dtr_rts(struct tty_port *port, int raise) | ||
111 | { | ||
112 | struct ircomm_tty_cb *self = container_of(port, struct ircomm_tty_cb, | ||
113 | port); | ||
114 | /* | ||
115 | * Here, we use to lock those two guys, but as ircomm_param_request() | ||
116 | * does it itself, I don't see the point (and I see the deadlock). | ||
117 | * Jean II | ||
118 | */ | ||
119 | if (raise) | ||
120 | self->settings.dte |= IRCOMM_RTS | IRCOMM_DTR; | ||
121 | else | ||
122 | self->settings.dte &= ~(IRCOMM_RTS | IRCOMM_DTR); | ||
123 | |||
124 | ircomm_param_request(self, IRCOMM_DTE, TRUE); | ||
125 | } | ||
126 | |||
127 | static int ircomm_port_carrier_raised(struct tty_port *port) | ||
128 | { | ||
129 | struct ircomm_tty_cb *self = container_of(port, struct ircomm_tty_cb, | ||
130 | port); | ||
131 | return self->settings.dce & IRCOMM_CD; | ||
132 | } | ||
133 | |||
134 | static const struct tty_port_operations ircomm_port_ops = { | ||
135 | .dtr_rts = ircomm_port_raise_dtr_rts, | ||
136 | .carrier_raised = ircomm_port_carrier_raised, | ||
137 | }; | ||
138 | |||
107 | /* | 139 | /* |
108 | * Function ircomm_tty_init() | 140 | * Function ircomm_tty_init() |
109 | * | 141 | * |
@@ -194,7 +226,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) | |||
194 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 226 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
195 | 227 | ||
196 | /* Check if already open */ | 228 | /* Check if already open */ |
197 | if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { | 229 | if (test_and_set_bit(ASYNCB_INITIALIZED, &self->port.flags)) { |
198 | IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); | 230 | IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); |
199 | return 0; | 231 | return 0; |
200 | } | 232 | } |
@@ -231,7 +263,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) | |||
231 | 263 | ||
232 | return 0; | 264 | return 0; |
233 | err: | 265 | err: |
234 | clear_bit(ASYNC_B_INITIALIZED, &self->flags); | 266 | clear_bit(ASYNCB_INITIALIZED, &self->port.flags); |
235 | return ret; | 267 | return ret; |
236 | } | 268 | } |
237 | 269 | ||
@@ -242,72 +274,62 @@ err: | |||
242 | * | 274 | * |
243 | */ | 275 | */ |
244 | static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | 276 | static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, |
245 | struct file *filp) | 277 | struct tty_struct *tty, struct file *filp) |
246 | { | 278 | { |
279 | struct tty_port *port = &self->port; | ||
247 | DECLARE_WAITQUEUE(wait, current); | 280 | DECLARE_WAITQUEUE(wait, current); |
248 | int retval; | 281 | int retval; |
249 | int do_clocal = 0, extra_count = 0; | 282 | int do_clocal = 0, extra_count = 0; |
250 | unsigned long flags; | 283 | unsigned long flags; |
251 | struct tty_struct *tty; | ||
252 | 284 | ||
253 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 285 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
254 | 286 | ||
255 | tty = self->tty; | ||
256 | |||
257 | /* | 287 | /* |
258 | * If non-blocking mode is set, or the port is not enabled, | 288 | * If non-blocking mode is set, or the port is not enabled, |
259 | * then make the check up front and then exit. | 289 | * then make the check up front and then exit. |
260 | */ | 290 | */ |
261 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 291 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ |
262 | /* nonblock mode is set or port is not enabled */ | 292 | /* nonblock mode is set or port is not enabled */ |
263 | self->flags |= ASYNC_NORMAL_ACTIVE; | 293 | port->flags |= ASYNC_NORMAL_ACTIVE; |
264 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); | 294 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); |
265 | return 0; | 295 | return 0; |
266 | } | 296 | } |
267 | 297 | ||
268 | if (tty->termios->c_cflag & CLOCAL) { | 298 | if (tty->termios.c_cflag & CLOCAL) { |
269 | IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); | 299 | IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); |
270 | do_clocal = 1; | 300 | do_clocal = 1; |
271 | } | 301 | } |
272 | 302 | ||
273 | /* Wait for carrier detect and the line to become | 303 | /* Wait for carrier detect and the line to become |
274 | * free (i.e., not in use by the callout). While we are in | 304 | * free (i.e., not in use by the callout). While we are in |
275 | * this loop, self->open_count is dropped by one, so that | 305 | * this loop, port->count is dropped by one, so that |
276 | * mgsl_close() knows when to free things. We restore it upon | 306 | * mgsl_close() knows when to free things. We restore it upon |
277 | * exit, either normal or abnormal. | 307 | * exit, either normal or abnormal. |
278 | */ | 308 | */ |
279 | 309 | ||
280 | retval = 0; | 310 | retval = 0; |
281 | add_wait_queue(&self->open_wait, &wait); | 311 | add_wait_queue(&port->open_wait, &wait); |
282 | 312 | ||
283 | IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", | 313 | IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", |
284 | __FILE__,__LINE__, tty->driver->name, self->open_count ); | 314 | __FILE__, __LINE__, tty->driver->name, port->count); |
285 | 315 | ||
286 | /* As far as I can see, we protect open_count - Jean II */ | 316 | spin_lock_irqsave(&port->lock, flags); |
287 | spin_lock_irqsave(&self->spinlock, flags); | ||
288 | if (!tty_hung_up_p(filp)) { | 317 | if (!tty_hung_up_p(filp)) { |
289 | extra_count = 1; | 318 | extra_count = 1; |
290 | self->open_count--; | 319 | port->count--; |
291 | } | 320 | } |
292 | spin_unlock_irqrestore(&self->spinlock, flags); | 321 | spin_unlock_irqrestore(&port->lock, flags); |
293 | self->blocked_open++; | 322 | port->blocked_open++; |
294 | 323 | ||
295 | while (1) { | 324 | while (1) { |
296 | if (tty->termios->c_cflag & CBAUD) { | 325 | if (tty->termios.c_cflag & CBAUD) |
297 | /* Here, we use to lock those two guys, but | 326 | tty_port_raise_dtr_rts(port); |
298 | * as ircomm_param_request() does it itself, | ||
299 | * I don't see the point (and I see the deadlock). | ||
300 | * Jean II */ | ||
301 | self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR; | ||
302 | |||
303 | ircomm_param_request(self, IRCOMM_DTE, TRUE); | ||
304 | } | ||
305 | 327 | ||
306 | current->state = TASK_INTERRUPTIBLE; | 328 | current->state = TASK_INTERRUPTIBLE; |
307 | 329 | ||
308 | if (tty_hung_up_p(filp) || | 330 | if (tty_hung_up_p(filp) || |
309 | !test_bit(ASYNC_B_INITIALIZED, &self->flags)) { | 331 | !test_bit(ASYNCB_INITIALIZED, &port->flags)) { |
310 | retval = (self->flags & ASYNC_HUP_NOTIFY) ? | 332 | retval = (port->flags & ASYNC_HUP_NOTIFY) ? |
311 | -EAGAIN : -ERESTARTSYS; | 333 | -EAGAIN : -ERESTARTSYS; |
312 | break; | 334 | break; |
313 | } | 335 | } |
@@ -317,8 +339,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
317 | * specified, we cannot return before the IrCOMM link is | 339 | * specified, we cannot return before the IrCOMM link is |
318 | * ready | 340 | * ready |
319 | */ | 341 | */ |
320 | if (!test_bit(ASYNC_B_CLOSING, &self->flags) && | 342 | if (!test_bit(ASYNCB_CLOSING, &port->flags) && |
321 | (do_clocal || (self->settings.dce & IRCOMM_CD)) && | 343 | (do_clocal || tty_port_carrier_raised(port)) && |
322 | self->state == IRCOMM_TTY_READY) | 344 | self->state == IRCOMM_TTY_READY) |
323 | { | 345 | { |
324 | break; | 346 | break; |
@@ -330,46 +352,36 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
330 | } | 352 | } |
331 | 353 | ||
332 | IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", | 354 | IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", |
333 | __FILE__,__LINE__, tty->driver->name, self->open_count ); | 355 | __FILE__, __LINE__, tty->driver->name, port->count); |
334 | 356 | ||
335 | schedule(); | 357 | schedule(); |
336 | } | 358 | } |
337 | 359 | ||
338 | __set_current_state(TASK_RUNNING); | 360 | __set_current_state(TASK_RUNNING); |
339 | remove_wait_queue(&self->open_wait, &wait); | 361 | remove_wait_queue(&port->open_wait, &wait); |
340 | 362 | ||
341 | if (extra_count) { | 363 | if (extra_count) { |
342 | /* ++ is not atomic, so this should be protected - Jean II */ | 364 | /* ++ is not atomic, so this should be protected - Jean II */ |
343 | spin_lock_irqsave(&self->spinlock, flags); | 365 | spin_lock_irqsave(&port->lock, flags); |
344 | self->open_count++; | 366 | port->count++; |
345 | spin_unlock_irqrestore(&self->spinlock, flags); | 367 | spin_unlock_irqrestore(&port->lock, flags); |
346 | } | 368 | } |
347 | self->blocked_open--; | 369 | port->blocked_open--; |
348 | 370 | ||
349 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", | 371 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", |
350 | __FILE__,__LINE__, tty->driver->name, self->open_count); | 372 | __FILE__, __LINE__, tty->driver->name, port->count); |
351 | 373 | ||
352 | if (!retval) | 374 | if (!retval) |
353 | self->flags |= ASYNC_NORMAL_ACTIVE; | 375 | port->flags |= ASYNC_NORMAL_ACTIVE; |
354 | 376 | ||
355 | return retval; | 377 | return retval; |
356 | } | 378 | } |
357 | 379 | ||
358 | /* | 380 | |
359 | * Function ircomm_tty_open (tty, filp) | 381 | static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) |
360 | * | ||
361 | * This routine is called when a particular tty device is opened. This | ||
362 | * routine is mandatory; if this routine is not filled in, the attempted | ||
363 | * open will fail with ENODEV. | ||
364 | */ | ||
365 | static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | ||
366 | { | 382 | { |
367 | struct ircomm_tty_cb *self; | 383 | struct ircomm_tty_cb *self; |
368 | unsigned int line = tty->index; | 384 | unsigned int line = tty->index; |
369 | unsigned long flags; | ||
370 | int ret; | ||
371 | |||
372 | IRDA_DEBUG(2, "%s()\n", __func__ ); | ||
373 | 385 | ||
374 | /* Check if instance already exists */ | 386 | /* Check if instance already exists */ |
375 | self = hashbin_lock_find(ircomm_tty, line, NULL); | 387 | self = hashbin_lock_find(ircomm_tty, line, NULL); |
@@ -381,6 +393,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
381 | return -ENOMEM; | 393 | return -ENOMEM; |
382 | } | 394 | } |
383 | 395 | ||
396 | tty_port_init(&self->port); | ||
397 | self->port.ops = &ircomm_port_ops; | ||
384 | self->magic = IRCOMM_TTY_MAGIC; | 398 | self->magic = IRCOMM_TTY_MAGIC; |
385 | self->flow = FLOW_STOP; | 399 | self->flow = FLOW_STOP; |
386 | 400 | ||
@@ -388,13 +402,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
388 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint); | 402 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint); |
389 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; | 403 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; |
390 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; | 404 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; |
391 | self->close_delay = 5*HZ/10; | ||
392 | self->closing_wait = 30*HZ; | ||
393 | 405 | ||
394 | /* Init some important stuff */ | 406 | /* Init some important stuff */ |
395 | init_timer(&self->watchdog_timer); | 407 | init_timer(&self->watchdog_timer); |
396 | init_waitqueue_head(&self->open_wait); | ||
397 | init_waitqueue_head(&self->close_wait); | ||
398 | spin_lock_init(&self->spinlock); | 408 | spin_lock_init(&self->spinlock); |
399 | 409 | ||
400 | /* | 410 | /* |
@@ -404,31 +414,48 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
404 | * | 414 | * |
405 | * Note this is completely usafe and doesn't work properly | 415 | * Note this is completely usafe and doesn't work properly |
406 | */ | 416 | */ |
407 | tty->termios->c_iflag = 0; | 417 | tty->termios.c_iflag = 0; |
408 | tty->termios->c_oflag = 0; | 418 | tty->termios.c_oflag = 0; |
409 | 419 | ||
410 | /* Insert into hash */ | 420 | /* Insert into hash */ |
411 | hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); | 421 | hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL); |
412 | } | 422 | } |
413 | /* ++ is not atomic, so this should be protected - Jean II */ | ||
414 | spin_lock_irqsave(&self->spinlock, flags); | ||
415 | self->open_count++; | ||
416 | 423 | ||
417 | tty->driver_data = self; | 424 | return tty_port_install(&self->port, driver, tty); |
418 | self->tty = tty; | 425 | } |
419 | spin_unlock_irqrestore(&self->spinlock, flags); | 426 | |
427 | /* | ||
428 | * Function ircomm_tty_open (tty, filp) | ||
429 | * | ||
430 | * This routine is called when a particular tty device is opened. This | ||
431 | * routine is mandatory; if this routine is not filled in, the attempted | ||
432 | * open will fail with ENODEV. | ||
433 | */ | ||
434 | static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | ||
435 | { | ||
436 | struct ircomm_tty_cb *self = tty->driver_data; | ||
437 | unsigned long flags; | ||
438 | int ret; | ||
439 | |||
440 | IRDA_DEBUG(2, "%s()\n", __func__ ); | ||
441 | |||
442 | /* ++ is not atomic, so this should be protected - Jean II */ | ||
443 | spin_lock_irqsave(&self->port.lock, flags); | ||
444 | self->port.count++; | ||
445 | spin_unlock_irqrestore(&self->port.lock, flags); | ||
446 | tty_port_tty_set(&self->port, tty); | ||
420 | 447 | ||
421 | IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, | 448 | IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, |
422 | self->line, self->open_count); | 449 | self->line, self->port.count); |
423 | 450 | ||
424 | /* Not really used by us, but lets do it anyway */ | 451 | /* Not really used by us, but lets do it anyway */ |
425 | self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; | 452 | tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; |
426 | 453 | ||
427 | /* | 454 | /* |
428 | * If the port is the middle of closing, bail out now | 455 | * If the port is the middle of closing, bail out now |
429 | */ | 456 | */ |
430 | if (tty_hung_up_p(filp) || | 457 | if (tty_hung_up_p(filp) || |
431 | test_bit(ASYNC_B_CLOSING, &self->flags)) { | 458 | test_bit(ASYNCB_CLOSING, &self->port.flags)) { |
432 | 459 | ||
433 | /* Hm, why are we blocking on ASYNC_CLOSING if we | 460 | /* Hm, why are we blocking on ASYNC_CLOSING if we |
434 | * do return -EAGAIN/-ERESTARTSYS below anyway? | 461 | * do return -EAGAIN/-ERESTARTSYS below anyway? |
@@ -438,14 +465,15 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
438 | * probably better sleep uninterruptible? | 465 | * probably better sleep uninterruptible? |
439 | */ | 466 | */ |
440 | 467 | ||
441 | if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { | 468 | if (wait_event_interruptible(self->port.close_wait, |
469 | !test_bit(ASYNCB_CLOSING, &self->port.flags))) { | ||
442 | IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", | 470 | IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", |
443 | __func__); | 471 | __func__); |
444 | return -ERESTARTSYS; | 472 | return -ERESTARTSYS; |
445 | } | 473 | } |
446 | 474 | ||
447 | #ifdef SERIAL_DO_RESTART | 475 | #ifdef SERIAL_DO_RESTART |
448 | return (self->flags & ASYNC_HUP_NOTIFY) ? | 476 | return (self->port.flags & ASYNC_HUP_NOTIFY) ? |
449 | -EAGAIN : -ERESTARTSYS; | 477 | -EAGAIN : -ERESTARTSYS; |
450 | #else | 478 | #else |
451 | return -EAGAIN; | 479 | return -EAGAIN; |
@@ -453,7 +481,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
453 | } | 481 | } |
454 | 482 | ||
455 | /* Check if this is a "normal" ircomm device, or an irlpt device */ | 483 | /* Check if this is a "normal" ircomm device, or an irlpt device */ |
456 | if (line < 0x10) { | 484 | if (self->line < 0x10) { |
457 | self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE; | 485 | self->service_type = IRCOMM_3_WIRE | IRCOMM_9_WIRE; |
458 | self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ | 486 | self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ |
459 | /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ | 487 | /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ |
@@ -469,7 +497,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
469 | if (ret) | 497 | if (ret) |
470 | return ret; | 498 | return ret; |
471 | 499 | ||
472 | ret = ircomm_tty_block_til_ready(self, filp); | 500 | ret = ircomm_tty_block_til_ready(self, tty, filp); |
473 | if (ret) { | 501 | if (ret) { |
474 | IRDA_DEBUG(2, | 502 | IRDA_DEBUG(2, |
475 | "%s(), returning after block_til_ready with %d\n", __func__ , | 503 | "%s(), returning after block_til_ready with %d\n", __func__ , |
@@ -489,81 +517,22 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
489 | static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) | 517 | static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) |
490 | { | 518 | { |
491 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 519 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
492 | unsigned long flags; | 520 | struct tty_port *port = &self->port; |
493 | 521 | ||
494 | IRDA_DEBUG(0, "%s()\n", __func__ ); | 522 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
495 | 523 | ||
496 | IRDA_ASSERT(self != NULL, return;); | 524 | IRDA_ASSERT(self != NULL, return;); |
497 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 525 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
498 | 526 | ||
499 | spin_lock_irqsave(&self->spinlock, flags); | 527 | if (tty_port_close_start(port, tty, filp) == 0) |
500 | |||
501 | if (tty_hung_up_p(filp)) { | ||
502 | spin_unlock_irqrestore(&self->spinlock, flags); | ||
503 | |||
504 | IRDA_DEBUG(0, "%s(), returning 1\n", __func__ ); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | if ((tty->count == 1) && (self->open_count != 1)) { | ||
509 | /* | ||
510 | * Uh, oh. tty->count is 1, which means that the tty | ||
511 | * structure will be freed. state->count should always | ||
512 | * be one in these conditions. If it's greater than | ||
513 | * one, we've got real problems, since it means the | ||
514 | * serial port won't be shutdown. | ||
515 | */ | ||
516 | IRDA_DEBUG(0, "%s(), bad serial port count; " | ||
517 | "tty->count is 1, state->count is %d\n", __func__ , | ||
518 | self->open_count); | ||
519 | self->open_count = 1; | ||
520 | } | ||
521 | |||
522 | if (--self->open_count < 0) { | ||
523 | IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", | ||
524 | __func__, self->line, self->open_count); | ||
525 | self->open_count = 0; | ||
526 | } | ||
527 | if (self->open_count) { | ||
528 | spin_unlock_irqrestore(&self->spinlock, flags); | ||
529 | |||
530 | IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); | ||
531 | return; | 528 | return; |
532 | } | ||
533 | |||
534 | /* Hum... Should be test_and_set_bit ??? - Jean II */ | ||
535 | set_bit(ASYNC_B_CLOSING, &self->flags); | ||
536 | |||
537 | /* We need to unlock here (we were unlocking at the end of this | ||
538 | * function), because tty_wait_until_sent() may schedule. | ||
539 | * I don't know if the rest should be protected somehow, | ||
540 | * so someone should check. - Jean II */ | ||
541 | spin_unlock_irqrestore(&self->spinlock, flags); | ||
542 | |||
543 | /* | ||
544 | * Now we wait for the transmit buffer to clear; and we notify | ||
545 | * the line discipline to only process XON/XOFF characters. | ||
546 | */ | ||
547 | tty->closing = 1; | ||
548 | if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
549 | tty_wait_until_sent_from_close(tty, self->closing_wait); | ||
550 | 529 | ||
551 | ircomm_tty_shutdown(self); | 530 | ircomm_tty_shutdown(self); |
552 | 531 | ||
553 | tty_driver_flush_buffer(tty); | 532 | tty_driver_flush_buffer(tty); |
554 | tty_ldisc_flush(tty); | ||
555 | |||
556 | tty->closing = 0; | ||
557 | self->tty = NULL; | ||
558 | 533 | ||
559 | if (self->blocked_open) { | 534 | tty_port_close_end(port, tty); |
560 | if (self->close_delay) | 535 | tty_port_tty_set(port, NULL); |
561 | schedule_timeout_interruptible(self->close_delay); | ||
562 | wake_up_interruptible(&self->open_wait); | ||
563 | } | ||
564 | |||
565 | self->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
566 | wake_up_interruptible(&self->close_wait); | ||
567 | } | 536 | } |
568 | 537 | ||
569 | /* | 538 | /* |
@@ -606,7 +575,7 @@ static void ircomm_tty_do_softint(struct work_struct *work) | |||
606 | if (!self || self->magic != IRCOMM_TTY_MAGIC) | 575 | if (!self || self->magic != IRCOMM_TTY_MAGIC) |
607 | return; | 576 | return; |
608 | 577 | ||
609 | tty = self->tty; | 578 | tty = tty_port_tty_get(&self->port); |
610 | if (!tty) | 579 | if (!tty) |
611 | return; | 580 | return; |
612 | 581 | ||
@@ -627,7 +596,7 @@ static void ircomm_tty_do_softint(struct work_struct *work) | |||
627 | } | 596 | } |
628 | 597 | ||
629 | if (tty->hw_stopped) | 598 | if (tty->hw_stopped) |
630 | return; | 599 | goto put; |
631 | 600 | ||
632 | /* Unlink transmit buffer */ | 601 | /* Unlink transmit buffer */ |
633 | spin_lock_irqsave(&self->spinlock, flags); | 602 | spin_lock_irqsave(&self->spinlock, flags); |
@@ -646,6 +615,8 @@ static void ircomm_tty_do_softint(struct work_struct *work) | |||
646 | 615 | ||
647 | /* Check if user (still) wants to be waken up */ | 616 | /* Check if user (still) wants to be waken up */ |
648 | tty_wakeup(tty); | 617 | tty_wakeup(tty); |
618 | put: | ||
619 | tty_kref_put(tty); | ||
649 | } | 620 | } |
650 | 621 | ||
651 | /* | 622 | /* |
@@ -880,7 +851,7 @@ static void ircomm_tty_throttle(struct tty_struct *tty) | |||
880 | ircomm_tty_send_xchar(tty, STOP_CHAR(tty)); | 851 | ircomm_tty_send_xchar(tty, STOP_CHAR(tty)); |
881 | 852 | ||
882 | /* Hardware flow control? */ | 853 | /* Hardware flow control? */ |
883 | if (tty->termios->c_cflag & CRTSCTS) { | 854 | if (tty->termios.c_cflag & CRTSCTS) { |
884 | self->settings.dte &= ~IRCOMM_RTS; | 855 | self->settings.dte &= ~IRCOMM_RTS; |
885 | self->settings.dte |= IRCOMM_DELTA_RTS; | 856 | self->settings.dte |= IRCOMM_DELTA_RTS; |
886 | 857 | ||
@@ -912,7 +883,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) | |||
912 | } | 883 | } |
913 | 884 | ||
914 | /* Using hardware flow control? */ | 885 | /* Using hardware flow control? */ |
915 | if (tty->termios->c_cflag & CRTSCTS) { | 886 | if (tty->termios.c_cflag & CRTSCTS) { |
916 | self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); | 887 | self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); |
917 | 888 | ||
918 | ircomm_param_request(self, IRCOMM_DTE, TRUE); | 889 | ircomm_param_request(self, IRCOMM_DTE, TRUE); |
@@ -955,7 +926,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) | |||
955 | 926 | ||
956 | IRDA_DEBUG(0, "%s()\n", __func__ ); | 927 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
957 | 928 | ||
958 | if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) | 929 | if (!test_and_clear_bit(ASYNCB_INITIALIZED, &self->port.flags)) |
959 | return; | 930 | return; |
960 | 931 | ||
961 | ircomm_tty_detach_cable(self); | 932 | ircomm_tty_detach_cable(self); |
@@ -994,6 +965,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) | |||
994 | static void ircomm_tty_hangup(struct tty_struct *tty) | 965 | static void ircomm_tty_hangup(struct tty_struct *tty) |
995 | { | 966 | { |
996 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 967 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
968 | struct tty_port *port = &self->port; | ||
997 | unsigned long flags; | 969 | unsigned long flags; |
998 | 970 | ||
999 | IRDA_DEBUG(0, "%s()\n", __func__ ); | 971 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
@@ -1004,14 +976,17 @@ static void ircomm_tty_hangup(struct tty_struct *tty) | |||
1004 | /* ircomm_tty_flush_buffer(tty); */ | 976 | /* ircomm_tty_flush_buffer(tty); */ |
1005 | ircomm_tty_shutdown(self); | 977 | ircomm_tty_shutdown(self); |
1006 | 978 | ||
1007 | /* I guess we need to lock here - Jean II */ | 979 | spin_lock_irqsave(&port->lock, flags); |
1008 | spin_lock_irqsave(&self->spinlock, flags); | 980 | port->flags &= ~ASYNC_NORMAL_ACTIVE; |
1009 | self->flags &= ~ASYNC_NORMAL_ACTIVE; | 981 | if (port->tty) { |
1010 | self->tty = NULL; | 982 | set_bit(TTY_IO_ERROR, &port->tty->flags); |
1011 | self->open_count = 0; | 983 | tty_kref_put(port->tty); |
1012 | spin_unlock_irqrestore(&self->spinlock, flags); | 984 | } |
985 | port->tty = NULL; | ||
986 | port->count = 0; | ||
987 | spin_unlock_irqrestore(&port->lock, flags); | ||
1013 | 988 | ||
1014 | wake_up_interruptible(&self->open_wait); | 989 | wake_up_interruptible(&port->open_wait); |
1015 | } | 990 | } |
1016 | 991 | ||
1017 | /* | 992 | /* |
@@ -1071,20 +1046,20 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1071 | IRDA_ASSERT(self != NULL, return;); | 1046 | IRDA_ASSERT(self != NULL, return;); |
1072 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 1047 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
1073 | 1048 | ||
1074 | tty = self->tty; | 1049 | tty = tty_port_tty_get(&self->port); |
1075 | 1050 | ||
1076 | status = self->settings.dce; | 1051 | status = self->settings.dce; |
1077 | 1052 | ||
1078 | if (status & IRCOMM_DCE_DELTA_ANY) { | 1053 | if (status & IRCOMM_DCE_DELTA_ANY) { |
1079 | /*wake_up_interruptible(&self->delta_msr_wait);*/ | 1054 | /*wake_up_interruptible(&self->delta_msr_wait);*/ |
1080 | } | 1055 | } |
1081 | if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { | 1056 | if ((self->port.flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { |
1082 | IRDA_DEBUG(2, | 1057 | IRDA_DEBUG(2, |
1083 | "%s(), ircomm%d CD now %s...\n", __func__ , self->line, | 1058 | "%s(), ircomm%d CD now %s...\n", __func__ , self->line, |
1084 | (status & IRCOMM_CD) ? "on" : "off"); | 1059 | (status & IRCOMM_CD) ? "on" : "off"); |
1085 | 1060 | ||
1086 | if (status & IRCOMM_CD) { | 1061 | if (status & IRCOMM_CD) { |
1087 | wake_up_interruptible(&self->open_wait); | 1062 | wake_up_interruptible(&self->port.open_wait); |
1088 | } else { | 1063 | } else { |
1089 | IRDA_DEBUG(2, | 1064 | IRDA_DEBUG(2, |
1090 | "%s(), Doing serial hangup..\n", __func__ ); | 1065 | "%s(), Doing serial hangup..\n", __func__ ); |
@@ -1092,10 +1067,10 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1092 | tty_hangup(tty); | 1067 | tty_hangup(tty); |
1093 | 1068 | ||
1094 | /* Hangup will remote the tty, so better break out */ | 1069 | /* Hangup will remote the tty, so better break out */ |
1095 | return; | 1070 | goto put; |
1096 | } | 1071 | } |
1097 | } | 1072 | } |
1098 | if (self->flags & ASYNC_CTS_FLOW) { | 1073 | if (tty && tty_port_cts_enabled(&self->port)) { |
1099 | if (tty->hw_stopped) { | 1074 | if (tty->hw_stopped) { |
1100 | if (status & IRCOMM_CTS) { | 1075 | if (status & IRCOMM_CTS) { |
1101 | IRDA_DEBUG(2, | 1076 | IRDA_DEBUG(2, |
@@ -1103,10 +1078,10 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1103 | tty->hw_stopped = 0; | 1078 | tty->hw_stopped = 0; |
1104 | 1079 | ||
1105 | /* Wake up processes blocked on open */ | 1080 | /* Wake up processes blocked on open */ |
1106 | wake_up_interruptible(&self->open_wait); | 1081 | wake_up_interruptible(&self->port.open_wait); |
1107 | 1082 | ||
1108 | schedule_work(&self->tqueue); | 1083 | schedule_work(&self->tqueue); |
1109 | return; | 1084 | goto put; |
1110 | } | 1085 | } |
1111 | } else { | 1086 | } else { |
1112 | if (!(status & IRCOMM_CTS)) { | 1087 | if (!(status & IRCOMM_CTS)) { |
@@ -1116,6 +1091,8 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1116 | } | 1091 | } |
1117 | } | 1092 | } |
1118 | } | 1093 | } |
1094 | put: | ||
1095 | tty_kref_put(tty); | ||
1119 | } | 1096 | } |
1120 | 1097 | ||
1121 | /* | 1098 | /* |
@@ -1128,6 +1105,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1128 | struct sk_buff *skb) | 1105 | struct sk_buff *skb) |
1129 | { | 1106 | { |
1130 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 1107 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
1108 | struct tty_struct *tty; | ||
1131 | 1109 | ||
1132 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 1110 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
1133 | 1111 | ||
@@ -1135,7 +1113,8 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1135 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 1113 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
1136 | IRDA_ASSERT(skb != NULL, return -1;); | 1114 | IRDA_ASSERT(skb != NULL, return -1;); |
1137 | 1115 | ||
1138 | if (!self->tty) { | 1116 | tty = tty_port_tty_get(&self->port); |
1117 | if (!tty) { | ||
1139 | IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); | 1118 | IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); |
1140 | return 0; | 1119 | return 0; |
1141 | } | 1120 | } |
@@ -1146,7 +1125,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1146 | * Devices like WinCE can do this, and since they don't send any | 1125 | * Devices like WinCE can do this, and since they don't send any |
1147 | * params, we can just as well declare the hardware for running. | 1126 | * params, we can just as well declare the hardware for running. |
1148 | */ | 1127 | */ |
1149 | if (self->tty->hw_stopped && (self->flow == FLOW_START)) { | 1128 | if (tty->hw_stopped && (self->flow == FLOW_START)) { |
1150 | IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); | 1129 | IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); |
1151 | ircomm_param_request(self, IRCOMM_POLL, TRUE); | 1130 | ircomm_param_request(self, IRCOMM_POLL, TRUE); |
1152 | 1131 | ||
@@ -1159,8 +1138,9 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1159 | * Use flip buffer functions since the code may be called from interrupt | 1138 | * Use flip buffer functions since the code may be called from interrupt |
1160 | * context | 1139 | * context |
1161 | */ | 1140 | */ |
1162 | tty_insert_flip_string(self->tty, skb->data, skb->len); | 1141 | tty_insert_flip_string(tty, skb->data, skb->len); |
1163 | tty_flip_buffer_push(self->tty); | 1142 | tty_flip_buffer_push(tty); |
1143 | tty_kref_put(tty); | ||
1164 | 1144 | ||
1165 | /* No need to kfree_skb - see ircomm_ttp_data_indication() */ | 1145 | /* No need to kfree_skb - see ircomm_ttp_data_indication() */ |
1166 | 1146 | ||
@@ -1211,12 +1191,13 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, | |||
1211 | IRDA_ASSERT(self != NULL, return;); | 1191 | IRDA_ASSERT(self != NULL, return;); |
1212 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 1192 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
1213 | 1193 | ||
1214 | tty = self->tty; | 1194 | tty = tty_port_tty_get(&self->port); |
1215 | 1195 | ||
1216 | switch (cmd) { | 1196 | switch (cmd) { |
1217 | case FLOW_START: | 1197 | case FLOW_START: |
1218 | IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); | 1198 | IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); |
1219 | tty->hw_stopped = 0; | 1199 | if (tty) |
1200 | tty->hw_stopped = 0; | ||
1220 | 1201 | ||
1221 | /* ircomm_tty_do_softint will take care of the rest */ | 1202 | /* ircomm_tty_do_softint will take care of the rest */ |
1222 | schedule_work(&self->tqueue); | 1203 | schedule_work(&self->tqueue); |
@@ -1224,15 +1205,19 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, | |||
1224 | default: /* If we get here, something is very wrong, better stop */ | 1205 | default: /* If we get here, something is very wrong, better stop */ |
1225 | case FLOW_STOP: | 1206 | case FLOW_STOP: |
1226 | IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); | 1207 | IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); |
1227 | tty->hw_stopped = 1; | 1208 | if (tty) |
1209 | tty->hw_stopped = 1; | ||
1228 | break; | 1210 | break; |
1229 | } | 1211 | } |
1212 | |||
1213 | tty_kref_put(tty); | ||
1230 | self->flow = cmd; | 1214 | self->flow = cmd; |
1231 | } | 1215 | } |
1232 | 1216 | ||
1233 | #ifdef CONFIG_PROC_FS | 1217 | #ifdef CONFIG_PROC_FS |
1234 | static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) | 1218 | static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) |
1235 | { | 1219 | { |
1220 | struct tty_struct *tty; | ||
1236 | char sep; | 1221 | char sep; |
1237 | 1222 | ||
1238 | seq_printf(m, "State: %s\n", ircomm_tty_state[self->state]); | 1223 | seq_printf(m, "State: %s\n", ircomm_tty_state[self->state]); |
@@ -1328,40 +1313,43 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) | |||
1328 | 1313 | ||
1329 | seq_puts(m, "Flags:"); | 1314 | seq_puts(m, "Flags:"); |
1330 | sep = ' '; | 1315 | sep = ' '; |
1331 | if (self->flags & ASYNC_CTS_FLOW) { | 1316 | if (tty_port_cts_enabled(&self->port)) { |
1332 | seq_printf(m, "%cASYNC_CTS_FLOW", sep); | 1317 | seq_printf(m, "%cASYNC_CTS_FLOW", sep); |
1333 | sep = '|'; | 1318 | sep = '|'; |
1334 | } | 1319 | } |
1335 | if (self->flags & ASYNC_CHECK_CD) { | 1320 | if (self->port.flags & ASYNC_CHECK_CD) { |
1336 | seq_printf(m, "%cASYNC_CHECK_CD", sep); | 1321 | seq_printf(m, "%cASYNC_CHECK_CD", sep); |
1337 | sep = '|'; | 1322 | sep = '|'; |
1338 | } | 1323 | } |
1339 | if (self->flags & ASYNC_INITIALIZED) { | 1324 | if (self->port.flags & ASYNC_INITIALIZED) { |
1340 | seq_printf(m, "%cASYNC_INITIALIZED", sep); | 1325 | seq_printf(m, "%cASYNC_INITIALIZED", sep); |
1341 | sep = '|'; | 1326 | sep = '|'; |
1342 | } | 1327 | } |
1343 | if (self->flags & ASYNC_LOW_LATENCY) { | 1328 | if (self->port.flags & ASYNC_LOW_LATENCY) { |
1344 | seq_printf(m, "%cASYNC_LOW_LATENCY", sep); | 1329 | seq_printf(m, "%cASYNC_LOW_LATENCY", sep); |
1345 | sep = '|'; | 1330 | sep = '|'; |
1346 | } | 1331 | } |
1347 | if (self->flags & ASYNC_CLOSING) { | 1332 | if (self->port.flags & ASYNC_CLOSING) { |
1348 | seq_printf(m, "%cASYNC_CLOSING", sep); | 1333 | seq_printf(m, "%cASYNC_CLOSING", sep); |
1349 | sep = '|'; | 1334 | sep = '|'; |
1350 | } | 1335 | } |
1351 | if (self->flags & ASYNC_NORMAL_ACTIVE) { | 1336 | if (self->port.flags & ASYNC_NORMAL_ACTIVE) { |
1352 | seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep); | 1337 | seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep); |
1353 | sep = '|'; | 1338 | sep = '|'; |
1354 | } | 1339 | } |
1355 | seq_putc(m, '\n'); | 1340 | seq_putc(m, '\n'); |
1356 | 1341 | ||
1357 | seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); | 1342 | seq_printf(m, "Role: %s\n", self->client ? "client" : "server"); |
1358 | seq_printf(m, "Open count: %d\n", self->open_count); | 1343 | seq_printf(m, "Open count: %d\n", self->port.count); |
1359 | seq_printf(m, "Max data size: %d\n", self->max_data_size); | 1344 | seq_printf(m, "Max data size: %d\n", self->max_data_size); |
1360 | seq_printf(m, "Max header size: %d\n", self->max_header_size); | 1345 | seq_printf(m, "Max header size: %d\n", self->max_header_size); |
1361 | 1346 | ||
1362 | if (self->tty) | 1347 | tty = tty_port_tty_get(&self->port); |
1348 | if (tty) { | ||
1363 | seq_printf(m, "Hardware: %s\n", | 1349 | seq_printf(m, "Hardware: %s\n", |
1364 | self->tty->hw_stopped ? "Stopped" : "Running"); | 1350 | tty->hw_stopped ? "Stopped" : "Running"); |
1351 | tty_kref_put(tty); | ||
1352 | } | ||
1365 | } | 1353 | } |
1366 | 1354 | ||
1367 | static int ircomm_tty_proc_show(struct seq_file *m, void *v) | 1355 | static int ircomm_tty_proc_show(struct seq_file *m, void *v) |
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c index b65d66e0d817..edab393e0c82 100644 --- a/net/irda/ircomm/ircomm_tty_attach.c +++ b/net/irda/ircomm/ircomm_tty_attach.c | |||
@@ -130,6 +130,8 @@ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, | |||
130 | */ | 130 | */ |
131 | int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) | 131 | int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) |
132 | { | 132 | { |
133 | struct tty_struct *tty; | ||
134 | |||
133 | IRDA_DEBUG(0, "%s()\n", __func__ ); | 135 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
134 | 136 | ||
135 | IRDA_ASSERT(self != NULL, return -1;); | 137 | IRDA_ASSERT(self != NULL, return -1;); |
@@ -142,7 +144,11 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) | |||
142 | } | 144 | } |
143 | 145 | ||
144 | /* Make sure nobody tries to write before the link is up */ | 146 | /* Make sure nobody tries to write before the link is up */ |
145 | self->tty->hw_stopped = 1; | 147 | tty = tty_port_tty_get(&self->port); |
148 | if (tty) { | ||
149 | tty->hw_stopped = 1; | ||
150 | tty_kref_put(tty); | ||
151 | } | ||
146 | 152 | ||
147 | ircomm_tty_ias_register(self); | 153 | ircomm_tty_ias_register(self); |
148 | 154 | ||
@@ -398,23 +404,26 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap, | |||
398 | struct sk_buff *skb) | 404 | struct sk_buff *skb) |
399 | { | 405 | { |
400 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 406 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
407 | struct tty_struct *tty; | ||
401 | 408 | ||
402 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 409 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
403 | 410 | ||
404 | IRDA_ASSERT(self != NULL, return;); | 411 | IRDA_ASSERT(self != NULL, return;); |
405 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 412 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
406 | 413 | ||
407 | if (!self->tty) | 414 | tty = tty_port_tty_get(&self->port); |
415 | if (!tty) | ||
408 | return; | 416 | return; |
409 | 417 | ||
410 | /* This will stop control data transfers */ | 418 | /* This will stop control data transfers */ |
411 | self->flow = FLOW_STOP; | 419 | self->flow = FLOW_STOP; |
412 | 420 | ||
413 | /* Stop data transfers */ | 421 | /* Stop data transfers */ |
414 | self->tty->hw_stopped = 1; | 422 | tty->hw_stopped = 1; |
415 | 423 | ||
416 | ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL, | 424 | ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL, |
417 | NULL); | 425 | NULL); |
426 | tty_kref_put(tty); | ||
418 | } | 427 | } |
419 | 428 | ||
420 | /* | 429 | /* |
@@ -550,12 +559,15 @@ void ircomm_tty_connect_indication(void *instance, void *sap, | |||
550 | */ | 559 | */ |
551 | void ircomm_tty_link_established(struct ircomm_tty_cb *self) | 560 | void ircomm_tty_link_established(struct ircomm_tty_cb *self) |
552 | { | 561 | { |
562 | struct tty_struct *tty; | ||
563 | |||
553 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 564 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
554 | 565 | ||
555 | IRDA_ASSERT(self != NULL, return;); | 566 | IRDA_ASSERT(self != NULL, return;); |
556 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 567 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
557 | 568 | ||
558 | if (!self->tty) | 569 | tty = tty_port_tty_get(&self->port); |
570 | if (!tty) | ||
559 | return; | 571 | return; |
560 | 572 | ||
561 | del_timer(&self->watchdog_timer); | 573 | del_timer(&self->watchdog_timer); |
@@ -566,19 +578,22 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) | |||
566 | * will have to wait for the peer device (DCE) to raise the CTS | 578 | * will have to wait for the peer device (DCE) to raise the CTS |
567 | * line. | 579 | * line. |
568 | */ | 580 | */ |
569 | if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) { | 581 | if (tty_port_cts_enabled(&self->port) && |
582 | ((self->settings.dce & IRCOMM_CTS) == 0)) { | ||
570 | IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); | 583 | IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); |
571 | return; | 584 | goto put; |
572 | } else { | 585 | } else { |
573 | IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); | 586 | IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); |
574 | 587 | ||
575 | self->tty->hw_stopped = 0; | 588 | tty->hw_stopped = 0; |
576 | 589 | ||
577 | /* Wake up processes blocked on open */ | 590 | /* Wake up processes blocked on open */ |
578 | wake_up_interruptible(&self->open_wait); | 591 | wake_up_interruptible(&self->port.open_wait); |
579 | } | 592 | } |
580 | 593 | ||
581 | schedule_work(&self->tqueue); | 594 | schedule_work(&self->tqueue); |
595 | put: | ||
596 | tty_kref_put(tty); | ||
582 | } | 597 | } |
583 | 598 | ||
584 | /* | 599 | /* |
@@ -977,14 +992,17 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, | |||
977 | ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); | 992 | ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH); |
978 | ircomm_tty_start_watchdog_timer(self, 3*HZ); | 993 | ircomm_tty_start_watchdog_timer(self, 3*HZ); |
979 | 994 | ||
980 | if (self->flags & ASYNC_CHECK_CD) { | 995 | if (self->port.flags & ASYNC_CHECK_CD) { |
981 | /* Drop carrier */ | 996 | /* Drop carrier */ |
982 | self->settings.dce = IRCOMM_DELTA_CD; | 997 | self->settings.dce = IRCOMM_DELTA_CD; |
983 | ircomm_tty_check_modem_status(self); | 998 | ircomm_tty_check_modem_status(self); |
984 | } else { | 999 | } else { |
1000 | struct tty_struct *tty = tty_port_tty_get(&self->port); | ||
985 | IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); | 1001 | IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); |
986 | if (self->tty) | 1002 | if (tty) { |
987 | tty_hangup(self->tty); | 1003 | tty_hangup(tty); |
1004 | tty_kref_put(tty); | ||
1005 | } | ||
988 | } | 1006 | } |
989 | break; | 1007 | break; |
990 | default: | 1008 | default: |
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index d0667d68351d..b343f50dc8d7 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c | |||
@@ -52,17 +52,18 @@ | |||
52 | * Change speed of the driver. If the remote device is a DCE, then this | 52 | * Change speed of the driver. If the remote device is a DCE, then this |
53 | * should make it change the speed of its serial port | 53 | * should make it change the speed of its serial port |
54 | */ | 54 | */ |
55 | static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) | 55 | static void ircomm_tty_change_speed(struct ircomm_tty_cb *self, |
56 | struct tty_struct *tty) | ||
56 | { | 57 | { |
57 | unsigned int cflag, cval; | 58 | unsigned int cflag, cval; |
58 | int baud; | 59 | int baud; |
59 | 60 | ||
60 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 61 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
61 | 62 | ||
62 | if (!self->tty || !self->tty->termios || !self->ircomm) | 63 | if (!self->ircomm) |
63 | return; | 64 | return; |
64 | 65 | ||
65 | cflag = self->tty->termios->c_cflag; | 66 | cflag = tty->termios.c_cflag; |
66 | 67 | ||
67 | /* byte size and parity */ | 68 | /* byte size and parity */ |
68 | switch (cflag & CSIZE) { | 69 | switch (cflag & CSIZE) { |
@@ -81,7 +82,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) | |||
81 | cval |= IRCOMM_PARITY_EVEN; | 82 | cval |= IRCOMM_PARITY_EVEN; |
82 | 83 | ||
83 | /* Determine divisor based on baud rate */ | 84 | /* Determine divisor based on baud rate */ |
84 | baud = tty_get_baud_rate(self->tty); | 85 | baud = tty_get_baud_rate(tty); |
85 | if (!baud) | 86 | if (!baud) |
86 | baud = 9600; /* B0 transition handled in rs_set_termios */ | 87 | baud = 9600; /* B0 transition handled in rs_set_termios */ |
87 | 88 | ||
@@ -90,19 +91,19 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) | |||
90 | 91 | ||
91 | /* CTS flow control flag and modem status interrupts */ | 92 | /* CTS flow control flag and modem status interrupts */ |
92 | if (cflag & CRTSCTS) { | 93 | if (cflag & CRTSCTS) { |
93 | self->flags |= ASYNC_CTS_FLOW; | 94 | self->port.flags |= ASYNC_CTS_FLOW; |
94 | self->settings.flow_control |= IRCOMM_RTS_CTS_IN; | 95 | self->settings.flow_control |= IRCOMM_RTS_CTS_IN; |
95 | /* This got me. Bummer. Jean II */ | 96 | /* This got me. Bummer. Jean II */ |
96 | if (self->service_type == IRCOMM_3_WIRE_RAW) | 97 | if (self->service_type == IRCOMM_3_WIRE_RAW) |
97 | IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); | 98 | IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); |
98 | } else { | 99 | } else { |
99 | self->flags &= ~ASYNC_CTS_FLOW; | 100 | self->port.flags &= ~ASYNC_CTS_FLOW; |
100 | self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; | 101 | self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; |
101 | } | 102 | } |
102 | if (cflag & CLOCAL) | 103 | if (cflag & CLOCAL) |
103 | self->flags &= ~ASYNC_CHECK_CD; | 104 | self->port.flags &= ~ASYNC_CHECK_CD; |
104 | else | 105 | else |
105 | self->flags |= ASYNC_CHECK_CD; | 106 | self->port.flags |= ASYNC_CHECK_CD; |
106 | #if 0 | 107 | #if 0 |
107 | /* | 108 | /* |
108 | * Set up parity check flag | 109 | * Set up parity check flag |
@@ -148,18 +149,18 @@ void ircomm_tty_set_termios(struct tty_struct *tty, | |||
148 | struct ktermios *old_termios) | 149 | struct ktermios *old_termios) |
149 | { | 150 | { |
150 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 151 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
151 | unsigned int cflag = tty->termios->c_cflag; | 152 | unsigned int cflag = tty->termios.c_cflag; |
152 | 153 | ||
153 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 154 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
154 | 155 | ||
155 | if ((cflag == old_termios->c_cflag) && | 156 | if ((cflag == old_termios->c_cflag) && |
156 | (RELEVANT_IFLAG(tty->termios->c_iflag) == | 157 | (RELEVANT_IFLAG(tty->termios.c_iflag) == |
157 | RELEVANT_IFLAG(old_termios->c_iflag))) | 158 | RELEVANT_IFLAG(old_termios->c_iflag))) |
158 | { | 159 | { |
159 | return; | 160 | return; |
160 | } | 161 | } |
161 | 162 | ||
162 | ircomm_tty_change_speed(self); | 163 | ircomm_tty_change_speed(self, tty); |
163 | 164 | ||
164 | /* Handle transition to B0 status */ | 165 | /* Handle transition to B0 status */ |
165 | if ((old_termios->c_cflag & CBAUD) && | 166 | if ((old_termios->c_cflag & CBAUD) && |
@@ -172,7 +173,7 @@ void ircomm_tty_set_termios(struct tty_struct *tty, | |||
172 | if (!(old_termios->c_cflag & CBAUD) && | 173 | if (!(old_termios->c_cflag & CBAUD) && |
173 | (cflag & CBAUD)) { | 174 | (cflag & CBAUD)) { |
174 | self->settings.dte |= IRCOMM_DTR; | 175 | self->settings.dte |= IRCOMM_DTR; |
175 | if (!(tty->termios->c_cflag & CRTSCTS) || | 176 | if (!(tty->termios.c_cflag & CRTSCTS) || |
176 | !test_bit(TTY_THROTTLED, &tty->flags)) { | 177 | !test_bit(TTY_THROTTLED, &tty->flags)) { |
177 | self->settings.dte |= IRCOMM_RTS; | 178 | self->settings.dte |= IRCOMM_RTS; |
178 | } | 179 | } |
@@ -181,7 +182,7 @@ void ircomm_tty_set_termios(struct tty_struct *tty, | |||
181 | 182 | ||
182 | /* Handle turning off CRTSCTS */ | 183 | /* Handle turning off CRTSCTS */ |
183 | if ((old_termios->c_cflag & CRTSCTS) && | 184 | if ((old_termios->c_cflag & CRTSCTS) && |
184 | !(tty->termios->c_cflag & CRTSCTS)) | 185 | !(tty->termios.c_cflag & CRTSCTS)) |
185 | { | 186 | { |
186 | tty->hw_stopped = 0; | 187 | tty->hw_stopped = 0; |
187 | ircomm_tty_start(tty); | 188 | ircomm_tty_start(tty); |
@@ -270,10 +271,10 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, | |||
270 | 271 | ||
271 | memset(&info, 0, sizeof(info)); | 272 | memset(&info, 0, sizeof(info)); |
272 | info.line = self->line; | 273 | info.line = self->line; |
273 | info.flags = self->flags; | 274 | info.flags = self->port.flags; |
274 | info.baud_base = self->settings.data_rate; | 275 | info.baud_base = self->settings.data_rate; |
275 | info.close_delay = self->close_delay; | 276 | info.close_delay = self->port.close_delay; |
276 | info.closing_wait = self->closing_wait; | 277 | info.closing_wait = self->port.closing_wait; |
277 | 278 | ||
278 | /* For compatibility */ | 279 | /* For compatibility */ |
279 | info.type = PORT_16550A; | 280 | info.type = PORT_16550A; |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 34e418508a67..0481d4b51476 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -3661,7 +3661,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v) | |||
3661 | atomic_read(&s->sk_refcnt), | 3661 | atomic_read(&s->sk_refcnt), |
3662 | sk_rmem_alloc_get(s), | 3662 | sk_rmem_alloc_get(s), |
3663 | sk_wmem_alloc_get(s), | 3663 | sk_wmem_alloc_get(s), |
3664 | sock_i_uid(s), | 3664 | from_kuid_munged(seq_user_ns(f), sock_i_uid(s)), |
3665 | sock_i_ino(s) | 3665 | sock_i_ino(s) |
3666 | ); | 3666 | ); |
3667 | return 0; | 3667 | return 0; |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 393355d37b47..1a9f3723c13c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | |||
1347 | /* Remove from tunnel list */ | 1347 | /* Remove from tunnel list */ |
1348 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | 1348 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); |
1349 | list_del_rcu(&tunnel->list); | 1349 | list_del_rcu(&tunnel->list); |
1350 | kfree_rcu(tunnel, rcu); | ||
1350 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | 1351 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); |
1351 | synchronize_rcu(); | ||
1352 | 1352 | ||
1353 | atomic_dec(&l2tp_tunnel_count); | 1353 | atomic_dec(&l2tp_tunnel_count); |
1354 | kfree(tunnel); | ||
1355 | } | 1354 | } |
1356 | 1355 | ||
1357 | /* Create a socket for the tunnel, if one isn't set up by | 1356 | /* Create a socket for the tunnel, if one isn't set up by |
@@ -1502,6 +1501,8 @@ out: | |||
1502 | return err; | 1501 | return err; |
1503 | } | 1502 | } |
1504 | 1503 | ||
1504 | static struct lock_class_key l2tp_socket_class; | ||
1505 | |||
1505 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) | 1506 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) |
1506 | { | 1507 | { |
1507 | struct l2tp_tunnel *tunnel = NULL; | 1508 | struct l2tp_tunnel *tunnel = NULL; |
@@ -1606,6 +1607,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1606 | tunnel->old_sk_destruct = sk->sk_destruct; | 1607 | tunnel->old_sk_destruct = sk->sk_destruct; |
1607 | sk->sk_destruct = &l2tp_tunnel_destruct; | 1608 | sk->sk_destruct = &l2tp_tunnel_destruct; |
1608 | tunnel->sock = sk; | 1609 | tunnel->sock = sk; |
1610 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); | ||
1611 | |||
1609 | sk->sk_allocation = GFP_ATOMIC; | 1612 | sk->sk_allocation = GFP_ATOMIC; |
1610 | 1613 | ||
1611 | /* Add tunnel to our list */ | 1614 | /* Add tunnel to our list */ |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a38ec6cdeee1..56d583e083a7 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg { | |||
163 | 163 | ||
164 | struct l2tp_tunnel { | 164 | struct l2tp_tunnel { |
165 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | 165 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ |
166 | struct rcu_head rcu; | ||
166 | rwlock_t hlist_lock; /* protect session_hlist */ | 167 | rwlock_t hlist_lock; /* protect session_hlist */ |
167 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; | 168 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; |
168 | /* hashed list of sessions, | 169 | /* hashed list of sessions, |
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index f9ee74deeac2..3bfb34aaee29 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -153,7 +153,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, | |||
153 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); | 153 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); |
154 | } | 154 | } |
155 | 155 | ||
156 | if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) | 156 | if (!pskb_may_pull(skb, ETH_HLEN)) |
157 | goto error; | 157 | goto error; |
158 | 158 | ||
159 | secpath_reset(skb); | 159 | secpath_reset(skb); |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 35e1e4bde587..927547171bc7 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, | |||
410 | lsa->l2tp_family = AF_INET6; | 410 | lsa->l2tp_family = AF_INET6; |
411 | lsa->l2tp_flowinfo = 0; | 411 | lsa->l2tp_flowinfo = 0; |
412 | lsa->l2tp_scope_id = 0; | 412 | lsa->l2tp_scope_id = 0; |
413 | lsa->l2tp_unused = 0; | ||
413 | if (peer) { | 414 | if (peer) { |
414 | if (!lsk->peer_conn_id) | 415 | if (!lsk->peer_conn_id) |
415 | return -ENOTCONN; | 416 | return -ENOTCONN; |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index d71cd9229a47..6f936358d664 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) | |||
80 | 80 | ||
81 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, | 81 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, |
82 | &l2tp_nl_family, 0, L2TP_CMD_NOOP); | 82 | &l2tp_nl_family, 0, L2TP_CMD_NOOP); |
83 | if (IS_ERR(hdr)) { | 83 | if (!hdr) { |
84 | ret = PTR_ERR(hdr); | 84 | ret = -EMSGSIZE; |
85 | goto err_out; | 85 | goto err_out; |
86 | } | 86 | } |
87 | 87 | ||
@@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | |||
250 | 250 | ||
251 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, | 251 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, |
252 | L2TP_CMD_TUNNEL_GET); | 252 | L2TP_CMD_TUNNEL_GET); |
253 | if (IS_ERR(hdr)) | 253 | if (!hdr) |
254 | return PTR_ERR(hdr); | 254 | return -EMSGSIZE; |
255 | 255 | ||
256 | if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || | 256 | if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || |
257 | nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || | 257 | nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || |
@@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags | |||
617 | sk = tunnel->sock; | 617 | sk = tunnel->sock; |
618 | 618 | ||
619 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); | 619 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); |
620 | if (IS_ERR(hdr)) | 620 | if (!hdr) |
621 | return PTR_ERR(hdr); | 621 | return -EMSGSIZE; |
622 | 622 | ||
623 | if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || | 623 | if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || |
624 | nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || | 624 | nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index f6fe4d400502..c2190005a114 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, | |||
969 | struct sockaddr_llc sllc; | 969 | struct sockaddr_llc sllc; |
970 | struct sock *sk = sock->sk; | 970 | struct sock *sk = sock->sk; |
971 | struct llc_sock *llc = llc_sk(sk); | 971 | struct llc_sock *llc = llc_sk(sk); |
972 | int rc = 0; | 972 | int rc = -EBADF; |
973 | 973 | ||
974 | memset(&sllc, 0, sizeof(sllc)); | 974 | memset(&sllc, 0, sizeof(sllc)); |
975 | lock_sock(sk); | 975 | lock_sock(sk); |
976 | if (sock_flag(sk, SOCK_ZAPPED)) | 976 | if (sock_flag(sk, SOCK_ZAPPED)) |
977 | goto out; | 977 | goto out; |
978 | *uaddrlen = sizeof(sllc); | 978 | *uaddrlen = sizeof(sllc); |
979 | memset(uaddr, 0, *uaddrlen); | ||
980 | if (peer) { | 979 | if (peer) { |
981 | rc = -ENOTCONN; | 980 | rc = -ENOTCONN; |
982 | if (sk->sk_state != TCP_ESTABLISHED) | 981 | if (sk->sk_state != TCP_ESTABLISHED) |
@@ -1206,7 +1205,7 @@ static int __init llc2_init(void) | |||
1206 | rc = llc_proc_init(); | 1205 | rc = llc_proc_init(); |
1207 | if (rc != 0) { | 1206 | if (rc != 0) { |
1208 | printk(llc_proc_err_msg); | 1207 | printk(llc_proc_err_msg); |
1209 | goto out_unregister_llc_proto; | 1208 | goto out_station; |
1210 | } | 1209 | } |
1211 | rc = llc_sysctl_init(); | 1210 | rc = llc_sysctl_init(); |
1212 | if (rc) { | 1211 | if (rc) { |
@@ -1226,7 +1225,8 @@ out_sysctl: | |||
1226 | llc_sysctl_exit(); | 1225 | llc_sysctl_exit(); |
1227 | out_proc: | 1226 | out_proc: |
1228 | llc_proc_exit(); | 1227 | llc_proc_exit(); |
1229 | out_unregister_llc_proto: | 1228 | out_station: |
1229 | llc_station_exit(); | ||
1230 | proto_unregister(&llc_proto); | 1230 | proto_unregister(&llc_proto); |
1231 | goto out; | 1231 | goto out; |
1232 | } | 1232 | } |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index e32cab44ea95..dd3e83328ad5 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap, | |||
42 | void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, | 42 | void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, |
43 | struct sk_buff *skb)) | 43 | struct sk_buff *skb)) |
44 | { | 44 | { |
45 | smp_wmb(); /* ensure initialisation is complete before it's called */ | ||
45 | if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) | 46 | if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) |
46 | llc_type_handlers[type - 1] = handler; | 47 | llc_type_handlers[type - 1] = handler; |
47 | } | 48 | } |
@@ -50,11 +51,19 @@ void llc_remove_pack(int type) | |||
50 | { | 51 | { |
51 | if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) | 52 | if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) |
52 | llc_type_handlers[type - 1] = NULL; | 53 | llc_type_handlers[type - 1] = NULL; |
54 | synchronize_net(); | ||
53 | } | 55 | } |
54 | 56 | ||
55 | void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) | 57 | void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) |
56 | { | 58 | { |
59 | /* Ensure initialisation is complete before it's called */ | ||
60 | if (handler) | ||
61 | smp_wmb(); | ||
62 | |||
57 | llc_station_handler = handler; | 63 | llc_station_handler = handler; |
64 | |||
65 | if (!handler) | ||
66 | synchronize_net(); | ||
58 | } | 67 | } |
59 | 68 | ||
60 | /** | 69 | /** |
@@ -150,6 +159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
150 | int dest; | 159 | int dest; |
151 | int (*rcv)(struct sk_buff *, struct net_device *, | 160 | int (*rcv)(struct sk_buff *, struct net_device *, |
152 | struct packet_type *, struct net_device *); | 161 | struct packet_type *, struct net_device *); |
162 | void (*sta_handler)(struct sk_buff *skb); | ||
163 | void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb); | ||
153 | 164 | ||
154 | if (!net_eq(dev_net(dev), &init_net)) | 165 | if (!net_eq(dev_net(dev), &init_net)) |
155 | goto drop; | 166 | goto drop; |
@@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
182 | */ | 193 | */ |
183 | rcv = rcu_dereference(sap->rcv_func); | 194 | rcv = rcu_dereference(sap->rcv_func); |
184 | dest = llc_pdu_type(skb); | 195 | dest = llc_pdu_type(skb); |
185 | if (unlikely(!dest || !llc_type_handlers[dest - 1])) { | 196 | sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; |
197 | if (unlikely(!sap_handler)) { | ||
186 | if (rcv) | 198 | if (rcv) |
187 | rcv(skb, dev, pt, orig_dev); | 199 | rcv(skb, dev, pt, orig_dev); |
188 | else | 200 | else |
@@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
193 | if (cskb) | 205 | if (cskb) |
194 | rcv(cskb, dev, pt, orig_dev); | 206 | rcv(cskb, dev, pt, orig_dev); |
195 | } | 207 | } |
196 | llc_type_handlers[dest - 1](sap, skb); | 208 | sap_handler(sap, skb); |
197 | } | 209 | } |
198 | llc_sap_put(sap); | 210 | llc_sap_put(sap); |
199 | out: | 211 | out: |
@@ -202,9 +214,10 @@ drop: | |||
202 | kfree_skb(skb); | 214 | kfree_skb(skb); |
203 | goto out; | 215 | goto out; |
204 | handle_station: | 216 | handle_station: |
205 | if (!llc_station_handler) | 217 | sta_handler = ACCESS_ONCE(llc_station_handler); |
218 | if (!sta_handler) | ||
206 | goto drop; | 219 | goto drop; |
207 | llc_station_handler(skb); | 220 | sta_handler(skb); |
208 | goto out; | 221 | goto out; |
209 | } | 222 | } |
210 | 223 | ||
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index a1839c004357..7b4799cfbf8d 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c | |||
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v) | |||
151 | sk_wmem_alloc_get(sk), | 151 | sk_wmem_alloc_get(sk), |
152 | sk_rmem_alloc_get(sk) - llc->copied_seq, | 152 | sk_rmem_alloc_get(sk) - llc->copied_seq, |
153 | sk->sk_state, | 153 | sk->sk_state, |
154 | sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, | 154 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), |
155 | llc->link); | 155 | llc->link); |
156 | out: | 156 | out: |
157 | return 0; | 157 | return 0; |
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 6828e39ec2ec..b2f2bac2c2a2 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c | |||
@@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb) | |||
687 | llc_station_state_process(skb); | 687 | llc_station_state_process(skb); |
688 | } | 688 | } |
689 | 689 | ||
690 | int __init llc_station_init(void) | 690 | void __init llc_station_init(void) |
691 | { | 691 | { |
692 | int rc = -ENOBUFS; | ||
693 | struct sk_buff *skb; | ||
694 | struct llc_station_state_ev *ev; | ||
695 | |||
696 | skb_queue_head_init(&llc_main_station.mac_pdu_q); | 692 | skb_queue_head_init(&llc_main_station.mac_pdu_q); |
697 | skb_queue_head_init(&llc_main_station.ev_q.list); | 693 | skb_queue_head_init(&llc_main_station.ev_q.list); |
698 | spin_lock_init(&llc_main_station.ev_q.lock); | 694 | spin_lock_init(&llc_main_station.ev_q.lock); |
@@ -700,23 +696,12 @@ int __init llc_station_init(void) | |||
700 | (unsigned long)&llc_main_station); | 696 | (unsigned long)&llc_main_station); |
701 | llc_main_station.ack_timer.expires = jiffies + | 697 | llc_main_station.ack_timer.expires = jiffies + |
702 | sysctl_llc_station_ack_timeout; | 698 | sysctl_llc_station_ack_timeout; |
703 | skb = alloc_skb(0, GFP_ATOMIC); | ||
704 | if (!skb) | ||
705 | goto out; | ||
706 | rc = 0; | ||
707 | llc_set_station_handler(llc_station_rcv); | ||
708 | ev = llc_station_ev(skb); | ||
709 | memset(ev, 0, sizeof(*ev)); | ||
710 | llc_main_station.maximum_retry = 1; | 699 | llc_main_station.maximum_retry = 1; |
711 | llc_main_station.state = LLC_STATION_STATE_DOWN; | 700 | llc_main_station.state = LLC_STATION_STATE_UP; |
712 | ev->type = LLC_STATION_EV_TYPE_SIMPLE; | 701 | llc_set_station_handler(llc_station_rcv); |
713 | ev->prim_type = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK; | ||
714 | rc = llc_station_next_state(skb); | ||
715 | out: | ||
716 | return rc; | ||
717 | } | 702 | } |
718 | 703 | ||
719 | void __exit llc_station_exit(void) | 704 | void llc_station_exit(void) |
720 | { | 705 | { |
721 | llc_set_station_handler(NULL); | 706 | llc_set_station_handler(NULL); |
722 | } | 707 | } |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d41974aacf51..a58c0b649ba1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1378,6 +1378,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
1378 | else | 1378 | else |
1379 | memset(next_hop, 0, ETH_ALEN); | 1379 | memset(next_hop, 0, ETH_ALEN); |
1380 | 1380 | ||
1381 | memset(pinfo, 0, sizeof(*pinfo)); | ||
1382 | |||
1381 | pinfo->generation = mesh_paths_generation; | 1383 | pinfo->generation = mesh_paths_generation; |
1382 | 1384 | ||
1383 | pinfo->filled = MPATH_INFO_FRAME_QLEN | | 1385 | pinfo->filled = MPATH_INFO_FRAME_QLEN | |
@@ -1396,7 +1398,6 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
1396 | pinfo->discovery_timeout = | 1398 | pinfo->discovery_timeout = |
1397 | jiffies_to_msecs(mpath->discovery_timeout); | 1399 | jiffies_to_msecs(mpath->discovery_timeout); |
1398 | pinfo->discovery_retries = mpath->discovery_retries; | 1400 | pinfo->discovery_retries = mpath->discovery_retries; |
1399 | pinfo->flags = 0; | ||
1400 | if (mpath->flags & MESH_PATH_ACTIVE) | 1401 | if (mpath->flags & MESH_PATH_ACTIVE) |
1401 | pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; | 1402 | pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; |
1402 | if (mpath->flags & MESH_PATH_RESOLVING) | 1403 | if (mpath->flags & MESH_PATH_RESOLVING) |
@@ -1405,10 +1406,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
1405 | pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; | 1406 | pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; |
1406 | if (mpath->flags & MESH_PATH_FIXED) | 1407 | if (mpath->flags & MESH_PATH_FIXED) |
1407 | pinfo->flags |= NL80211_MPATH_FLAG_FIXED; | 1408 | pinfo->flags |= NL80211_MPATH_FLAG_FIXED; |
1408 | if (mpath->flags & MESH_PATH_RESOLVING) | 1409 | if (mpath->flags & MESH_PATH_RESOLVED) |
1409 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; | 1410 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED; |
1410 | |||
1411 | pinfo->flags = mpath->flags; | ||
1412 | } | 1411 | } |
1413 | 1412 | ||
1414 | static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, | 1413 | static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a4a5acdbaa4d..f76b83341cf9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -3248,6 +3248,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
3248 | goto out_unlock; | 3248 | goto out_unlock; |
3249 | 3249 | ||
3250 | err_clear: | 3250 | err_clear: |
3251 | memset(ifmgd->bssid, 0, ETH_ALEN); | ||
3252 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); | ||
3251 | ifmgd->auth_data = NULL; | 3253 | ifmgd->auth_data = NULL; |
3252 | err_free: | 3254 | err_free: |
3253 | kfree(auth_data); | 3255 | kfree(auth_data); |
@@ -3439,6 +3441,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3439 | err = 0; | 3441 | err = 0; |
3440 | goto out; | 3442 | goto out; |
3441 | err_clear: | 3443 | err_clear: |
3444 | memset(ifmgd->bssid, 0, ETH_ALEN); | ||
3445 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); | ||
3442 | ifmgd->assoc_data = NULL; | 3446 | ifmgd->assoc_data = NULL; |
3443 | err_free: | 3447 | err_free: |
3444 | kfree(assoc_data); | 3448 | kfree(assoc_data); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index acf712ffb5e6..c5e8c9c31f76 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1811 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, | 1811 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, |
1812 | sdata, NULL, NULL); | 1812 | sdata, NULL, NULL); |
1813 | } else { | 1813 | } else { |
1814 | int is_mesh_mcast = 1; | 1814 | /* DS -> MBSS (802.11-2012 13.11.3.3). |
1815 | const u8 *mesh_da; | 1815 | * For unicast with unknown forwarding information, |
1816 | * destination might be in the MBSS or if that fails | ||
1817 | * forwarded to another mesh gate. In either case | ||
1818 | * resolution will be handled in ieee80211_xmit(), so | ||
1819 | * leave the original DA. This also works for mcast */ | ||
1820 | const u8 *mesh_da = skb->data; | ||
1821 | |||
1822 | if (mppath) | ||
1823 | mesh_da = mppath->mpp; | ||
1824 | else if (mpath) | ||
1825 | mesh_da = mpath->dst; | ||
1826 | rcu_read_unlock(); | ||
1816 | 1827 | ||
1817 | if (is_multicast_ether_addr(skb->data)) | ||
1818 | /* DA TA mSA AE:SA */ | ||
1819 | mesh_da = skb->data; | ||
1820 | else { | ||
1821 | static const u8 bcast[ETH_ALEN] = | ||
1822 | { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | ||
1823 | if (mppath) { | ||
1824 | /* RA TA mDA mSA AE:DA SA */ | ||
1825 | mesh_da = mppath->mpp; | ||
1826 | is_mesh_mcast = 0; | ||
1827 | } else if (mpath) { | ||
1828 | mesh_da = mpath->dst; | ||
1829 | is_mesh_mcast = 0; | ||
1830 | } else { | ||
1831 | /* DA TA mSA AE:SA */ | ||
1832 | mesh_da = bcast; | ||
1833 | } | ||
1834 | } | ||
1835 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | 1828 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1836 | mesh_da, sdata->vif.addr); | 1829 | mesh_da, sdata->vif.addr); |
1837 | rcu_read_unlock(); | 1830 | if (is_multicast_ether_addr(mesh_da)) |
1838 | if (is_mesh_mcast) | 1831 | /* DA TA mSA AE:SA */ |
1839 | meshhdrlen = | 1832 | meshhdrlen = |
1840 | ieee80211_new_mesh_header(&mesh_hdr, | 1833 | ieee80211_new_mesh_header(&mesh_hdr, |
1841 | sdata, | 1834 | sdata, |
1842 | skb->data + ETH_ALEN, | 1835 | skb->data + ETH_ALEN, |
1843 | NULL); | 1836 | NULL); |
1844 | else | 1837 | else |
1838 | /* RA TA mDA mSA AE:DA SA */ | ||
1845 | meshhdrlen = | 1839 | meshhdrlen = |
1846 | ieee80211_new_mesh_header(&mesh_hdr, | 1840 | ieee80211_new_mesh_header(&mesh_hdr, |
1847 | sdata, | 1841 | sdata, |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 84444dda194b..f51013c07b9f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, | |||
1171 | goto out_err; | 1171 | goto out_err; |
1172 | } | 1172 | } |
1173 | svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); | 1173 | svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); |
1174 | if (!svc->stats.cpustats) | 1174 | if (!svc->stats.cpustats) { |
1175 | ret = -ENOMEM; | ||
1175 | goto out_err; | 1176 | goto out_err; |
1177 | } | ||
1176 | 1178 | ||
1177 | /* I'm the first user of the service */ | 1179 | /* I'm the first user of the service */ |
1178 | atomic_set(&svc->usecnt, 0); | 1180 | atomic_set(&svc->usecnt, 0); |
@@ -2759,6 +2761,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2759 | { | 2761 | { |
2760 | struct ip_vs_timeout_user t; | 2762 | struct ip_vs_timeout_user t; |
2761 | 2763 | ||
2764 | memset(&t, 0, sizeof(t)); | ||
2762 | __ip_vs_get_timeouts(net, &t); | 2765 | __ip_vs_get_timeouts(net, &t); |
2763 | if (copy_to_user(user, &t, sizeof(t)) != 0) | 2766 | if (copy_to_user(user, &t, sizeof(t)) != 0) |
2764 | ret = -EFAULT; | 2767 | ret = -EFAULT; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cf4875565d67..2ceec64b19f9 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack) | |||
249 | { | 249 | { |
250 | struct nf_conn *ct = (void *)ul_conntrack; | 250 | struct nf_conn *ct = (void *)ul_conntrack; |
251 | struct net *net = nf_ct_net(ct); | 251 | struct net *net = nf_ct_net(ct); |
252 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | ||
253 | |||
254 | BUG_ON(ecache == NULL); | ||
252 | 255 | ||
253 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { | 256 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { |
254 | /* bad luck, let's retry again */ | 257 | /* bad luck, let's retry again */ |
255 | ct->timeout.expires = jiffies + | 258 | ecache->timeout.expires = jiffies + |
256 | (random32() % net->ct.sysctl_events_retry_timeout); | 259 | (random32() % net->ct.sysctl_events_retry_timeout); |
257 | add_timer(&ct->timeout); | 260 | add_timer(&ecache->timeout); |
258 | return; | 261 | return; |
259 | } | 262 | } |
260 | /* we've got the event delivered, now it's dying */ | 263 | /* we've got the event delivered, now it's dying */ |
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack) | |||
268 | void nf_ct_insert_dying_list(struct nf_conn *ct) | 271 | void nf_ct_insert_dying_list(struct nf_conn *ct) |
269 | { | 272 | { |
270 | struct net *net = nf_ct_net(ct); | 273 | struct net *net = nf_ct_net(ct); |
274 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | ||
275 | |||
276 | BUG_ON(ecache == NULL); | ||
271 | 277 | ||
272 | /* add this conntrack to the dying list */ | 278 | /* add this conntrack to the dying list */ |
273 | spin_lock_bh(&nf_conntrack_lock); | 279 | spin_lock_bh(&nf_conntrack_lock); |
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) | |||
275 | &net->ct.dying); | 281 | &net->ct.dying); |
276 | spin_unlock_bh(&nf_conntrack_lock); | 282 | spin_unlock_bh(&nf_conntrack_lock); |
277 | /* set a new timer to retry event delivery */ | 283 | /* set a new timer to retry event delivery */ |
278 | setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); | 284 | setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); |
279 | ct->timeout.expires = jiffies + | 285 | ecache->timeout.expires = jiffies + |
280 | (random32() % net->ct.sysctl_events_retry_timeout); | 286 | (random32() % net->ct.sysctl_events_retry_timeout); |
281 | add_timer(&ct->timeout); | 287 | add_timer(&ecache->timeout); |
282 | } | 288 | } |
283 | EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); | 289 | EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); |
284 | 290 | ||
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 45cf602a76bc..527651a53a45 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master, | |||
361 | } | 361 | } |
362 | } | 362 | } |
363 | 363 | ||
364 | static inline int refresh_timer(struct nf_conntrack_expect *i) | ||
365 | { | ||
366 | struct nf_conn_help *master_help = nfct_help(i->master); | ||
367 | const struct nf_conntrack_expect_policy *p; | ||
368 | |||
369 | if (!del_timer(&i->timeout)) | ||
370 | return 0; | ||
371 | |||
372 | p = &rcu_dereference_protected( | ||
373 | master_help->helper, | ||
374 | lockdep_is_held(&nf_conntrack_lock) | ||
375 | )->expect_policy[i->class]; | ||
376 | i->timeout.expires = jiffies + p->timeout * HZ; | ||
377 | add_timer(&i->timeout); | ||
378 | return 1; | ||
379 | } | ||
380 | |||
381 | static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | 364 | static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) |
382 | { | 365 | { |
383 | const struct nf_conntrack_expect_policy *p; | 366 | const struct nf_conntrack_expect_policy *p; |
@@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
386 | struct nf_conn_help *master_help = nfct_help(master); | 369 | struct nf_conn_help *master_help = nfct_help(master); |
387 | struct nf_conntrack_helper *helper; | 370 | struct nf_conntrack_helper *helper; |
388 | struct net *net = nf_ct_exp_net(expect); | 371 | struct net *net = nf_ct_exp_net(expect); |
389 | struct hlist_node *n; | 372 | struct hlist_node *n, *next; |
390 | unsigned int h; | 373 | unsigned int h; |
391 | int ret = 1; | 374 | int ret = 1; |
392 | 375 | ||
@@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) | |||
395 | goto out; | 378 | goto out; |
396 | } | 379 | } |
397 | h = nf_ct_expect_dst_hash(&expect->tuple); | 380 | h = nf_ct_expect_dst_hash(&expect->tuple); |
398 | hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { | 381 | hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { |
399 | if (expect_matches(i, expect)) { | 382 | if (expect_matches(i, expect)) { |
400 | /* Refresh timer: if it's dying, ignore.. */ | 383 | if (del_timer(&i->timeout)) { |
401 | if (refresh_timer(i)) { | 384 | nf_ct_unlink_expect(i); |
402 | ret = 0; | 385 | nf_ct_expect_put(i); |
403 | goto out; | 386 | break; |
404 | } | 387 | } |
405 | } else if (expect_clash(i, expect)) { | 388 | } else if (expect_clash(i, expect)) { |
406 | ret = -EBUSY; | 389 | ret = -EBUSY; |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 14f67a2cbcb5..9807f3278fcb 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1896,10 +1896,15 @@ static int | |||
1896 | ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) | 1896 | ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) |
1897 | { | 1897 | { |
1898 | struct nlattr *cda[CTA_MAX+1]; | 1898 | struct nlattr *cda[CTA_MAX+1]; |
1899 | int ret; | ||
1899 | 1900 | ||
1900 | nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); | 1901 | nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); |
1901 | 1902 | ||
1902 | return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); | 1903 | spin_lock_bh(&nf_conntrack_lock); |
1904 | ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); | ||
1905 | spin_unlock_bh(&nf_conntrack_lock); | ||
1906 | |||
1907 | return ret; | ||
1903 | } | 1908 | } |
1904 | 1909 | ||
1905 | static struct nfq_ct_hook ctnetlink_nfqueue_hook = { | 1910 | static struct nfq_ct_hook ctnetlink_nfqueue_hook = { |
@@ -2785,7 +2790,8 @@ static int __init ctnetlink_init(void) | |||
2785 | goto err_unreg_subsys; | 2790 | goto err_unreg_subsys; |
2786 | } | 2791 | } |
2787 | 2792 | ||
2788 | if (register_pernet_subsys(&ctnetlink_net_ops)) { | 2793 | ret = register_pernet_subsys(&ctnetlink_net_ops); |
2794 | if (ret < 0) { | ||
2789 | pr_err("ctnetlink_init: cannot register pernet operations\n"); | 2795 | pr_err("ctnetlink_init: cannot register pernet operations\n"); |
2790 | goto err_unreg_exp_subsys; | 2796 | goto err_unreg_exp_subsys; |
2791 | } | 2797 | } |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index a5ac11ebef33..e046b3756aab 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
158 | * sCL -> sSS | 158 | * sCL -> sSS |
159 | */ | 159 | */ |
160 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 160 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
161 | /*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR }, | 161 | /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR }, |
162 | /* | 162 | /* |
163 | * sNO -> sIV Too late and no reason to do anything | 163 | * sNO -> sIV Too late and no reason to do anything |
164 | * sSS -> sIV Client can't send SYN and then SYN/ACK | 164 | * sSS -> sIV Client can't send SYN and then SYN/ACK |
165 | * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open | 165 | * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open |
166 | * sSR -> sIG | 166 | * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open |
167 | * sES -> sIG Error: SYNs in window outside the SYN_SENT state | 167 | * sES -> sIV Invalid SYN/ACK packets sent by the client |
168 | * are errors. Receiver will reply with RST | 168 | * sFW -> sIV |
169 | * and close the connection. | 169 | * sCW -> sIV |
170 | * Or we are not in sync and hold a dead connection. | 170 | * sLA -> sIV |
171 | * sFW -> sIG | 171 | * sTW -> sIV |
172 | * sCW -> sIG | 172 | * sCL -> sIV |
173 | * sLA -> sIG | ||
174 | * sTW -> sIG | ||
175 | * sCL -> sIG | ||
176 | */ | 173 | */ |
177 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 174 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
178 | /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, | 175 | /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, |
@@ -633,15 +630,9 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
633 | ack = sack = receiver->td_end; | 630 | ack = sack = receiver->td_end; |
634 | } | 631 | } |
635 | 632 | ||
636 | if (seq == end | 633 | if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT) |
637 | && (!tcph->rst | ||
638 | || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) | ||
639 | /* | 634 | /* |
640 | * Packets contains no data: we assume it is valid | 635 | * RST sent answering SYN. |
641 | * and check the ack value only. | ||
642 | * However RST segments are always validated by their | ||
643 | * SEQ number, except when seq == 0 (reset sent answering | ||
644 | * SYN. | ||
645 | */ | 636 | */ |
646 | seq = end = sender->td_end; | 637 | seq = end = sender->td_end; |
647 | 638 | ||
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 758a1bacc126..5c0a112aeee6 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr, | |||
183 | return len + digits_len(ct, dptr, limit, shift); | 183 | return len + digits_len(ct, dptr, limit, shift); |
184 | } | 184 | } |
185 | 185 | ||
186 | static int parse_addr(const struct nf_conn *ct, const char *cp, | 186 | static int sip_parse_addr(const struct nf_conn *ct, const char *cp, |
187 | const char **endp, union nf_inet_addr *addr, | 187 | const char **endp, union nf_inet_addr *addr, |
188 | const char *limit) | 188 | const char *limit, bool delim) |
189 | { | 189 | { |
190 | const char *end; | 190 | const char *end; |
191 | int ret = 0; | 191 | int ret; |
192 | 192 | ||
193 | if (!ct) | 193 | if (!ct) |
194 | return 0; | 194 | return 0; |
@@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp, | |||
197 | switch (nf_ct_l3num(ct)) { | 197 | switch (nf_ct_l3num(ct)) { |
198 | case AF_INET: | 198 | case AF_INET: |
199 | ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); | 199 | ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); |
200 | if (ret == 0) | ||
201 | return 0; | ||
200 | break; | 202 | break; |
201 | case AF_INET6: | 203 | case AF_INET6: |
204 | if (cp < limit && *cp == '[') | ||
205 | cp++; | ||
206 | else if (delim) | ||
207 | return 0; | ||
208 | |||
202 | ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); | 209 | ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); |
210 | if (ret == 0) | ||
211 | return 0; | ||
212 | |||
213 | if (end < limit && *end == ']') | ||
214 | end++; | ||
215 | else if (delim) | ||
216 | return 0; | ||
203 | break; | 217 | break; |
204 | default: | 218 | default: |
205 | BUG(); | 219 | BUG(); |
206 | } | 220 | } |
207 | 221 | ||
208 | if (ret == 0 || end == cp) | ||
209 | return 0; | ||
210 | if (endp) | 222 | if (endp) |
211 | *endp = end; | 223 | *endp = end; |
212 | return 1; | 224 | return 1; |
@@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr, | |||
219 | union nf_inet_addr addr; | 231 | union nf_inet_addr addr; |
220 | const char *aux = dptr; | 232 | const char *aux = dptr; |
221 | 233 | ||
222 | if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { | 234 | if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) { |
223 | pr_debug("ip: %s parse failed.!\n", dptr); | 235 | pr_debug("ip: %s parse failed.!\n", dptr); |
224 | return 0; | 236 | return 0; |
225 | } | 237 | } |
@@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct, | |||
296 | return 0; | 308 | return 0; |
297 | dptr += shift; | 309 | dptr += shift; |
298 | 310 | ||
299 | if (!parse_addr(ct, dptr, &end, addr, limit)) | 311 | if (!sip_parse_addr(ct, dptr, &end, addr, limit, true)) |
300 | return -1; | 312 | return -1; |
301 | if (end < limit && *end == ':') { | 313 | if (end < limit && *end == ':') { |
302 | end++; | 314 | end++; |
@@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, | |||
550 | if (ret == 0) | 562 | if (ret == 0) |
551 | return ret; | 563 | return ret; |
552 | 564 | ||
553 | if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) | 565 | if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) |
554 | return -1; | 566 | return -1; |
555 | if (*c == ':') { | 567 | if (*c == ':') { |
556 | c++; | 568 | c++; |
@@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, | |||
599 | unsigned int dataoff, unsigned int datalen, | 611 | unsigned int dataoff, unsigned int datalen, |
600 | const char *name, | 612 | const char *name, |
601 | unsigned int *matchoff, unsigned int *matchlen, | 613 | unsigned int *matchoff, unsigned int *matchlen, |
602 | union nf_inet_addr *addr) | 614 | union nf_inet_addr *addr, bool delim) |
603 | { | 615 | { |
604 | const char *limit = dptr + datalen; | 616 | const char *limit = dptr + datalen; |
605 | const char *start, *end; | 617 | const char *start, *end; |
@@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, | |||
613 | return 0; | 625 | return 0; |
614 | 626 | ||
615 | start += strlen(name); | 627 | start += strlen(name); |
616 | if (!parse_addr(ct, start, &end, addr, limit)) | 628 | if (!sip_parse_addr(ct, start, &end, addr, limit, delim)) |
617 | return 0; | 629 | return 0; |
618 | *matchoff = start - dptr; | 630 | *matchoff = start - dptr; |
619 | *matchlen = end - start; | 631 | *matchlen = end - start; |
@@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, | |||
675 | return 1; | 687 | return 1; |
676 | } | 688 | } |
677 | 689 | ||
690 | static int sdp_parse_addr(const struct nf_conn *ct, const char *cp, | ||
691 | const char **endp, union nf_inet_addr *addr, | ||
692 | const char *limit) | ||
693 | { | ||
694 | const char *end; | ||
695 | int ret; | ||
696 | |||
697 | memset(addr, 0, sizeof(*addr)); | ||
698 | switch (nf_ct_l3num(ct)) { | ||
699 | case AF_INET: | ||
700 | ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); | ||
701 | break; | ||
702 | case AF_INET6: | ||
703 | ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); | ||
704 | break; | ||
705 | default: | ||
706 | BUG(); | ||
707 | } | ||
708 | |||
709 | if (ret == 0) | ||
710 | return 0; | ||
711 | if (endp) | ||
712 | *endp = end; | ||
713 | return 1; | ||
714 | } | ||
715 | |||
716 | /* skip ip address. returns its length. */ | ||
717 | static int sdp_addr_len(const struct nf_conn *ct, const char *dptr, | ||
718 | const char *limit, int *shift) | ||
719 | { | ||
720 | union nf_inet_addr addr; | ||
721 | const char *aux = dptr; | ||
722 | |||
723 | if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) { | ||
724 | pr_debug("ip: %s parse failed.!\n", dptr); | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | return dptr - aux; | ||
729 | } | ||
730 | |||
678 | /* SDP header parsing: a SDP session description contains an ordered set of | 731 | /* SDP header parsing: a SDP session description contains an ordered set of |
679 | * headers, starting with a section containing general session parameters, | 732 | * headers, starting with a section containing general session parameters, |
680 | * optionally followed by multiple media descriptions. | 733 | * optionally followed by multiple media descriptions. |
@@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, | |||
686 | */ | 739 | */ |
687 | static const struct sip_header ct_sdp_hdrs[] = { | 740 | static const struct sip_header ct_sdp_hdrs[] = { |
688 | [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), | 741 | [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), |
689 | [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len), | 742 | [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len), |
690 | [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len), | 743 | [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len), |
691 | [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len), | 744 | [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len), |
692 | [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len), | 745 | [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len), |
693 | [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), | 746 | [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), |
694 | }; | 747 | }; |
695 | 748 | ||
@@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, | |||
775 | if (ret <= 0) | 828 | if (ret <= 0) |
776 | return ret; | 829 | return ret; |
777 | 830 | ||
778 | if (!parse_addr(ct, dptr + *matchoff, NULL, addr, | 831 | if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr, |
779 | dptr + *matchoff + *matchlen)) | 832 | dptr + *matchoff + *matchlen)) |
780 | return -1; | 833 | return -1; |
781 | return 1; | 834 | return 1; |
782 | } | 835 | } |
@@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, | |||
1515 | } | 1568 | } |
1516 | 1569 | ||
1517 | static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; | 1570 | static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; |
1518 | static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly; | ||
1519 | 1571 | ||
1520 | static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { | 1572 | static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { |
1521 | [SIP_EXPECT_SIGNALLING] = { | 1573 | [SIP_EXPECT_SIGNALLING] = { |
@@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void) | |||
1585 | sip[i][j].me = THIS_MODULE; | 1637 | sip[i][j].me = THIS_MODULE; |
1586 | 1638 | ||
1587 | if (ports[i] == SIP_PORT) | 1639 | if (ports[i] == SIP_PORT) |
1588 | sprintf(sip_names[i][j], "sip"); | 1640 | sprintf(sip[i][j].name, "sip"); |
1589 | else | 1641 | else |
1590 | sprintf(sip_names[i][j], "sip-%u", i); | 1642 | sprintf(sip[i][j].name, "sip-%u", i); |
1591 | 1643 | ||
1592 | pr_debug("port #%u: %u\n", i, ports[i]); | 1644 | pr_debug("port #%u: %u\n", i, ports[i]); |
1593 | 1645 | ||
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 169ab59ed9d4..8cfc401e197e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -55,6 +55,7 @@ struct nfulnl_instance { | |||
55 | unsigned int qlen; /* number of nlmsgs in skb */ | 55 | unsigned int qlen; /* number of nlmsgs in skb */ |
56 | struct sk_buff *skb; /* pre-allocatd skb */ | 56 | struct sk_buff *skb; /* pre-allocatd skb */ |
57 | struct timer_list timer; | 57 | struct timer_list timer; |
58 | struct user_namespace *peer_user_ns; /* User namespace of the peer process */ | ||
58 | int peer_pid; /* PID of the peer process */ | 59 | int peer_pid; /* PID of the peer process */ |
59 | 60 | ||
60 | /* configurable parameters */ | 61 | /* configurable parameters */ |
@@ -132,7 +133,7 @@ instance_put(struct nfulnl_instance *inst) | |||
132 | static void nfulnl_timer(unsigned long data); | 133 | static void nfulnl_timer(unsigned long data); |
133 | 134 | ||
134 | static struct nfulnl_instance * | 135 | static struct nfulnl_instance * |
135 | instance_create(u_int16_t group_num, int pid) | 136 | instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns) |
136 | { | 137 | { |
137 | struct nfulnl_instance *inst; | 138 | struct nfulnl_instance *inst; |
138 | int err; | 139 | int err; |
@@ -162,6 +163,7 @@ instance_create(u_int16_t group_num, int pid) | |||
162 | 163 | ||
163 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); | 164 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); |
164 | 165 | ||
166 | inst->peer_user_ns = user_ns; | ||
165 | inst->peer_pid = pid; | 167 | inst->peer_pid = pid; |
166 | inst->group_num = group_num; | 168 | inst->group_num = group_num; |
167 | 169 | ||
@@ -381,6 +383,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
381 | struct nlmsghdr *nlh; | 383 | struct nlmsghdr *nlh; |
382 | struct nfgenmsg *nfmsg; | 384 | struct nfgenmsg *nfmsg; |
383 | sk_buff_data_t old_tail = inst->skb->tail; | 385 | sk_buff_data_t old_tail = inst->skb->tail; |
386 | struct sock *sk; | ||
384 | 387 | ||
385 | nlh = nlmsg_put(inst->skb, 0, 0, | 388 | nlh = nlmsg_put(inst->skb, 0, 0, |
386 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, | 389 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, |
@@ -480,7 +483,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
480 | } | 483 | } |
481 | 484 | ||
482 | if (indev && skb_mac_header_was_set(skb)) { | 485 | if (indev && skb_mac_header_was_set(skb)) { |
483 | if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || | 486 | if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || |
484 | nla_put_be16(inst->skb, NFULA_HWLEN, | 487 | nla_put_be16(inst->skb, NFULA_HWLEN, |
485 | htons(skb->dev->hard_header_len)) || | 488 | htons(skb->dev->hard_header_len)) || |
486 | nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, | 489 | nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, |
@@ -499,18 +502,21 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
499 | } | 502 | } |
500 | 503 | ||
501 | /* UID */ | 504 | /* UID */ |
502 | if (skb->sk) { | 505 | sk = skb->sk; |
503 | read_lock_bh(&skb->sk->sk_callback_lock); | 506 | if (sk && sk->sk_state != TCP_TIME_WAIT) { |
504 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) { | 507 | read_lock_bh(&sk->sk_callback_lock); |
505 | struct file *file = skb->sk->sk_socket->file; | 508 | if (sk->sk_socket && sk->sk_socket->file) { |
506 | __be32 uid = htonl(file->f_cred->fsuid); | 509 | struct file *file = sk->sk_socket->file; |
507 | __be32 gid = htonl(file->f_cred->fsgid); | 510 | const struct cred *cred = file->f_cred; |
508 | read_unlock_bh(&skb->sk->sk_callback_lock); | 511 | struct user_namespace *user_ns = inst->peer_user_ns; |
512 | __be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid)); | ||
513 | __be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid)); | ||
514 | read_unlock_bh(&sk->sk_callback_lock); | ||
509 | if (nla_put_be32(inst->skb, NFULA_UID, uid) || | 515 | if (nla_put_be32(inst->skb, NFULA_UID, uid) || |
510 | nla_put_be32(inst->skb, NFULA_GID, gid)) | 516 | nla_put_be32(inst->skb, NFULA_GID, gid)) |
511 | goto nla_put_failure; | 517 | goto nla_put_failure; |
512 | } else | 518 | } else |
513 | read_unlock_bh(&skb->sk->sk_callback_lock); | 519 | read_unlock_bh(&sk->sk_callback_lock); |
514 | } | 520 | } |
515 | 521 | ||
516 | /* local sequence number */ | 522 | /* local sequence number */ |
@@ -783,7 +789,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
783 | } | 789 | } |
784 | 790 | ||
785 | inst = instance_create(group_num, | 791 | inst = instance_create(group_num, |
786 | NETLINK_CB(skb).pid); | 792 | NETLINK_CB(skb).pid, |
793 | sk_user_ns(NETLINK_CB(skb).ssk)); | ||
787 | if (IS_ERR(inst)) { | 794 | if (IS_ERR(inst)) { |
788 | ret = PTR_ERR(inst); | 795 | ret = PTR_ERR(inst); |
789 | goto out; | 796 | goto out; |
@@ -996,8 +1003,10 @@ static int __init nfnetlink_log_init(void) | |||
996 | 1003 | ||
997 | #ifdef CONFIG_PROC_FS | 1004 | #ifdef CONFIG_PROC_FS |
998 | if (!proc_create("nfnetlink_log", 0440, | 1005 | if (!proc_create("nfnetlink_log", 0440, |
999 | proc_net_netfilter, &nful_file_ops)) | 1006 | proc_net_netfilter, &nful_file_ops)) { |
1007 | status = -ENOMEM; | ||
1000 | goto cleanup_logger; | 1008 | goto cleanup_logger; |
1009 | } | ||
1001 | #endif | 1010 | #endif |
1002 | return status; | 1011 | return status; |
1003 | 1012 | ||
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c index ff5f75fddb15..fa40096940a1 100644 --- a/net/netfilter/xt_LOG.c +++ b/net/netfilter/xt_LOG.c | |||
@@ -145,6 +145,21 @@ static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb, | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk) | ||
149 | { | ||
150 | if (!sk || sk->sk_state == TCP_TIME_WAIT) | ||
151 | return; | ||
152 | |||
153 | read_lock_bh(&sk->sk_callback_lock); | ||
154 | if (sk->sk_socket && sk->sk_socket->file) { | ||
155 | const struct cred *cred = sk->sk_socket->file->f_cred; | ||
156 | sb_add(m, "UID=%u GID=%u ", | ||
157 | from_kuid_munged(&init_user_ns, cred->fsuid), | ||
158 | from_kgid_munged(&init_user_ns, cred->fsgid)); | ||
159 | } | ||
160 | read_unlock_bh(&sk->sk_callback_lock); | ||
161 | } | ||
162 | |||
148 | /* One level of recursion won't kill us */ | 163 | /* One level of recursion won't kill us */ |
149 | static void dump_ipv4_packet(struct sbuff *m, | 164 | static void dump_ipv4_packet(struct sbuff *m, |
150 | const struct nf_loginfo *info, | 165 | const struct nf_loginfo *info, |
@@ -361,14 +376,8 @@ static void dump_ipv4_packet(struct sbuff *m, | |||
361 | } | 376 | } |
362 | 377 | ||
363 | /* Max length: 15 "UID=4294967295 " */ | 378 | /* Max length: 15 "UID=4294967295 " */ |
364 | if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) { | 379 | if ((logflags & XT_LOG_UID) && !iphoff) |
365 | read_lock_bh(&skb->sk->sk_callback_lock); | 380 | dump_sk_uid_gid(m, skb->sk); |
366 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) | ||
367 | sb_add(m, "UID=%u GID=%u ", | ||
368 | skb->sk->sk_socket->file->f_cred->fsuid, | ||
369 | skb->sk->sk_socket->file->f_cred->fsgid); | ||
370 | read_unlock_bh(&skb->sk->sk_callback_lock); | ||
371 | } | ||
372 | 381 | ||
373 | /* Max length: 16 "MARK=0xFFFFFFFF " */ | 382 | /* Max length: 16 "MARK=0xFFFFFFFF " */ |
374 | if (!iphoff && skb->mark) | 383 | if (!iphoff && skb->mark) |
@@ -436,8 +445,8 @@ log_packet_common(struct sbuff *m, | |||
436 | const struct nf_loginfo *loginfo, | 445 | const struct nf_loginfo *loginfo, |
437 | const char *prefix) | 446 | const char *prefix) |
438 | { | 447 | { |
439 | sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level, | 448 | sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ", |
440 | prefix, | 449 | '0' + loginfo->u.log.level, prefix, |
441 | in ? in->name : "", | 450 | in ? in->name : "", |
442 | out ? out->name : ""); | 451 | out ? out->name : ""); |
443 | #ifdef CONFIG_BRIDGE_NETFILTER | 452 | #ifdef CONFIG_BRIDGE_NETFILTER |
@@ -717,14 +726,8 @@ static void dump_ipv6_packet(struct sbuff *m, | |||
717 | } | 726 | } |
718 | 727 | ||
719 | /* Max length: 15 "UID=4294967295 " */ | 728 | /* Max length: 15 "UID=4294967295 " */ |
720 | if ((logflags & XT_LOG_UID) && recurse && skb->sk) { | 729 | if ((logflags & XT_LOG_UID) && recurse) |
721 | read_lock_bh(&skb->sk->sk_callback_lock); | 730 | dump_sk_uid_gid(m, skb->sk); |
722 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) | ||
723 | sb_add(m, "UID=%u GID=%u ", | ||
724 | skb->sk->sk_socket->file->f_cred->fsuid, | ||
725 | skb->sk->sk_socket->file->f_cred->fsgid); | ||
726 | read_unlock_bh(&skb->sk->sk_callback_lock); | ||
727 | } | ||
728 | 731 | ||
729 | /* Max length: 16 "MARK=0xFFFFFFFF " */ | 732 | /* Max length: 16 "MARK=0xFFFFFFFF " */ |
730 | if (!recurse && skb->mark) | 733 | if (!recurse && skb->mark) |
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 5c22ce8ab309..a4c1e4528cac 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -117,11 +117,11 @@ static int limit_mt_check(const struct xt_mtchk_param *par) | |||
117 | 117 | ||
118 | /* For SMP, we only want to use one set of state. */ | 118 | /* For SMP, we only want to use one set of state. */ |
119 | r->master = priv; | 119 | r->master = priv; |
120 | /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * | ||
121 | 128. */ | ||
122 | priv->prev = jiffies; | ||
123 | priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ | ||
120 | if (r->cost == 0) { | 124 | if (r->cost == 0) { |
121 | /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * | ||
122 | 128. */ | ||
123 | priv->prev = jiffies; | ||
124 | priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ | ||
125 | r->credit_cap = priv->credit; /* Credits full. */ | 125 | r->credit_cap = priv->credit; /* Credits full. */ |
126 | r->cost = user2credits(r->avg); | 126 | r->cost = user2credits(r->avg); |
127 | } | 127 | } |
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 772d7389b337..ca2e577ed8ac 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c | |||
@@ -17,6 +17,17 @@ | |||
17 | #include <linux/netfilter/x_tables.h> | 17 | #include <linux/netfilter/x_tables.h> |
18 | #include <linux/netfilter/xt_owner.h> | 18 | #include <linux/netfilter/xt_owner.h> |
19 | 19 | ||
20 | static int owner_check(const struct xt_mtchk_param *par) | ||
21 | { | ||
22 | struct xt_owner_match_info *info = par->matchinfo; | ||
23 | |||
24 | /* For now only allow adding matches from the initial user namespace */ | ||
25 | if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) && | ||
26 | (current_user_ns() != &init_user_ns)) | ||
27 | return -EINVAL; | ||
28 | return 0; | ||
29 | } | ||
30 | |||
20 | static bool | 31 | static bool |
21 | owner_mt(const struct sk_buff *skb, struct xt_action_param *par) | 32 | owner_mt(const struct sk_buff *skb, struct xt_action_param *par) |
22 | { | 33 | { |
@@ -37,17 +48,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
37 | return ((info->match ^ info->invert) & | 48 | return ((info->match ^ info->invert) & |
38 | (XT_OWNER_UID | XT_OWNER_GID)) == 0; | 49 | (XT_OWNER_UID | XT_OWNER_GID)) == 0; |
39 | 50 | ||
40 | if (info->match & XT_OWNER_UID) | 51 | if (info->match & XT_OWNER_UID) { |
41 | if ((filp->f_cred->fsuid >= info->uid_min && | 52 | kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min); |
42 | filp->f_cred->fsuid <= info->uid_max) ^ | 53 | kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max); |
54 | if ((uid_gte(filp->f_cred->fsuid, uid_min) && | ||
55 | uid_lte(filp->f_cred->fsuid, uid_max)) ^ | ||
43 | !(info->invert & XT_OWNER_UID)) | 56 | !(info->invert & XT_OWNER_UID)) |
44 | return false; | 57 | return false; |
58 | } | ||
45 | 59 | ||
46 | if (info->match & XT_OWNER_GID) | 60 | if (info->match & XT_OWNER_GID) { |
47 | if ((filp->f_cred->fsgid >= info->gid_min && | 61 | kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min); |
48 | filp->f_cred->fsgid <= info->gid_max) ^ | 62 | kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max); |
63 | if ((gid_gte(filp->f_cred->fsgid, gid_min) && | ||
64 | gid_lte(filp->f_cred->fsgid, gid_max)) ^ | ||
49 | !(info->invert & XT_OWNER_GID)) | 65 | !(info->invert & XT_OWNER_GID)) |
50 | return false; | 66 | return false; |
67 | } | ||
51 | 68 | ||
52 | return true; | 69 | return true; |
53 | } | 70 | } |
@@ -56,6 +73,7 @@ static struct xt_match owner_mt_reg __read_mostly = { | |||
56 | .name = "owner", | 73 | .name = "owner", |
57 | .revision = 1, | 74 | .revision = 1, |
58 | .family = NFPROTO_UNSPEC, | 75 | .family = NFPROTO_UNSPEC, |
76 | .checkentry = owner_check, | ||
59 | .match = owner_mt, | 77 | .match = owner_mt, |
60 | .matchsize = sizeof(struct xt_owner_match_info), | 78 | .matchsize = sizeof(struct xt_owner_match_info), |
61 | .hooks = (1 << NF_INET_LOCAL_OUT) | | 79 | .hooks = (1 << NF_INET_LOCAL_OUT) | |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index ae2ad1eec8d0..4635c9b00459 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -317,6 +317,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
317 | struct recent_table *t; | 317 | struct recent_table *t; |
318 | #ifdef CONFIG_PROC_FS | 318 | #ifdef CONFIG_PROC_FS |
319 | struct proc_dir_entry *pde; | 319 | struct proc_dir_entry *pde; |
320 | kuid_t uid; | ||
321 | kgid_t gid; | ||
320 | #endif | 322 | #endif |
321 | unsigned int i; | 323 | unsigned int i; |
322 | int ret = -EINVAL; | 324 | int ret = -EINVAL; |
@@ -372,6 +374,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
372 | for (i = 0; i < ip_list_hash_size; i++) | 374 | for (i = 0; i < ip_list_hash_size; i++) |
373 | INIT_LIST_HEAD(&t->iphash[i]); | 375 | INIT_LIST_HEAD(&t->iphash[i]); |
374 | #ifdef CONFIG_PROC_FS | 376 | #ifdef CONFIG_PROC_FS |
377 | uid = make_kuid(&init_user_ns, ip_list_uid); | ||
378 | gid = make_kgid(&init_user_ns, ip_list_gid); | ||
379 | if (!uid_valid(uid) || !gid_valid(gid)) { | ||
380 | kfree(t); | ||
381 | ret = -EINVAL; | ||
382 | goto out; | ||
383 | } | ||
375 | pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, | 384 | pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, |
376 | &recent_mt_fops, t); | 385 | &recent_mt_fops, t); |
377 | if (pde == NULL) { | 386 | if (pde == NULL) { |
@@ -379,8 +388,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
379 | ret = -ENOMEM; | 388 | ret = -ENOMEM; |
380 | goto out; | 389 | goto out; |
381 | } | 390 | } |
382 | pde->uid = ip_list_uid; | 391 | pde->uid = uid; |
383 | pde->gid = ip_list_gid; | 392 | pde->gid = gid; |
384 | #endif | 393 | #endif |
385 | spin_lock_bh(&recent_lock); | 394 | spin_lock_bh(&recent_lock); |
386 | list_add_tail(&t->list, &recent_net->tables); | 395 | list_add_tail(&t->list, &recent_net->tables); |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index e7ff694f1049..729a345c75a4 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void) | |||
1541 | * it is called is at bootup before the audit subsystem is reporting | 1541 | * it is called is at bootup before the audit subsystem is reporting |
1542 | * messages so don't worry to much about these values. */ | 1542 | * messages so don't worry to much about these values. */ |
1543 | security_task_getsecid(current, &audit_info.secid); | 1543 | security_task_getsecid(current, &audit_info.secid); |
1544 | audit_info.loginuid = 0; | 1544 | audit_info.loginuid = GLOBAL_ROOT_UID; |
1545 | audit_info.sessionid = 0; | 1545 | audit_info.sessionid = 0; |
1546 | 1546 | ||
1547 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | 1547 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index 9fae63f10298..9650c4ad5f88 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c | |||
@@ -109,7 +109,7 @@ struct audit_buffer *netlbl_audit_start_common(int type, | |||
109 | return NULL; | 109 | return NULL; |
110 | 110 | ||
111 | audit_log_format(audit_buf, "netlabel: auid=%u ses=%u", | 111 | audit_log_format(audit_buf, "netlabel: auid=%u ses=%u", |
112 | audit_info->loginuid, | 112 | from_kuid(&init_user_ns, audit_info->loginuid), |
113 | audit_info->sessionid); | 113 | audit_info->sessionid); |
114 | 114 | ||
115 | if (audit_info->secid != 0 && | 115 | if (audit_info->secid != 0 && |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5463969da45b..382119917166 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -912,7 +912,8 @@ static void netlink_rcv_wake(struct sock *sk) | |||
912 | wake_up_interruptible(&nlk->wait); | 912 | wake_up_interruptible(&nlk->wait); |
913 | } | 913 | } |
914 | 914 | ||
915 | static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) | 915 | static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, |
916 | struct sock *ssk) | ||
916 | { | 917 | { |
917 | int ret; | 918 | int ret; |
918 | struct netlink_sock *nlk = nlk_sk(sk); | 919 | struct netlink_sock *nlk = nlk_sk(sk); |
@@ -921,6 +922,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) | |||
921 | if (nlk->netlink_rcv != NULL) { | 922 | if (nlk->netlink_rcv != NULL) { |
922 | ret = skb->len; | 923 | ret = skb->len; |
923 | skb_set_owner_r(skb, sk); | 924 | skb_set_owner_r(skb, sk); |
925 | NETLINK_CB(skb).ssk = ssk; | ||
924 | nlk->netlink_rcv(skb); | 926 | nlk->netlink_rcv(skb); |
925 | consume_skb(skb); | 927 | consume_skb(skb); |
926 | } else { | 928 | } else { |
@@ -947,7 +949,7 @@ retry: | |||
947 | return PTR_ERR(sk); | 949 | return PTR_ERR(sk); |
948 | } | 950 | } |
949 | if (netlink_is_kernel(sk)) | 951 | if (netlink_is_kernel(sk)) |
950 | return netlink_unicast_kernel(sk, skb); | 952 | return netlink_unicast_kernel(sk, skb, ssk); |
951 | 953 | ||
952 | if (sk_filter(sk, skb)) { | 954 | if (sk_filter(sk, skb)) { |
953 | err = skb->len; | 955 | err = skb->len; |
@@ -1362,7 +1364,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1362 | if (NULL == siocb->scm) | 1364 | if (NULL == siocb->scm) |
1363 | siocb->scm = &scm; | 1365 | siocb->scm = &scm; |
1364 | 1366 | ||
1365 | err = scm_send(sock, msg, siocb->scm); | 1367 | err = scm_send(sock, msg, siocb->scm, true); |
1366 | if (err < 0) | 1368 | if (err < 0) |
1367 | return err; | 1369 | return err; |
1368 | 1370 | ||
@@ -1373,7 +1375,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1373 | dst_pid = addr->nl_pid; | 1375 | dst_pid = addr->nl_pid; |
1374 | dst_group = ffs(addr->nl_groups); | 1376 | dst_group = ffs(addr->nl_groups); |
1375 | err = -EPERM; | 1377 | err = -EPERM; |
1376 | if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) | 1378 | if ((dst_group || dst_pid) && |
1379 | !netlink_capable(sock, NL_NONROOT_SEND)) | ||
1377 | goto out; | 1380 | goto out; |
1378 | } else { | 1381 | } else { |
1379 | dst_pid = nlk->dst_pid; | 1382 | dst_pid = nlk->dst_pid; |
@@ -2147,6 +2150,7 @@ static void __init netlink_add_usersock_entry(void) | |||
2147 | rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); | 2150 | rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); |
2148 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | 2151 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; |
2149 | nl_table[NETLINK_USERSOCK].registered = 1; | 2152 | nl_table[NETLINK_USERSOCK].registered = 1; |
2153 | nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; | ||
2150 | 2154 | ||
2151 | netlink_table_ungrab(); | 2155 | netlink_table_ungrab(); |
2152 | } | 2156 | } |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 06592d8b4a2b..7261eb81974f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -601,7 +601,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
601 | if (!capable(CAP_NET_BIND_SERVICE)) { | 601 | if (!capable(CAP_NET_BIND_SERVICE)) { |
602 | dev_put(dev); | 602 | dev_put(dev); |
603 | release_sock(sk); | 603 | release_sock(sk); |
604 | return -EACCES; | 604 | return -EPERM; |
605 | } | 605 | } |
606 | nr->user_addr = addr->fsa_digipeater[0]; | 606 | nr->user_addr = addr->fsa_digipeater[0]; |
607 | nr->source_addr = addr->fsa_ax25.sax25_call; | 607 | nr->source_addr = addr->fsa_ax25.sax25_call; |
@@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1169 | msg->msg_flags |= MSG_TRUNC; | 1169 | msg->msg_flags |= MSG_TRUNC; |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1172 | er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1173 | if (er < 0) { | ||
1174 | skb_free_datagram(sk, skb); | ||
1175 | release_sock(sk); | ||
1176 | return er; | ||
1177 | } | ||
1173 | 1178 | ||
1174 | if (sax != NULL) { | 1179 | if (sax != NULL) { |
1175 | sax->sax25_family = AF_NETROM; | 1180 | sax->sax25_family = AF_NETROM; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index f3f96badf5aa..954405ceae9e 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -45,7 +45,7 @@ static int make_writable(struct sk_buff *skb, int write_len) | |||
45 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 45 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
46 | } | 46 | } |
47 | 47 | ||
48 | /* remove VLAN header from packet and update csum accrodingly. */ | 48 | /* remove VLAN header from packet and update csum accordingly. */ |
49 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | 49 | static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) |
50 | { | 50 | { |
51 | struct vlan_hdr *vhdr; | 51 | struct vlan_hdr *vhdr; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d8277d29e710..cf58cedad083 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -425,10 +425,10 @@ static int validate_sample(const struct nlattr *attr, | |||
425 | static int validate_tp_port(const struct sw_flow_key *flow_key) | 425 | static int validate_tp_port(const struct sw_flow_key *flow_key) |
426 | { | 426 | { |
427 | if (flow_key->eth.type == htons(ETH_P_IP)) { | 427 | if (flow_key->eth.type == htons(ETH_P_IP)) { |
428 | if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) | 428 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) |
429 | return 0; | 429 | return 0; |
430 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | 430 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { |
431 | if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) | 431 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) |
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
@@ -460,7 +460,7 @@ static int validate_set(const struct nlattr *a, | |||
460 | if (flow_key->eth.type != htons(ETH_P_IP)) | 460 | if (flow_key->eth.type != htons(ETH_P_IP)) |
461 | return -EINVAL; | 461 | return -EINVAL; |
462 | 462 | ||
463 | if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) | 463 | if (!flow_key->ip.proto) |
464 | return -EINVAL; | 464 | return -EINVAL; |
465 | 465 | ||
466 | ipv4_key = nla_data(ovs_key); | 466 | ipv4_key = nla_data(ovs_key); |
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 9b75617ca4e0..c30df1a10c67 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
@@ -145,15 +145,17 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies); | |||
145 | * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 | 145 | * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 |
146 | * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 | 146 | * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 |
147 | * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 | 147 | * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 |
148 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype) | ||
148 | * OVS_KEY_ATTR_8021Q 4 -- 4 8 | 149 | * OVS_KEY_ATTR_8021Q 4 -- 4 8 |
149 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 | 150 | * OVS_KEY_ATTR_ENCAP 0 -- 4 4 (VLAN encapsulation) |
151 | * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (inner VLAN ethertype) | ||
150 | * OVS_KEY_ATTR_IPV6 40 -- 4 44 | 152 | * OVS_KEY_ATTR_IPV6 40 -- 4 44 |
151 | * OVS_KEY_ATTR_ICMPV6 2 2 4 8 | 153 | * OVS_KEY_ATTR_ICMPV6 2 2 4 8 |
152 | * OVS_KEY_ATTR_ND 28 -- 4 32 | 154 | * OVS_KEY_ATTR_ND 28 -- 4 32 |
153 | * ------------------------------------------------- | 155 | * ------------------------------------------------- |
154 | * total 132 | 156 | * total 144 |
155 | */ | 157 | */ |
156 | #define FLOW_BUFSIZE 132 | 158 | #define FLOW_BUFSIZE 144 |
157 | 159 | ||
158 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); | 160 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); |
159 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, | 161 | int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8ac890a1a4c0..048fba476aa5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) | |||
1273 | spin_unlock(&f->lock); | 1273 | spin_unlock(&f->lock); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | static bool match_fanout_group(struct packet_type *ptype, struct sock * sk) | ||
1277 | { | ||
1278 | if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) | ||
1279 | return true; | ||
1280 | |||
1281 | return false; | ||
1282 | } | ||
1283 | |||
1276 | static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | 1284 | static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
1277 | { | 1285 | { |
1278 | struct packet_sock *po = pkt_sk(sk); | 1286 | struct packet_sock *po = pkt_sk(sk); |
@@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | |||
1325 | match->prot_hook.dev = po->prot_hook.dev; | 1333 | match->prot_hook.dev = po->prot_hook.dev; |
1326 | match->prot_hook.func = packet_rcv_fanout; | 1334 | match->prot_hook.func = packet_rcv_fanout; |
1327 | match->prot_hook.af_packet_priv = match; | 1335 | match->prot_hook.af_packet_priv = match; |
1336 | match->prot_hook.id_match = match_fanout_group; | ||
1328 | dev_add_pack(&match->prot_hook); | 1337 | dev_add_pack(&match->prot_hook); |
1329 | list_add(&match->list, &fanout_list); | 1338 | list_add(&match->list, &fanout_list); |
1330 | } | 1339 | } |
@@ -3845,7 +3854,7 @@ static int packet_seq_show(struct seq_file *seq, void *v) | |||
3845 | po->ifindex, | 3854 | po->ifindex, |
3846 | po->running, | 3855 | po->running, |
3847 | atomic_read(&s->sk_rmem_alloc), | 3856 | atomic_read(&s->sk_rmem_alloc), |
3848 | sock_i_uid(s), | 3857 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), |
3849 | sock_i_ino(s)); | 3858 | sock_i_ino(s)); |
3850 | } | 3859 | } |
3851 | 3860 | ||
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 0acc943f713a..b7e982782255 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -612,7 +612,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) | |||
612 | sk->sk_protocol, pn->sobject, pn->dobject, | 612 | sk->sk_protocol, pn->sobject, pn->dobject, |
613 | pn->resource, sk->sk_state, | 613 | pn->resource, sk->sk_state, |
614 | sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), | 614 | sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), |
615 | sock_i_uid(sk), sock_i_ino(sk), | 615 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), |
616 | sock_i_ino(sk), | ||
616 | atomic_read(&sk->sk_refcnt), sk, | 617 | atomic_read(&sk->sk_refcnt), sk, |
617 | atomic_read(&sk->sk_drops), &len); | 618 | atomic_read(&sk->sk_drops), &len); |
618 | } | 619 | } |
@@ -796,7 +797,8 @@ static int pn_res_seq_show(struct seq_file *seq, void *v) | |||
796 | struct sock *sk = *psk; | 797 | struct sock *sk = *psk; |
797 | 798 | ||
798 | seq_printf(seq, "%02X %5d %lu%n", | 799 | seq_printf(seq, "%02X %5d %lu%n", |
799 | (int) (psk - pnres.sk), sock_i_uid(sk), | 800 | (int) (psk - pnres.sk), |
801 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), | ||
800 | sock_i_ino(sk), &len); | 802 | sock_i_ino(sk), &len); |
801 | } | 803 | } |
802 | seq_printf(seq, "%*s\n", 63 - len, ""); | 804 | seq_printf(seq, "%*s\n", 63 - len, ""); |
diff --git a/net/rfkill/input.c b/net/rfkill/input.c index 24c55c53e6a2..c9d931e7ffec 100644 --- a/net/rfkill/input.c +++ b/net/rfkill/input.c | |||
@@ -164,8 +164,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op) | |||
164 | rfkill_op_pending = true; | 164 | rfkill_op_pending = true; |
165 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | 165 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { |
166 | /* bypass the limiter for EPO */ | 166 | /* bypass the limiter for EPO */ |
167 | cancel_delayed_work(&rfkill_op_work); | 167 | mod_delayed_work(system_wq, &rfkill_op_work, 0); |
168 | schedule_delayed_work(&rfkill_op_work, 0); | ||
169 | rfkill_last_scheduled = jiffies; | 168 | rfkill_last_scheduled = jiffies; |
170 | } else | 169 | } else |
171 | rfkill_schedule_ratelimited(); | 170 | rfkill_schedule_ratelimited(); |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 8b1f9f49960f..011d2384b115 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -948,7 +948,8 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn, | |||
948 | 948 | ||
949 | _enter(""); | 949 | _enter(""); |
950 | 950 | ||
951 | key = key_alloc(&key_type_rxrpc, "x", 0, 0, cred, 0, | 951 | key = key_alloc(&key_type_rxrpc, "x", |
952 | GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 0, | ||
952 | KEY_ALLOC_NOT_IN_QUOTA); | 953 | KEY_ALLOC_NOT_IN_QUOTA); |
953 | if (IS_ERR(key)) { | 954 | if (IS_ERR(key)) { |
954 | _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); | 955 | _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); |
@@ -994,7 +995,8 @@ struct key *rxrpc_get_null_key(const char *keyname) | |||
994 | struct key *key; | 995 | struct key *key; |
995 | int ret; | 996 | int ret; |
996 | 997 | ||
997 | key = key_alloc(&key_type_rxrpc, keyname, 0, 0, cred, | 998 | key = key_alloc(&key_type_rxrpc, keyname, |
999 | GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, | ||
998 | KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA); | 1000 | KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA); |
999 | if (IS_ERR(key)) | 1001 | if (IS_ERR(key)) |
1000 | return key; | 1002 | return key; |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fe81cc18e9e0..9c0fd0c78814 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, | |||
200 | out: | 200 | out: |
201 | if (err) { | 201 | if (err) { |
202 | m->tcf_qstats.overlimits++; | 202 | m->tcf_qstats.overlimits++; |
203 | /* should we be asking for packet to be dropped? | 203 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR) |
204 | * may make sense for redirect case only | 204 | retval = TC_ACT_SHOT; |
205 | */ | 205 | else |
206 | retval = TC_ACT_SHOT; | 206 | retval = m->tcf_action; |
207 | } else { | 207 | } else |
208 | retval = m->tcf_action; | 208 | retval = m->tcf_action; |
209 | } | ||
210 | spin_unlock(&m->tcf_lock); | 209 | spin_unlock(&m->tcf_lock); |
211 | 210 | ||
212 | return retval; | 211 | return retval; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6dd1131f2ec1..dc3ef5aef355 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -319,7 +319,7 @@ replay: | |||
319 | } | 319 | } |
320 | } | 320 | } |
321 | 321 | ||
322 | err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); | 322 | err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh); |
323 | if (err == 0) { | 323 | if (err == 0) { |
324 | if (tp_created) { | 324 | if (tp_created) { |
325 | spin_lock_bh(root_lock); | 325 | spin_lock_bh(root_lock); |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 590960a22a77..344a11b342e5 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
@@ -162,7 +162,8 @@ errout: | |||
162 | return err; | 162 | return err; |
163 | } | 163 | } |
164 | 164 | ||
165 | static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, | 165 | static int basic_change(struct sk_buff *in_skb, |
166 | struct tcf_proto *tp, unsigned long base, u32 handle, | ||
166 | struct nlattr **tca, unsigned long *arg) | 167 | struct nlattr **tca, unsigned long *arg) |
167 | { | 168 | { |
168 | int err; | 169 | int err; |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 7743ea8d1d38..2ecde225ae60 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -77,11 +77,18 @@ struct cgroup_subsys net_cls_subsys = { | |||
77 | .name = "net_cls", | 77 | .name = "net_cls", |
78 | .create = cgrp_create, | 78 | .create = cgrp_create, |
79 | .destroy = cgrp_destroy, | 79 | .destroy = cgrp_destroy, |
80 | #ifdef CONFIG_NET_CLS_CGROUP | ||
81 | .subsys_id = net_cls_subsys_id, | 80 | .subsys_id = net_cls_subsys_id, |
82 | #endif | ||
83 | .base_cftypes = ss_files, | 81 | .base_cftypes = ss_files, |
84 | .module = THIS_MODULE, | 82 | .module = THIS_MODULE, |
83 | |||
84 | /* | ||
85 | * While net_cls cgroup has the rudimentary hierarchy support of | ||
86 | * inheriting the parent's classid on cgroup creation, it doesn't | ||
 87 | properly propagate config changes in ancestors to their | ||
 88 | descendants. A child should follow the parent's configuration | ||
89 | * but be allowed to override it. Fix it and remove the following. | ||
90 | */ | ||
91 | .broken_hierarchy = true, | ||
85 | }; | 92 | }; |
86 | 93 | ||
87 | struct cls_cgroup_head { | 94 | struct cls_cgroup_head { |
@@ -151,7 +158,8 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = { | |||
151 | [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, | 158 | [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, |
152 | }; | 159 | }; |
153 | 160 | ||
154 | static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, | 161 | static int cls_cgroup_change(struct sk_buff *in_skb, |
162 | struct tcf_proto *tp, unsigned long base, | ||
155 | u32 handle, struct nlattr **tca, | 163 | u32 handle, struct nlattr **tca, |
156 | unsigned long *arg) | 164 | unsigned long *arg) |
157 | { | 165 | { |
@@ -283,12 +291,6 @@ static int __init init_cgroup_cls(void) | |||
283 | if (ret) | 291 | if (ret) |
284 | goto out; | 292 | goto out; |
285 | 293 | ||
286 | #ifndef CONFIG_NET_CLS_CGROUP | ||
287 | /* We can't use rcu_assign_pointer because this is an int. */ | ||
288 | smp_wmb(); | ||
289 | net_cls_subsys_id = net_cls_subsys.subsys_id; | ||
290 | #endif | ||
291 | |||
292 | ret = register_tcf_proto_ops(&cls_cgroup_ops); | 294 | ret = register_tcf_proto_ops(&cls_cgroup_ops); |
293 | if (ret) | 295 | if (ret) |
294 | cgroup_unload_subsys(&net_cls_subsys); | 296 | cgroup_unload_subsys(&net_cls_subsys); |
@@ -301,11 +303,6 @@ static void __exit exit_cgroup_cls(void) | |||
301 | { | 303 | { |
302 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 304 | unregister_tcf_proto_ops(&cls_cgroup_ops); |
303 | 305 | ||
304 | #ifndef CONFIG_NET_CLS_CGROUP | ||
305 | net_cls_subsys_id = -1; | ||
306 | synchronize_rcu(); | ||
307 | #endif | ||
308 | |||
309 | cgroup_unload_subsys(&net_cls_subsys); | 306 | cgroup_unload_subsys(&net_cls_subsys); |
310 | } | 307 | } |
311 | 308 | ||
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index ccd08c8dc6a7..ce82d0cb1b47 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -193,15 +193,19 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb) | |||
193 | 193 | ||
194 | static u32 flow_get_skuid(const struct sk_buff *skb) | 194 | static u32 flow_get_skuid(const struct sk_buff *skb) |
195 | { | 195 | { |
196 | if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) | 196 | if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { |
197 | return skb->sk->sk_socket->file->f_cred->fsuid; | 197 | kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid; |
198 | return from_kuid(&init_user_ns, skuid); | ||
199 | } | ||
198 | return 0; | 200 | return 0; |
199 | } | 201 | } |
200 | 202 | ||
201 | static u32 flow_get_skgid(const struct sk_buff *skb) | 203 | static u32 flow_get_skgid(const struct sk_buff *skb) |
202 | { | 204 | { |
203 | if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) | 205 | if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { |
204 | return skb->sk->sk_socket->file->f_cred->fsgid; | 206 | kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid; |
207 | return from_kgid(&init_user_ns, skgid); | ||
208 | } | ||
205 | return 0; | 209 | return 0; |
206 | } | 210 | } |
207 | 211 | ||
@@ -347,7 +351,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { | |||
347 | [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, | 351 | [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, |
348 | }; | 352 | }; |
349 | 353 | ||
350 | static int flow_change(struct tcf_proto *tp, unsigned long base, | 354 | static int flow_change(struct sk_buff *in_skb, |
355 | struct tcf_proto *tp, unsigned long base, | ||
351 | u32 handle, struct nlattr **tca, | 356 | u32 handle, struct nlattr **tca, |
352 | unsigned long *arg) | 357 | unsigned long *arg) |
353 | { | 358 | { |
@@ -386,6 +391,10 @@ static int flow_change(struct tcf_proto *tp, unsigned long base, | |||
386 | 391 | ||
387 | if (fls(keymask) - 1 > FLOW_KEY_MAX) | 392 | if (fls(keymask) - 1 > FLOW_KEY_MAX) |
388 | return -EOPNOTSUPP; | 393 | return -EOPNOTSUPP; |
394 | |||
395 | if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && | ||
396 | sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns) | ||
397 | return -EOPNOTSUPP; | ||
389 | } | 398 | } |
390 | 399 | ||
391 | err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map); | 400 | err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map); |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 8384a4797240..4075a0aef2aa 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -233,7 +233,8 @@ errout: | |||
233 | return err; | 233 | return err; |
234 | } | 234 | } |
235 | 235 | ||
236 | static int fw_change(struct tcf_proto *tp, unsigned long base, | 236 | static int fw_change(struct sk_buff *in_skb, |
237 | struct tcf_proto *tp, unsigned long base, | ||
237 | u32 handle, | 238 | u32 handle, |
238 | struct nlattr **tca, | 239 | struct nlattr **tca, |
239 | unsigned long *arg) | 240 | unsigned long *arg) |
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 44f405cb9aaf..c10d57bf98f2 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -427,7 +427,8 @@ errout: | |||
427 | return err; | 427 | return err; |
428 | } | 428 | } |
429 | 429 | ||
430 | static int route4_change(struct tcf_proto *tp, unsigned long base, | 430 | static int route4_change(struct sk_buff *in_skb, |
431 | struct tcf_proto *tp, unsigned long base, | ||
431 | u32 handle, | 432 | u32 handle, |
432 | struct nlattr **tca, | 433 | struct nlattr **tca, |
433 | unsigned long *arg) | 434 | unsigned long *arg) |
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 18ab93ec8d7e..494bbb90924a 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h | |||
@@ -416,7 +416,8 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = { | |||
416 | [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, | 416 | [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, |
417 | }; | 417 | }; |
418 | 418 | ||
419 | static int rsvp_change(struct tcf_proto *tp, unsigned long base, | 419 | static int rsvp_change(struct sk_buff *in_skb, |
420 | struct tcf_proto *tp, unsigned long base, | ||
420 | u32 handle, | 421 | u32 handle, |
421 | struct nlattr **tca, | 422 | struct nlattr **tca, |
422 | unsigned long *arg) | 423 | unsigned long *arg) |
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index fe29420d0b0e..a1293b4ab7a1 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -332,7 +332,8 @@ errout: | |||
332 | } | 332 | } |
333 | 333 | ||
334 | static int | 334 | static int |
335 | tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle, | 335 | tcindex_change(struct sk_buff *in_skb, |
336 | struct tcf_proto *tp, unsigned long base, u32 handle, | ||
336 | struct nlattr **tca, unsigned long *arg) | 337 | struct nlattr **tca, unsigned long *arg) |
337 | { | 338 | { |
338 | struct nlattr *opt = tca[TCA_OPTIONS]; | 339 | struct nlattr *opt = tca[TCA_OPTIONS]; |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d45373fb00b9..c7c27bc91b5a 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -544,7 +544,8 @@ errout: | |||
544 | return err; | 544 | return err; |
545 | } | 545 | } |
546 | 546 | ||
547 | static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | 547 | static int u32_change(struct sk_buff *in_skb, |
548 | struct tcf_proto *tp, unsigned long base, u32 handle, | ||
548 | struct nlattr **tca, | 549 | struct nlattr **tca, |
549 | unsigned long *arg) | 550 | unsigned long *arg) |
550 | { | 551 | { |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 6aabd77d1cfd..564b9fc8efd3 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
250 | else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) | 250 | else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) |
251 | cl = defmap[TC_PRIO_BESTEFFORT]; | 251 | cl = defmap[TC_PRIO_BESTEFFORT]; |
252 | 252 | ||
253 | if (cl == NULL || cl->level >= head->level) | 253 | if (cl == NULL) |
254 | goto fallback; | 254 | goto fallback; |
255 | } | 255 | } |
256 | 256 | if (cl->level >= head->level) | |
257 | goto fallback; | ||
257 | #ifdef CONFIG_NET_CLS_ACT | 258 | #ifdef CONFIG_NET_CLS_ACT |
258 | switch (result) { | 259 | switch (result) { |
259 | case TC_ACT_QUEUED: | 260 | case TC_ACT_QUEUED: |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 9fc1c62ec80e..4e606fcb2534 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
191 | 191 | ||
192 | if (list_empty(&flow->flowchain)) { | 192 | if (list_empty(&flow->flowchain)) { |
193 | list_add_tail(&flow->flowchain, &q->new_flows); | 193 | list_add_tail(&flow->flowchain, &q->new_flows); |
194 | codel_vars_init(&flow->cvars); | ||
195 | q->new_flow_count++; | 194 | q->new_flow_count++; |
196 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
197 | flow->dropped = 0; | 196 | flow->dropped = 0; |
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) | |||
418 | struct fq_codel_flow *flow = q->flows + i; | 417 | struct fq_codel_flow *flow = q->flows + i; |
419 | 418 | ||
420 | INIT_LIST_HEAD(&flow->flowchain); | 419 | INIT_LIST_HEAD(&flow->flowchain); |
420 | codel_vars_init(&flow->cvars); | ||
421 | } | 421 | } |
422 | } | 422 | } |
423 | if (sch->limit >= 1) | 423 | if (sch->limit >= 1) |
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index e901583e4ea5..d42234c0f13b 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch) | |||
102 | if (q == NULL) | 102 | if (q == NULL) |
103 | continue; | 103 | continue; |
104 | 104 | ||
105 | for (n = 0; n < table->DPs; n++) | 105 | for (n = i + 1; n < table->DPs; n++) |
106 | if (table->tab[n] && table->tab[n] != q && | 106 | if (table->tab[n] && table->tab[n]->prio == q->prio) |
107 | table->tab[n]->prio == q->prio) | ||
108 | return 1; | 107 | return 1; |
109 | } | 108 | } |
110 | 109 | ||
@@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table, | |||
137 | struct gred_sched_data *q) | 136 | struct gred_sched_data *q) |
138 | { | 137 | { |
139 | table->wred_set.qavg = q->vars.qavg; | 138 | table->wred_set.qavg = q->vars.qavg; |
139 | table->wred_set.qidlestart = q->vars.qidlestart; | ||
140 | } | 140 | } |
141 | 141 | ||
142 | static inline int gred_use_ecn(struct gred_sched *t) | 142 | static inline int gred_use_ecn(struct gred_sched *t) |
@@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
176 | skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; | 176 | skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; |
177 | } | 177 | } |
178 | 178 | ||
179 | /* sum up all the qaves of prios <= to ours to get the new qave */ | 179 | /* sum up all the qaves of prios < ours to get the new qave */ |
180 | if (!gred_wred_mode(t) && gred_rio_mode(t)) { | 180 | if (!gred_wred_mode(t) && gred_rio_mode(t)) { |
181 | int i; | 181 | int i; |
182 | 182 | ||
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) | |||
260 | } else { | 260 | } else { |
261 | q->backlog -= qdisc_pkt_len(skb); | 261 | q->backlog -= qdisc_pkt_len(skb); |
262 | 262 | ||
263 | if (!q->backlog && !gred_wred_mode(t)) | 263 | if (gred_wred_mode(t)) { |
264 | red_start_of_idle_period(&q->vars); | 264 | if (!sch->qstats.backlog) |
265 | red_start_of_idle_period(&t->wred_set); | ||
266 | } else { | ||
267 | if (!q->backlog) | ||
268 | red_start_of_idle_period(&q->vars); | ||
269 | } | ||
265 | } | 270 | } |
266 | 271 | ||
267 | return skb; | 272 | return skb; |
268 | } | 273 | } |
269 | 274 | ||
270 | if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) | ||
271 | red_start_of_idle_period(&t->wred_set); | ||
272 | |||
273 | return NULL; | 275 | return NULL; |
274 | } | 276 | } |
275 | 277 | ||
@@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch) | |||
291 | q->backlog -= len; | 293 | q->backlog -= len; |
292 | q->stats.other++; | 294 | q->stats.other++; |
293 | 295 | ||
294 | if (!q->backlog && !gred_wred_mode(t)) | 296 | if (gred_wred_mode(t)) { |
295 | red_start_of_idle_period(&q->vars); | 297 | if (!sch->qstats.backlog) |
298 | red_start_of_idle_period(&t->wred_set); | ||
299 | } else { | ||
300 | if (!q->backlog) | ||
301 | red_start_of_idle_period(&q->vars); | ||
302 | } | ||
296 | } | 303 | } |
297 | 304 | ||
298 | qdisc_drop(skb, sch); | 305 | qdisc_drop(skb, sch); |
299 | return len; | 306 | return len; |
300 | } | 307 | } |
301 | 308 | ||
302 | if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) | ||
303 | red_start_of_idle_period(&t->wred_set); | ||
304 | |||
305 | return 0; | 309 | return 0; |
306 | |||
307 | } | 310 | } |
308 | 311 | ||
309 | static void gred_reset(struct Qdisc *sch) | 312 | static void gred_reset(struct Qdisc *sch) |
@@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
535 | for (i = 0; i < MAX_DPs; i++) { | 538 | for (i = 0; i < MAX_DPs; i++) { |
536 | struct gred_sched_data *q = table->tab[i]; | 539 | struct gred_sched_data *q = table->tab[i]; |
537 | struct tc_gred_qopt opt; | 540 | struct tc_gred_qopt opt; |
541 | unsigned long qavg; | ||
538 | 542 | ||
539 | memset(&opt, 0, sizeof(opt)); | 543 | memset(&opt, 0, sizeof(opt)); |
540 | 544 | ||
@@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
566 | if (gred_wred_mode(table)) | 570 | if (gred_wred_mode(table)) |
567 | gred_load_wred_set(table, q); | 571 | gred_load_wred_set(table, q); |
568 | 572 | ||
569 | opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); | 573 | qavg = red_calc_qavg(&q->parms, &q->vars, |
574 | q->vars.qavg >> q->parms.Wlog); | ||
575 | opt.qave = qavg >> q->parms.Wlog; | ||
570 | 576 | ||
571 | append_opt: | 577 | append_opt: |
572 | if (nla_append(skb, sizeof(opt), &opt) < 0) | 578 | if (nla_append(skb, sizeof(opt), &opt) < 0) |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e4723d31fdd5..211a21217045 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -865,7 +865,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) | |||
865 | if (mask) { | 865 | if (mask) { |
866 | struct qfq_group *next = qfq_ffs(q, mask); | 866 | struct qfq_group *next = qfq_ffs(q, mask); |
867 | if (qfq_gt(roundedF, next->F)) { | 867 | if (qfq_gt(roundedF, next->F)) { |
868 | cl->S = next->F; | 868 | if (qfq_gt(limit, next->F)) |
869 | cl->S = next->F; | ||
870 | else /* preserve timestamp correctness */ | ||
871 | cl->S = limit; | ||
869 | return; | 872 | return; |
870 | } | 873 | } |
871 | } | 874 | } |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 838e18b4d7ea..be50aa234dcd 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -364,6 +364,25 @@ finish: | |||
364 | return retval; | 364 | return retval; |
365 | } | 365 | } |
366 | 366 | ||
367 | static void sctp_packet_release_owner(struct sk_buff *skb) | ||
368 | { | ||
369 | sk_free(skb->sk); | ||
370 | } | ||
371 | |||
372 | static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
373 | { | ||
374 | skb_orphan(skb); | ||
375 | skb->sk = sk; | ||
376 | skb->destructor = sctp_packet_release_owner; | ||
377 | |||
378 | /* | ||
379 | * The data chunks have already been accounted for in sctp_sendmsg(), | ||
380 | * therefore only reserve a single byte to keep socket around until | ||
381 | * the packet has been transmitted. | ||
382 | */ | ||
383 | atomic_inc(&sk->sk_wmem_alloc); | ||
384 | } | ||
385 | |||
367 | /* All packets are sent to the network through this function from | 386 | /* All packets are sent to the network through this function from |
368 | * sctp_outq_tail(). | 387 | * sctp_outq_tail(). |
369 | * | 388 | * |
@@ -405,7 +424,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
405 | /* Set the owning socket so that we know where to get the | 424 | /* Set the owning socket so that we know where to get the |
406 | * destination IP address. | 425 | * destination IP address. |
407 | */ | 426 | */ |
408 | skb_set_owner_w(nskb, sk); | 427 | sctp_packet_set_owner_w(nskb, sk); |
409 | 428 | ||
410 | if (!sctp_transport_dst_check(tp)) { | 429 | if (!sctp_transport_dst_check(tp)) { |
411 | sctp_transport_route(tp, NULL, sctp_sk(sk)); | 430 | sctp_transport_route(tp, NULL, sctp_sk(sk)); |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 1e2eee88c3ea..dc12febc977a 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -216,7 +216,8 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) | |||
216 | seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, | 216 | seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, |
217 | sctp_sk(sk)->type, sk->sk_state, hash, | 217 | sctp_sk(sk)->type, sk->sk_state, hash, |
218 | epb->bind_addr.port, | 218 | epb->bind_addr.port, |
219 | sock_i_uid(sk), sock_i_ino(sk)); | 219 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), |
220 | sock_i_ino(sk)); | ||
220 | 221 | ||
221 | sctp_seq_dump_local_addrs(seq, epb); | 222 | sctp_seq_dump_local_addrs(seq, epb); |
222 | seq_printf(seq, "\n"); | 223 | seq_printf(seq, "\n"); |
@@ -324,7 +325,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
324 | assoc->assoc_id, | 325 | assoc->assoc_id, |
325 | assoc->sndbuf_used, | 326 | assoc->sndbuf_used, |
326 | atomic_read(&assoc->rmem_alloc), | 327 | atomic_read(&assoc->rmem_alloc), |
327 | sock_i_uid(sk), sock_i_ino(sk), | 328 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), |
329 | sock_i_ino(sk), | ||
328 | epb->bind_addr.port, | 330 | epb->bind_addr.port, |
329 | assoc->peer.port); | 331 | assoc->peer.port); |
330 | seq_printf(seq, " "); | 332 | seq_printf(seq, " "); |
diff --git a/net/socket.c b/net/socket.c index dfe5b66c97e0..edc3c4af9085 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2604 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); | 2604 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); |
2605 | set_fs(old_fs); | 2605 | set_fs(old_fs); |
2606 | if (!err) | 2606 | if (!err) |
2607 | err = compat_put_timeval(up, &ktv); | 2607 | err = compat_put_timeval(&ktv, up); |
2608 | 2608 | ||
2609 | return err; | 2609 | return err; |
2610 | } | 2610 | } |
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2620 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); | 2620 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); |
2621 | set_fs(old_fs); | 2621 | set_fs(old_fs); |
2622 | if (!err) | 2622 | if (!err) |
2623 | err = compat_put_timespec(up, &kts); | 2623 | err = compat_put_timespec(&kts, up); |
2624 | 2624 | ||
2625 | return err; | 2625 | return err; |
2626 | } | 2626 | } |
@@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | |||
2657 | if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) | 2657 | if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) |
2658 | return -EFAULT; | 2658 | return -EFAULT; |
2659 | 2659 | ||
2660 | memset(&ifc, 0, sizeof(ifc)); | ||
2660 | if (ifc32.ifcbuf == 0) { | 2661 | if (ifc32.ifcbuf == 0) { |
2661 | ifc32.ifc_len = 0; | 2662 | ifc32.ifc_len = 0; |
2662 | ifc.ifc_len = 0; | 2663 | ifc.ifc_len = 0; |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 2afd2a84dc35..2a68bb3db772 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -1635,7 +1635,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) | |||
1635 | 1635 | ||
1636 | void __init cache_initialize(void) | 1636 | void __init cache_initialize(void) |
1637 | { | 1637 | { |
1638 | INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); | 1638 | INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean); |
1639 | } | 1639 | } |
1640 | 1640 | ||
1641 | int cache_register_net(struct cache_detail *cd, struct net *net) | 1641 | int cache_register_net(struct cache_detail *cd, struct net *net) |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 88f2bf671960..bac973a31367 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | |||
316 | */ | 316 | */ |
317 | void svc_xprt_enqueue(struct svc_xprt *xprt) | 317 | void svc_xprt_enqueue(struct svc_xprt *xprt) |
318 | { | 318 | { |
319 | struct svc_serv *serv = xprt->xpt_server; | ||
320 | struct svc_pool *pool; | 319 | struct svc_pool *pool; |
321 | struct svc_rqst *rqstp; | 320 | struct svc_rqst *rqstp; |
322 | int cpu; | 321 | int cpu; |
@@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
362 | rqstp, rqstp->rq_xprt); | 361 | rqstp, rqstp->rq_xprt); |
363 | rqstp->rq_xprt = xprt; | 362 | rqstp->rq_xprt = xprt; |
364 | svc_xprt_get(xprt); | 363 | svc_xprt_get(xprt); |
365 | rqstp->rq_reserved = serv->sv_max_mesg; | ||
366 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | ||
367 | pool->sp_stats.threads_woken++; | 364 | pool->sp_stats.threads_woken++; |
368 | wake_up(&rqstp->rq_wait); | 365 | wake_up(&rqstp->rq_wait); |
369 | } else { | 366 | } else { |
@@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
640 | if (xprt) { | 637 | if (xprt) { |
641 | rqstp->rq_xprt = xprt; | 638 | rqstp->rq_xprt = xprt; |
642 | svc_xprt_get(xprt); | 639 | svc_xprt_get(xprt); |
643 | rqstp->rq_reserved = serv->sv_max_mesg; | ||
644 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | ||
645 | 640 | ||
646 | /* As there is a shortage of threads and this request | 641 | /* As there is a shortage of threads and this request |
647 | * had to be queued, don't allow the thread to wait so | 642 | * had to be queued, don't allow the thread to wait so |
@@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
738 | else | 733 | else |
739 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); | 734 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
740 | dprintk("svc: got len=%d\n", len); | 735 | dprintk("svc: got len=%d\n", len); |
736 | rqstp->rq_reserved = serv->sv_max_mesg; | ||
737 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | ||
741 | } | 738 | } |
742 | svc_xprt_received(xprt); | 739 | svc_xprt_received(xprt); |
743 | 740 | ||
@@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp) | |||
794 | 791 | ||
795 | /* Grab mutex to serialize outgoing data. */ | 792 | /* Grab mutex to serialize outgoing data. */ |
796 | mutex_lock(&xprt->xpt_mutex); | 793 | mutex_lock(&xprt->xpt_mutex); |
797 | if (test_bit(XPT_DEAD, &xprt->xpt_flags)) | 794 | if (test_bit(XPT_DEAD, &xprt->xpt_flags) |
795 | || test_bit(XPT_CLOSE, &xprt->xpt_flags)) | ||
798 | len = -ENOTCONN; | 796 | len = -ENOTCONN; |
799 | else | 797 | else |
800 | len = xprt->xpt_ops->xpo_sendto(rqstp); | 798 | len = xprt->xpt_ops->xpo_sendto(rqstp); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 18bc130255a7..998aa8c1807c 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
1129 | if (len >= 0) | 1129 | if (len >= 0) |
1130 | svsk->sk_tcplen += len; | 1130 | svsk->sk_tcplen += len; |
1131 | if (len != want) { | 1131 | if (len != want) { |
1132 | svc_tcp_save_pages(svsk, rqstp); | ||
1132 | if (len < 0 && len != -EAGAIN) | 1133 | if (len < 0 && len != -EAGAIN) |
1133 | goto err_other; | 1134 | goto err_other; |
1134 | svc_tcp_save_pages(svsk, rqstp); | ||
1135 | dprintk("svc: incomplete TCP record (%d of %d)\n", | 1135 | dprintk("svc: incomplete TCP record (%d of %d)\n", |
1136 | svsk->sk_tcplen, svsk->sk_reclen); | 1136 | svsk->sk_tcplen, svsk->sk_reclen); |
1137 | goto err_noclose; | 1137 | goto err_noclose; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a5a402a7d21f..5d7f61d7559c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
969 | return false; | 969 | return false; |
970 | } | 970 | } |
971 | 971 | ||
972 | static void xprt_alloc_slot(struct rpc_task *task) | 972 | void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) |
973 | { | 973 | { |
974 | struct rpc_xprt *xprt = task->tk_xprt; | ||
975 | struct rpc_rqst *req; | 974 | struct rpc_rqst *req; |
976 | 975 | ||
976 | spin_lock(&xprt->reserve_lock); | ||
977 | if (!list_empty(&xprt->free)) { | 977 | if (!list_empty(&xprt->free)) { |
978 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); | 978 | req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); |
979 | list_del(&req->rq_list); | 979 | list_del(&req->rq_list); |
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task) | |||
994 | default: | 994 | default: |
995 | task->tk_status = -EAGAIN; | 995 | task->tk_status = -EAGAIN; |
996 | } | 996 | } |
997 | spin_unlock(&xprt->reserve_lock); | ||
997 | return; | 998 | return; |
998 | out_init_req: | 999 | out_init_req: |
999 | task->tk_status = 0; | 1000 | task->tk_status = 0; |
1000 | task->tk_rqstp = req; | 1001 | task->tk_rqstp = req; |
1001 | xprt_request_init(task, xprt); | 1002 | xprt_request_init(task, xprt); |
1003 | spin_unlock(&xprt->reserve_lock); | ||
1004 | } | ||
1005 | EXPORT_SYMBOL_GPL(xprt_alloc_slot); | ||
1006 | |||
1007 | void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) | ||
1008 | { | ||
1009 | /* Note: grabbing the xprt_lock_write() ensures that we throttle | ||
1010 | * new slot allocation if the transport is congested (i.e. when | ||
1011 | * reconnecting a stream transport or when out of socket write | ||
1012 | * buffer space). | ||
1013 | */ | ||
1014 | if (xprt_lock_write(xprt, task)) { | ||
1015 | xprt_alloc_slot(xprt, task); | ||
1016 | xprt_release_write(xprt, task); | ||
1017 | } | ||
1002 | } | 1018 | } |
1019 | EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot); | ||
1003 | 1020 | ||
1004 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) | 1021 | static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) |
1005 | { | 1022 | { |
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task) | |||
1083 | if (task->tk_rqstp != NULL) | 1100 | if (task->tk_rqstp != NULL) |
1084 | return; | 1101 | return; |
1085 | 1102 | ||
1086 | /* Note: grabbing the xprt_lock_write() here is not strictly needed, | ||
1087 | * but ensures that we throttle new slot allocation if the transport | ||
1088 | * is congested (e.g. if reconnecting or if we're out of socket | ||
1089 | * write buffer space). | ||
1090 | */ | ||
1091 | task->tk_timeout = 0; | 1103 | task->tk_timeout = 0; |
1092 | task->tk_status = -EAGAIN; | 1104 | task->tk_status = -EAGAIN; |
1093 | if (!xprt_lock_write(xprt, task)) | 1105 | xprt->ops->alloc_slot(xprt, task); |
1094 | return; | ||
1095 | |||
1096 | spin_lock(&xprt->reserve_lock); | ||
1097 | xprt_alloc_slot(task); | ||
1098 | spin_unlock(&xprt->reserve_lock); | ||
1099 | xprt_release_write(xprt, task); | ||
1100 | } | 1106 | } |
1101 | 1107 | ||
1102 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 1108 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 06cdbff79e4a..5d9202dc7cb1 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
713 | static struct rpc_xprt_ops xprt_rdma_procs = { | 713 | static struct rpc_xprt_ops xprt_rdma_procs = { |
714 | .reserve_xprt = xprt_rdma_reserve_xprt, | 714 | .reserve_xprt = xprt_rdma_reserve_xprt, |
715 | .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ | 715 | .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ |
716 | .alloc_slot = xprt_alloc_slot, | ||
716 | .release_request = xprt_release_rqst_cong, /* ditto */ | 717 | .release_request = xprt_release_rqst_cong, /* ditto */ |
717 | .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ | 718 | .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ |
718 | .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ | 719 | .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 400567243f84..a35b8e52e551 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt) | |||
2473 | static struct rpc_xprt_ops xs_local_ops = { | 2473 | static struct rpc_xprt_ops xs_local_ops = { |
2474 | .reserve_xprt = xprt_reserve_xprt, | 2474 | .reserve_xprt = xprt_reserve_xprt, |
2475 | .release_xprt = xs_tcp_release_xprt, | 2475 | .release_xprt = xs_tcp_release_xprt, |
2476 | .alloc_slot = xprt_alloc_slot, | ||
2476 | .rpcbind = xs_local_rpcbind, | 2477 | .rpcbind = xs_local_rpcbind, |
2477 | .set_port = xs_local_set_port, | 2478 | .set_port = xs_local_set_port, |
2478 | .connect = xs_connect, | 2479 | .connect = xs_connect, |
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
2489 | .set_buffer_size = xs_udp_set_buffer_size, | 2490 | .set_buffer_size = xs_udp_set_buffer_size, |
2490 | .reserve_xprt = xprt_reserve_xprt_cong, | 2491 | .reserve_xprt = xprt_reserve_xprt_cong, |
2491 | .release_xprt = xprt_release_xprt_cong, | 2492 | .release_xprt = xprt_release_xprt_cong, |
2493 | .alloc_slot = xprt_alloc_slot, | ||
2492 | .rpcbind = rpcb_getport_async, | 2494 | .rpcbind = rpcb_getport_async, |
2493 | .set_port = xs_set_port, | 2495 | .set_port = xs_set_port, |
2494 | .connect = xs_connect, | 2496 | .connect = xs_connect, |
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
2506 | static struct rpc_xprt_ops xs_tcp_ops = { | 2508 | static struct rpc_xprt_ops xs_tcp_ops = { |
2507 | .reserve_xprt = xprt_reserve_xprt, | 2509 | .reserve_xprt = xprt_reserve_xprt, |
2508 | .release_xprt = xs_tcp_release_xprt, | 2510 | .release_xprt = xs_tcp_release_xprt, |
2511 | .alloc_slot = xprt_lock_and_alloc_slot, | ||
2509 | .rpcbind = rpcb_getport_async, | 2512 | .rpcbind = rpcb_getport_async, |
2510 | .set_port = xs_set_port, | 2513 | .set_port = xs_set_port, |
2511 | .connect = xs_connect, | 2514 | .connect = xs_connect, |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e4768c180da2..c5ee4ff61364 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1450,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1450 | if (NULL == siocb->scm) | 1450 | if (NULL == siocb->scm) |
1451 | siocb->scm = &tmp_scm; | 1451 | siocb->scm = &tmp_scm; |
1452 | wait_for_unix_gc(); | 1452 | wait_for_unix_gc(); |
1453 | err = scm_send(sock, msg, siocb->scm); | 1453 | err = scm_send(sock, msg, siocb->scm, false); |
1454 | if (err < 0) | 1454 | if (err < 0) |
1455 | return err; | 1455 | return err; |
1456 | 1456 | ||
@@ -1619,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1619 | if (NULL == siocb->scm) | 1619 | if (NULL == siocb->scm) |
1620 | siocb->scm = &tmp_scm; | 1620 | siocb->scm = &tmp_scm; |
1621 | wait_for_unix_gc(); | 1621 | wait_for_unix_gc(); |
1622 | err = scm_send(sock, msg, siocb->scm); | 1622 | err = scm_send(sock, msg, siocb->scm, false); |
1623 | if (err < 0) | 1623 | if (err < 0) |
1624 | return err; | 1624 | return err; |
1625 | 1625 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 97026f3b215a..1e37dbf00cb3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -5633,8 +5633,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
5633 | sizeof(connect.ht_capa_mask)); | 5633 | sizeof(connect.ht_capa_mask)); |
5634 | 5634 | ||
5635 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { | 5635 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { |
5636 | if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) | 5636 | if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { |
5637 | kfree(connkeys); | ||
5637 | return -EINVAL; | 5638 | return -EINVAL; |
5639 | } | ||
5638 | memcpy(&connect.ht_capa, | 5640 | memcpy(&connect.ht_capa, |
5639 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), | 5641 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), |
5640 | sizeof(connect.ht_capa)); | 5642 | sizeof(connect.ht_capa)); |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2ded3c7fad06..72d170ca3406 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -350,6 +350,9 @@ static void reg_regdb_search(struct work_struct *work) | |||
350 | struct reg_regdb_search_request *request; | 350 | struct reg_regdb_search_request *request; |
351 | const struct ieee80211_regdomain *curdom, *regdom; | 351 | const struct ieee80211_regdomain *curdom, *regdom; |
352 | int i, r; | 352 | int i, r; |
353 | bool set_reg = false; | ||
354 | |||
355 | mutex_lock(&cfg80211_mutex); | ||
353 | 356 | ||
354 | mutex_lock(®_regdb_search_mutex); | 357 | mutex_lock(®_regdb_search_mutex); |
355 | while (!list_empty(®_regdb_search_list)) { | 358 | while (!list_empty(®_regdb_search_list)) { |
@@ -365,9 +368,7 @@ static void reg_regdb_search(struct work_struct *work) | |||
365 | r = reg_copy_regd(®dom, curdom); | 368 | r = reg_copy_regd(®dom, curdom); |
366 | if (r) | 369 | if (r) |
367 | break; | 370 | break; |
368 | mutex_lock(&cfg80211_mutex); | 371 | set_reg = true; |
369 | set_regdom(regdom); | ||
370 | mutex_unlock(&cfg80211_mutex); | ||
371 | break; | 372 | break; |
372 | } | 373 | } |
373 | } | 374 | } |
@@ -375,6 +376,11 @@ static void reg_regdb_search(struct work_struct *work) | |||
375 | kfree(request); | 376 | kfree(request); |
376 | } | 377 | } |
377 | mutex_unlock(®_regdb_search_mutex); | 378 | mutex_unlock(®_regdb_search_mutex); |
379 | |||
380 | if (set_reg) | ||
381 | set_regdom(regdom); | ||
382 | |||
383 | mutex_unlock(&cfg80211_mutex); | ||
378 | } | 384 | } |
379 | 385 | ||
380 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); | 386 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 54a0dc2e2f8d..ab2bb42fe094 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -212,7 +212,7 @@ resume: | |||
212 | /* only the first xfrm gets the encap type */ | 212 | /* only the first xfrm gets the encap type */ |
213 | encap_type = 0; | 213 | encap_type = 0; |
214 | 214 | ||
215 | if (async && x->repl->check(x, skb, seq)) { | 215 | if (async && x->repl->recheck(x, skb, seq)) { |
216 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); | 216 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); |
217 | goto drop_unlock; | 217 | goto drop_unlock; |
218 | } | 218 | } |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c5a5165a5927..46550997548c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -585,6 +585,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
585 | xfrm_pol_hold(policy); | 585 | xfrm_pol_hold(policy); |
586 | net->xfrm.policy_count[dir]++; | 586 | net->xfrm.policy_count[dir]++; |
587 | atomic_inc(&flow_cache_genid); | 587 | atomic_inc(&flow_cache_genid); |
588 | rt_genid_bump(net); | ||
588 | if (delpol) | 589 | if (delpol) |
589 | __xfrm_policy_unlink(delpol, dir); | 590 | __xfrm_policy_unlink(delpol, dir); |
590 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); | 591 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); |
@@ -1357,6 +1358,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1357 | 1358 | ||
1358 | memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); | 1359 | memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); |
1359 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1360 | xdst->flo.ops = &xfrm_bundle_fc_ops; |
1361 | if (afinfo->init_dst) | ||
1362 | afinfo->init_dst(net, xdst); | ||
1360 | } else | 1363 | } else |
1361 | xdst = ERR_PTR(-ENOBUFS); | 1364 | xdst = ERR_PTR(-ENOBUFS); |
1362 | 1365 | ||
@@ -1761,7 +1764,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, | |||
1761 | 1764 | ||
1762 | if (!afinfo) { | 1765 | if (!afinfo) { |
1763 | dst_release(dst_orig); | 1766 | dst_release(dst_orig); |
1764 | ret = ERR_PTR(-EINVAL); | 1767 | return ERR_PTR(-EINVAL); |
1765 | } else { | 1768 | } else { |
1766 | ret = afinfo->blackhole_route(net, dst_orig); | 1769 | ret = afinfo->blackhole_route(net, dst_orig); |
1767 | } | 1770 | } |
@@ -2630,12 +2633,12 @@ static void xfrm_policy_fini(struct net *net) | |||
2630 | 2633 | ||
2631 | flush_work(&net->xfrm.policy_hash_work); | 2634 | flush_work(&net->xfrm.policy_hash_work); |
2632 | #ifdef CONFIG_XFRM_SUB_POLICY | 2635 | #ifdef CONFIG_XFRM_SUB_POLICY |
2633 | audit_info.loginuid = -1; | 2636 | audit_info.loginuid = INVALID_UID; |
2634 | audit_info.sessionid = -1; | 2637 | audit_info.sessionid = -1; |
2635 | audit_info.secid = 0; | 2638 | audit_info.secid = 0; |
2636 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); | 2639 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); |
2637 | #endif | 2640 | #endif |
2638 | audit_info.loginuid = -1; | 2641 | audit_info.loginuid = INVALID_UID; |
2639 | audit_info.sessionid = -1; | 2642 | audit_info.sessionid = -1; |
2640 | audit_info.secid = 0; | 2643 | audit_info.secid = 0; |
2641 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); | 2644 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); |
@@ -2742,7 +2745,7 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, | |||
2742 | } | 2745 | } |
2743 | 2746 | ||
2744 | void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, | 2747 | void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, |
2745 | uid_t auid, u32 sessionid, u32 secid) | 2748 | kuid_t auid, u32 sessionid, u32 secid) |
2746 | { | 2749 | { |
2747 | struct audit_buffer *audit_buf; | 2750 | struct audit_buffer *audit_buf; |
2748 | 2751 | ||
@@ -2757,7 +2760,7 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, | |||
2757 | EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); | 2760 | EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); |
2758 | 2761 | ||
2759 | void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, | 2762 | void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, |
2760 | uid_t auid, u32 sessionid, u32 secid) | 2763 | kuid_t auid, u32 sessionid, u32 secid) |
2761 | { | 2764 | { |
2762 | struct audit_buffer *audit_buf; | 2765 | struct audit_buffer *audit_buf; |
2763 | 2766 | ||
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 2f6d11d04a2b..3efb07d3eb27 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -420,6 +420,18 @@ err: | |||
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int xfrm_replay_recheck_esn(struct xfrm_state *x, | ||
424 | struct sk_buff *skb, __be32 net_seq) | ||
425 | { | ||
426 | if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi != | ||
427 | htonl(xfrm_replay_seqhi(x, net_seq)))) { | ||
428 | x->stats.replay_window++; | ||
429 | return -EINVAL; | ||
430 | } | ||
431 | |||
432 | return xfrm_replay_check_esn(x, skb, net_seq); | ||
433 | } | ||
434 | |||
423 | static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) | 435 | static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) |
424 | { | 436 | { |
425 | unsigned int bitnr, nr, i; | 437 | unsigned int bitnr, nr, i; |
@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) | |||
479 | static struct xfrm_replay xfrm_replay_legacy = { | 491 | static struct xfrm_replay xfrm_replay_legacy = { |
480 | .advance = xfrm_replay_advance, | 492 | .advance = xfrm_replay_advance, |
481 | .check = xfrm_replay_check, | 493 | .check = xfrm_replay_check, |
494 | .recheck = xfrm_replay_check, | ||
482 | .notify = xfrm_replay_notify, | 495 | .notify = xfrm_replay_notify, |
483 | .overflow = xfrm_replay_overflow, | 496 | .overflow = xfrm_replay_overflow, |
484 | }; | 497 | }; |
@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = { | |||
486 | static struct xfrm_replay xfrm_replay_bmp = { | 499 | static struct xfrm_replay xfrm_replay_bmp = { |
487 | .advance = xfrm_replay_advance_bmp, | 500 | .advance = xfrm_replay_advance_bmp, |
488 | .check = xfrm_replay_check_bmp, | 501 | .check = xfrm_replay_check_bmp, |
502 | .recheck = xfrm_replay_check_bmp, | ||
489 | .notify = xfrm_replay_notify_bmp, | 503 | .notify = xfrm_replay_notify_bmp, |
490 | .overflow = xfrm_replay_overflow_bmp, | 504 | .overflow = xfrm_replay_overflow_bmp, |
491 | }; | 505 | }; |
@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = { | |||
493 | static struct xfrm_replay xfrm_replay_esn = { | 507 | static struct xfrm_replay xfrm_replay_esn = { |
494 | .advance = xfrm_replay_advance_esn, | 508 | .advance = xfrm_replay_advance_esn, |
495 | .check = xfrm_replay_check_esn, | 509 | .check = xfrm_replay_check_esn, |
510 | .recheck = xfrm_replay_recheck_esn, | ||
496 | .notify = xfrm_replay_notify_bmp, | 511 | .notify = xfrm_replay_notify_bmp, |
497 | .overflow = xfrm_replay_overflow_esn, | 512 | .overflow = xfrm_replay_overflow_esn, |
498 | }; | 513 | }; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 87cd0e4d4282..bd2d9841ad59 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) | |||
1994 | goto error; | 1994 | goto error; |
1995 | 1995 | ||
1996 | x->outer_mode = xfrm_get_mode(x->props.mode, family); | 1996 | x->outer_mode = xfrm_get_mode(x->props.mode, family); |
1997 | if (x->outer_mode == NULL) | 1997 | if (x->outer_mode == NULL) { |
1998 | err = -EPROTONOSUPPORT; | ||
1998 | goto error; | 1999 | goto error; |
2000 | } | ||
1999 | 2001 | ||
2000 | if (init_replay) { | 2002 | if (init_replay) { |
2001 | err = xfrm_init_replay(x); | 2003 | err = xfrm_init_replay(x); |
@@ -2058,7 +2060,7 @@ void xfrm_state_fini(struct net *net) | |||
2058 | unsigned int sz; | 2060 | unsigned int sz; |
2059 | 2061 | ||
2060 | flush_work(&net->xfrm.state_hash_work); | 2062 | flush_work(&net->xfrm.state_hash_work); |
2061 | audit_info.loginuid = -1; | 2063 | audit_info.loginuid = INVALID_UID; |
2062 | audit_info.sessionid = -1; | 2064 | audit_info.sessionid = -1; |
2063 | audit_info.secid = 0; | 2065 | audit_info.secid = 0; |
2064 | xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info); | 2066 | xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info); |
@@ -2125,7 +2127,7 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, | |||
2125 | } | 2127 | } |
2126 | 2128 | ||
2127 | void xfrm_audit_state_add(struct xfrm_state *x, int result, | 2129 | void xfrm_audit_state_add(struct xfrm_state *x, int result, |
2128 | uid_t auid, u32 sessionid, u32 secid) | 2130 | kuid_t auid, u32 sessionid, u32 secid) |
2129 | { | 2131 | { |
2130 | struct audit_buffer *audit_buf; | 2132 | struct audit_buffer *audit_buf; |
2131 | 2133 | ||
@@ -2140,7 +2142,7 @@ void xfrm_audit_state_add(struct xfrm_state *x, int result, | |||
2140 | EXPORT_SYMBOL_GPL(xfrm_audit_state_add); | 2142 | EXPORT_SYMBOL_GPL(xfrm_audit_state_add); |
2141 | 2143 | ||
2142 | void xfrm_audit_state_delete(struct xfrm_state *x, int result, | 2144 | void xfrm_audit_state_delete(struct xfrm_state *x, int result, |
2143 | uid_t auid, u32 sessionid, u32 secid) | 2145 | kuid_t auid, u32 sessionid, u32 secid) |
2144 | { | 2146 | { |
2145 | struct audit_buffer *audit_buf; | 2147 | struct audit_buffer *audit_buf; |
2146 | 2148 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e75d8e47f35c..bc542448307a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p, | |||
123 | struct nlattr **attrs) | 123 | struct nlattr **attrs) |
124 | { | 124 | { |
125 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; | 125 | struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; |
126 | struct xfrm_replay_state_esn *rs; | ||
126 | 127 | ||
127 | if ((p->flags & XFRM_STATE_ESN) && !rt) | 128 | if (p->flags & XFRM_STATE_ESN) { |
128 | return -EINVAL; | 129 | if (!rt) |
130 | return -EINVAL; | ||
131 | |||
132 | rs = nla_data(rt); | ||
133 | |||
134 | if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) | ||
135 | return -EINVAL; | ||
136 | |||
137 | if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && | ||
138 | nla_len(rt) != sizeof(*rs)) | ||
139 | return -EINVAL; | ||
140 | } | ||
129 | 141 | ||
130 | if (!rt) | 142 | if (!rt) |
131 | return 0; | 143 | return 0; |
@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es | |||
370 | struct nlattr *rp) | 382 | struct nlattr *rp) |
371 | { | 383 | { |
372 | struct xfrm_replay_state_esn *up; | 384 | struct xfrm_replay_state_esn *up; |
385 | int ulen; | ||
373 | 386 | ||
374 | if (!replay_esn || !rp) | 387 | if (!replay_esn || !rp) |
375 | return 0; | 388 | return 0; |
376 | 389 | ||
377 | up = nla_data(rp); | 390 | up = nla_data(rp); |
391 | ulen = xfrm_replay_state_esn_len(up); | ||
378 | 392 | ||
379 | if (xfrm_replay_state_esn_len(replay_esn) != | 393 | if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) |
380 | xfrm_replay_state_esn_len(up)) | ||
381 | return -EINVAL; | 394 | return -EINVAL; |
382 | 395 | ||
383 | return 0; | 396 | return 0; |
@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn | |||
388 | struct nlattr *rta) | 401 | struct nlattr *rta) |
389 | { | 402 | { |
390 | struct xfrm_replay_state_esn *p, *pp, *up; | 403 | struct xfrm_replay_state_esn *p, *pp, *up; |
404 | int klen, ulen; | ||
391 | 405 | ||
392 | if (!rta) | 406 | if (!rta) |
393 | return 0; | 407 | return 0; |
394 | 408 | ||
395 | up = nla_data(rta); | 409 | up = nla_data(rta); |
410 | klen = xfrm_replay_state_esn_len(up); | ||
411 | ulen = nla_len(rta) >= klen ? klen : sizeof(*up); | ||
396 | 412 | ||
397 | p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); | 413 | p = kzalloc(klen, GFP_KERNEL); |
398 | if (!p) | 414 | if (!p) |
399 | return -ENOMEM; | 415 | return -ENOMEM; |
400 | 416 | ||
401 | pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); | 417 | pp = kzalloc(klen, GFP_KERNEL); |
402 | if (!pp) { | 418 | if (!pp) { |
403 | kfree(p); | 419 | kfree(p); |
404 | return -ENOMEM; | 420 | return -ENOMEM; |
405 | } | 421 | } |
406 | 422 | ||
423 | memcpy(p, up, ulen); | ||
424 | memcpy(pp, up, ulen); | ||
425 | |||
407 | *replay_esn = p; | 426 | *replay_esn = p; |
408 | *preplay_esn = pp; | 427 | *preplay_esn = pp; |
409 | 428 | ||
@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * | |||
442 | * somehow made shareable and move it to xfrm_state.c - JHS | 461 | * somehow made shareable and move it to xfrm_state.c - JHS |
443 | * | 462 | * |
444 | */ | 463 | */ |
445 | static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) | 464 | static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, |
465 | int update_esn) | ||
446 | { | 466 | { |
447 | struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; | 467 | struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; |
448 | struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; | 468 | struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; |
449 | struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; | 469 | struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; |
450 | struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; | 470 | struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; |
451 | struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; | 471 | struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; |
@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, | |||
555 | goto error; | 575 | goto error; |
556 | 576 | ||
557 | /* override default values from above */ | 577 | /* override default values from above */ |
558 | xfrm_update_ae_params(x, attrs); | 578 | xfrm_update_ae_params(x, attrs, 0); |
559 | 579 | ||
560 | return x; | 580 | return x; |
561 | 581 | ||
@@ -575,7 +595,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
575 | struct xfrm_state *x; | 595 | struct xfrm_state *x; |
576 | int err; | 596 | int err; |
577 | struct km_event c; | 597 | struct km_event c; |
578 | uid_t loginuid = audit_get_loginuid(current); | 598 | kuid_t loginuid = audit_get_loginuid(current); |
579 | u32 sessionid = audit_get_sessionid(current); | 599 | u32 sessionid = audit_get_sessionid(current); |
580 | u32 sid; | 600 | u32 sid; |
581 | 601 | ||
@@ -654,7 +674,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
654 | int err = -ESRCH; | 674 | int err = -ESRCH; |
655 | struct km_event c; | 675 | struct km_event c; |
656 | struct xfrm_usersa_id *p = nlmsg_data(nlh); | 676 | struct xfrm_usersa_id *p = nlmsg_data(nlh); |
657 | uid_t loginuid = audit_get_loginuid(current); | 677 | kuid_t loginuid = audit_get_loginuid(current); |
658 | u32 sessionid = audit_get_sessionid(current); | 678 | u32 sessionid = audit_get_sessionid(current); |
659 | u32 sid; | 679 | u32 sid; |
660 | 680 | ||
@@ -689,6 +709,7 @@ out: | |||
689 | 709 | ||
690 | static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) | 710 | static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) |
691 | { | 711 | { |
712 | memset(p, 0, sizeof(*p)); | ||
692 | memcpy(&p->id, &x->id, sizeof(p->id)); | 713 | memcpy(&p->id, &x->id, sizeof(p->id)); |
693 | memcpy(&p->sel, &x->sel, sizeof(p->sel)); | 714 | memcpy(&p->sel, &x->sel, sizeof(p->sel)); |
694 | memcpy(&p->lft, &x->lft, sizeof(p->lft)); | 715 | memcpy(&p->lft, &x->lft, sizeof(p->lft)); |
@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) | |||
742 | return -EMSGSIZE; | 763 | return -EMSGSIZE; |
743 | 764 | ||
744 | algo = nla_data(nla); | 765 | algo = nla_data(nla); |
745 | strcpy(algo->alg_name, auth->alg_name); | 766 | strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); |
746 | memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); | 767 | memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); |
747 | algo->alg_key_len = auth->alg_key_len; | 768 | algo->alg_key_len = auth->alg_key_len; |
748 | 769 | ||
@@ -878,6 +899,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
878 | { | 899 | { |
879 | struct xfrm_dump_info info; | 900 | struct xfrm_dump_info info; |
880 | struct sk_buff *skb; | 901 | struct sk_buff *skb; |
902 | int err; | ||
881 | 903 | ||
882 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 904 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
883 | if (!skb) | 905 | if (!skb) |
@@ -888,9 +910,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
888 | info.nlmsg_seq = seq; | 910 | info.nlmsg_seq = seq; |
889 | info.nlmsg_flags = 0; | 911 | info.nlmsg_flags = 0; |
890 | 912 | ||
891 | if (dump_one_state(x, 0, &info)) { | 913 | err = dump_one_state(x, 0, &info); |
914 | if (err) { | ||
892 | kfree_skb(skb); | 915 | kfree_skb(skb); |
893 | return NULL; | 916 | return ERR_PTR(err); |
894 | } | 917 | } |
895 | 918 | ||
896 | return skb; | 919 | return skb; |
@@ -1317,6 +1340,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy | |||
1317 | 1340 | ||
1318 | static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) | 1341 | static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) |
1319 | { | 1342 | { |
1343 | memset(p, 0, sizeof(*p)); | ||
1320 | memcpy(&p->sel, &xp->selector, sizeof(p->sel)); | 1344 | memcpy(&p->sel, &xp->selector, sizeof(p->sel)); |
1321 | memcpy(&p->lft, &xp->lft, sizeof(p->lft)); | 1345 | memcpy(&p->lft, &xp->lft, sizeof(p->lft)); |
1322 | memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); | 1346 | memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); |
@@ -1369,7 +1393,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1369 | struct km_event c; | 1393 | struct km_event c; |
1370 | int err; | 1394 | int err; |
1371 | int excl; | 1395 | int excl; |
1372 | uid_t loginuid = audit_get_loginuid(current); | 1396 | kuid_t loginuid = audit_get_loginuid(current); |
1373 | u32 sessionid = audit_get_sessionid(current); | 1397 | u32 sessionid = audit_get_sessionid(current); |
1374 | u32 sid; | 1398 | u32 sid; |
1375 | 1399 | ||
@@ -1421,6 +1445,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) | |||
1421 | struct xfrm_user_tmpl *up = &vec[i]; | 1445 | struct xfrm_user_tmpl *up = &vec[i]; |
1422 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; | 1446 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; |
1423 | 1447 | ||
1448 | memset(up, 0, sizeof(*up)); | ||
1424 | memcpy(&up->id, &kp->id, sizeof(up->id)); | 1449 | memcpy(&up->id, &kp->id, sizeof(up->id)); |
1425 | up->family = kp->encap_family; | 1450 | up->family = kp->encap_family; |
1426 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); | 1451 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); |
@@ -1546,6 +1571,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1546 | { | 1571 | { |
1547 | struct xfrm_dump_info info; | 1572 | struct xfrm_dump_info info; |
1548 | struct sk_buff *skb; | 1573 | struct sk_buff *skb; |
1574 | int err; | ||
1549 | 1575 | ||
1550 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 1576 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
1551 | if (!skb) | 1577 | if (!skb) |
@@ -1556,9 +1582,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1556 | info.nlmsg_seq = seq; | 1582 | info.nlmsg_seq = seq; |
1557 | info.nlmsg_flags = 0; | 1583 | info.nlmsg_flags = 0; |
1558 | 1584 | ||
1559 | if (dump_one_policy(xp, dir, 0, &info) < 0) { | 1585 | err = dump_one_policy(xp, dir, 0, &info); |
1586 | if (err) { | ||
1560 | kfree_skb(skb); | 1587 | kfree_skb(skb); |
1561 | return NULL; | 1588 | return ERR_PTR(err); |
1562 | } | 1589 | } |
1563 | 1590 | ||
1564 | return skb; | 1591 | return skb; |
@@ -1624,7 +1651,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1624 | NETLINK_CB(skb).pid); | 1651 | NETLINK_CB(skb).pid); |
1625 | } | 1652 | } |
1626 | } else { | 1653 | } else { |
1627 | uid_t loginuid = audit_get_loginuid(current); | 1654 | kuid_t loginuid = audit_get_loginuid(current); |
1628 | u32 sessionid = audit_get_sessionid(current); | 1655 | u32 sessionid = audit_get_sessionid(current); |
1629 | u32 sid; | 1656 | u32 sid; |
1630 | 1657 | ||
@@ -1822,7 +1849,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1822 | goto out; | 1849 | goto out; |
1823 | 1850 | ||
1824 | spin_lock_bh(&x->lock); | 1851 | spin_lock_bh(&x->lock); |
1825 | xfrm_update_ae_params(x, attrs); | 1852 | xfrm_update_ae_params(x, attrs, 1); |
1826 | spin_unlock_bh(&x->lock); | 1853 | spin_unlock_bh(&x->lock); |
1827 | 1854 | ||
1828 | c.event = nlh->nlmsg_type; | 1855 | c.event = nlh->nlmsg_type; |
@@ -1918,7 +1945,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1918 | 1945 | ||
1919 | err = 0; | 1946 | err = 0; |
1920 | if (up->hard) { | 1947 | if (up->hard) { |
1921 | uid_t loginuid = audit_get_loginuid(current); | 1948 | kuid_t loginuid = audit_get_loginuid(current); |
1922 | u32 sessionid = audit_get_sessionid(current); | 1949 | u32 sessionid = audit_get_sessionid(current); |
1923 | u32 sid; | 1950 | u32 sid; |
1924 | 1951 | ||
@@ -1961,7 +1988,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1961 | km_state_expired(x, ue->hard, current->pid); | 1988 | km_state_expired(x, ue->hard, current->pid); |
1962 | 1989 | ||
1963 | if (ue->hard) { | 1990 | if (ue->hard) { |
1964 | uid_t loginuid = audit_get_loginuid(current); | 1991 | kuid_t loginuid = audit_get_loginuid(current); |
1965 | u32 sessionid = audit_get_sessionid(current); | 1992 | u32 sessionid = audit_get_sessionid(current); |
1966 | u32 sid; | 1993 | u32 sid; |
1967 | 1994 | ||