Diffstat (limited to 'net')
-rw-r--r--  net/atm/br2684.c            4
-rw-r--r--  net/bridge/br_stp_if.c      2
-rw-r--r--  net/core/dev.c             38
-rw-r--r--  net/core/dev_mcast.c       12
-rw-r--r--  net/core/gen_estimator.c   81
-rw-r--r--  net/ipv4/tcp_bic.c          2
-rw-r--r--  net/ipv4/tcp_cong.c         3
-rw-r--r--  net/ipv4/tcp_cubic.c        2
-rw-r--r--  net/ipv4/tcp_highspeed.c    2
-rw-r--r--  net/ipv4/tcp_htcp.c         2
-rw-r--r--  net/ipv4/tcp_hybla.c        4
-rw-r--r--  net/ipv4/tcp_illinois.c     2
-rw-r--r--  net/ipv4/tcp_input.c        8
-rw-r--r--  net/ipv4/tcp_lp.c           5
-rw-r--r--  net/ipv4/tcp_scalable.c     2
-rw-r--r--  net/ipv4/tcp_vegas.c        6
-rw-r--r--  net/ipv4/tcp_veno.c         6
-rw-r--r--  net/ipv4/tcp_yeah.c         2
-rw-r--r--  net/irda/af_irda.c          2
-rw-r--r--  net/irda/irda_device.c      4
-rw-r--r--  net/irda/iriap.c            2
-rw-r--r--  net/irda/irias_object.c    43
-rw-r--r--  net/irda/irlap.c            2
-rw-r--r--  net/irda/irlmp.c            2
-rw-r--r--  net/irda/irproc.c           2
-rw-r--r--  net/irda/irsysctl.c         2
-rw-r--r--  net/irda/irttp.c            2
-rw-r--r--  net/netfilter/Kconfig       1
-rw-r--r--  net/netlink/af_netlink.c  166
-rw-r--r--  net/netlink/genetlink.c   235
-rw-r--r--  net/rfkill/rfkill.c         2
-rw-r--r--  net/sched/Kconfig           6
-rw-r--r--  net/sched/sch_atm.c         3
-rw-r--r--  net/xfrm/xfrm_policy.c      2
34 files changed, 471 insertions(+), 188 deletions(-)
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index faa6aaf67563..c0f6861eefe3 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
 	skb_pull(skb, plen);
 	skb_set_mac_header(skb, -ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
-	skb->protocol = ((u16 *) skb->data)[-1];
-#else				/* some protocols might require this: */
 	skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
 #else
 	skb_pull(skb, plen - ETH_HLEN);
 	skb->protocol = eth_type_trans(skb, net_dev);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a786e7863200..1ea2f86f7683 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -125,7 +125,7 @@ static void br_stp_start(struct net_bridge *br)
 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
 	char *envp[] = { NULL };
 
-	r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
 	if (r == 0) {
 		br->stp_enabled = BR_USER_STP;
 		printk(KERN_INFO "%s: userspace STP started\n", br->dev->name);
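
The bare mode argument 1 is replaced by the named UMH_WAIT_PROC constant, which tells call_usermodehelper() to sleep until the helper process has exited so that the return code reflects its exit status. A minimal, hedged sketch of the same pattern; the "/sbin/bridge-stp" path is what BR_STP_PROG is believed to expand to in br_private.h, and the wrapper function is illustrative only:

#include <linux/kmod.h>

/* Run the userspace STP helper and wait for it to exit.
 * UMH_WAIT_PROC makes call_usermodehelper() block until the helper
 * process terminates, so the caller can check its exit status. */
static int run_stp_helper(const char *ifname)
{
	char *argv[] = { "/sbin/bridge-stp", (char *)ifname, "start", NULL };
	char *envp[] = { NULL };

	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
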
diff --git a/net/core/dev.c b/net/core/dev.c
index 13a0d9f6da54..6357f54c8ff7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
 	return 0;
 }
 
-void __dev_addr_discard(struct dev_addr_list **list)
-{
-	struct dev_addr_list *tmp;
-
-	while (*list != NULL) {
-		tmp = *list;
-		*list = tmp->next;
-		if (tmp->da_users > tmp->da_gusers)
-			printk("__dev_addr_discard: address leakage! "
-			       "da_users=%d\n", tmp->da_users);
-		kfree(tmp);
-	}
-}
-
 /**
  *	dev_unicast_delete	- Release secondary unicast address.
  *	@dev: device
@@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+	struct dev_addr_list *tmp;
+
+	while (*list != NULL) {
+		tmp = *list;
+		*list = tmp->next;
+		if (tmp->da_users > tmp->da_gusers)
+			printk("__dev_addr_discard: address leakage! "
+			       "da_users=%d\n", tmp->da_users);
+		kfree(tmp);
+	}
+}
+
+static void dev_addr_discard(struct net_device *dev)
 {
 	netif_tx_lock_bh(dev);
+
 	__dev_addr_discard(&dev->uc_list);
 	dev->uc_count = 0;
+
+	__dev_addr_discard(&dev->mc_list);
+	dev->mc_count = 0;
+
 	netif_tx_unlock_bh(dev);
 }
 
@@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
-	dev_unicast_discard(dev);
-	dev_mc_discard(dev);
+	dev_addr_discard(dev);
 
 	if (dev->uninit)
 		dev->uninit(dev);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 235a2a8a0d05..99aece1aeccf 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 }
 EXPORT_SYMBOL(dev_mc_unsync);
 
-/*
- *	Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
-	netif_tx_lock_bh(dev);
-	__dev_addr_discard(&dev->mc_list);
-	dev->mc_count = 0;
-	netif_tx_unlock_bh(dev);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cc84d8d8a3c7..590a767b029c 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -79,27 +79,27 @@
 
 struct gen_estimator
 {
-	struct gen_estimator	*next;
+	struct list_head	list;
 	struct gnet_stats_basic	*bstats;
 	struct gnet_stats_rate_est	*rate_est;
 	spinlock_t		*stats_lock;
-	unsigned		interval;
 	int			ewma_log;
 	u64			last_bytes;
 	u32			last_packets;
 	u32			avpps;
 	u32			avbps;
+	struct rcu_head		e_rcu;
 };
 
 struct gen_estimator_head
 {
 	struct timer_list	timer;
-	struct gen_estimator	*list;
+	struct list_head	list;
 };
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Estimator array lock */
+/* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
 	int idx = (int)arg;
 	struct gen_estimator *e;
 
-	read_lock(&est_lock);
-	for (e = elist[idx].list; e; e = e->next) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
 		u32 npackets;
 		u32 rate;
 
 		spin_lock(e->stats_lock);
+		read_lock(&est_lock);
+		if (e->bstats == NULL)
+			goto skip;
+
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
 		rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -125,12 +129,14 @@ static void est_timer(unsigned long arg)
 		e->last_packets = npackets;
 		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
 		e->rate_est->pps = (e->avpps+0x1FF)>>10;
+skip:
+		read_unlock(&est_lock);
 		spin_unlock(e->stats_lock);
 	}
 
-	if (elist[idx].list != NULL)
+	if (!list_empty(&elist[idx].list))
 		mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
-	read_unlock(&est_lock);
+	rcu_read_unlock();
 }
 
 /**
@@ -147,12 +153,17 @@ static void est_timer(unsigned long arg)
  * &rate_est with the statistics lock grabed during this period.
  *
  * Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic *bstats,
-	struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
+		      struct gnet_stats_rate_est *rate_est,
+		      spinlock_t *stats_lock,
+		      struct rtattr *opt)
 {
 	struct gen_estimator *est;
 	struct gnet_estimator *parm = RTA_DATA(opt);
+	int idx;
 
 	if (RTA_PAYLOAD(opt) < sizeof(*parm))
 		return -EINVAL;
@@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	if (est == NULL)
 		return -ENOBUFS;
 
-	est->interval = parm->interval + 2;
+	idx = parm->interval + 2;
 	est->bstats = bstats;
 	est->rate_est = rate_est;
 	est->stats_lock = stats_lock;
@@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
-	est->next = elist[est->interval].list;
-	if (est->next == NULL) {
-		init_timer(&elist[est->interval].timer);
-		elist[est->interval].timer.data = est->interval;
-		elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
-		elist[est->interval].timer.function = est_timer;
-		add_timer(&elist[est->interval].timer);
+	if (!elist[idx].timer.function) {
+		INIT_LIST_HEAD(&elist[idx].list);
+		setup_timer(&elist[idx].timer, est_timer, idx);
 	}
-	write_lock_bh(&est_lock);
-	elist[est->interval].list = est;
-	write_unlock_bh(&est_lock);
+
+	if (list_empty(&elist[idx].list))
+		mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
+
+	list_add_rcu(&est->list, &elist[idx].list);
 	return 0;
 }
 
+static void __gen_kill_estimator(struct rcu_head *head)
+{
+	struct gen_estimator *e = container_of(head,
+					struct gen_estimator, e_rcu);
+	kfree(e);
+}
+
 /**
  * gen_kill_estimator - remove a rate estimator
  * @bstats: basic statistics
@@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
  *
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 	struct gnet_stats_rate_est *rate_est)
 {
 	int idx;
-	struct gen_estimator *est, **pest;
+	struct gen_estimator *e, *n;
 
 	for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-		int killed = 0;
-		pest = &elist[idx].list;
-		while ((est=*pest) != NULL) {
-			if (est->rate_est != rate_est || est->bstats != bstats) {
-				pest = &est->next;
+
+		/* Skip non initialized indexes */
+		if (!elist[idx].timer.function)
+			continue;
+
+		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
+			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
-			}
 
 			write_lock_bh(&est_lock);
-			*pest = est->next;
+			e->bstats = NULL;
 			write_unlock_bh(&est_lock);
 
-			kfree(est);
-			killed++;
+			list_del_rcu(&e->list);
+			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
-		if (killed && elist[idx].list == NULL)
-			del_timer(&elist[idx].timer);
 	}
 }
 
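
The estimator rework above is a textbook RCU list conversion: the hand-rolled singly linked list becomes a struct list_head, the timer walks it under rcu_read_lock() with list_for_each_entry_rcu(), and removal unlinks with list_del_rcu() and defers the kfree() through call_rcu(), while est_lock shrinks to guarding the e->bstats NULL check. A condensed sketch of that pattern; the type and function names here are illustrative, not the kernel's:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct est {
	struct list_head list;
	struct rcu_head rcu;
	/* ... rate estimator state ... */
};

static LIST_HEAD(est_list);

static void est_scan(void)		/* reader, e.g. from a timer */
{
	struct est *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &est_list, list) {
		/* sample e's counters; may run concurrently with removal */
	}
	rcu_read_unlock();
}

static void est_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct est, rcu));
}

static void est_remove(struct est *e)	/* writer, serialized by rtnl */
{
	list_del_rcu(&e->list);		 /* unlink; readers may still see e */
	call_rcu(&e->rcu, est_free_rcu); /* free after a grace period */
}
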
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index dd9ef65ad3ff..519de091a94d 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1260e52ad772..55fca1820c34 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			 int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
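
This is the hub of the mechanical change repeated across the TCP congestion modules below: the unused rtt/seq_rtt argument is dropped from the cong_avoid hook, so tcp_reno_cong_avoid() and every caller lose one parameter (tcp_htcp.c instead keeps it, retyped to s32). For orientation, a hypothetical minimal module built against the new hook signature might look like the following; the "demo" name and trivial body are made up:

#include <linux/module.h>
#include <net/tcp.h>

static void demo_cong_avoid(struct sock *sk, u32 ack,
			    u32 in_flight, int flag)
{
	/* just fall back to Reno's additive increase */
	tcp_reno_cong_avoid(sk, ack, in_flight, flag);
}

static struct tcp_congestion_ops demo_ops = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= demo_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

static int __init demo_init(void)
{
	return tcp_register_congestion_control(&demo_ops);
}

static void __exit demo_exit(void)
{
	tcp_unregister_congestion_control(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
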
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ebfaac2f9f46..d17da30d82d6 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 43d624e5043c..14a073d8b60f 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
 			     u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4ba4a7ae0a85..632c05a75883 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
 			    u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index e5be35117223..b3e55cf56171 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
 			     u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 		return;
 
 	if (!ca->hybla_en)
-		return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	if (ca->rho == 0)
 		hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index b2b2256d3b84..cc5de6f69d46 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884ac8f29..fec8a7a4dbaf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
 		tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
 			   u32 in_flight, int good)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
-			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 1);
 	}
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index e49836ce012e..80e140e3ec2d 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			      int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 }
 
 /**
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4624501e9680..be27a33a1c68 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,7 @@
 #define TCP_SCALABLE_AI_CNT	 50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index e218a51ceced..914e0307f7af 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
-				 u32 seq_rtt, u32 in_flight, int flag)
+				 u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
 	 *
@@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 	} else {
 		u32 rtt, target_cwnd, diff;
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ec854cc5fad5..7a55ddf86032 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* limited by applications */
 	if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 	} else {
 		u32 rtt, target_cwnd;
 
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 545ed237ab53..c04b7c6ec702 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index dcd7e325b283..4c670cf6aefa 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,7 +2567,7 @@ int __init irsock_init(void)
  *    Remove IrDA protocol
  *
  */
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
 {
 	sock_unregister(PF_IRDA);
 	proto_unregister(&irda_proto);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7b5def1ea633..435b563d29a6 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -95,14 +95,14 @@ int __init irda_device_init( void)
 	return 0;
 }
 
-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
 {
 	struct dongle_reg *reg = arg;
 	IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
 		     reg->type);
 }
 
-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
 {
 	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 774eb707940c..ee3889fa49ab 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -153,7 +153,7 @@ int __init iriap_init(void)
  *    Initializes the IrIAP layer, called by the module cleanup code in
  *    irmod.c
  */
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
 {
 	irlmp_unregister_service(service_handle);
 
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 4adaae242b9e..cf302457097b 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -36,39 +36,6 @@ hashbin_t *irias_objects;
  */
 struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}};
 
-/*
- * Function strndup (str, max)
- *
- *    My own kernel version of strndup!
- *
- * Faster, check boundary... Jean II
- */
-static char *strndup(char *str, size_t max)
-{
-	char *new_str;
-	int len;
-
-	/* Check string */
-	if (str == NULL)
-		return NULL;
-	/* Check length, truncate */
-	len = strlen(str);
-	if(len > max)
-		len = max;
-
-	/* Allocate new string */
-	new_str = kmalloc(len + 1, GFP_ATOMIC);
-	if (new_str == NULL) {
-		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
-		return NULL;
-	}
-
-	/* Copy and truncate */
-	memcpy(new_str, str, len);
-	new_str[len] = '\0';
-
-	return new_str;
-}
 
 /*
  * Function ias_new_object (name, id)
@@ -90,7 +57,7 @@ struct ias_object *irias_new_object( char *name, int id)
 	}
 
 	obj->magic = IAS_OBJECT_MAGIC;
-	obj->name = strndup(name, IAS_MAX_CLASSNAME);
+	obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
 	if (!obj->name) {
 		IRDA_WARNING("%s(), Unable to allocate name!\n",
 			     __FUNCTION__);
@@ -360,7 +327,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	/* Insert value */
 	attrib->value = irias_new_integer_value(value);
@@ -404,7 +371,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	attrib->value = irias_new_octseq_value( octets, len);
 	if (!attrib->name || !attrib->value) {
@@ -446,7 +413,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
 	}
 
 	attrib->magic = IAS_ATTRIB_MAGIC;
-	attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
 
 	attrib->value = irias_new_string_value(value);
 	if (!attrib->name || !attrib->value) {
@@ -506,7 +473,7 @@ struct ias_value *irias_new_string_value(char *string)
 
 	value->type = IAS_STRING;
 	value->charset = CS_ASCII;
-	value->t.string = strndup(string, IAS_MAX_STRING);
+	value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
 	if (!value->t.string) {
 		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
 		kfree(value);
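
All five call sites swap the file-local strndup() for kstrndup(), the stock kernel helper with the same truncate, copy and NUL-terminate behaviour plus an explicit gfp_t, which lets the private copy above be deleted outright. A small hedged sketch of the replacement; dup_attr_name() is illustrative only:

#include <linux/slab.h>
#include <linux/string.h>

static char *dup_attr_name(const char *name)
{
	/* Copies at most IAS_MAX_ATTRIBNAME bytes, NUL-terminates, and
	 * returns NULL on allocation failure, just like the removed
	 * helper. IAS_MAX_ATTRIBNAME comes from the IrDA headers. */
	return kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
}
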
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 2fc9f518f89d..3d76aafdb2e5 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -95,7 +95,7 @@ int __init irlap_init(void)
 	return 0;
 }
 
-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
 {
 	IRDA_ASSERT(irlap != NULL, return;);
 
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 24a5e3f23778..7efa930ed684 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@ int __init irlmp_init(void)
  *    Remove IrLMP layer
  *
  */
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
 {
 	/* Check for main structure */
 	IRDA_ASSERT(irlmp != NULL, return;);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index d6f9aba5b9dc..181cb51b48a8 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -84,7 +84,7 @@ void __init irda_proc_register(void)
  *    Unregister irda entry in /proc file system
  *
  */
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
 {
 	int i;
 
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2e968e7d8fea..957e04feb0f7 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
  *    Unregister our sysctl interface
  *
  */
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
 {
 	unregister_sysctl_table(irda_table_header);
 }
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7f50832a2cd5..3d7ab03fb131 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -109,7 +109,7 @@ int __init irttp_init(void)
  *    Called by module destruction/cleanup code
  *
  */
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
 {
 	/* Check for main structure */
 	IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3ac39f1ec775..3599770a2473 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
 config NETFILTER_XT_MATCH_CONNLIMIT
 	tristate '"connlimit" match support"'
 	depends on NETFILTER_XTABLES
+	depends on NF_CONNTRACK
 	---help---
 	  This match allows you to match against the number of parallel
 	  connections to a server per client IP address (or address block).
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3c8e692f493..5681ce3aebca 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <net/netlink.h>
 
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
+#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
 
 struct netlink_sock {
 	/* struct sock has to be the first member of netlink_sock */
@@ -314,10 +315,12 @@ netlink_update_listeners(struct sock *sk)
 	unsigned long mask;
 	unsigned int i;
 
-	for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) {
+	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 		mask = 0;
-		sk_for_each_bound(sk, node, &tbl->mc_list)
-			mask |= nlk_sk(sk)->groups[i];
+		sk_for_each_bound(sk, node, &tbl->mc_list) {
+			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
+				mask |= nlk_sk(sk)->groups[i];
+		}
 		tbl->listeners[i] = mask;
 	}
 	/* this function is only called with the netlink table "grabbed", which
@@ -555,26 +558,37 @@ netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
 	nlk->subscriptions = subscriptions;
 }
 
-static int netlink_alloc_groups(struct sock *sk)
+static int netlink_realloc_groups(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	unsigned int groups;
+	unsigned long *new_groups;
 	int err = 0;
 
-	netlink_lock_table();
+	netlink_table_grab();
+
 	groups = nl_table[sk->sk_protocol].groups;
-	if (!nl_table[sk->sk_protocol].registered)
+	if (!nl_table[sk->sk_protocol].registered) {
 		err = -ENOENT;
-	netlink_unlock_table();
+		goto out_unlock;
+	}
 
-	if (err)
-		return err;
+	if (nlk->ngroups >= groups)
+		goto out_unlock;
 
-	nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
-	if (nlk->groups == NULL)
-		return -ENOMEM;
+	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
+	if (new_groups == NULL) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+	memset((char*)new_groups + NLGRPSZ(nlk->ngroups), 0,
+	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
+
+	nlk->groups = new_groups;
 	nlk->ngroups = groups;
-	return 0;
+ out_unlock:
+	netlink_table_ungrab();
+	return err;
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
@@ -591,11 +605,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
 	if (nladdr->nl_groups) {
 		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
-		if (nlk->groups == NULL) {
-			err = netlink_alloc_groups(sk);
-			if (err)
-				return err;
-		}
+		err = netlink_realloc_groups(sk);
+		if (err)
+			return err;
 	}
 
 	if (nlk->pid) {
@@ -839,10 +851,18 @@ retry:
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
 	int res = 0;
+	unsigned long *listeners;
 
 	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
+
+	rcu_read_lock();
+	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
+
 	if (group - 1 < nl_table[sk->sk_protocol].groups)
-		res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
+		res = test_bit(group - 1, listeners);
+
+	rcu_read_unlock();
+
 	return res;
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
@@ -1007,18 +1027,36 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
 	read_unlock(&nl_table_lock);
 }
 
+/* must be called with netlink table grabbed */
+static void netlink_update_socket_mc(struct netlink_sock *nlk,
+				     unsigned int group,
+				     int is_new)
+{
+	int old, new = !!is_new, subscriptions;
+
+	old = test_bit(group - 1, nlk->groups);
+	subscriptions = nlk->subscriptions - old + new;
+	if (new)
+		__set_bit(group - 1, nlk->groups);
+	else
+		__clear_bit(group - 1, nlk->groups);
+	netlink_update_subscriptions(&nlk->sk, subscriptions);
+	netlink_update_listeners(&nlk->sk);
+}
+
 static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			      char __user *optval, int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
-	int val = 0, err;
+	unsigned int val = 0;
+	int err;
 
 	if (level != SOL_NETLINK)
 		return -ENOPROTOOPT;
 
 	if (optlen >= sizeof(int) &&
-	    get_user(val, (int __user *)optval))
+	    get_user(val, (unsigned int __user *)optval))
 		return -EFAULT;
 
 	switch (optname) {
@@ -1031,27 +1069,16 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		break;
 	case NETLINK_ADD_MEMBERSHIP:
 	case NETLINK_DROP_MEMBERSHIP: {
-		unsigned int subscriptions;
-		int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;
-
 		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
-		if (nlk->groups == NULL) {
-			err = netlink_alloc_groups(sk);
-			if (err)
-				return err;
-		}
+		err = netlink_realloc_groups(sk);
+		if (err)
+			return err;
 		if (!val || val - 1 >= nlk->ngroups)
 			return -EINVAL;
 		netlink_table_grab();
-		old = test_bit(val - 1, nlk->groups);
-		subscriptions = nlk->subscriptions - old + new;
-		if (new)
-			__set_bit(val - 1, nlk->groups);
-		else
-			__clear_bit(val - 1, nlk->groups);
-		netlink_update_subscriptions(sk, subscriptions);
-		netlink_update_listeners(sk);
+		netlink_update_socket_mc(nlk, val,
+					 optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
 		err = 0;
 		break;
@@ -1327,6 +1354,71 @@ out_sock_release:
 	return NULL;
 }
 
+/**
+ * netlink_change_ngroups - change number of multicast groups
+ *
+ * This changes the number of multicast groups that are available
+ * on a certain netlink family. Note that it is not possible to
+ * change the number of groups to below 32. Also note that it does
+ * not implicitly call netlink_clear_multicast_users() when the
+ * number of groups is reduced.
+ *
+ * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
+ * @groups: The new number of groups.
+ */
+int netlink_change_ngroups(struct sock *sk, unsigned int groups)
+{
+	unsigned long *listeners, *old = NULL;
+	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
+	int err = 0;
+
+	if (groups < 32)
+		groups = 32;
+
+	netlink_table_grab();
+	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
+		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
+		if (!listeners) {
+			err = -ENOMEM;
+			goto out_ungrab;
+		}
+		old = tbl->listeners;
+		memcpy(listeners, old, NLGRPSZ(tbl->groups));
+		rcu_assign_pointer(tbl->listeners, listeners);
+	}
+	tbl->groups = groups;
+
+ out_ungrab:
+	netlink_table_ungrab();
+	synchronize_rcu();
+	kfree(old);
+	return err;
+}
+EXPORT_SYMBOL(netlink_change_ngroups);
+
+/**
+ * netlink_clear_multicast_users - kick off multicast listeners
+ *
+ * This function removes all listeners from the given group.
+ * @ksk: The kernel netlink socket, as returned by
+ *	netlink_kernel_create().
+ * @group: The multicast group to clear.
+ */
+void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+
+	netlink_table_grab();
+
+	sk_for_each_bound(sk, node, &tbl->mc_list)
+		netlink_update_socket_mc(nlk_sk(sk), group, 0);
+
+	netlink_table_ungrab();
+}
+EXPORT_SYMBOL(netlink_clear_multicast_users);
+
 void netlink_set_nonroot(int protocol, unsigned int flags)
 {
 	if ((unsigned int)protocol < MAX_LINKS)
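
Two idioms in this file are worth calling out: netlink_realloc_groups() grows the per-socket group bitmap with krealloc() and zeroes only the newly added tail, and netlink_change_ngroups() publishes an enlarged listeners bitmap with rcu_assign_pointer() so netlink_has_listeners() can read it under rcu_read_lock() without taking the table lock. A sketch of the grow-and-zero idiom, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>

static unsigned long *grow_bitmap(unsigned long *old, size_t old_bytes,
				  size_t new_bytes)
{
	unsigned long *p;

	p = krealloc(old, new_bytes, GFP_ATOMIC);
	if (!p)
		return NULL;	/* old buffer is still valid on failure */

	/* krealloc preserves the old contents but leaves the added
	 * tail uninitialized, so clear just the new bytes */
	memset((char *)p + old_bytes, 0, new_bytes - old_bytes);
	return p;
}
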
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index b9ab62f938d0..e146531faf1d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -3,6 +3,7 @@
  *
  * Authors:	Jamal Hadi Salim
  * 		Thomas Graf <tgraf@suug.ch>
+ *		Johannes Berg <johannes@sipsolutions.net>
  */
 
 #include <linux/module.h>
@@ -13,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/mutex.h>
+#include <linux/bitmap.h>
 #include <net/sock.h>
 #include <net/genetlink.h>
 
@@ -42,6 +44,16 @@ static void genl_unlock(void)
 #define GENL_FAM_TAB_MASK	(GENL_FAM_TAB_SIZE - 1)
 
 static struct list_head family_ht[GENL_FAM_TAB_SIZE];
+/*
+ * Bitmap of multicast groups that are currently in use.
+ *
+ * To avoid an allocation at boot of just one unsigned long,
+ * declare it global instead.
+ * Bit 0 is marked as already used since group 0 is invalid.
+ */
+static unsigned long mc_group_start = 0x1;
+static unsigned long *mc_groups = &mc_group_start;
+static unsigned long mc_groups_longs = 1;
 
 static int genl_ctrl_event(int event, void *data);
 
@@ -116,6 +128,114 @@ static inline u16 genl_generate_id(void)
 	return id_gen_idx;
 }
 
+static struct genl_multicast_group notify_grp;
+
+/**
+ * genl_register_mc_group - register a multicast group
+ *
+ * Registers the specified multicast group and notifies userspace
+ * about the new group.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * @family: The generic netlink family the group shall be registered for.
+ * @grp: The group to register, must have a name.
+ */
+int genl_register_mc_group(struct genl_family *family,
+			   struct genl_multicast_group *grp)
+{
+	int id;
+	unsigned long *new_groups;
+	int err;
+
+	BUG_ON(grp->name[0] == '\0');
+
+	genl_lock();
+
+	/* special-case our own group */
+	if (grp == &notify_grp)
+		id = GENL_ID_CTRL;
+	else
+		id = find_first_zero_bit(mc_groups,
+					 mc_groups_longs * BITS_PER_LONG);
+
+
+	if (id >= mc_groups_longs * BITS_PER_LONG) {
+		size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long);
+
+		if (mc_groups == &mc_group_start) {
+			new_groups = kzalloc(nlen, GFP_KERNEL);
+			if (!new_groups) {
+				err = -ENOMEM;
+				goto out;
+			}
+			mc_groups = new_groups;
+			*mc_groups = mc_group_start;
+		} else {
+			new_groups = krealloc(mc_groups, nlen, GFP_KERNEL);
+			if (!new_groups) {
+				err = -ENOMEM;
+				goto out;
+			}
+			mc_groups = new_groups;
+			mc_groups[mc_groups_longs] = 0;
+		}
+		mc_groups_longs++;
+	}
+
+	err = netlink_change_ngroups(genl_sock,
+				     sizeof(unsigned long) * NETLINK_GENERIC);
+	if (err)
+		goto out;
+
+	grp->id = id;
+	set_bit(id, mc_groups);
+	list_add_tail(&grp->list, &family->mcast_groups);
+	grp->family = family;
+
+	genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
+ out:
+	genl_unlock();
+	return 0;
+}
+EXPORT_SYMBOL(genl_register_mc_group);
+
+/**
+ * genl_unregister_mc_group - unregister a multicast group
+ *
+ * Unregisters the specified multicast group and notifies userspace
+ * about it. All current listeners on the group are removed.
+ *
+ * Note: It is not necessary to unregister all multicast groups before
+ *       unregistering the family, unregistering the family will cause
+ *       all assigned multicast groups to be unregistered automatically.
+ *
+ * @family: Generic netlink family the group belongs to.
+ * @grp: The group to unregister, must have been registered successfully
+ *	 previously.
+ */
+void genl_unregister_mc_group(struct genl_family *family,
+			      struct genl_multicast_group *grp)
+{
+	BUG_ON(grp->family != family);
+	genl_lock();
+	netlink_clear_multicast_users(genl_sock, grp->id);
+	clear_bit(grp->id, mc_groups);
+	list_del(&grp->list);
+	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
+	grp->id = 0;
+	grp->family = NULL;
+	genl_unlock();
+}
+
+static void genl_unregister_mc_groups(struct genl_family *family)
+{
+	struct genl_multicast_group *grp, *tmp;
+
+	list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
+		genl_unregister_mc_group(family, grp);
+}
+
 /**
  * genl_register_ops - register generic netlink operations
  * @family: generic netlink family
@@ -216,6 +336,7 @@ int genl_register_family(struct genl_family *family)
 		goto errout;
 
 	INIT_LIST_HEAD(&family->ops_list);
+	INIT_LIST_HEAD(&family->mcast_groups);
 
 	genl_lock();
 
@@ -275,6 +396,8 @@ int genl_unregister_family(struct genl_family *family)
 {
 	struct genl_family *rc;
 
+	genl_unregister_mc_groups(family);
+
 	genl_lock();
 
 	list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
@@ -410,6 +533,67 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
 		nla_nest_end(skb, nla_ops);
 	}
 
+	if (!list_empty(&family->mcast_groups)) {
+		struct genl_multicast_group *grp;
+		struct nlattr *nla_grps;
+		int idx = 1;
+
+		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
+		if (nla_grps == NULL)
+			goto nla_put_failure;
+
+		list_for_each_entry(grp, &family->mcast_groups, list) {
+			struct nlattr *nest;
+
+			nest = nla_nest_start(skb, idx++);
+			if (nest == NULL)
+				goto nla_put_failure;
+
+			NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+			NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+				       grp->name);
+
+			nla_nest_end(skb, nest);
+		}
+		nla_nest_end(skb, nla_grps);
+	}
+
+	return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+	return genlmsg_cancel(skb, hdr);
+}
+
+static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
+				u32 seq, u32 flags, struct sk_buff *skb,
+				u8 cmd)
+{
+	void *hdr;
+	struct nlattr *nla_grps;
+	struct nlattr *nest;
+
+	hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+	if (hdr == NULL)
+		return -1;
+
+	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
+	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+
+	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
+	if (nla_grps == NULL)
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, 1);
+	if (nest == NULL)
+		goto nla_put_failure;
+
+	NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+	NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+		       grp->name);
+
+	nla_nest_end(skb, nest);
+	nla_nest_end(skb, nla_grps);
+
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
@@ -453,8 +637,8 @@ errout:
 	return skb->len;
 }
 
-static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid,
-				      int seq, u8 cmd)
+static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
+					     u32 pid, int seq, u8 cmd)
 {
 	struct sk_buff *skb;
 	int err;
@@ -472,6 +656,25 @@ static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid,
 	return skb;
 }
 
+static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
+					    u32 pid, int seq, u8 cmd)
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb == NULL)
+		return ERR_PTR(-ENOBUFS);
+
+	err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
+	if (err < 0) {
+		nlmsg_free(skb);
+		return ERR_PTR(err);
+	}
+
+	return skb;
+}
+
 static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
 	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
 	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
@@ -501,8 +704,8 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 		goto errout;
 	}
 
-	msg = ctrl_build_msg(res, info->snd_pid, info->snd_seq,
-			     CTRL_CMD_NEWFAMILY);
+	msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
+				    CTRL_CMD_NEWFAMILY);
 	if (IS_ERR(msg)) {
 		err = PTR_ERR(msg);
 		goto errout;
@@ -523,7 +726,15 @@ static int genl_ctrl_event(int event, void *data)
 	switch (event) {
 	case CTRL_CMD_NEWFAMILY:
 	case CTRL_CMD_DELFAMILY:
-		msg = ctrl_build_msg(data, 0, 0, event);
+		msg = ctrl_build_family_msg(data, 0, 0, event);
+		if (IS_ERR(msg))
+			return PTR_ERR(msg);
+
+		genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
+		break;
+	case CTRL_CMD_NEWMCAST_GRP:
+	case CTRL_CMD_DELMCAST_GRP:
+		msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
 		if (IS_ERR(msg))
 			return PTR_ERR(msg);
 
@@ -541,6 +752,10 @@ static struct genl_ops genl_ctrl_ops = {
 	.policy		= ctrl_policy,
 };
 
+static struct genl_multicast_group notify_grp = {
+	.name		= "notify",
+};
+
 static int __init genl_init(void)
 {
 	int i, err;
@@ -557,11 +772,17 @@ static int __init genl_init(void)
 		goto errout_register;
 
 	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
-	genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
-					  genl_rcv, NULL, THIS_MODULE);
+
+	/* we'll bump the group number right afterwards */
+	genl_sock = netlink_kernel_create(NETLINK_GENERIC, 0, genl_rcv,
+					  NULL, THIS_MODULE);
 	if (genl_sock == NULL)
 		panic("GENL: Cannot initialize generic netlink\n");
 
+	err = genl_register_mc_group(&genl_ctrl, &notify_grp);
+	if (err < 0)
+		goto errout_register;
+
 	return 0;
 
 errout_register:
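
Taken together, the netlink and genetlink changes give generic netlink families per-family multicast groups with dynamically assigned IDs. A hypothetical user of the new API registers a group after its family and then broadcasts to the assigned grp.id; the "demo"/"events" names and the abbreviated error handling below are made up for illustration:

#include <net/genetlink.h>

static struct genl_family demo_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "demo",
	.version	= 1,
	.maxattr	= 0,
};

static struct genl_multicast_group demo_grp = {
	.name		= "events",
};

static int demo_setup(void)
{
	int err;

	err = genl_register_family(&demo_family);
	if (err)
		return err;

	/* assigns a group id into demo_grp.id and notifies userspace */
	return genl_register_mc_group(&demo_family, &demo_grp);
}

static void demo_notify(struct sk_buff *msg)
{
	/* deliver to every socket subscribed to "demo"/"events" */
	genlmsg_multicast(msg, 0, demo_grp.id, GFP_KERNEL);
}
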
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index f3986d498b40..db3395bfbcfa 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -187,7 +187,7 @@ static ssize_t rfkill_claim_store(struct device *dev,
 static struct device_attribute rfkill_dev_attrs[] = {
 	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
 	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
-	__ATTR(state, S_IRUGO, rfkill_state_show, rfkill_state_store),
+	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
 	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
 	__ATTR_NULL
 };
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d3f7c3f9407a..8a74cac0be8c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -97,7 +97,7 @@ config NET_SCH_ATM
 	  select classes of this queuing discipline.  Each class maps
 	  the flow(s) it is handling to a given virtual circuit.
 
-	  See the top of <file:net/sched/sch_atm.c>) for more details.
+	  See the top of <file:net/sched/sch_atm.c> for more details.
 
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_atm.
@@ -137,7 +137,7 @@ config NET_SCH_SFQ
 	tristate "Stochastic Fairness Queueing (SFQ)"
 	---help---
 	  Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-	  packet scheduling algorithm .
+	  packet scheduling algorithm.
 
 	  See the top of <file:net/sched/sch_sfq.c> for more details.
 
@@ -306,7 +306,7 @@ config NET_CLS_RSVP6
 	  is important for real time data such as streaming sound or video.
 
 	  Say Y here if you want to be able to classify outgoing packets based
-	  on their RSVP requests and you are using the IPv6.
+	  on their RSVP requests and you are using the IPv6 protocol.
 
 	  To compile this code as a module, choose M here: the
 	  module will be called cls_rsvp6.
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 417ec8fb7f1a..ddc4f2c54379 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		}
 	}
 	DPRINTK("atm_tc_change: new id %x\n", classid);
-	flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
 	DPRINTK("atm_tc_change: flow %p\n", flow);
 	if (!flow) {
 		error = -ENOBUFS;
 		goto err_out;
 	}
-	memset(flow, 0, sizeof(*flow));
 	flow->filter_list = NULL;
 	if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
 		flow->q = &noop_qdisc;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 157bfbd250ba..b48f06fc9fd9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
 		if (last == first)
 			break;
 
-		last = last->u.next;
+		last = (struct xfrm_dst *)last->u.dst.next;
 		last->child_mtu_cached = mtu;
 	}
 