author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-18 13:24:36 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-18 13:24:36 -0400
commit     485cf925d8b7a6b3c62fe5f1e167f2d0d4edf32a (patch)
tree       57798f48123a62dd1801f039b676b06913e34e72 /net
parent     31bdc5dc7666aa2fe04c626cea30fe3c20cf481c (diff)
parent     3fd8f9e4b6c184d03d340bc86630f700de967fa8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
  [NETFILTER]: xt_connlimit needs to depend on nf_conntrack
  [NETFILTER]: ipt_iprange.h must #include <linux/types.h>
  [IrDA]: Fix IrDA build failure
  [ATM]: nicstar needs virt_to_bus
  [NET]: move __dev_addr_discard adjacent to dev_addr_discard for readability
  [NET]: merge dev_unicast_discard and dev_mc_discard into one
  [NET]: move dev_mc_discard from dev_mcast.c to dev.c
  [NETLINK]: negative groups in netlink_setsockopt
  [PPPOL2TP]: Reset meta-data in xmit function
  [PPPOL2TP]: Fix use-after-free
  [PKT_SCHED]: Some typo fixes in net/sched/Kconfig
  [XFRM]: Fix crash introduced by struct dst_entry reordering
  [TCP]: remove unused argument to cong_avoid op
  [ATM]: [idt77252] Rename CONFIG_ATM_IDT77252_SEND_IDLE to not resemble a Kconfig variable
  [ATM]: [drivers] ioremap balanced with iounmap
  [ATM]: [lanai] sram_test_word() must be __devinit
  [ATM]: [nicstar] Replace C code with call to ARRAY_SIZE() macro.
  [ATM]: Eliminate dead config variable CONFIG_BR2684_FAST_TRANS.
  [ATM]: Replacing kmalloc/memset combination with kzalloc.
  [NET]: gen_estimator deadlock fix
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/atm/br2684.c          |  4
-rw-r--r--  net/core/dev.c            | 38
-rw-r--r--  net/core/dev_mcast.c      | 12
-rw-r--r--  net/core/gen_estimator.c  | 81
-rw-r--r--  net/ipv4/tcp_bic.c        |  2
-rw-r--r--  net/ipv4/tcp_cong.c       |  3
-rw-r--r--  net/ipv4/tcp_cubic.c      |  2
-rw-r--r--  net/ipv4/tcp_highspeed.c  |  2
-rw-r--r--  net/ipv4/tcp_htcp.c       |  2
-rw-r--r--  net/ipv4/tcp_hybla.c      |  4
-rw-r--r--  net/ipv4/tcp_illinois.c   |  2
-rw-r--r--  net/ipv4/tcp_input.c      |  8
-rw-r--r--  net/ipv4/tcp_lp.c         |  5
-rw-r--r--  net/ipv4/tcp_scalable.c   |  2
-rw-r--r--  net/ipv4/tcp_vegas.c      |  6
-rw-r--r--  net/ipv4/tcp_veno.c       |  6
-rw-r--r--  net/ipv4/tcp_yeah.c       |  2
-rw-r--r--  net/irda/af_irda.c        |  2
-rw-r--r--  net/irda/irda_device.c    |  4
-rw-r--r--  net/irda/iriap.c          |  2
-rw-r--r--  net/irda/irlap.c          |  2
-rw-r--r--  net/irda/irlmp.c          |  2
-rw-r--r--  net/irda/irproc.c         |  2
-rw-r--r--  net/irda/irsysctl.c       |  2
-rw-r--r--  net/irda/irttp.c          |  2
-rw-r--r--  net/netfilter/Kconfig     |  1
-rw-r--r--  net/netlink/af_netlink.c  |  5
-rw-r--r--  net/sched/Kconfig         |  6
-rw-r--r--  net/sched/sch_atm.c       |  3
-rw-r--r--  net/xfrm/xfrm_policy.c    |  2
30 files changed, 110 insertions, 106 deletions
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index faa6aaf67563..c0f6861eefe3 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
         skb_pull(skb, plen);
         skb_set_mac_header(skb, -ETH_HLEN);
         skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
-        skb->protocol = ((u16 *) skb->data)[-1];
-#else   /* some protocols might require this: */
         skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
 #else
         skb_pull(skb, plen - ETH_HLEN);
         skb->protocol = eth_type_trans(skb, net_dev);
diff --git a/net/core/dev.c b/net/core/dev.c
index 13a0d9f6da54..6357f54c8ff7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
         return 0;
 }
 
-void __dev_addr_discard(struct dev_addr_list **list)
-{
-        struct dev_addr_list *tmp;
-
-        while (*list != NULL) {
-                tmp = *list;
-                *list = tmp->next;
-                if (tmp->da_users > tmp->da_gusers)
-                        printk("__dev_addr_discard: address leakage! "
-                               "da_users=%d\n", tmp->da_users);
-                kfree(tmp);
-        }
-}
-
 /**
  * dev_unicast_delete - Release secondary unicast address.
  * @dev: device
@@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+        struct dev_addr_list *tmp;
+
+        while (*list != NULL) {
+                tmp = *list;
+                *list = tmp->next;
+                if (tmp->da_users > tmp->da_gusers)
+                        printk("__dev_addr_discard: address leakage! "
+                               "da_users=%d\n", tmp->da_users);
+                kfree(tmp);
+        }
+}
+
+static void dev_addr_discard(struct net_device *dev)
 {
         netif_tx_lock_bh(dev);
+
         __dev_addr_discard(&dev->uc_list);
         dev->uc_count = 0;
+
+        __dev_addr_discard(&dev->mc_list);
+        dev->mc_count = 0;
+
         netif_tx_unlock_bh(dev);
 }
 
@@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
         /*
          *      Flush the unicast and multicast chains
          */
-        dev_unicast_discard(dev);
-        dev_mc_discard(dev);
+        dev_addr_discard(dev);
 
         if (dev->uninit)
                 dev->uninit(dev);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 235a2a8a0d05..99aece1aeccf 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 }
 EXPORT_SYMBOL(dev_mc_unsync);
 
-/*
- *      Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
-        netif_tx_lock_bh(dev);
-        __dev_addr_discard(&dev->mc_list);
-        dev->mc_count = 0;
-        netif_tx_unlock_bh(dev);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cc84d8d8a3c7..590a767b029c 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -79,27 +79,27 @@
 
 struct gen_estimator
 {
-        struct gen_estimator    *next;
+        struct list_head        list;
         struct gnet_stats_basic *bstats;
         struct gnet_stats_rate_est      *rate_est;
         spinlock_t              *stats_lock;
-        unsigned                interval;
         int                     ewma_log;
         u64                     last_bytes;
         u32                     last_packets;
         u32                     avpps;
         u32                     avbps;
+        struct rcu_head         e_rcu;
 };
 
 struct gen_estimator_head
 {
         struct timer_list       timer;
-        struct gen_estimator    *list;
+        struct list_head        list;
 };
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Estimator array lock */
+/* Protects against NULL dereference */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
         int idx = (int)arg;
         struct gen_estimator *e;
 
-        read_lock(&est_lock);
-        for (e = elist[idx].list; e; e = e->next) {
+        rcu_read_lock();
+        list_for_each_entry_rcu(e, &elist[idx].list, list) {
                 u64 nbytes;
                 u32 npackets;
                 u32 rate;
 
                 spin_lock(e->stats_lock);
+                read_lock(&est_lock);
+                if (e->bstats == NULL)
+                        goto skip;
+
                 nbytes = e->bstats->bytes;
                 npackets = e->bstats->packets;
                 rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -125,12 +129,14 @@ static void est_timer(unsigned long arg)
                 e->last_packets = npackets;
                 e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
                 e->rate_est->pps = (e->avpps+0x1FF)>>10;
+skip:
+                read_unlock(&est_lock);
                 spin_unlock(e->stats_lock);
         }
 
-        if (elist[idx].list != NULL)
+        if (!list_empty(&elist[idx].list))
                 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
-        read_unlock(&est_lock);
+        rcu_read_unlock();
 }
 
 /**
@@ -147,12 +153,17 @@ static void est_timer(unsigned long arg)
  * &rate_est with the statistics lock grabed during this period.
  *
  * Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic *bstats,
-        struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
+                      struct gnet_stats_rate_est *rate_est,
+                      spinlock_t *stats_lock,
+                      struct rtattr *opt)
 {
         struct gen_estimator *est;
         struct gnet_estimator *parm = RTA_DATA(opt);
+        int idx;
 
         if (RTA_PAYLOAD(opt) < sizeof(*parm))
                 return -EINVAL;
@@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
         if (est == NULL)
                 return -ENOBUFS;
 
-        est->interval = parm->interval + 2;
+        idx = parm->interval + 2;
         est->bstats = bstats;
         est->rate_est = rate_est;
         est->stats_lock = stats_lock;
@@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
         est->last_packets = bstats->packets;
         est->avpps = rate_est->pps<<10;
 
-        est->next = elist[est->interval].list;
-        if (est->next == NULL) {
-                init_timer(&elist[est->interval].timer);
-                elist[est->interval].timer.data = est->interval;
-                elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
-                elist[est->interval].timer.function = est_timer;
-                add_timer(&elist[est->interval].timer);
+        if (!elist[idx].timer.function) {
+                INIT_LIST_HEAD(&elist[idx].list);
+                setup_timer(&elist[idx].timer, est_timer, idx);
         }
-        write_lock_bh(&est_lock);
-        elist[est->interval].list = est;
-        write_unlock_bh(&est_lock);
+
+        if (list_empty(&elist[idx].list))
+                mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
+
+        list_add_rcu(&est->list, &elist[idx].list);
         return 0;
 }
 
+static void __gen_kill_estimator(struct rcu_head *head)
+{
+        struct gen_estimator *e = container_of(head,
+                                        struct gen_estimator, e_rcu);
+        kfree(e);
+}
+
 /**
  * gen_kill_estimator - remove a rate estimator
  * @bstats: basic statistics
@@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
  *
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
+ *
+ * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
         struct gnet_stats_rate_est *rate_est)
 {
         int idx;
-        struct gen_estimator *est, **pest;
+        struct gen_estimator *e, *n;
 
         for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
-                int killed = 0;
-                pest = &elist[idx].list;
-                while ((est=*pest) != NULL) {
-                        if (est->rate_est != rate_est || est->bstats != bstats) {
-                                pest = &est->next;
+
+                /* Skip non initialized indexes */
+                if (!elist[idx].timer.function)
+                        continue;
+
+                list_for_each_entry_safe(e, n, &elist[idx].list, list) {
+                        if (e->rate_est != rate_est || e->bstats != bstats)
                                 continue;
-                        }
 
                         write_lock_bh(&est_lock);
-                        *pest = est->next;
+                        e->bstats = NULL;
                         write_unlock_bh(&est_lock);
 
-                        kfree(est);
-                        killed++;
+                        list_del_rcu(&e->list);
+                        call_rcu(&e->e_rcu, __gen_kill_estimator);
                 }
-                if (killed && elist[idx].list == NULL)
-                        del_timer(&elist[idx].timer);
         }
 }
 
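The gen_estimator rework above ("[NET]: gen_estimator deadlock fix") replaces the hand-rolled singly linked chain with an RCU-protected list: est_timer() no longer holds est_lock across every estimator's stats_lock, it simply walks the list under rcu_read_lock(), and est_lock shrinks to guarding the e->bstats NULL check noted in the new comment. A minimal sketch of the kernel RCU-list pattern the new code leans on, with illustrative names rather than the patch's own:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct est_node {
        struct list_head list;          /* linkage, walked under rcu_read_lock() */
        struct rcu_head  rcu;           /* for deferred kfree() */
        u32 value;
};

static LIST_HEAD(est_list);
static DEFINE_SPINLOCK(est_list_lock);  /* serializes writers only */

/* Writer: publish a new node to concurrent readers. */
static void est_node_add(struct est_node *n)
{
        spin_lock_bh(&est_list_lock);
        list_add_rcu(&n->list, &est_list);
        spin_unlock_bh(&est_list_lock);
}

/* Reader (e.g. a timer): walk the list without blocking writers. */
static u32 est_node_sum(void)
{
        struct est_node *n;
        u32 sum = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(n, &est_list, list)
                sum += n->value;
        rcu_read_unlock();
        return sum;
}

static void est_node_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct est_node, rcu));
}

/* Writer: unlink now, free only after all current readers are done. */
static void est_node_del(struct est_node *n)
{
        spin_lock_bh(&est_list_lock);
        list_del_rcu(&n->list);
        spin_unlock_bh(&est_list_lock);
        call_rcu(&n->rcu, est_node_free_rcu);
}

In the patch itself the add/kill paths are documented as running under rtnl_mutex, so no new writer lock is introduced there; the spinlock above only stands in for that external serialization.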
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index dd9ef65ad3ff..519de091a94d 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-                              u32 seq_rtt, u32 in_flight, int data_acked)
+                              u32 in_flight, int data_acked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1260e52ad772..55fca1820c34 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-                         int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ebfaac2f9f46..d17da30d82d6 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-                              u32 seq_rtt, u32 in_flight, int data_acked)
+                              u32 in_flight, int data_acked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 43d624e5043c..14a073d8b60f 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
         tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
                              u32 in_flight, int data_acked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4ba4a7ae0a85..632c05a75883 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
         return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
                             u32 in_flight, int data_acked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index e5be35117223..b3e55cf56171 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
                              u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
                 return;
 
         if (!ca->hybla_en)
-                return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+                return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
         if (ca->rho == 0)
                 hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index b2b2256d3b84..cc5de6f69d46 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
                                     u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884ac8f29..fec8a7a4dbaf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
                 tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
                            u32 in_flight, int good)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
-        icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+        icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
         tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                 /* Advance CWND, if state allows this. */
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                     tcp_may_raise_cwnd(sk, flag))
-                        tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
+                        tcp_cong_avoid(sk, ack, prior_in_flight, 0);
                 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
         } else {
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-                        tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+                        tcp_cong_avoid(sk, ack, prior_in_flight, 1);
         }
 
         if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
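These two tcp_input.c hunks are the caller side of "[TCP]: remove unused argument to cong_avoid op": the hook is now invoked as cong_avoid(sk, ack, in_flight, good), and every congestion-control module in this series is updated to the same signature. A rough sketch of a module written against the post-patch hook -- a hypothetical "dummy" module that just defers to the exported Reno helpers, filling in only the ops fields this sketch needs:

#include <linux/module.h>
#include <net/tcp.h>

/* Post-patch hook signature: no rtt/seq_rtt parameter any more. */
static void dummy_cong_avoid(struct sock *sk, u32 ack,
                             u32 in_flight, int flag)
{
        tcp_reno_cong_avoid(sk, ack, in_flight, flag);
}

static u32 dummy_ssthresh(struct sock *sk)
{
        return tcp_reno_ssthresh(sk);
}

static struct tcp_congestion_ops dummy_cong_ops = {
        .name           = "dummy",
        .owner          = THIS_MODULE,
        .ssthresh       = dummy_ssthresh,
        .cong_avoid     = dummy_cong_avoid,
};

static int __init dummy_cong_init(void)
{
        return tcp_register_congestion_control(&dummy_cong_ops);
}

static void __exit dummy_cong_exit(void)
{
        tcp_unregister_congestion_control(&dummy_cong_ops);
}

module_init(dummy_cong_init);
module_exit(dummy_cong_exit);
MODULE_LICENSE("GPL");

Both .ssthresh and .cong_avoid are supplied because the registration path rejects ops that lack them; the remaining tcp_congestion_ops callbacks are optional for a stub like this.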
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index e49836ce012e..80e140e3ec2d 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-                              int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
         struct lp *lp = inet_csk_ca(sk);
 
         if (!(lp->flag & LP_WITHIN_INF))
-                tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+                tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 }
 
 /**
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4624501e9680..be27a33a1c68 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,7 @@
 #define TCP_SCALABLE_AI_CNT     50U
 #define TCP_SCALABLE_MD_SCALE   3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
                                     u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index e218a51ceced..914e0307f7af 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
-                                 u32 seq_rtt, u32 in_flight, int flag)
+                                 u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct vegas *vegas = inet_csk_ca(sk);
 
         if (!vegas->doing_vegas_now)
-                return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+                return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
         /* The key players are v_beg_snd_una and v_beg_snd_nxt.
          *
@@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                 /* We don't have enough RTT samples to do the Vegas
                  * calculation, so we'll behave like Reno.
                  */
-                tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+                tcp_reno_cong_avoid(sk, ack, in_flight, flag);
         } else {
                 u32 rtt, target_cwnd, diff;
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ec854cc5fad5..7a55ddf86032 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
-                                u32 seq_rtt, u32 in_flight, int flag)
+                                u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct veno *veno = inet_csk_ca(sk);
 
         if (!veno->doing_veno_now)
-                return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+                return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
         /* limited by applications */
         if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
                 /* We don't have enough rtt samples to do the Veno
                  * calculation, so we'll behave like Reno.
                  */
-                tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+                tcp_reno_cong_avoid(sk, ack, in_flight, flag);
         } else {
                 u32 rtt, target_cwnd;
 
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 545ed237ab53..c04b7c6ec702 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-                                u32 seq_rtt, u32 in_flight, int flag)
+                                u32 in_flight, int flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct yeah *yeah = inet_csk_ca(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index dcd7e325b283..4c670cf6aefa 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,7 +2567,7 @@ int __init irsock_init(void)
  *    Remove IrDA protocol
  *
  */
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
 {
         sock_unregister(PF_IRDA);
         proto_unregister(&irda_proto);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7b5def1ea633..435b563d29a6 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -95,14 +95,14 @@ int __init irda_device_init( void)
         return 0;
 }
 
-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
 {
         struct dongle_reg *reg = arg;
         IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
                      reg->type);
 }
 
-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
 {
         IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 774eb707940c..ee3889fa49ab 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -153,7 +153,7 @@ int __init iriap_init(void)
  *    Initializes the IrIAP layer, called by the module cleanup code in
  *    irmod.c
  */
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
 {
         irlmp_unregister_service(service_handle);
 
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 2fc9f518f89d..3d76aafdb2e5 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -95,7 +95,7 @@ int __init irlap_init(void)
         return 0;
 }
 
-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
 {
         IRDA_ASSERT(irlap != NULL, return;);
 
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 24a5e3f23778..7efa930ed684 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@ int __init irlmp_init(void)
  *    Remove IrLMP layer
  *
  */
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
 {
         /* Check for main structure */
         IRDA_ASSERT(irlmp != NULL, return;);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index d6f9aba5b9dc..181cb51b48a8 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -84,7 +84,7 @@ void __init irda_proc_register(void)
  *    Unregister irda entry in /proc file system
  *
  */
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
 {
         int i;
 
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2e968e7d8fea..957e04feb0f7 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
  *    Unregister our sysctl interface
  *
  */
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
 {
         unregister_sysctl_table(irda_table_header);
 }
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7f50832a2cd5..3d7ab03fb131 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -109,7 +109,7 @@ int __init irttp_init(void)
  *    Called by module destruction/cleanup code
  *
  */
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
 {
         /* Check for main structure */
         IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3ac39f1ec775..3599770a2473 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
 config NETFILTER_XT_MATCH_CONNLIMIT
         tristate '"connlimit" match support"'
         depends on NETFILTER_XTABLES
+        depends on NF_CONNTRACK
         ---help---
           This match allows you to match against the number of parallel
           connections to a server per client IP address (or address block).
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3c8e692f493..641cfbc278d8 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1012,13 +1012,14 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 {
         struct sock *sk = sock->sk;
         struct netlink_sock *nlk = nlk_sk(sk);
-        int val = 0, err;
+        unsigned int val = 0;
+        int err;
 
         if (level != SOL_NETLINK)
                 return -ENOPROTOOPT;
 
         if (optlen >= sizeof(int) &&
-            get_user(val, (int __user *)optval))
+            get_user(val, (unsigned int __user *)optval))
                 return -EFAULT;
 
         switch (optname) {
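The af_netlink.c hunk is the "[NETLINK]: negative groups in netlink_setsockopt" fix: the value copied from userspace is now read as an unsigned int, so a negative group number can no longer look "small" to the later range checks (which lie outside this hunk). A toy userspace illustration of the underlying pitfall -- not the kernel's actual check:

#include <stdio.h>

int main(void)
{
        int sval = -1;                          /* group number as a signed option value */
        unsigned int uval = (unsigned int)sval; /* same bits, read as unsigned */
        unsigned int ngroups = 32;              /* arbitrary example limit */

        /* An upper-bound-only test lets the signed value through... */
        if (!(sval > (int)ngroups))
                printf("signed:   -1 passes 'val > ngroups' unharmed\n");

        /* ...but the same bit pattern, treated as unsigned, is rejected. */
        if (uval > ngroups)
                printf("unsigned: 0x%x fails the same test and is refused\n", uval);

        return 0;
}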
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d3f7c3f9407a..8a74cac0be8c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -97,7 +97,7 @@ config NET_SCH_ATM
           select classes of this queuing discipline. Each class maps
           the flow(s) it is handling to a given virtual circuit.
 
-          See the top of <file:net/sched/sch_atm.c>) for more details.
+          See the top of <file:net/sched/sch_atm.c> for more details.
 
           To compile this code as a module, choose M here: the
           module will be called sch_atm.
@@ -137,7 +137,7 @@ config NET_SCH_SFQ
         tristate "Stochastic Fairness Queueing (SFQ)"
         ---help---
           Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-          packet scheduling algorithm .
+          packet scheduling algorithm.
 
           See the top of <file:net/sched/sch_sfq.c> for more details.
 
@@ -306,7 +306,7 @@ config NET_CLS_RSVP6
           is important for real time data such as streaming sound or video.
 
           Say Y here if you want to be able to classify outgoing packets based
-          on their RSVP requests and you are using the IPv6.
+          on their RSVP requests and you are using the IPv6 protocol.
 
           To compile this code as a module, choose M here: the
           module will be called cls_rsvp6.
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 417ec8fb7f1a..ddc4f2c54379 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                 }
         }
         DPRINTK("atm_tc_change: new id %x\n", classid);
-        flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+        flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
         DPRINTK("atm_tc_change: flow %p\n", flow);
         if (!flow) {
                 error = -ENOBUFS;
                 goto err_out;
         }
-        memset(flow, 0, sizeof(*flow));
         flow->filter_list = NULL;
         if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
                 flow->q = &noop_qdisc;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 157bfbd250ba..b48f06fc9fd9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
                 if (last == first)
                         break;
 
-                last = last->u.next;
+                last = (struct xfrm_dst *)last->u.dst.next;
                 last->child_mtu_cached = mtu;
         }
 