Diffstat (limited to 'net/ipv4')
54 files changed, 875 insertions, 865 deletions
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index a02c36d0a13e..93fe3966805d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,7 +10,8 @@ obj-y := route.o inetpeer.o protocol.o \
 	     tcp_minisocks.o tcp_cong.o \
 	     datagram.o raw.o udp.o udplite.o \
 	     arp.o icmp.o devinet.o af_inet.o igmp.o \
-	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
+	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o \
+	     inet_fragment.o
 
 obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
 obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
new file mode 100644
index 000000000000..484cf512858f
--- /dev/null
+++ b/net/ipv4/inet_fragment.c
@@ -0,0 +1,174 @@
+/*
+ * inet fragments management
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
+ *				Started as consolidation of ipv4/ip_fragment.c,
+ *				ipv6/reassembly. and ipv6 nf conntrack reassembly
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+
+#include <net/inet_frag.h>
+
+static void inet_frag_secret_rebuild(unsigned long dummy)
+{
+	struct inet_frags *f = (struct inet_frags *)dummy;
+	unsigned long now = jiffies;
+	int i;
+
+	write_lock(&f->lock);
+	get_random_bytes(&f->rnd, sizeof(u32));
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+		struct inet_frag_queue *q;
+		struct hlist_node *p, *n;
+
+		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+			unsigned int hval = f->hashfn(q);
+
+			if (hval != i) {
+				hlist_del(&q->list);
+
+				/* Relink to new hash chain. */
+				hlist_add_head(&q->list, &f->hash[hval]);
+			}
+		}
+	}
+	write_unlock(&f->lock);
+
+	mod_timer(&f->secret_timer, now + f->ctl->secret_interval);
+}
+
+void inet_frags_init(struct inet_frags *f)
+{
+	int i;
+
+	for (i = 0; i < INETFRAGS_HASHSZ; i++)
+		INIT_HLIST_HEAD(&f->hash[i]);
+
+	INIT_LIST_HEAD(&f->lru_list);
+	rwlock_init(&f->lock);
+
+	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
+				 (jiffies ^ (jiffies >> 6)));
+
+	f->nqueues = 0;
+	atomic_set(&f->mem, 0);
+
+	init_timer(&f->secret_timer);
+	f->secret_timer.function = inet_frag_secret_rebuild;
+	f->secret_timer.data = (unsigned long)f;
+	f->secret_timer.expires = jiffies + f->ctl->secret_interval;
+	add_timer(&f->secret_timer);
+}
+EXPORT_SYMBOL(inet_frags_init);
+
+void inet_frags_fini(struct inet_frags *f)
+{
+	del_timer(&f->secret_timer);
+}
+EXPORT_SYMBOL(inet_frags_fini);
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+	write_lock(&f->lock);
+	hlist_del(&fq->list);
+	list_del(&fq->lru_list);
+	f->nqueues--;
+	write_unlock(&f->lock);
+}
+
+void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+	if (del_timer(&fq->timer))
+		atomic_dec(&fq->refcnt);
+
+	if (!(fq->last_in & COMPLETE)) {
+		fq_unlink(fq, f);
+		atomic_dec(&fq->refcnt);
+		fq->last_in |= COMPLETE;
+	}
+}
+
+EXPORT_SYMBOL(inet_frag_kill);
+
+static inline void frag_kfree_skb(struct inet_frags *f, struct sk_buff *skb,
+					int *work)
+{
+	if (work)
+		*work -= skb->truesize;
+
+	atomic_sub(skb->truesize, &f->mem);
+	if (f->skb_free)
+		f->skb_free(skb);
+	kfree_skb(skb);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
+					int *work)
+{
+	struct sk_buff *fp;
+
+	BUG_TRAP(q->last_in & COMPLETE);
+	BUG_TRAP(del_timer(&q->timer) == 0);
+
+	/* Release all fragment data. */
+	fp = q->fragments;
+	while (fp) {
+		struct sk_buff *xp = fp->next;
+
+		frag_kfree_skb(f, fp, work);
+		fp = xp;
+	}
+
+	if (work)
+		*work -= f->qsize;
+	atomic_sub(f->qsize, &f->mem);
+
+	f->destructor(q);
+
+}
+EXPORT_SYMBOL(inet_frag_destroy);
+
+int inet_frag_evictor(struct inet_frags *f)
+{
+	struct inet_frag_queue *q;
+	int work, evicted = 0;
+
+	work = atomic_read(&f->mem) - f->ctl->low_thresh;
+	while (work > 0) {
+		read_lock(&f->lock);
+		if (list_empty(&f->lru_list)) {
+			read_unlock(&f->lock);
+			break;
+		}
+
+		q = list_first_entry(&f->lru_list,
+				struct inet_frag_queue, lru_list);
+		atomic_inc(&q->refcnt);
+		read_unlock(&f->lock);
+
+		spin_lock(&q->lock);
+		if (!(q->last_in & COMPLETE))
+			inet_frag_kill(q, f);
+		spin_unlock(&q->lock);
+
+		if (atomic_dec_and_test(&q->refcnt))
+			inet_frag_destroy(q, f, &work);
+		evicted++;
+	}
+
+	return evicted;
+}
+EXPORT_SYMBOL(inet_frag_evictor);
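For orientation, the second half of this commit (ipfrag_init() in net/ipv4/ip_fragment.c below) shows how a protocol registers with this new API. A minimal, hypothetical sketch of the same pattern, using an invented "foo" protocol and queue type that are not part of this patch, would look roughly like:

/* Illustrative only: a made-up "foo" protocol hooking into inet_frags. */
struct foo_frag_queue {
	struct inet_frag_queue	q;	/* shared queue header, as in struct ipq */
	u32			key;	/* protocol-specific lookup key */
};

static struct inet_frags_ctl foo_frags_ctl __read_mostly = {
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,
	.timeout	 = 30 * HZ,
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags foo_frags;

static unsigned int foo_hashfn(struct inet_frag_queue *q)
{
	/* hash on whatever identifies one reassembly queue for "foo" */
	return container_of(q, struct foo_frag_queue, q)->key &
	       (INETFRAGS_HASHSZ - 1);
}

static void foo_frag_free(struct inet_frag_queue *q)
{
	kfree(container_of(q, struct foo_frag_queue, q));
}

static void __init foo_frag_init(void)
{
	foo_frags.ctl		= &foo_frags_ctl;
	foo_frags.hashfn	= foo_hashfn;
	foo_frags.destructor	= foo_frag_free;
	foo_frags.skb_free	= NULL;
	foo_frags.qsize		= sizeof(struct foo_frag_queue);
	inet_frags_init(&foo_frags);
}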
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index afbf938836f5..877da3ed52e2 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -40,7 +40,7 @@
40 | #include <net/route.h> | 40 | #include <net/route.h> |
41 | #include <net/xfrm.h> | 41 | #include <net/xfrm.h> |
42 | 42 | ||
43 | static inline int ip_forward_finish(struct sk_buff *skb) | 43 | static int ip_forward_finish(struct sk_buff *skb) |
44 | { | 44 | { |
45 | struct ip_options * opt = &(IPCB(skb)->opt); | 45 | struct ip_options * opt = &(IPCB(skb)->opt); |
46 | 46 | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fabb86db763b..443b3f89192f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -39,6 +39,7 @@
39 | #include <net/icmp.h> | 39 | #include <net/icmp.h> |
40 | #include <net/checksum.h> | 40 | #include <net/checksum.h> |
41 | #include <net/inetpeer.h> | 41 | #include <net/inetpeer.h> |
42 | #include <net/inet_frag.h> | ||
42 | #include <linux/tcp.h> | 43 | #include <linux/tcp.h> |
43 | #include <linux/udp.h> | 44 | #include <linux/udp.h> |
44 | #include <linux/inet.h> | 45 | #include <linux/inet.h> |
@@ -49,21 +50,8 @@ | |||
49 | * as well. Or notify me, at least. --ANK | 50 | * as well. Or notify me, at least. --ANK |
50 | */ | 51 | */ |
51 | 52 | ||
52 | /* Fragment cache limits. We will commit 256K at one time. Should we | ||
53 | * cross that limit we will prune down to 192K. This should cope with | ||
54 | * even the most extreme cases without allowing an attacker to measurably | ||
55 | * harm machine performance. | ||
56 | */ | ||
57 | int sysctl_ipfrag_high_thresh __read_mostly = 256*1024; | ||
58 | int sysctl_ipfrag_low_thresh __read_mostly = 192*1024; | ||
59 | |||
60 | int sysctl_ipfrag_max_dist __read_mostly = 64; | 53 | int sysctl_ipfrag_max_dist __read_mostly = 64; |
61 | 54 | ||
62 | /* Important NOTE! Fragment queue must be destroyed before MSL expires. | ||
63 | * RFC791 is wrong proposing to prolongate timer each fragment arrival by TTL. | ||
64 | */ | ||
65 | int sysctl_ipfrag_time __read_mostly = IP_FRAG_TIME; | ||
66 | |||
67 | struct ipfrag_skb_cb | 55 | struct ipfrag_skb_cb |
68 | { | 56 | { |
69 | struct inet_skb_parm h; | 57 | struct inet_skb_parm h; |
@@ -74,153 +62,102 @@ struct ipfrag_skb_cb | |||
74 | 62 | ||
75 | /* Describe an entry in the "incomplete datagrams" queue. */ | 63 | /* Describe an entry in the "incomplete datagrams" queue. */ |
76 | struct ipq { | 64 | struct ipq { |
77 | struct hlist_node list; | 65 | struct inet_frag_queue q; |
78 | struct list_head lru_list; /* lru list member */ | 66 | |
79 | u32 user; | 67 | u32 user; |
80 | __be32 saddr; | 68 | __be32 saddr; |
81 | __be32 daddr; | 69 | __be32 daddr; |
82 | __be16 id; | 70 | __be16 id; |
83 | u8 protocol; | 71 | u8 protocol; |
84 | u8 last_in; | ||
85 | #define COMPLETE 4 | ||
86 | #define FIRST_IN 2 | ||
87 | #define LAST_IN 1 | ||
88 | |||
89 | struct sk_buff *fragments; /* linked list of received fragments */ | ||
90 | int len; /* total length of original datagram */ | ||
91 | int meat; | ||
92 | spinlock_t lock; | ||
93 | atomic_t refcnt; | ||
94 | struct timer_list timer; /* when will this queue expire? */ | ||
95 | ktime_t stamp; | ||
96 | int iif; | 72 | int iif; |
97 | unsigned int rid; | 73 | unsigned int rid; |
98 | struct inet_peer *peer; | 74 | struct inet_peer *peer; |
99 | }; | 75 | }; |
100 | 76 | ||
101 | /* Hash table. */ | 77 | struct inet_frags_ctl ip4_frags_ctl __read_mostly = { |
78 | /* | ||
79 | * Fragment cache limits. We will commit 256K at one time. Should we | ||
80 | * cross that limit we will prune down to 192K. This should cope with | ||
81 | * even the most extreme cases without allowing an attacker to | ||
82 | * measurably harm machine performance. | ||
83 | */ | ||
84 | .high_thresh = 256 * 1024, | ||
85 | .low_thresh = 192 * 1024, | ||
102 | 86 | ||
103 | #define IPQ_HASHSZ 64 | 87 | /* |
88 | * Important NOTE! Fragment queue must be destroyed before MSL expires. | ||
89 | * RFC791 is wrong proposing to prolongate timer each fragment arrival | ||
90 | * by TTL. | ||
91 | */ | ||
92 | .timeout = IP_FRAG_TIME, | ||
93 | .secret_interval = 10 * 60 * HZ, | ||
94 | }; | ||
104 | 95 | ||
105 | /* Per-bucket lock is easy to add now. */ | 96 | static struct inet_frags ip4_frags; |
106 | static struct hlist_head ipq_hash[IPQ_HASHSZ]; | ||
107 | static DEFINE_RWLOCK(ipfrag_lock); | ||
108 | static u32 ipfrag_hash_rnd; | ||
109 | static LIST_HEAD(ipq_lru_list); | ||
110 | int ip_frag_nqueues = 0; | ||
111 | 97 | ||
112 | static __inline__ void __ipq_unlink(struct ipq *qp) | 98 | int ip_frag_nqueues(void) |
113 | { | 99 | { |
114 | hlist_del(&qp->list); | 100 | return ip4_frags.nqueues; |
115 | list_del(&qp->lru_list); | ||
116 | ip_frag_nqueues--; | ||
117 | } | 101 | } |
118 | 102 | ||
119 | static __inline__ void ipq_unlink(struct ipq *ipq) | 103 | int ip_frag_mem(void) |
120 | { | 104 | { |
121 | write_lock(&ipfrag_lock); | 105 | return atomic_read(&ip4_frags.mem); |
122 | __ipq_unlink(ipq); | ||
123 | write_unlock(&ipfrag_lock); | ||
124 | } | 106 | } |
125 | 107 | ||
108 | static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | ||
109 | struct net_device *dev); | ||
110 | |||
126 | static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) | 111 | static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) |
127 | { | 112 | { |
128 | return jhash_3words((__force u32)id << 16 | prot, | 113 | return jhash_3words((__force u32)id << 16 | prot, |
129 | (__force u32)saddr, (__force u32)daddr, | 114 | (__force u32)saddr, (__force u32)daddr, |
130 | ipfrag_hash_rnd) & (IPQ_HASHSZ - 1); | 115 | ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1); |
131 | } | 116 | } |
132 | 117 | ||
133 | static struct timer_list ipfrag_secret_timer; | 118 | static unsigned int ip4_hashfn(struct inet_frag_queue *q) |
134 | int sysctl_ipfrag_secret_interval __read_mostly = 10 * 60 * HZ; | ||
135 | |||
136 | static void ipfrag_secret_rebuild(unsigned long dummy) | ||
137 | { | 119 | { |
138 | unsigned long now = jiffies; | 120 | struct ipq *ipq; |
139 | int i; | ||
140 | |||
141 | write_lock(&ipfrag_lock); | ||
142 | get_random_bytes(&ipfrag_hash_rnd, sizeof(u32)); | ||
143 | for (i = 0; i < IPQ_HASHSZ; i++) { | ||
144 | struct ipq *q; | ||
145 | struct hlist_node *p, *n; | ||
146 | |||
147 | hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) { | ||
148 | unsigned int hval = ipqhashfn(q->id, q->saddr, | ||
149 | q->daddr, q->protocol); | ||
150 | |||
151 | if (hval != i) { | ||
152 | hlist_del(&q->list); | ||
153 | 121 | ||
154 | /* Relink to new hash chain. */ | 122 | ipq = container_of(q, struct ipq, q); |
155 | hlist_add_head(&q->list, &ipq_hash[hval]); | 123 | return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); |
156 | } | ||
157 | } | ||
158 | } | ||
159 | write_unlock(&ipfrag_lock); | ||
160 | |||
161 | mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval); | ||
162 | } | 124 | } |
163 | 125 | ||
164 | atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */ | ||
165 | |||
166 | /* Memory Tracking Functions. */ | 126 | /* Memory Tracking Functions. */ |
167 | static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work) | 127 | static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work) |
168 | { | 128 | { |
169 | if (work) | 129 | if (work) |
170 | *work -= skb->truesize; | 130 | *work -= skb->truesize; |
171 | atomic_sub(skb->truesize, &ip_frag_mem); | 131 | atomic_sub(skb->truesize, &ip4_frags.mem); |
172 | kfree_skb(skb); | 132 | kfree_skb(skb); |
173 | } | 133 | } |
174 | 134 | ||
175 | static __inline__ void frag_free_queue(struct ipq *qp, int *work) | 135 | static __inline__ void ip4_frag_free(struct inet_frag_queue *q) |
176 | { | 136 | { |
177 | if (work) | 137 | struct ipq *qp; |
178 | *work -= sizeof(struct ipq); | 138 | |
179 | atomic_sub(sizeof(struct ipq), &ip_frag_mem); | 139 | qp = container_of(q, struct ipq, q); |
140 | if (qp->peer) | ||
141 | inet_putpeer(qp->peer); | ||
180 | kfree(qp); | 142 | kfree(qp); |
181 | } | 143 | } |
182 | 144 | ||
183 | static __inline__ struct ipq *frag_alloc_queue(void) | 145 | static __inline__ struct ipq *frag_alloc_queue(void) |
184 | { | 146 | { |
185 | struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC); | 147 | struct ipq *qp = kzalloc(sizeof(struct ipq), GFP_ATOMIC); |
186 | 148 | ||
187 | if (!qp) | 149 | if (!qp) |
188 | return NULL; | 150 | return NULL; |
189 | atomic_add(sizeof(struct ipq), &ip_frag_mem); | 151 | atomic_add(sizeof(struct ipq), &ip4_frags.mem); |
190 | return qp; | 152 | return qp; |
191 | } | 153 | } |
192 | 154 | ||
193 | 155 | ||
194 | /* Destruction primitives. */ | 156 | /* Destruction primitives. */ |
195 | 157 | ||
196 | /* Complete destruction of ipq. */ | 158 | static __inline__ void ipq_put(struct ipq *ipq) |
197 | static void ip_frag_destroy(struct ipq *qp, int *work) | ||
198 | { | ||
199 | struct sk_buff *fp; | ||
200 | |||
201 | BUG_TRAP(qp->last_in&COMPLETE); | ||
202 | BUG_TRAP(del_timer(&qp->timer) == 0); | ||
203 | |||
204 | if (qp->peer) | ||
205 | inet_putpeer(qp->peer); | ||
206 | |||
207 | /* Release all fragment data. */ | ||
208 | fp = qp->fragments; | ||
209 | while (fp) { | ||
210 | struct sk_buff *xp = fp->next; | ||
211 | |||
212 | frag_kfree_skb(fp, work); | ||
213 | fp = xp; | ||
214 | } | ||
215 | |||
216 | /* Finally, release the queue descriptor itself. */ | ||
217 | frag_free_queue(qp, work); | ||
218 | } | ||
219 | |||
220 | static __inline__ void ipq_put(struct ipq *ipq, int *work) | ||
221 | { | 159 | { |
222 | if (atomic_dec_and_test(&ipq->refcnt)) | 160 | inet_frag_put(&ipq->q, &ip4_frags); |
223 | ip_frag_destroy(ipq, work); | ||
224 | } | 161 | } |
225 | 162 | ||
226 | /* Kill ipq entry. It is not destroyed immediately, | 163 | /* Kill ipq entry. It is not destroyed immediately, |
@@ -228,14 +165,7 @@ static __inline__ void ipq_put(struct ipq *ipq, int *work) | |||
228 | */ | 165 | */ |
229 | static void ipq_kill(struct ipq *ipq) | 166 | static void ipq_kill(struct ipq *ipq) |
230 | { | 167 | { |
231 | if (del_timer(&ipq->timer)) | 168 | inet_frag_kill(&ipq->q, &ip4_frags); |
232 | atomic_dec(&ipq->refcnt); | ||
233 | |||
234 | if (!(ipq->last_in & COMPLETE)) { | ||
235 | ipq_unlink(ipq); | ||
236 | atomic_dec(&ipq->refcnt); | ||
237 | ipq->last_in |= COMPLETE; | ||
238 | } | ||
239 | } | 169 | } |
240 | 170 | ||
241 | /* Memory limiting on fragments. Evictor trashes the oldest | 171 | /* Memory limiting on fragments. Evictor trashes the oldest |
@@ -243,33 +173,11 @@ static void ipq_kill(struct ipq *ipq) | |||
243 | */ | 173 | */ |
244 | static void ip_evictor(void) | 174 | static void ip_evictor(void) |
245 | { | 175 | { |
246 | struct ipq *qp; | 176 | int evicted; |
247 | struct list_head *tmp; | ||
248 | int work; | ||
249 | |||
250 | work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh; | ||
251 | if (work <= 0) | ||
252 | return; | ||
253 | |||
254 | while (work > 0) { | ||
255 | read_lock(&ipfrag_lock); | ||
256 | if (list_empty(&ipq_lru_list)) { | ||
257 | read_unlock(&ipfrag_lock); | ||
258 | return; | ||
259 | } | ||
260 | tmp = ipq_lru_list.next; | ||
261 | qp = list_entry(tmp, struct ipq, lru_list); | ||
262 | atomic_inc(&qp->refcnt); | ||
263 | read_unlock(&ipfrag_lock); | ||
264 | 177 | ||
265 | spin_lock(&qp->lock); | 178 | evicted = inet_frag_evictor(&ip4_frags); |
266 | if (!(qp->last_in&COMPLETE)) | 179 | if (evicted) |
267 | ipq_kill(qp); | 180 | IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted); |
268 | spin_unlock(&qp->lock); | ||
269 | |||
270 | ipq_put(qp, &work); | ||
271 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | ||
272 | } | ||
273 | } | 181 | } |
274 | 182 | ||
275 | /* | 183 | /* |
@@ -279,9 +187,9 @@ static void ip_expire(unsigned long arg) | |||
279 | { | 187 | { |
280 | struct ipq *qp = (struct ipq *) arg; | 188 | struct ipq *qp = (struct ipq *) arg; |
281 | 189 | ||
282 | spin_lock(&qp->lock); | 190 | spin_lock(&qp->q.lock); |
283 | 191 | ||
284 | if (qp->last_in & COMPLETE) | 192 | if (qp->q.last_in & COMPLETE) |
285 | goto out; | 193 | goto out; |
286 | 194 | ||
287 | ipq_kill(qp); | 195 | ipq_kill(qp); |
@@ -289,8 +197,8 @@ static void ip_expire(unsigned long arg) | |||
289 | IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); | 197 | IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); |
290 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 198 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
291 | 199 | ||
292 | if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) { | 200 | if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) { |
293 | struct sk_buff *head = qp->fragments; | 201 | struct sk_buff *head = qp->q.fragments; |
294 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 202 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
295 | if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { | 203 | if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { |
296 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 204 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
@@ -298,8 +206,8 @@ static void ip_expire(unsigned long arg) | |||
298 | } | 206 | } |
299 | } | 207 | } |
300 | out: | 208 | out: |
301 | spin_unlock(&qp->lock); | 209 | spin_unlock(&qp->q.lock); |
302 | ipq_put(qp, NULL); | 210 | ipq_put(qp); |
303 | } | 211 | } |
304 | 212 | ||
305 | /* Creation primitives. */ | 213 | /* Creation primitives. */ |
@@ -312,7 +220,7 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in) | |||
312 | #endif | 220 | #endif |
313 | unsigned int hash; | 221 | unsigned int hash; |
314 | 222 | ||
315 | write_lock(&ipfrag_lock); | 223 | write_lock(&ip4_frags.lock); |
316 | hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr, | 224 | hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr, |
317 | qp_in->protocol); | 225 | qp_in->protocol); |
318 | #ifdef CONFIG_SMP | 226 | #ifdef CONFIG_SMP |
@@ -320,31 +228,31 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in) | |||
320 | * such entry could be created on other cpu, while we | 228 | * such entry could be created on other cpu, while we |
321 | * promoted read lock to write lock. | 229 | * promoted read lock to write lock. |
322 | */ | 230 | */ |
323 | hlist_for_each_entry(qp, n, &ipq_hash[hash], list) { | 231 | hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) { |
324 | if (qp->id == qp_in->id && | 232 | if (qp->id == qp_in->id && |
325 | qp->saddr == qp_in->saddr && | 233 | qp->saddr == qp_in->saddr && |
326 | qp->daddr == qp_in->daddr && | 234 | qp->daddr == qp_in->daddr && |
327 | qp->protocol == qp_in->protocol && | 235 | qp->protocol == qp_in->protocol && |
328 | qp->user == qp_in->user) { | 236 | qp->user == qp_in->user) { |
329 | atomic_inc(&qp->refcnt); | 237 | atomic_inc(&qp->q.refcnt); |
330 | write_unlock(&ipfrag_lock); | 238 | write_unlock(&ip4_frags.lock); |
331 | qp_in->last_in |= COMPLETE; | 239 | qp_in->q.last_in |= COMPLETE; |
332 | ipq_put(qp_in, NULL); | 240 | ipq_put(qp_in); |
333 | return qp; | 241 | return qp; |
334 | } | 242 | } |
335 | } | 243 | } |
336 | #endif | 244 | #endif |
337 | qp = qp_in; | 245 | qp = qp_in; |
338 | 246 | ||
339 | if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time)) | 247 | if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) |
340 | atomic_inc(&qp->refcnt); | 248 | atomic_inc(&qp->q.refcnt); |
341 | 249 | ||
342 | atomic_inc(&qp->refcnt); | 250 | atomic_inc(&qp->q.refcnt); |
343 | hlist_add_head(&qp->list, &ipq_hash[hash]); | 251 | hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]); |
344 | INIT_LIST_HEAD(&qp->lru_list); | 252 | INIT_LIST_HEAD(&qp->q.lru_list); |
345 | list_add_tail(&qp->lru_list, &ipq_lru_list); | 253 | list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list); |
346 | ip_frag_nqueues++; | 254 | ip4_frags.nqueues++; |
347 | write_unlock(&ipfrag_lock); | 255 | write_unlock(&ip4_frags.lock); |
348 | return qp; | 256 | return qp; |
349 | } | 257 | } |
350 | 258 | ||
@@ -357,23 +265,18 @@ static struct ipq *ip_frag_create(struct iphdr *iph, u32 user) | |||
357 | goto out_nomem; | 265 | goto out_nomem; |
358 | 266 | ||
359 | qp->protocol = iph->protocol; | 267 | qp->protocol = iph->protocol; |
360 | qp->last_in = 0; | ||
361 | qp->id = iph->id; | 268 | qp->id = iph->id; |
362 | qp->saddr = iph->saddr; | 269 | qp->saddr = iph->saddr; |
363 | qp->daddr = iph->daddr; | 270 | qp->daddr = iph->daddr; |
364 | qp->user = user; | 271 | qp->user = user; |
365 | qp->len = 0; | ||
366 | qp->meat = 0; | ||
367 | qp->fragments = NULL; | ||
368 | qp->iif = 0; | ||
369 | qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL; | 272 | qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL; |
370 | 273 | ||
371 | /* Initialize a timer for this entry. */ | 274 | /* Initialize a timer for this entry. */ |
372 | init_timer(&qp->timer); | 275 | init_timer(&qp->q.timer); |
373 | qp->timer.data = (unsigned long) qp; /* pointer to queue */ | 276 | qp->q.timer.data = (unsigned long) qp; /* pointer to queue */ |
374 | qp->timer.function = ip_expire; /* expire function */ | 277 | qp->q.timer.function = ip_expire; /* expire function */ |
375 | spin_lock_init(&qp->lock); | 278 | spin_lock_init(&qp->q.lock); |
376 | atomic_set(&qp->refcnt, 1); | 279 | atomic_set(&qp->q.refcnt, 1); |
377 | 280 | ||
378 | return ip_frag_intern(qp); | 281 | return ip_frag_intern(qp); |
379 | 282 | ||
@@ -395,20 +298,20 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user) | |||
395 | struct ipq *qp; | 298 | struct ipq *qp; |
396 | struct hlist_node *n; | 299 | struct hlist_node *n; |
397 | 300 | ||
398 | read_lock(&ipfrag_lock); | 301 | read_lock(&ip4_frags.lock); |
399 | hash = ipqhashfn(id, saddr, daddr, protocol); | 302 | hash = ipqhashfn(id, saddr, daddr, protocol); |
400 | hlist_for_each_entry(qp, n, &ipq_hash[hash], list) { | 303 | hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) { |
401 | if (qp->id == id && | 304 | if (qp->id == id && |
402 | qp->saddr == saddr && | 305 | qp->saddr == saddr && |
403 | qp->daddr == daddr && | 306 | qp->daddr == daddr && |
404 | qp->protocol == protocol && | 307 | qp->protocol == protocol && |
405 | qp->user == user) { | 308 | qp->user == user) { |
406 | atomic_inc(&qp->refcnt); | 309 | atomic_inc(&qp->q.refcnt); |
407 | read_unlock(&ipfrag_lock); | 310 | read_unlock(&ip4_frags.lock); |
408 | return qp; | 311 | return qp; |
409 | } | 312 | } |
410 | } | 313 | } |
411 | read_unlock(&ipfrag_lock); | 314 | read_unlock(&ip4_frags.lock); |
412 | 315 | ||
413 | return ip_frag_create(iph, user); | 316 | return ip_frag_create(iph, user); |
414 | } | 317 | } |
@@ -429,7 +332,7 @@ static inline int ip_frag_too_far(struct ipq *qp) | |||
429 | end = atomic_inc_return(&peer->rid); | 332 | end = atomic_inc_return(&peer->rid); |
430 | qp->rid = end; | 333 | qp->rid = end; |
431 | 334 | ||
432 | rc = qp->fragments && (end - start) > max; | 335 | rc = qp->q.fragments && (end - start) > max; |
433 | 336 | ||
434 | if (rc) { | 337 | if (rc) { |
435 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 338 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
@@ -442,39 +345,42 @@ static int ip_frag_reinit(struct ipq *qp) | |||
442 | { | 345 | { |
443 | struct sk_buff *fp; | 346 | struct sk_buff *fp; |
444 | 347 | ||
445 | if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time)) { | 348 | if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) { |
446 | atomic_inc(&qp->refcnt); | 349 | atomic_inc(&qp->q.refcnt); |
447 | return -ETIMEDOUT; | 350 | return -ETIMEDOUT; |
448 | } | 351 | } |
449 | 352 | ||
450 | fp = qp->fragments; | 353 | fp = qp->q.fragments; |
451 | do { | 354 | do { |
452 | struct sk_buff *xp = fp->next; | 355 | struct sk_buff *xp = fp->next; |
453 | frag_kfree_skb(fp, NULL); | 356 | frag_kfree_skb(fp, NULL); |
454 | fp = xp; | 357 | fp = xp; |
455 | } while (fp); | 358 | } while (fp); |
456 | 359 | ||
457 | qp->last_in = 0; | 360 | qp->q.last_in = 0; |
458 | qp->len = 0; | 361 | qp->q.len = 0; |
459 | qp->meat = 0; | 362 | qp->q.meat = 0; |
460 | qp->fragments = NULL; | 363 | qp->q.fragments = NULL; |
461 | qp->iif = 0; | 364 | qp->iif = 0; |
462 | 365 | ||
463 | return 0; | 366 | return 0; |
464 | } | 367 | } |
465 | 368 | ||
466 | /* Add new segment to existing queue. */ | 369 | /* Add new segment to existing queue. */ |
467 | static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | 370 | static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) |
468 | { | 371 | { |
469 | struct sk_buff *prev, *next; | 372 | struct sk_buff *prev, *next; |
373 | struct net_device *dev; | ||
470 | int flags, offset; | 374 | int flags, offset; |
471 | int ihl, end; | 375 | int ihl, end; |
376 | int err = -ENOENT; | ||
472 | 377 | ||
473 | if (qp->last_in & COMPLETE) | 378 | if (qp->q.last_in & COMPLETE) |
474 | goto err; | 379 | goto err; |
475 | 380 | ||
476 | if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && | 381 | if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && |
477 | unlikely(ip_frag_too_far(qp)) && unlikely(ip_frag_reinit(qp))) { | 382 | unlikely(ip_frag_too_far(qp)) && |
383 | unlikely(err = ip_frag_reinit(qp))) { | ||
478 | ipq_kill(qp); | 384 | ipq_kill(qp); |
479 | goto err; | 385 | goto err; |
480 | } | 386 | } |
@@ -487,36 +393,40 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
487 | 393 | ||
488 | /* Determine the position of this fragment. */ | 394 | /* Determine the position of this fragment. */ |
489 | end = offset + skb->len - ihl; | 395 | end = offset + skb->len - ihl; |
396 | err = -EINVAL; | ||
490 | 397 | ||
491 | /* Is this the final fragment? */ | 398 | /* Is this the final fragment? */ |
492 | if ((flags & IP_MF) == 0) { | 399 | if ((flags & IP_MF) == 0) { |
493 | /* If we already have some bits beyond end | 400 | /* If we already have some bits beyond end |
494 | * or have different end, the segment is corrrupted. | 401 | * or have different end, the segment is corrrupted. |
495 | */ | 402 | */ |
496 | if (end < qp->len || | 403 | if (end < qp->q.len || |
497 | ((qp->last_in & LAST_IN) && end != qp->len)) | 404 | ((qp->q.last_in & LAST_IN) && end != qp->q.len)) |
498 | goto err; | 405 | goto err; |
499 | qp->last_in |= LAST_IN; | 406 | qp->q.last_in |= LAST_IN; |
500 | qp->len = end; | 407 | qp->q.len = end; |
501 | } else { | 408 | } else { |
502 | if (end&7) { | 409 | if (end&7) { |
503 | end &= ~7; | 410 | end &= ~7; |
504 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 411 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) |
505 | skb->ip_summed = CHECKSUM_NONE; | 412 | skb->ip_summed = CHECKSUM_NONE; |
506 | } | 413 | } |
507 | if (end > qp->len) { | 414 | if (end > qp->q.len) { |
508 | /* Some bits beyond end -> corruption. */ | 415 | /* Some bits beyond end -> corruption. */ |
509 | if (qp->last_in & LAST_IN) | 416 | if (qp->q.last_in & LAST_IN) |
510 | goto err; | 417 | goto err; |
511 | qp->len = end; | 418 | qp->q.len = end; |
512 | } | 419 | } |
513 | } | 420 | } |
514 | if (end == offset) | 421 | if (end == offset) |
515 | goto err; | 422 | goto err; |
516 | 423 | ||
424 | err = -ENOMEM; | ||
517 | if (pskb_pull(skb, ihl) == NULL) | 425 | if (pskb_pull(skb, ihl) == NULL) |
518 | goto err; | 426 | goto err; |
519 | if (pskb_trim_rcsum(skb, end-offset)) | 427 | |
428 | err = pskb_trim_rcsum(skb, end - offset); | ||
429 | if (err) | ||
520 | goto err; | 430 | goto err; |
521 | 431 | ||
522 | /* Find out which fragments are in front and at the back of us | 432 | /* Find out which fragments are in front and at the back of us |
@@ -524,7 +434,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
524 | * this fragment, right? | 434 | * this fragment, right? |
525 | */ | 435 | */ |
526 | prev = NULL; | 436 | prev = NULL; |
527 | for (next = qp->fragments; next != NULL; next = next->next) { | 437 | for (next = qp->q.fragments; next != NULL; next = next->next) { |
528 | if (FRAG_CB(next)->offset >= offset) | 438 | if (FRAG_CB(next)->offset >= offset) |
529 | break; /* bingo! */ | 439 | break; /* bingo! */ |
530 | prev = next; | 440 | prev = next; |
@@ -539,8 +449,10 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
539 | 449 | ||
540 | if (i > 0) { | 450 | if (i > 0) { |
541 | offset += i; | 451 | offset += i; |
452 | err = -EINVAL; | ||
542 | if (end <= offset) | 453 | if (end <= offset) |
543 | goto err; | 454 | goto err; |
455 | err = -ENOMEM; | ||
544 | if (!pskb_pull(skb, i)) | 456 | if (!pskb_pull(skb, i)) |
545 | goto err; | 457 | goto err; |
546 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 458 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) |
@@ -548,6 +460,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
548 | } | 460 | } |
549 | } | 461 | } |
550 | 462 | ||
463 | err = -ENOMEM; | ||
464 | |||
551 | while (next && FRAG_CB(next)->offset < end) { | 465 | while (next && FRAG_CB(next)->offset < end) { |
552 | int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ | 466 | int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ |
553 | 467 | ||
@@ -558,7 +472,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
558 | if (!pskb_pull(next, i)) | 472 | if (!pskb_pull(next, i)) |
559 | goto err; | 473 | goto err; |
560 | FRAG_CB(next)->offset += i; | 474 | FRAG_CB(next)->offset += i; |
561 | qp->meat -= i; | 475 | qp->q.meat -= i; |
562 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 476 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
563 | next->ip_summed = CHECKSUM_NONE; | 477 | next->ip_summed = CHECKSUM_NONE; |
564 | break; | 478 | break; |
@@ -573,9 +487,9 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
573 | if (prev) | 487 | if (prev) |
574 | prev->next = next; | 488 | prev->next = next; |
575 | else | 489 | else |
576 | qp->fragments = next; | 490 | qp->q.fragments = next; |
577 | 491 | ||
578 | qp->meat -= free_it->len; | 492 | qp->q.meat -= free_it->len; |
579 | frag_kfree_skb(free_it, NULL); | 493 | frag_kfree_skb(free_it, NULL); |
580 | } | 494 | } |
581 | } | 495 | } |
@@ -587,50 +501,77 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
587 | if (prev) | 501 | if (prev) |
588 | prev->next = skb; | 502 | prev->next = skb; |
589 | else | 503 | else |
590 | qp->fragments = skb; | 504 | qp->q.fragments = skb; |
591 | 505 | ||
592 | if (skb->dev) | 506 | dev = skb->dev; |
593 | qp->iif = skb->dev->ifindex; | 507 | if (dev) { |
594 | skb->dev = NULL; | 508 | qp->iif = dev->ifindex; |
595 | qp->stamp = skb->tstamp; | 509 | skb->dev = NULL; |
596 | qp->meat += skb->len; | 510 | } |
597 | atomic_add(skb->truesize, &ip_frag_mem); | 511 | qp->q.stamp = skb->tstamp; |
512 | qp->q.meat += skb->len; | ||
513 | atomic_add(skb->truesize, &ip4_frags.mem); | ||
598 | if (offset == 0) | 514 | if (offset == 0) |
599 | qp->last_in |= FIRST_IN; | 515 | qp->q.last_in |= FIRST_IN; |
600 | 516 | ||
601 | write_lock(&ipfrag_lock); | 517 | if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len) |
602 | list_move_tail(&qp->lru_list, &ipq_lru_list); | 518 | return ip_frag_reasm(qp, prev, dev); |
603 | write_unlock(&ipfrag_lock); | ||
604 | 519 | ||
605 | return; | 520 | write_lock(&ip4_frags.lock); |
521 | list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list); | ||
522 | write_unlock(&ip4_frags.lock); | ||
523 | return -EINPROGRESS; | ||
606 | 524 | ||
607 | err: | 525 | err: |
608 | kfree_skb(skb); | 526 | kfree_skb(skb); |
527 | return err; | ||
609 | } | 528 | } |
610 | 529 | ||
611 | 530 | ||
612 | /* Build a new IP datagram from all its fragments. */ | 531 | /* Build a new IP datagram from all its fragments. */ |
613 | 532 | ||
614 | static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) | 533 | static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, |
534 | struct net_device *dev) | ||
615 | { | 535 | { |
616 | struct iphdr *iph; | 536 | struct iphdr *iph; |
617 | struct sk_buff *fp, *head = qp->fragments; | 537 | struct sk_buff *fp, *head = qp->q.fragments; |
618 | int len; | 538 | int len; |
619 | int ihlen; | 539 | int ihlen; |
540 | int err; | ||
620 | 541 | ||
621 | ipq_kill(qp); | 542 | ipq_kill(qp); |
622 | 543 | ||
544 | /* Make the one we just received the head. */ | ||
545 | if (prev) { | ||
546 | head = prev->next; | ||
547 | fp = skb_clone(head, GFP_ATOMIC); | ||
548 | |||
549 | if (!fp) | ||
550 | goto out_nomem; | ||
551 | |||
552 | fp->next = head->next; | ||
553 | prev->next = fp; | ||
554 | |||
555 | skb_morph(head, qp->q.fragments); | ||
556 | head->next = qp->q.fragments->next; | ||
557 | |||
558 | kfree_skb(qp->q.fragments); | ||
559 | qp->q.fragments = head; | ||
560 | } | ||
561 | |||
623 | BUG_TRAP(head != NULL); | 562 | BUG_TRAP(head != NULL); |
624 | BUG_TRAP(FRAG_CB(head)->offset == 0); | 563 | BUG_TRAP(FRAG_CB(head)->offset == 0); |
625 | 564 | ||
626 | /* Allocate a new buffer for the datagram. */ | 565 | /* Allocate a new buffer for the datagram. */ |
627 | ihlen = ip_hdrlen(head); | 566 | ihlen = ip_hdrlen(head); |
628 | len = ihlen + qp->len; | 567 | len = ihlen + qp->q.len; |
629 | 568 | ||
569 | err = -E2BIG; | ||
630 | if (len > 65535) | 570 | if (len > 65535) |
631 | goto out_oversize; | 571 | goto out_oversize; |
632 | 572 | ||
633 | /* Head of list must not be cloned. */ | 573 | /* Head of list must not be cloned. */ |
574 | err = -ENOMEM; | ||
634 | if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) | 575 | if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) |
635 | goto out_nomem; | 576 | goto out_nomem; |
636 | 577 | ||
@@ -654,12 +595,12 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) | |||
654 | head->len -= clone->len; | 595 | head->len -= clone->len; |
655 | clone->csum = 0; | 596 | clone->csum = 0; |
656 | clone->ip_summed = head->ip_summed; | 597 | clone->ip_summed = head->ip_summed; |
657 | atomic_add(clone->truesize, &ip_frag_mem); | 598 | atomic_add(clone->truesize, &ip4_frags.mem); |
658 | } | 599 | } |
659 | 600 | ||
660 | skb_shinfo(head)->frag_list = head->next; | 601 | skb_shinfo(head)->frag_list = head->next; |
661 | skb_push(head, head->data - skb_network_header(head)); | 602 | skb_push(head, head->data - skb_network_header(head)); |
662 | atomic_sub(head->truesize, &ip_frag_mem); | 603 | atomic_sub(head->truesize, &ip4_frags.mem); |
663 | 604 | ||
664 | for (fp=head->next; fp; fp = fp->next) { | 605 | for (fp=head->next; fp; fp = fp->next) { |
665 | head->data_len += fp->len; | 606 | head->data_len += fp->len; |
@@ -669,19 +610,19 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) | |||
669 | else if (head->ip_summed == CHECKSUM_COMPLETE) | 610 | else if (head->ip_summed == CHECKSUM_COMPLETE) |
670 | head->csum = csum_add(head->csum, fp->csum); | 611 | head->csum = csum_add(head->csum, fp->csum); |
671 | head->truesize += fp->truesize; | 612 | head->truesize += fp->truesize; |
672 | atomic_sub(fp->truesize, &ip_frag_mem); | 613 | atomic_sub(fp->truesize, &ip4_frags.mem); |
673 | } | 614 | } |
674 | 615 | ||
675 | head->next = NULL; | 616 | head->next = NULL; |
676 | head->dev = dev; | 617 | head->dev = dev; |
677 | head->tstamp = qp->stamp; | 618 | head->tstamp = qp->q.stamp; |
678 | 619 | ||
679 | iph = ip_hdr(head); | 620 | iph = ip_hdr(head); |
680 | iph->frag_off = 0; | 621 | iph->frag_off = 0; |
681 | iph->tot_len = htons(len); | 622 | iph->tot_len = htons(len); |
682 | IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); | 623 | IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); |
683 | qp->fragments = NULL; | 624 | qp->q.fragments = NULL; |
684 | return head; | 625 | return 0; |
685 | 626 | ||
686 | out_nomem: | 627 | out_nomem: |
687 | LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " | 628 | LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " |
@@ -694,54 +635,46 @@ out_oversize: | |||
694 | NIPQUAD(qp->saddr)); | 635 | NIPQUAD(qp->saddr)); |
695 | out_fail: | 636 | out_fail: |
696 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 637 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
697 | return NULL; | 638 | return err; |
698 | } | 639 | } |
699 | 640 | ||
700 | /* Process an incoming IP datagram fragment. */ | 641 | /* Process an incoming IP datagram fragment. */ |
701 | struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user) | 642 | int ip_defrag(struct sk_buff *skb, u32 user) |
702 | { | 643 | { |
703 | struct ipq *qp; | 644 | struct ipq *qp; |
704 | struct net_device *dev; | ||
705 | 645 | ||
706 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); | 646 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); |
707 | 647 | ||
708 | /* Start by cleaning up the memory. */ | 648 | /* Start by cleaning up the memory. */ |
709 | if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh) | 649 | if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh) |
710 | ip_evictor(); | 650 | ip_evictor(); |
711 | 651 | ||
712 | dev = skb->dev; | ||
713 | |||
714 | /* Lookup (or create) queue header */ | 652 | /* Lookup (or create) queue header */ |
715 | if ((qp = ip_find(ip_hdr(skb), user)) != NULL) { | 653 | if ((qp = ip_find(ip_hdr(skb), user)) != NULL) { |
716 | struct sk_buff *ret = NULL; | 654 | int ret; |
717 | |||
718 | spin_lock(&qp->lock); | ||
719 | 655 | ||
720 | ip_frag_queue(qp, skb); | 656 | spin_lock(&qp->q.lock); |
721 | 657 | ||
722 | if (qp->last_in == (FIRST_IN|LAST_IN) && | 658 | ret = ip_frag_queue(qp, skb); |
723 | qp->meat == qp->len) | ||
724 | ret = ip_frag_reasm(qp, dev); | ||
725 | 659 | ||
726 | spin_unlock(&qp->lock); | 660 | spin_unlock(&qp->q.lock); |
727 | ipq_put(qp, NULL); | 661 | ipq_put(qp); |
728 | return ret; | 662 | return ret; |
729 | } | 663 | } |
730 | 664 | ||
731 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 665 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
732 | kfree_skb(skb); | 666 | kfree_skb(skb); |
733 | return NULL; | 667 | return -ENOMEM; |
734 | } | 668 | } |
735 | 669 | ||
736 | void __init ipfrag_init(void) | 670 | void __init ipfrag_init(void) |
737 | { | 671 | { |
738 | ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ | 672 | ip4_frags.ctl = &ip4_frags_ctl; |
739 | (jiffies ^ (jiffies >> 6))); | 673 | ip4_frags.hashfn = ip4_hashfn; |
740 | 674 | ip4_frags.destructor = ip4_frag_free; | |
741 | init_timer(&ipfrag_secret_timer); | 675 | ip4_frags.skb_free = NULL; |
742 | ipfrag_secret_timer.function = ipfrag_secret_rebuild; | 676 | ip4_frags.qsize = sizeof(struct ipq); |
743 | ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval; | 677 | inet_frags_init(&ip4_frags); |
744 | add_timer(&ipfrag_secret_timer); | ||
745 | } | 678 | } |
746 | 679 | ||
747 | EXPORT_SYMBOL(ip_defrag); | 680 | EXPORT_SYMBOL(ip_defrag); |
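Note the changed calling convention: ip_defrag() now returns an int instead of a struct sk_buff *. Zero means skb holds the fully reassembled datagram; any non-zero value (for example -EINPROGRESS while fragments are still being collected) means the fragment code queued or freed the skb and the caller must stop using it. A simplified caller sketch, modeled on the ip_input.c hunks below (the function name is invented, not from this patch):

/* Sketch only: how a caller uses the new int-returning ip_defrag(). */
static int example_deliver(struct sk_buff *skb)
{
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;	/* skb queued or freed by the frag code */
	}
	/* skb now carries the complete datagram; continue delivery here. */
	return 0;
}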
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 41d8964591e7..168c871fcd79 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -172,8 +172,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
172 | (!sk->sk_bound_dev_if || | 172 | (!sk->sk_bound_dev_if || |
173 | sk->sk_bound_dev_if == skb->dev->ifindex)) { | 173 | sk->sk_bound_dev_if == skb->dev->ifindex)) { |
174 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 174 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
175 | skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN); | 175 | if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { |
176 | if (skb == NULL) { | ||
177 | read_unlock(&ip_ra_lock); | 176 | read_unlock(&ip_ra_lock); |
178 | return 1; | 177 | return 1; |
179 | } | 178 | } |
@@ -196,7 +195,7 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
196 | return 0; | 195 | return 0; |
197 | } | 196 | } |
198 | 197 | ||
199 | static inline int ip_local_deliver_finish(struct sk_buff *skb) | 198 | static int ip_local_deliver_finish(struct sk_buff *skb) |
200 | { | 199 | { |
201 | __skb_pull(skb, ip_hdrlen(skb)); | 200 | __skb_pull(skb, ip_hdrlen(skb)); |
202 | 201 | ||
@@ -265,8 +264,7 @@ int ip_local_deliver(struct sk_buff *skb) | |||
265 | */ | 264 | */ |
266 | 265 | ||
267 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 266 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
268 | skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER); | 267 | if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) |
269 | if (!skb) | ||
270 | return 0; | 268 | return 0; |
271 | } | 269 | } |
272 | 270 | ||
@@ -326,7 +324,7 @@ drop: | |||
326 | return -1; | 324 | return -1; |
327 | } | 325 | } |
328 | 326 | ||
329 | static inline int ip_rcv_finish(struct sk_buff *skb) | 327 | static int ip_rcv_finish(struct sk_buff *skb) |
330 | { | 328 | { |
331 | const struct iphdr *iph = ip_hdr(skb); | 329 | const struct iphdr *iph = ip_hdr(skb); |
332 | struct rtable *rt; | 330 | struct rtable *rt; |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 699f06781fd8..f508835ba713 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -202,7 +202,7 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb)
202 | skb->dst->dev->mtu : dst_mtu(skb->dst); | 202 | skb->dst->dev->mtu : dst_mtu(skb->dst); |
203 | } | 203 | } |
204 | 204 | ||
205 | static inline int ip_finish_output(struct sk_buff *skb) | 205 | static int ip_finish_output(struct sk_buff *skb) |
206 | { | 206 | { |
207 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 207 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
208 | /* Policy lookup after SNAT yielded a new policy */ | 208 | /* Policy lookup after SNAT yielded a new policy */ |
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 341474eefa55..664cb8e97c1c 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -25,6 +25,7 @@
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | #include <linux/in.h> | 26 | #include <linux/in.h> |
27 | #include <linux/ip.h> | 27 | #include <linux/ip.h> |
28 | #include <linux/netfilter.h> | ||
28 | #include <net/net_namespace.h> | 29 | #include <net/net_namespace.h> |
29 | #include <net/protocol.h> | 30 | #include <net/protocol.h> |
30 | #include <net/tcp.h> | 31 | #include <net/tcp.h> |
@@ -328,18 +329,18 @@ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, | |||
328 | spin_unlock(&cp->lock); | 329 | spin_unlock(&cp->lock); |
329 | } | 330 | } |
330 | 331 | ||
331 | static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, | 332 | static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb, |
332 | struct ip_vs_app *app) | 333 | struct ip_vs_app *app) |
333 | { | 334 | { |
334 | int diff; | 335 | int diff; |
335 | const unsigned int tcp_offset = ip_hdrlen(*pskb); | 336 | const unsigned int tcp_offset = ip_hdrlen(skb); |
336 | struct tcphdr *th; | 337 | struct tcphdr *th; |
337 | __u32 seq; | 338 | __u32 seq; |
338 | 339 | ||
339 | if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) | 340 | if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) |
340 | return 0; | 341 | return 0; |
341 | 342 | ||
342 | th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); | 343 | th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); |
343 | 344 | ||
344 | /* | 345 | /* |
345 | * Remember seq number in case this pkt gets resized | 346 | * Remember seq number in case this pkt gets resized |
@@ -360,7 +361,7 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, | |||
360 | if (app->pkt_out == NULL) | 361 | if (app->pkt_out == NULL) |
361 | return 1; | 362 | return 1; |
362 | 363 | ||
363 | if (!app->pkt_out(app, cp, pskb, &diff)) | 364 | if (!app->pkt_out(app, cp, skb, &diff)) |
364 | return 0; | 365 | return 0; |
365 | 366 | ||
366 | /* | 367 | /* |
@@ -378,7 +379,7 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, | |||
378 | * called by ipvs packet handler, assumes previously checked cp!=NULL | 379 | * called by ipvs packet handler, assumes previously checked cp!=NULL |
379 | * returns false if it can't handle packet (oom) | 380 | * returns false if it can't handle packet (oom) |
380 | */ | 381 | */ |
381 | int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb) | 382 | int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb) |
382 | { | 383 | { |
383 | struct ip_vs_app *app; | 384 | struct ip_vs_app *app; |
384 | 385 | ||
@@ -391,7 +392,7 @@ int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb) | |||
391 | 392 | ||
392 | /* TCP is complicated */ | 393 | /* TCP is complicated */ |
393 | if (cp->protocol == IPPROTO_TCP) | 394 | if (cp->protocol == IPPROTO_TCP) |
394 | return app_tcp_pkt_out(cp, pskb, app); | 395 | return app_tcp_pkt_out(cp, skb, app); |
395 | 396 | ||
396 | /* | 397 | /* |
397 | * Call private output hook function | 398 | * Call private output hook function |
@@ -399,22 +400,22 @@ int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb) | |||
399 | if (app->pkt_out == NULL) | 400 | if (app->pkt_out == NULL) |
400 | return 1; | 401 | return 1; |
401 | 402 | ||
402 | return app->pkt_out(app, cp, pskb, NULL); | 403 | return app->pkt_out(app, cp, skb, NULL); |
403 | } | 404 | } |
404 | 405 | ||
405 | 406 | ||
406 | static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb, | 407 | static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb, |
407 | struct ip_vs_app *app) | 408 | struct ip_vs_app *app) |
408 | { | 409 | { |
409 | int diff; | 410 | int diff; |
410 | const unsigned int tcp_offset = ip_hdrlen(*pskb); | 411 | const unsigned int tcp_offset = ip_hdrlen(skb); |
411 | struct tcphdr *th; | 412 | struct tcphdr *th; |
412 | __u32 seq; | 413 | __u32 seq; |
413 | 414 | ||
414 | if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) | 415 | if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) |
415 | return 0; | 416 | return 0; |
416 | 417 | ||
417 | th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); | 418 | th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); |
418 | 419 | ||
419 | /* | 420 | /* |
420 | * Remember seq number in case this pkt gets resized | 421 | * Remember seq number in case this pkt gets resized |
@@ -435,7 +436,7 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb, | |||
435 | if (app->pkt_in == NULL) | 436 | if (app->pkt_in == NULL) |
436 | return 1; | 437 | return 1; |
437 | 438 | ||
438 | if (!app->pkt_in(app, cp, pskb, &diff)) | 439 | if (!app->pkt_in(app, cp, skb, &diff)) |
439 | return 0; | 440 | return 0; |
440 | 441 | ||
441 | /* | 442 | /* |
@@ -453,7 +454,7 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb, | |||
453 | * called by ipvs packet handler, assumes previously checked cp!=NULL. | 454 | * called by ipvs packet handler, assumes previously checked cp!=NULL. |
454 | * returns false if can't handle packet (oom). | 455 | * returns false if can't handle packet (oom). |
455 | */ | 456 | */ |
456 | int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb) | 457 | int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb) |
457 | { | 458 | { |
458 | struct ip_vs_app *app; | 459 | struct ip_vs_app *app; |
459 | 460 | ||
@@ -466,7 +467,7 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb) | |||
466 | 467 | ||
467 | /* TCP is complicated */ | 468 | /* TCP is complicated */ |
468 | if (cp->protocol == IPPROTO_TCP) | 469 | if (cp->protocol == IPPROTO_TCP) |
469 | return app_tcp_pkt_in(cp, pskb, app); | 470 | return app_tcp_pkt_in(cp, skb, app); |
470 | 471 | ||
471 | /* | 472 | /* |
472 | * Call private input hook function | 473 | * Call private input hook function |
@@ -474,7 +475,7 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb) | |||
474 | if (app->pkt_in == NULL) | 475 | if (app->pkt_in == NULL) |
475 | return 1; | 476 | return 1; |
476 | 477 | ||
477 | return app->pkt_in(app, cp, pskb, NULL); | 478 | return app->pkt_in(app, cp, skb, NULL); |
478 | } | 479 | } |
479 | 480 | ||
480 | 481 | ||
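The ip_vs_app.c hunks above, and the ip_vs_core.c hunks that follow, track the tree-wide switch of netfilter hooks and helpers from struct sk_buff ** to struct sk_buff *, with the private ip_vs_make_skb_writable() dropped in favour of skb_make_writable(). A minimal hook written against the new prototype might look like the sketch below (the hook name is invented; this is not code from the patch):

static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* The skb is modified in place; there is no *pskb to reassign. */
	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return NF_DROP;
	return NF_ACCEPT;
}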
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index fbca2a2ff29f..c6ed7654e839 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(ip_vs_conn_put);
58 | #ifdef CONFIG_IP_VS_DEBUG | 58 | #ifdef CONFIG_IP_VS_DEBUG |
59 | EXPORT_SYMBOL(ip_vs_get_debug_level); | 59 | EXPORT_SYMBOL(ip_vs_get_debug_level); |
60 | #endif | 60 | #endif |
61 | EXPORT_SYMBOL(ip_vs_make_skb_writable); | ||
62 | 61 | ||
63 | 62 | ||
64 | /* ID used in ICMP lookups */ | 63 | /* ID used in ICMP lookups */ |
@@ -163,42 +162,6 @@ ip_vs_set_state(struct ip_vs_conn *cp, int direction, | |||
163 | } | 162 | } |
164 | 163 | ||
165 | 164 | ||
166 | int ip_vs_make_skb_writable(struct sk_buff **pskb, int writable_len) | ||
167 | { | ||
168 | struct sk_buff *skb = *pskb; | ||
169 | |||
170 | /* skb is already used, better copy skb and its payload */ | ||
171 | if (unlikely(skb_shared(skb) || skb->sk)) | ||
172 | goto copy_skb; | ||
173 | |||
174 | /* skb data is already used, copy it */ | ||
175 | if (unlikely(skb_cloned(skb))) | ||
176 | goto copy_data; | ||
177 | |||
178 | return pskb_may_pull(skb, writable_len); | ||
179 | |||
180 | copy_data: | ||
181 | if (unlikely(writable_len > skb->len)) | ||
182 | return 0; | ||
183 | return !pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
184 | |||
185 | copy_skb: | ||
186 | if (unlikely(writable_len > skb->len)) | ||
187 | return 0; | ||
188 | skb = skb_copy(skb, GFP_ATOMIC); | ||
189 | if (!skb) | ||
190 | return 0; | ||
191 | BUG_ON(skb_is_nonlinear(skb)); | ||
192 | |||
193 | /* Rest of kernel will get very unhappy if we pass it a | ||
194 | suddenly-orphaned skbuff */ | ||
195 | if ((*pskb)->sk) | ||
196 | skb_set_owner_w(skb, (*pskb)->sk); | ||
197 | kfree_skb(*pskb); | ||
198 | *pskb = skb; | ||
199 | return 1; | ||
200 | } | ||
201 | |||
202 | /* | 165 | /* |
203 | * IPVS persistent scheduling function | 166 | * IPVS persistent scheduling function |
204 | * It creates a connection entry according to its template if exists, | 167 | * It creates a connection entry according to its template if exists, |
@@ -525,12 +488,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
525 | * for VS/NAT. | 488 | * for VS/NAT. |
526 | */ | 489 | */ |
527 | static unsigned int ip_vs_post_routing(unsigned int hooknum, | 490 | static unsigned int ip_vs_post_routing(unsigned int hooknum, |
528 | struct sk_buff **pskb, | 491 | struct sk_buff *skb, |
529 | const struct net_device *in, | 492 | const struct net_device *in, |
530 | const struct net_device *out, | 493 | const struct net_device *out, |
531 | int (*okfn)(struct sk_buff *)) | 494 | int (*okfn)(struct sk_buff *)) |
532 | { | 495 | { |
533 | if (!((*pskb)->ipvs_property)) | 496 | if (!skb->ipvs_property) |
534 | return NF_ACCEPT; | 497 | return NF_ACCEPT; |
535 | /* The packet was sent from IPVS, exit this chain */ | 498 | /* The packet was sent from IPVS, exit this chain */ |
536 | return NF_STOP; | 499 | return NF_STOP; |
@@ -541,13 +504,14 @@ __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) | |||
541 | return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); | 504 | return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); |
542 | } | 505 | } |
543 | 506 | ||
544 | static inline struct sk_buff * | 507 | static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) |
545 | ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) | ||
546 | { | 508 | { |
547 | skb = ip_defrag(skb, user); | 509 | int err = ip_defrag(skb, user); |
548 | if (skb) | 510 | |
511 | if (!err) | ||
549 | ip_send_check(ip_hdr(skb)); | 512 | ip_send_check(ip_hdr(skb)); |
550 | return skb; | 513 | |
514 | return err; | ||
551 | } | 515 | } |
552 | 516 | ||
553 | /* | 517 | /* |
@@ -605,9 +569,8 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
605 | * Currently handles error types - unreachable, quench, ttl exceeded. | 569 | * Currently handles error types - unreachable, quench, ttl exceeded. |
606 | * (Only used in VS/NAT) | 570 | * (Only used in VS/NAT) |
607 | */ | 571 | */ |
608 | static int ip_vs_out_icmp(struct sk_buff **pskb, int *related) | 572 | static int ip_vs_out_icmp(struct sk_buff *skb, int *related) |
609 | { | 573 | { |
610 | struct sk_buff *skb = *pskb; | ||
611 | struct iphdr *iph; | 574 | struct iphdr *iph; |
612 | struct icmphdr _icmph, *ic; | 575 | struct icmphdr _icmph, *ic; |
613 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 576 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
@@ -619,10 +582,8 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related) | |||
619 | 582 | ||
620 | /* reassemble IP fragments */ | 583 | /* reassemble IP fragments */ |
621 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 584 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
622 | skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT); | 585 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) |
623 | if (!skb) | ||
624 | return NF_STOLEN; | 586 | return NF_STOLEN; |
625 | *pskb = skb; | ||
626 | } | 587 | } |
627 | 588 | ||
628 | iph = ip_hdr(skb); | 589 | iph = ip_hdr(skb); |
@@ -690,9 +651,8 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related) | |||
690 | 651 | ||
691 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) | 652 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) |
692 | offset += 2 * sizeof(__u16); | 653 | offset += 2 * sizeof(__u16); |
693 | if (!ip_vs_make_skb_writable(pskb, offset)) | 654 | if (!skb_make_writable(skb, offset)) |
694 | goto out; | 655 | goto out; |
695 | skb = *pskb; | ||
696 | 656 | ||
697 | ip_vs_nat_icmp(skb, pp, cp, 1); | 657 | ip_vs_nat_icmp(skb, pp, cp, 1); |
698 | 658 | ||
@@ -724,11 +684,10 @@ static inline int is_tcp_reset(const struct sk_buff *skb) | |||
724 | * rewrite addresses of the packet and send it on its way... | 684 | * rewrite addresses of the packet and send it on its way... |
725 | */ | 685 | */ |
726 | static unsigned int | 686 | static unsigned int |
727 | ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | 687 | ip_vs_out(unsigned int hooknum, struct sk_buff *skb, |
728 | const struct net_device *in, const struct net_device *out, | 688 | const struct net_device *in, const struct net_device *out, |
729 | int (*okfn)(struct sk_buff *)) | 689 | int (*okfn)(struct sk_buff *)) |
730 | { | 690 | { |
731 | struct sk_buff *skb = *pskb; | ||
732 | struct iphdr *iph; | 691 | struct iphdr *iph; |
733 | struct ip_vs_protocol *pp; | 692 | struct ip_vs_protocol *pp; |
734 | struct ip_vs_conn *cp; | 693 | struct ip_vs_conn *cp; |
@@ -741,11 +700,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
741 | 700 | ||
742 | iph = ip_hdr(skb); | 701 | iph = ip_hdr(skb); |
743 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | 702 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { |
744 | int related, verdict = ip_vs_out_icmp(pskb, &related); | 703 | int related, verdict = ip_vs_out_icmp(skb, &related); |
745 | 704 | ||
746 | if (related) | 705 | if (related) |
747 | return verdict; | 706 | return verdict; |
748 | skb = *pskb; | ||
749 | iph = ip_hdr(skb); | 707 | iph = ip_hdr(skb); |
750 | } | 708 | } |
751 | 709 | ||
@@ -756,11 +714,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
756 | /* reassemble IP fragments */ | 714 | /* reassemble IP fragments */ |
757 | if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && | 715 | if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && |
758 | !pp->dont_defrag)) { | 716 | !pp->dont_defrag)) { |
759 | skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT); | 717 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) |
760 | if (!skb) | ||
761 | return NF_STOLEN; | 718 | return NF_STOLEN; |
762 | iph = ip_hdr(skb); | 719 | iph = ip_hdr(skb); |
763 | *pskb = skb; | ||
764 | } | 720 | } |
765 | 721 | ||
766 | ihl = iph->ihl << 2; | 722 | ihl = iph->ihl << 2; |
@@ -802,13 +758,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
802 | 758 | ||
803 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); | 759 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); |
804 | 760 | ||
805 | if (!ip_vs_make_skb_writable(pskb, ihl)) | 761 | if (!skb_make_writable(skb, ihl)) |
806 | goto drop; | 762 | goto drop; |
807 | 763 | ||
808 | /* mangle the packet */ | 764 | /* mangle the packet */ |
809 | if (pp->snat_handler && !pp->snat_handler(pskb, pp, cp)) | 765 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) |
810 | goto drop; | 766 | goto drop; |
811 | skb = *pskb; | ||
812 | ip_hdr(skb)->saddr = cp->vaddr; | 767 | ip_hdr(skb)->saddr = cp->vaddr; |
813 | ip_send_check(ip_hdr(skb)); | 768 | ip_send_check(ip_hdr(skb)); |
814 | 769 | ||
@@ -818,9 +773,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
818 | * if it came from this machine itself. So re-compute | 773 | * if it came from this machine itself. So re-compute |
819 | * the routing information. | 774 | * the routing information. |
820 | */ | 775 | */ |
821 | if (ip_route_me_harder(pskb, RTN_LOCAL) != 0) | 776 | if (ip_route_me_harder(skb, RTN_LOCAL) != 0) |
822 | goto drop; | 777 | goto drop; |
823 | skb = *pskb; | ||
824 | 778 | ||
825 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | 779 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); |
826 | 780 | ||
@@ -835,7 +789,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
835 | 789 | ||
836 | drop: | 790 | drop: |
837 | ip_vs_conn_put(cp); | 791 | ip_vs_conn_put(cp); |
838 | kfree_skb(*pskb); | 792 | kfree_skb(skb); |
839 | return NF_STOLEN; | 793 | return NF_STOLEN; |
840 | } | 794 | } |
841 | 795 | ||
@@ -847,9 +801,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
847 | * Currently handles error types - unreachable, quench, ttl exceeded. | 801 | * Currently handles error types - unreachable, quench, ttl exceeded. |
848 | */ | 802 | */ |
849 | static int | 803 | static int |
850 | ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) | 804 | ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) |
851 | { | 805 | { |
852 | struct sk_buff *skb = *pskb; | ||
853 | struct iphdr *iph; | 806 | struct iphdr *iph; |
854 | struct icmphdr _icmph, *ic; | 807 | struct icmphdr _icmph, *ic; |
855 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 808 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
@@ -861,12 +814,9 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) | |||
861 | 814 | ||
862 | /* reassemble IP fragments */ | 815 | /* reassemble IP fragments */ |
863 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 816 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
864 | skb = ip_vs_gather_frags(skb, | 817 | if (ip_vs_gather_frags(skb, hooknum == NF_IP_LOCAL_IN ? |
865 | hooknum == NF_IP_LOCAL_IN ? | 818 | IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD)) |
866 | IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD); | ||
867 | if (!skb) | ||
868 | return NF_STOLEN; | 819 | return NF_STOLEN; |
869 | *pskb = skb; | ||
870 | } | 820 | } |
871 | 821 | ||
872 | iph = ip_hdr(skb); | 822 | iph = ip_hdr(skb); |
@@ -945,11 +895,10 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) | |||
945 | * and send it on its way... | 895 | * and send it on its way... |
946 | */ | 896 | */ |
947 | static unsigned int | 897 | static unsigned int |
948 | ip_vs_in(unsigned int hooknum, struct sk_buff **pskb, | 898 | ip_vs_in(unsigned int hooknum, struct sk_buff *skb, |
949 | const struct net_device *in, const struct net_device *out, | 899 | const struct net_device *in, const struct net_device *out, |
950 | int (*okfn)(struct sk_buff *)) | 900 | int (*okfn)(struct sk_buff *)) |
951 | { | 901 | { |
952 | struct sk_buff *skb = *pskb; | ||
953 | struct iphdr *iph; | 902 | struct iphdr *iph; |
954 | struct ip_vs_protocol *pp; | 903 | struct ip_vs_protocol *pp; |
955 | struct ip_vs_conn *cp; | 904 | struct ip_vs_conn *cp; |
@@ -971,11 +920,10 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb, | |||
971 | 920 | ||
972 | iph = ip_hdr(skb); | 921 | iph = ip_hdr(skb); |
973 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | 922 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { |
974 | int related, verdict = ip_vs_in_icmp(pskb, &related, hooknum); | 923 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); |
975 | 924 | ||
976 | if (related) | 925 | if (related) |
977 | return verdict; | 926 | return verdict; |
978 | skb = *pskb; | ||
979 | iph = ip_hdr(skb); | 927 | iph = ip_hdr(skb); |
980 | } | 928 | } |
981 | 929 | ||
@@ -1056,16 +1004,16 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb, | |||
1056 | * and send them to ip_vs_in_icmp. | 1004 | * and send them to ip_vs_in_icmp. |
1057 | */ | 1005 | */ |
1058 | static unsigned int | 1006 | static unsigned int |
1059 | ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff **pskb, | 1007 | ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, |
1060 | const struct net_device *in, const struct net_device *out, | 1008 | const struct net_device *in, const struct net_device *out, |
1061 | int (*okfn)(struct sk_buff *)) | 1009 | int (*okfn)(struct sk_buff *)) |
1062 | { | 1010 | { |
1063 | int r; | 1011 | int r; |
1064 | 1012 | ||
1065 | if (ip_hdr(*pskb)->protocol != IPPROTO_ICMP) | 1013 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
1066 | return NF_ACCEPT; | 1014 | return NF_ACCEPT; |
1067 | 1015 | ||
1068 | return ip_vs_in_icmp(pskb, &r, hooknum); | 1016 | return ip_vs_in_icmp(skb, &r, hooknum); |
1069 | } | 1017 | } |
1070 | 1018 | ||
1071 | 1019 | ||
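Every ip_vs_core.c hunk above follows the same mechanical pattern: hooks and helpers that used to take struct sk_buff **pskb (so a reallocated or defragmented skb could be handed back to the caller) now take struct sk_buff *skb, and ip_vs_gather_frags() reports its outcome through an int instead of returning a possibly different skb. A minimal before/after sketch of that calling convention, reduced to the defrag step only; the hook names are illustrative, the signatures and calls are taken from the hunks above:

	/* old convention: the helper may hand back a different skb,
	 * so the hook publishes it through *pskb */
	static unsigned int example_hook_old(unsigned int hooknum,
					     struct sk_buff **pskb,
					     const struct net_device *in,
					     const struct net_device *out,
					     int (*okfn)(struct sk_buff *))
	{
		struct sk_buff *skb = *pskb;

		skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT);
		if (!skb)
			return NF_STOLEN;	/* fragment queued, skb consumed */
		*pskb = skb;			/* hand the (new) skb back */
		return NF_ACCEPT;
	}

	/* new convention: the skb is only ever modified in place */
	static unsigned int example_hook_new(unsigned int hooknum,
					     struct sk_buff *skb,
					     const struct net_device *in,
					     const struct net_device *out,
					     int (*okfn)(struct sk_buff *))
	{
		if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
			return NF_STOLEN;	/* fragment queued, skb consumed */
		return NF_ACCEPT;
	}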
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index 344ddbbdc756..59aa166b7678 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/skbuff.h> | 30 | #include <linux/skbuff.h> |
31 | #include <linux/in.h> | 31 | #include <linux/in.h> |
32 | #include <linux/ip.h> | 32 | #include <linux/ip.h> |
33 | #include <linux/netfilter.h> | ||
33 | #include <net/protocol.h> | 34 | #include <net/protocol.h> |
34 | #include <net/tcp.h> | 35 | #include <net/tcp.h> |
35 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
@@ -135,7 +136,7 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit, | |||
135 | * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number. | 136 | * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number. |
136 | */ | 137 | */ |
137 | static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | 138 | static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, |
138 | struct sk_buff **pskb, int *diff) | 139 | struct sk_buff *skb, int *diff) |
139 | { | 140 | { |
140 | struct iphdr *iph; | 141 | struct iphdr *iph; |
141 | struct tcphdr *th; | 142 | struct tcphdr *th; |
@@ -155,14 +156,14 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
155 | return 1; | 156 | return 1; |
156 | 157 | ||
157 | /* Linear packets are much easier to deal with. */ | 158 | /* Linear packets are much easier to deal with. */ |
158 | if (!ip_vs_make_skb_writable(pskb, (*pskb)->len)) | 159 | if (!skb_make_writable(skb, skb->len)) |
159 | return 0; | 160 | return 0; |
160 | 161 | ||
161 | if (cp->app_data == &ip_vs_ftp_pasv) { | 162 | if (cp->app_data == &ip_vs_ftp_pasv) { |
162 | iph = ip_hdr(*pskb); | 163 | iph = ip_hdr(skb); |
163 | th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); | 164 | th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); |
164 | data = (char *)th + (th->doff << 2); | 165 | data = (char *)th + (th->doff << 2); |
165 | data_limit = skb_tail_pointer(*pskb); | 166 | data_limit = skb_tail_pointer(skb); |
166 | 167 | ||
167 | if (ip_vs_ftp_get_addrport(data, data_limit, | 168 | if (ip_vs_ftp_get_addrport(data, data_limit, |
168 | SERVER_STRING, | 169 | SERVER_STRING, |
@@ -213,7 +214,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
213 | memcpy(start, buf, buf_len); | 214 | memcpy(start, buf, buf_len); |
214 | ret = 1; | 215 | ret = 1; |
215 | } else { | 216 | } else { |
216 | ret = !ip_vs_skb_replace(*pskb, GFP_ATOMIC, start, | 217 | ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start, |
217 | end-start, buf, buf_len); | 218 | end-start, buf, buf_len); |
218 | } | 219 | } |
219 | 220 | ||
@@ -238,7 +239,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
238 | * the client. | 239 | * the client. |
239 | */ | 240 | */ |
240 | static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | 241 | static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, |
241 | struct sk_buff **pskb, int *diff) | 242 | struct sk_buff *skb, int *diff) |
242 | { | 243 | { |
243 | struct iphdr *iph; | 244 | struct iphdr *iph; |
244 | struct tcphdr *th; | 245 | struct tcphdr *th; |
@@ -256,20 +257,20 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
256 | return 1; | 257 | return 1; |
257 | 258 | ||
258 | /* Linear packets are much easier to deal with. */ | 259 | /* Linear packets are much easier to deal with. */ |
259 | if (!ip_vs_make_skb_writable(pskb, (*pskb)->len)) | 260 | if (!skb_make_writable(skb, skb->len)) |
260 | return 0; | 261 | return 0; |
261 | 262 | ||
262 | /* | 263 | /* |
263 | * Detecting whether it is passive | 264 | * Detecting whether it is passive |
264 | */ | 265 | */ |
265 | iph = ip_hdr(*pskb); | 266 | iph = ip_hdr(skb); |
266 | th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); | 267 | th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); |
267 | 268 | ||
268 | /* Since there may be OPTIONS in the TCP packet and the HLEN is | 269 | /* Since there may be OPTIONS in the TCP packet and the HLEN is |
269 | the length of the header in 32-bit multiples, it is accurate | 270 | the length of the header in 32-bit multiples, it is accurate |
270 | to calculate data address by th+HLEN*4 */ | 271 | to calculate data address by th+HLEN*4 */ |
271 | data = data_start = (char *)th + (th->doff << 2); | 272 | data = data_start = (char *)th + (th->doff << 2); |
272 | data_limit = skb_tail_pointer(*pskb); | 273 | data_limit = skb_tail_pointer(skb); |
273 | 274 | ||
274 | while (data <= data_limit - 6) { | 275 | while (data <= data_limit - 6) { |
275 | if (strnicmp(data, "PASV\r\n", 6) == 0) { | 276 | if (strnicmp(data, "PASV\r\n", 6) == 0) { |
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index e65577a77006..12dc0d640b6d 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/tcp.h> /* for tcphdr */ | 20 | #include <linux/tcp.h> /* for tcphdr */ |
21 | #include <net/ip.h> | 21 | #include <net/ip.h> |
22 | #include <net/tcp.h> /* for csum_tcpudp_magic */ | 22 | #include <net/tcp.h> /* for csum_tcpudp_magic */ |
23 | #include <linux/netfilter.h> | ||
23 | #include <linux/netfilter_ipv4.h> | 24 | #include <linux/netfilter_ipv4.h> |
24 | 25 | ||
25 | #include <net/ip_vs.h> | 26 | #include <net/ip_vs.h> |
@@ -122,27 +123,27 @@ tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, | |||
122 | 123 | ||
123 | 124 | ||
124 | static int | 125 | static int |
125 | tcp_snat_handler(struct sk_buff **pskb, | 126 | tcp_snat_handler(struct sk_buff *skb, |
126 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 127 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
127 | { | 128 | { |
128 | struct tcphdr *tcph; | 129 | struct tcphdr *tcph; |
129 | const unsigned int tcphoff = ip_hdrlen(*pskb); | 130 | const unsigned int tcphoff = ip_hdrlen(skb); |
130 | 131 | ||
131 | /* csum_check requires unshared skb */ | 132 | /* csum_check requires unshared skb */ |
132 | if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph))) | 133 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
133 | return 0; | 134 | return 0; |
134 | 135 | ||
135 | if (unlikely(cp->app != NULL)) { | 136 | if (unlikely(cp->app != NULL)) { |
136 | /* Some checks before mangling */ | 137 | /* Some checks before mangling */ |
137 | if (pp->csum_check && !pp->csum_check(*pskb, pp)) | 138 | if (pp->csum_check && !pp->csum_check(skb, pp)) |
138 | return 0; | 139 | return 0; |
139 | 140 | ||
140 | /* Call application helper if needed */ | 141 | /* Call application helper if needed */ |
141 | if (!ip_vs_app_pkt_out(cp, pskb)) | 142 | if (!ip_vs_app_pkt_out(cp, skb)) |
142 | return 0; | 143 | return 0; |
143 | } | 144 | } |
144 | 145 | ||
145 | tcph = (void *)ip_hdr(*pskb) + tcphoff; | 146 | tcph = (void *)ip_hdr(skb) + tcphoff; |
146 | tcph->source = cp->vport; | 147 | tcph->source = cp->vport; |
147 | 148 | ||
148 | /* Adjust TCP checksums */ | 149 | /* Adjust TCP checksums */ |
@@ -150,17 +151,15 @@ tcp_snat_handler(struct sk_buff **pskb, | |||
150 | /* Only port and addr are changed, do fast csum update */ | 151 | /* Only port and addr are changed, do fast csum update */ |
151 | tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, | 152 | tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, |
152 | cp->dport, cp->vport); | 153 | cp->dport, cp->vport); |
153 | if ((*pskb)->ip_summed == CHECKSUM_COMPLETE) | 154 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
154 | (*pskb)->ip_summed = CHECKSUM_NONE; | 155 | skb->ip_summed = CHECKSUM_NONE; |
155 | } else { | 156 | } else { |
156 | /* full checksum calculation */ | 157 | /* full checksum calculation */ |
157 | tcph->check = 0; | 158 | tcph->check = 0; |
158 | (*pskb)->csum = skb_checksum(*pskb, tcphoff, | 159 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
159 | (*pskb)->len - tcphoff, 0); | ||
160 | tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 160 | tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, |
161 | (*pskb)->len - tcphoff, | 161 | skb->len - tcphoff, |
162 | cp->protocol, | 162 | cp->protocol, skb->csum); |
163 | (*pskb)->csum); | ||
164 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 163 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
165 | pp->name, tcph->check, | 164 | pp->name, tcph->check, |
166 | (char*)&(tcph->check) - (char*)tcph); | 165 | (char*)&(tcph->check) - (char*)tcph); |
@@ -170,30 +169,30 @@ tcp_snat_handler(struct sk_buff **pskb, | |||
170 | 169 | ||
171 | 170 | ||
172 | static int | 171 | static int |
173 | tcp_dnat_handler(struct sk_buff **pskb, | 172 | tcp_dnat_handler(struct sk_buff *skb, |
174 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 173 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
175 | { | 174 | { |
176 | struct tcphdr *tcph; | 175 | struct tcphdr *tcph; |
177 | const unsigned int tcphoff = ip_hdrlen(*pskb); | 176 | const unsigned int tcphoff = ip_hdrlen(skb); |
178 | 177 | ||
179 | /* csum_check requires unshared skb */ | 178 | /* csum_check requires unshared skb */ |
180 | if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph))) | 179 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
181 | return 0; | 180 | return 0; |
182 | 181 | ||
183 | if (unlikely(cp->app != NULL)) { | 182 | if (unlikely(cp->app != NULL)) { |
184 | /* Some checks before mangling */ | 183 | /* Some checks before mangling */ |
185 | if (pp->csum_check && !pp->csum_check(*pskb, pp)) | 184 | if (pp->csum_check && !pp->csum_check(skb, pp)) |
186 | return 0; | 185 | return 0; |
187 | 186 | ||
188 | /* | 187 | /* |
189 | * Attempt ip_vs_app call. | 188 | * Attempt ip_vs_app call. |
190 | * It will fix ip_vs_conn and iph ack_seq stuff | 189 | * It will fix ip_vs_conn and iph ack_seq stuff |
191 | */ | 190 | */ |
192 | if (!ip_vs_app_pkt_in(cp, pskb)) | 191 | if (!ip_vs_app_pkt_in(cp, skb)) |
193 | return 0; | 192 | return 0; |
194 | } | 193 | } |
195 | 194 | ||
196 | tcph = (void *)ip_hdr(*pskb) + tcphoff; | 195 | tcph = (void *)ip_hdr(skb) + tcphoff; |
197 | tcph->dest = cp->dport; | 196 | tcph->dest = cp->dport; |
198 | 197 | ||
199 | /* | 198 | /* |
@@ -203,18 +202,16 @@ tcp_dnat_handler(struct sk_buff **pskb, | |||
203 | /* Only port and addr are changed, do fast csum update */ | 202 | /* Only port and addr are changed, do fast csum update */ |
204 | tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, | 203 | tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, |
205 | cp->vport, cp->dport); | 204 | cp->vport, cp->dport); |
206 | if ((*pskb)->ip_summed == CHECKSUM_COMPLETE) | 205 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
207 | (*pskb)->ip_summed = CHECKSUM_NONE; | 206 | skb->ip_summed = CHECKSUM_NONE; |
208 | } else { | 207 | } else { |
209 | /* full checksum calculation */ | 208 | /* full checksum calculation */ |
210 | tcph->check = 0; | 209 | tcph->check = 0; |
211 | (*pskb)->csum = skb_checksum(*pskb, tcphoff, | 210 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
212 | (*pskb)->len - tcphoff, 0); | ||
213 | tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 211 | tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, |
214 | (*pskb)->len - tcphoff, | 212 | skb->len - tcphoff, |
215 | cp->protocol, | 213 | cp->protocol, skb->csum); |
216 | (*pskb)->csum); | 214 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
217 | (*pskb)->ip_summed = CHECKSUM_UNNECESSARY; | ||
218 | } | 215 | } |
219 | return 1; | 216 | return 1; |
220 | } | 217 | } |
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 8ee5fe6a101d..1fa7b330b9ac 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/in.h> | 18 | #include <linux/in.h> |
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/netfilter.h> | ||
21 | #include <linux/netfilter_ipv4.h> | 22 | #include <linux/netfilter_ipv4.h> |
22 | #include <linux/udp.h> | 23 | #include <linux/udp.h> |
23 | 24 | ||
@@ -129,29 +130,29 @@ udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, | |||
129 | } | 130 | } |
130 | 131 | ||
131 | static int | 132 | static int |
132 | udp_snat_handler(struct sk_buff **pskb, | 133 | udp_snat_handler(struct sk_buff *skb, |
133 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 134 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
134 | { | 135 | { |
135 | struct udphdr *udph; | 136 | struct udphdr *udph; |
136 | const unsigned int udphoff = ip_hdrlen(*pskb); | 137 | const unsigned int udphoff = ip_hdrlen(skb); |
137 | 138 | ||
138 | /* csum_check requires unshared skb */ | 139 | /* csum_check requires unshared skb */ |
139 | if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph))) | 140 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
140 | return 0; | 141 | return 0; |
141 | 142 | ||
142 | if (unlikely(cp->app != NULL)) { | 143 | if (unlikely(cp->app != NULL)) { |
143 | /* Some checks before mangling */ | 144 | /* Some checks before mangling */ |
144 | if (pp->csum_check && !pp->csum_check(*pskb, pp)) | 145 | if (pp->csum_check && !pp->csum_check(skb, pp)) |
145 | return 0; | 146 | return 0; |
146 | 147 | ||
147 | /* | 148 | /* |
148 | * Call application helper if needed | 149 | * Call application helper if needed |
149 | */ | 150 | */ |
150 | if (!ip_vs_app_pkt_out(cp, pskb)) | 151 | if (!ip_vs_app_pkt_out(cp, skb)) |
151 | return 0; | 152 | return 0; |
152 | } | 153 | } |
153 | 154 | ||
154 | udph = (void *)ip_hdr(*pskb) + udphoff; | 155 | udph = (void *)ip_hdr(skb) + udphoff; |
155 | udph->source = cp->vport; | 156 | udph->source = cp->vport; |
156 | 157 | ||
157 | /* | 158 | /* |
@@ -161,17 +162,15 @@ udp_snat_handler(struct sk_buff **pskb, | |||
161 | /* Only port and addr are changed, do fast csum update */ | 162 | /* Only port and addr are changed, do fast csum update */ |
162 | udp_fast_csum_update(udph, cp->daddr, cp->vaddr, | 163 | udp_fast_csum_update(udph, cp->daddr, cp->vaddr, |
163 | cp->dport, cp->vport); | 164 | cp->dport, cp->vport); |
164 | if ((*pskb)->ip_summed == CHECKSUM_COMPLETE) | 165 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
165 | (*pskb)->ip_summed = CHECKSUM_NONE; | 166 | skb->ip_summed = CHECKSUM_NONE; |
166 | } else { | 167 | } else { |
167 | /* full checksum calculation */ | 168 | /* full checksum calculation */ |
168 | udph->check = 0; | 169 | udph->check = 0; |
169 | (*pskb)->csum = skb_checksum(*pskb, udphoff, | 170 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
170 | (*pskb)->len - udphoff, 0); | ||
171 | udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 171 | udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, |
172 | (*pskb)->len - udphoff, | 172 | skb->len - udphoff, |
173 | cp->protocol, | 173 | cp->protocol, skb->csum); |
174 | (*pskb)->csum); | ||
175 | if (udph->check == 0) | 174 | if (udph->check == 0) |
176 | udph->check = CSUM_MANGLED_0; | 175 | udph->check = CSUM_MANGLED_0; |
177 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 176 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
@@ -183,30 +182,30 @@ udp_snat_handler(struct sk_buff **pskb, | |||
183 | 182 | ||
184 | 183 | ||
185 | static int | 184 | static int |
186 | udp_dnat_handler(struct sk_buff **pskb, | 185 | udp_dnat_handler(struct sk_buff *skb, |
187 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 186 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
188 | { | 187 | { |
189 | struct udphdr *udph; | 188 | struct udphdr *udph; |
190 | unsigned int udphoff = ip_hdrlen(*pskb); | 189 | unsigned int udphoff = ip_hdrlen(skb); |
191 | 190 | ||
192 | /* csum_check requires unshared skb */ | 191 | /* csum_check requires unshared skb */ |
193 | if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph))) | 192 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
194 | return 0; | 193 | return 0; |
195 | 194 | ||
196 | if (unlikely(cp->app != NULL)) { | 195 | if (unlikely(cp->app != NULL)) { |
197 | /* Some checks before mangling */ | 196 | /* Some checks before mangling */ |
198 | if (pp->csum_check && !pp->csum_check(*pskb, pp)) | 197 | if (pp->csum_check && !pp->csum_check(skb, pp)) |
199 | return 0; | 198 | return 0; |
200 | 199 | ||
201 | /* | 200 | /* |
202 | * Attempt ip_vs_app call. | 201 | * Attempt ip_vs_app call. |
203 | * It will fix ip_vs_conn | 202 | * It will fix ip_vs_conn |
204 | */ | 203 | */ |
205 | if (!ip_vs_app_pkt_in(cp, pskb)) | 204 | if (!ip_vs_app_pkt_in(cp, skb)) |
206 | return 0; | 205 | return 0; |
207 | } | 206 | } |
208 | 207 | ||
209 | udph = (void *)ip_hdr(*pskb) + udphoff; | 208 | udph = (void *)ip_hdr(skb) + udphoff; |
210 | udph->dest = cp->dport; | 209 | udph->dest = cp->dport; |
211 | 210 | ||
212 | /* | 211 | /* |
@@ -216,20 +215,18 @@ udp_dnat_handler(struct sk_buff **pskb, | |||
216 | /* Only port and addr are changed, do fast csum update */ | 215 | /* Only port and addr are changed, do fast csum update */ |
217 | udp_fast_csum_update(udph, cp->vaddr, cp->daddr, | 216 | udp_fast_csum_update(udph, cp->vaddr, cp->daddr, |
218 | cp->vport, cp->dport); | 217 | cp->vport, cp->dport); |
219 | if ((*pskb)->ip_summed == CHECKSUM_COMPLETE) | 218 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
220 | (*pskb)->ip_summed = CHECKSUM_NONE; | 219 | skb->ip_summed = CHECKSUM_NONE; |
221 | } else { | 220 | } else { |
222 | /* full checksum calculation */ | 221 | /* full checksum calculation */ |
223 | udph->check = 0; | 222 | udph->check = 0; |
224 | (*pskb)->csum = skb_checksum(*pskb, udphoff, | 223 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
225 | (*pskb)->len - udphoff, 0); | ||
226 | udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 224 | udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, |
227 | (*pskb)->len - udphoff, | 225 | skb->len - udphoff, |
228 | cp->protocol, | 226 | cp->protocol, skb->csum); |
229 | (*pskb)->csum); | ||
230 | if (udph->check == 0) | 227 | if (udph->check == 0) |
231 | udph->check = CSUM_MANGLED_0; | 228 | udph->check = CSUM_MANGLED_0; |
232 | (*pskb)->ip_summed = CHECKSUM_UNNECESSARY; | 229 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
233 | } | 230 | } |
234 | return 1; | 231 | return 1; |
235 | } | 232 | } |
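The TCP and UDP SNAT/DNAT handlers above all end in the same two-branch checksum idiom: when only the address and port changed, the checksum is patched incrementally with *_fast_csum_update(); when an application helper may have rewritten the payload, it is recomputed from scratch over the pseudo-header. A condensed sketch of the recompute branch as it reads after the conversion; the helper name is hypothetical, the body is the code duplicated in the udp_snat_handler and udp_dnat_handler hunks:

	/* full UDP checksum recomputation over skb starting at udphoff;
	 * hypothetical consolidation of the branch duplicated above */
	static void udp_recalc_check(struct sk_buff *skb, struct udphdr *udph,
				     unsigned int udphoff,
				     __be32 saddr, __be32 daddr, __u16 proto)
	{
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
		udph->check = csum_tcpudp_magic(saddr, daddr,
						skb->len - udphoff,
						proto, skb->csum);
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;	/* 0 means "no checksum" on the wire */
	}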
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index 666e080a74a3..d0a92dec1050 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
@@ -253,7 +253,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
253 | } | 253 | } |
254 | 254 | ||
255 | /* copy-on-write the packet before mangling it */ | 255 | /* copy-on-write the packet before mangling it */ |
256 | if (!ip_vs_make_skb_writable(&skb, sizeof(struct iphdr))) | 256 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
257 | goto tx_error_put; | 257 | goto tx_error_put; |
258 | 258 | ||
259 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 259 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) |
@@ -264,7 +264,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
264 | skb->dst = &rt->u.dst; | 264 | skb->dst = &rt->u.dst; |
265 | 265 | ||
266 | /* mangle the packet */ | 266 | /* mangle the packet */ |
267 | if (pp->dnat_handler && !pp->dnat_handler(&skb, pp, cp)) | 267 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
268 | goto tx_error; | 268 | goto tx_error; |
269 | ip_hdr(skb)->daddr = cp->daddr; | 269 | ip_hdr(skb)->daddr = cp->daddr; |
270 | ip_send_check(ip_hdr(skb)); | 270 | ip_send_check(ip_hdr(skb)); |
@@ -529,7 +529,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
529 | } | 529 | } |
530 | 530 | ||
531 | /* copy-on-write the packet before mangling it */ | 531 | /* copy-on-write the packet before mangling it */ |
532 | if (!ip_vs_make_skb_writable(&skb, offset)) | 532 | if (!skb_make_writable(skb, offset)) |
533 | goto tx_error_put; | 533 | goto tx_error_put; |
534 | 534 | ||
535 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 535 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b44192924f95..5539debf4973 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -3,14 +3,15 @@ | |||
3 | #include <linux/netfilter.h> | 3 | #include <linux/netfilter.h> |
4 | #include <linux/netfilter_ipv4.h> | 4 | #include <linux/netfilter_ipv4.h> |
5 | #include <linux/ip.h> | 5 | #include <linux/ip.h> |
6 | #include <linux/skbuff.h> | ||
6 | #include <net/route.h> | 7 | #include <net/route.h> |
7 | #include <net/xfrm.h> | 8 | #include <net/xfrm.h> |
8 | #include <net/ip.h> | 9 | #include <net/ip.h> |
9 | 10 | ||
10 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ | 11 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ |
11 | int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | 12 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) |
12 | { | 13 | { |
13 | const struct iphdr *iph = ip_hdr(*pskb); | 14 | const struct iphdr *iph = ip_hdr(skb); |
14 | struct rtable *rt; | 15 | struct rtable *rt; |
15 | struct flowi fl = {}; | 16 | struct flowi fl = {}; |
16 | struct dst_entry *odst; | 17 | struct dst_entry *odst; |
@@ -29,14 +30,14 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
29 | if (type == RTN_LOCAL) | 30 | if (type == RTN_LOCAL) |
30 | fl.nl_u.ip4_u.saddr = iph->saddr; | 31 | fl.nl_u.ip4_u.saddr = iph->saddr; |
31 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); | 32 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); |
32 | fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; | 33 | fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; |
33 | fl.mark = (*pskb)->mark; | 34 | fl.mark = skb->mark; |
34 | if (ip_route_output_key(&rt, &fl) != 0) | 35 | if (ip_route_output_key(&rt, &fl) != 0) |
35 | return -1; | 36 | return -1; |
36 | 37 | ||
37 | /* Drop old route. */ | 38 | /* Drop old route. */ |
38 | dst_release((*pskb)->dst); | 39 | dst_release(skb->dst); |
39 | (*pskb)->dst = &rt->u.dst; | 40 | skb->dst = &rt->u.dst; |
40 | } else { | 41 | } else { |
41 | /* non-local src, find valid iif to satisfy | 42 | /* non-local src, find valid iif to satisfy |
42 | * rp-filter when calling ip_route_input. */ | 43 | * rp-filter when calling ip_route_input. */ |
@@ -44,8 +45,8 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
44 | if (ip_route_output_key(&rt, &fl) != 0) | 45 | if (ip_route_output_key(&rt, &fl) != 0) |
45 | return -1; | 46 | return -1; |
46 | 47 | ||
47 | odst = (*pskb)->dst; | 48 | odst = skb->dst; |
48 | if (ip_route_input(*pskb, iph->daddr, iph->saddr, | 49 | if (ip_route_input(skb, iph->daddr, iph->saddr, |
49 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { | 50 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { |
50 | dst_release(&rt->u.dst); | 51 | dst_release(&rt->u.dst); |
51 | return -1; | 52 | return -1; |
@@ -54,70 +55,54 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
54 | dst_release(odst); | 55 | dst_release(odst); |
55 | } | 56 | } |
56 | 57 | ||
57 | if ((*pskb)->dst->error) | 58 | if (skb->dst->error) |
58 | return -1; | 59 | return -1; |
59 | 60 | ||
60 | #ifdef CONFIG_XFRM | 61 | #ifdef CONFIG_XFRM |
61 | if (!(IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) && | 62 | if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && |
62 | xfrm_decode_session(*pskb, &fl, AF_INET) == 0) | 63 | xfrm_decode_session(skb, &fl, AF_INET) == 0) |
63 | if (xfrm_lookup(&(*pskb)->dst, &fl, (*pskb)->sk, 0)) | 64 | if (xfrm_lookup(&skb->dst, &fl, skb->sk, 0)) |
64 | return -1; | 65 | return -1; |
65 | #endif | 66 | #endif |
66 | 67 | ||
67 | /* Change in oif may mean change in hh_len. */ | 68 | /* Change in oif may mean change in hh_len. */ |
68 | hh_len = (*pskb)->dst->dev->hard_header_len; | 69 | hh_len = skb->dst->dev->hard_header_len; |
69 | if (skb_headroom(*pskb) < hh_len) { | 70 | if (skb_headroom(skb) < hh_len && |
70 | struct sk_buff *nskb; | 71 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
71 | 72 | return -1; | |
72 | nskb = skb_realloc_headroom(*pskb, hh_len); | ||
73 | if (!nskb) | ||
74 | return -1; | ||
75 | if ((*pskb)->sk) | ||
76 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
77 | kfree_skb(*pskb); | ||
78 | *pskb = nskb; | ||
79 | } | ||
80 | 73 | ||
81 | return 0; | 74 | return 0; |
82 | } | 75 | } |
83 | EXPORT_SYMBOL(ip_route_me_harder); | 76 | EXPORT_SYMBOL(ip_route_me_harder); |
84 | 77 | ||
85 | #ifdef CONFIG_XFRM | 78 | #ifdef CONFIG_XFRM |
86 | int ip_xfrm_me_harder(struct sk_buff **pskb) | 79 | int ip_xfrm_me_harder(struct sk_buff *skb) |
87 | { | 80 | { |
88 | struct flowi fl; | 81 | struct flowi fl; |
89 | unsigned int hh_len; | 82 | unsigned int hh_len; |
90 | struct dst_entry *dst; | 83 | struct dst_entry *dst; |
91 | 84 | ||
92 | if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) | 85 | if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) |
93 | return 0; | 86 | return 0; |
94 | if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0) | 87 | if (xfrm_decode_session(skb, &fl, AF_INET) < 0) |
95 | return -1; | 88 | return -1; |
96 | 89 | ||
97 | dst = (*pskb)->dst; | 90 | dst = skb->dst; |
98 | if (dst->xfrm) | 91 | if (dst->xfrm) |
99 | dst = ((struct xfrm_dst *)dst)->route; | 92 | dst = ((struct xfrm_dst *)dst)->route; |
100 | dst_hold(dst); | 93 | dst_hold(dst); |
101 | 94 | ||
102 | if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0) | 95 | if (xfrm_lookup(&dst, &fl, skb->sk, 0) < 0) |
103 | return -1; | 96 | return -1; |
104 | 97 | ||
105 | dst_release((*pskb)->dst); | 98 | dst_release(skb->dst); |
106 | (*pskb)->dst = dst; | 99 | skb->dst = dst; |
107 | 100 | ||
108 | /* Change in oif may mean change in hh_len. */ | 101 | /* Change in oif may mean change in hh_len. */ |
109 | hh_len = (*pskb)->dst->dev->hard_header_len; | 102 | hh_len = skb->dst->dev->hard_header_len; |
110 | if (skb_headroom(*pskb) < hh_len) { | 103 | if (skb_headroom(skb) < hh_len && |
111 | struct sk_buff *nskb; | 104 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
112 | 105 | return -1; | |
113 | nskb = skb_realloc_headroom(*pskb, hh_len); | ||
114 | if (!nskb) | ||
115 | return -1; | ||
116 | if ((*pskb)->sk) | ||
117 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
118 | kfree_skb(*pskb); | ||
119 | *pskb = nskb; | ||
120 | } | ||
121 | return 0; | 106 | return 0; |
122 | } | 107 | } |
123 | EXPORT_SYMBOL(ip_xfrm_me_harder); | 108 | EXPORT_SYMBOL(ip_xfrm_me_harder); |
@@ -150,17 +135,17 @@ static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info) | |||
150 | } | 135 | } |
151 | } | 136 | } |
152 | 137 | ||
153 | static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) | 138 | static int nf_ip_reroute(struct sk_buff *skb, const struct nf_info *info) |
154 | { | 139 | { |
155 | const struct ip_rt_info *rt_info = nf_info_reroute(info); | 140 | const struct ip_rt_info *rt_info = nf_info_reroute(info); |
156 | 141 | ||
157 | if (info->hook == NF_IP_LOCAL_OUT) { | 142 | if (info->hook == NF_IP_LOCAL_OUT) { |
158 | const struct iphdr *iph = ip_hdr(*pskb); | 143 | const struct iphdr *iph = ip_hdr(skb); |
159 | 144 | ||
160 | if (!(iph->tos == rt_info->tos | 145 | if (!(iph->tos == rt_info->tos |
161 | && iph->daddr == rt_info->daddr | 146 | && iph->daddr == rt_info->daddr |
162 | && iph->saddr == rt_info->saddr)) | 147 | && iph->saddr == rt_info->saddr)) |
163 | return ip_route_me_harder(pskb, RTN_UNSPEC); | 148 | return ip_route_me_harder(skb, RTN_UNSPEC); |
164 | } | 149 | } |
165 | return 0; | 150 | return 0; |
166 | } | 151 | } |
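In ip_route_me_harder() and ip_xfrm_me_harder() the double pointer was only needed for the headroom fix-up at the end: the old code allocated a new skb with skb_realloc_headroom() and swapped it into *pskb. With a plain skb pointer that is no longer possible, so both functions now grow the headroom of the existing skb in place with pskb_expand_head(), as the hunks show. The idiom, factored into a helper here purely for illustration:

	/* ensure at least hh_len bytes of headroom without replacing skb;
	 * illustrative helper, the patch open-codes this in both callers */
	static int ensure_headroom(struct sk_buff *skb, unsigned int hh_len)
	{
		if (skb_headroom(skb) >= hh_len)
			return 0;
		/* pskb_expand_head() reallocates the header area but keeps
		 * the same struct sk_buff, so plain-pointer callers stay valid */
		if (pskb_expand_head(skb, hh_len - skb_headroom(skb),
				     0, GFP_ATOMIC))
			return -1;
		return 0;
	}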
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 29114a9ccd1d..2909c92ecd99 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -197,7 +197,7 @@ static inline int arp_checkentry(const struct arpt_arp *arp) | |||
197 | return 1; | 197 | return 1; |
198 | } | 198 | } |
199 | 199 | ||
200 | static unsigned int arpt_error(struct sk_buff **pskb, | 200 | static unsigned int arpt_error(struct sk_buff *skb, |
201 | const struct net_device *in, | 201 | const struct net_device *in, |
202 | const struct net_device *out, | 202 | const struct net_device *out, |
203 | unsigned int hooknum, | 203 | unsigned int hooknum, |
@@ -215,7 +215,7 @@ static inline struct arpt_entry *get_entry(void *base, unsigned int offset) | |||
215 | return (struct arpt_entry *)(base + offset); | 215 | return (struct arpt_entry *)(base + offset); |
216 | } | 216 | } |
217 | 217 | ||
218 | unsigned int arpt_do_table(struct sk_buff **pskb, | 218 | unsigned int arpt_do_table(struct sk_buff *skb, |
219 | unsigned int hook, | 219 | unsigned int hook, |
220 | const struct net_device *in, | 220 | const struct net_device *in, |
221 | const struct net_device *out, | 221 | const struct net_device *out, |
@@ -231,9 +231,9 @@ unsigned int arpt_do_table(struct sk_buff **pskb, | |||
231 | struct xt_table_info *private; | 231 | struct xt_table_info *private; |
232 | 232 | ||
233 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ | 233 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ |
234 | if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + | 234 | if (!pskb_may_pull(skb, (sizeof(struct arphdr) + |
235 | (2 * (*pskb)->dev->addr_len) + | 235 | (2 * skb->dev->addr_len) + |
236 | (2 * sizeof(u32))))) | 236 | (2 * sizeof(u32))))) |
237 | return NF_DROP; | 237 | return NF_DROP; |
238 | 238 | ||
239 | indev = in ? in->name : nulldevname; | 239 | indev = in ? in->name : nulldevname; |
@@ -245,14 +245,14 @@ unsigned int arpt_do_table(struct sk_buff **pskb, | |||
245 | e = get_entry(table_base, private->hook_entry[hook]); | 245 | e = get_entry(table_base, private->hook_entry[hook]); |
246 | back = get_entry(table_base, private->underflow[hook]); | 246 | back = get_entry(table_base, private->underflow[hook]); |
247 | 247 | ||
248 | arp = arp_hdr(*pskb); | 248 | arp = arp_hdr(skb); |
249 | do { | 249 | do { |
250 | if (arp_packet_match(arp, (*pskb)->dev, indev, outdev, &e->arp)) { | 250 | if (arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { |
251 | struct arpt_entry_target *t; | 251 | struct arpt_entry_target *t; |
252 | int hdr_len; | 252 | int hdr_len; |
253 | 253 | ||
254 | hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + | 254 | hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + |
255 | (2 * (*pskb)->dev->addr_len); | 255 | (2 * skb->dev->addr_len); |
256 | ADD_COUNTER(e->counters, hdr_len, 1); | 256 | ADD_COUNTER(e->counters, hdr_len, 1); |
257 | 257 | ||
258 | t = arpt_get_target(e); | 258 | t = arpt_get_target(e); |
@@ -290,14 +290,14 @@ unsigned int arpt_do_table(struct sk_buff **pskb, | |||
290 | /* Targets which reenter must return | 290 | /* Targets which reenter must return |
291 | * abs. verdicts | 291 | * abs. verdicts |
292 | */ | 292 | */ |
293 | verdict = t->u.kernel.target->target(pskb, | 293 | verdict = t->u.kernel.target->target(skb, |
294 | in, out, | 294 | in, out, |
295 | hook, | 295 | hook, |
296 | t->u.kernel.target, | 296 | t->u.kernel.target, |
297 | t->data); | 297 | t->data); |
298 | 298 | ||
299 | /* Target might have changed stuff. */ | 299 | /* Target might have changed stuff. */ |
300 | arp = arp_hdr(*pskb); | 300 | arp = arp_hdr(skb); |
301 | 301 | ||
302 | if (verdict == ARPT_CONTINUE) | 302 | if (verdict == ARPT_CONTINUE) |
303 | e = (void *)e + e->next_offset; | 303 | e = (void *)e + e->next_offset; |
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index c4bdab47597f..45fa4e20094a 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* module that allows mangling of the arp payload */ | 1 | /* module that allows mangling of the arp payload */ |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/netfilter.h> | ||
3 | #include <linux/netfilter_arp/arpt_mangle.h> | 4 | #include <linux/netfilter_arp/arpt_mangle.h> |
4 | #include <net/sock.h> | 5 | #include <net/sock.h> |
5 | 6 | ||
@@ -8,7 +9,7 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>"); | |||
8 | MODULE_DESCRIPTION("arptables arp payload mangle target"); | 9 | MODULE_DESCRIPTION("arptables arp payload mangle target"); |
9 | 10 | ||
10 | static unsigned int | 11 | static unsigned int |
11 | target(struct sk_buff **pskb, | 12 | target(struct sk_buff *skb, |
12 | const struct net_device *in, const struct net_device *out, | 13 | const struct net_device *in, const struct net_device *out, |
13 | unsigned int hooknum, const struct xt_target *target, | 14 | unsigned int hooknum, const struct xt_target *target, |
14 | const void *targinfo) | 15 | const void *targinfo) |
@@ -18,47 +19,38 @@ target(struct sk_buff **pskb, | |||
18 | unsigned char *arpptr; | 19 | unsigned char *arpptr; |
19 | int pln, hln; | 20 | int pln, hln; |
20 | 21 | ||
21 | if (skb_shared(*pskb) || skb_cloned(*pskb)) { | 22 | if (!skb_make_writable(skb, skb->len)) |
22 | struct sk_buff *nskb; | 23 | return NF_DROP; |
23 | 24 | ||
24 | nskb = skb_copy(*pskb, GFP_ATOMIC); | 25 | arp = arp_hdr(skb); |
25 | if (!nskb) | 26 | arpptr = skb_network_header(skb) + sizeof(*arp); |
26 | return NF_DROP; | ||
27 | if ((*pskb)->sk) | ||
28 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
29 | kfree_skb(*pskb); | ||
30 | *pskb = nskb; | ||
31 | } | ||
32 | |||
33 | arp = arp_hdr(*pskb); | ||
34 | arpptr = skb_network_header(*pskb) + sizeof(*arp); | ||
35 | pln = arp->ar_pln; | 27 | pln = arp->ar_pln; |
36 | hln = arp->ar_hln; | 28 | hln = arp->ar_hln; |
37 | /* We assume that pln and hln were checked in the match */ | 29 | /* We assume that pln and hln were checked in the match */ |
38 | if (mangle->flags & ARPT_MANGLE_SDEV) { | 30 | if (mangle->flags & ARPT_MANGLE_SDEV) { |
39 | if (ARPT_DEV_ADDR_LEN_MAX < hln || | 31 | if (ARPT_DEV_ADDR_LEN_MAX < hln || |
40 | (arpptr + hln > skb_tail_pointer(*pskb))) | 32 | (arpptr + hln > skb_tail_pointer(skb))) |
41 | return NF_DROP; | 33 | return NF_DROP; |
42 | memcpy(arpptr, mangle->src_devaddr, hln); | 34 | memcpy(arpptr, mangle->src_devaddr, hln); |
43 | } | 35 | } |
44 | arpptr += hln; | 36 | arpptr += hln; |
45 | if (mangle->flags & ARPT_MANGLE_SIP) { | 37 | if (mangle->flags & ARPT_MANGLE_SIP) { |
46 | if (ARPT_MANGLE_ADDR_LEN_MAX < pln || | 38 | if (ARPT_MANGLE_ADDR_LEN_MAX < pln || |
47 | (arpptr + pln > skb_tail_pointer(*pskb))) | 39 | (arpptr + pln > skb_tail_pointer(skb))) |
48 | return NF_DROP; | 40 | return NF_DROP; |
49 | memcpy(arpptr, &mangle->u_s.src_ip, pln); | 41 | memcpy(arpptr, &mangle->u_s.src_ip, pln); |
50 | } | 42 | } |
51 | arpptr += pln; | 43 | arpptr += pln; |
52 | if (mangle->flags & ARPT_MANGLE_TDEV) { | 44 | if (mangle->flags & ARPT_MANGLE_TDEV) { |
53 | if (ARPT_DEV_ADDR_LEN_MAX < hln || | 45 | if (ARPT_DEV_ADDR_LEN_MAX < hln || |
54 | (arpptr + hln > skb_tail_pointer(*pskb))) | 46 | (arpptr + hln > skb_tail_pointer(skb))) |
55 | return NF_DROP; | 47 | return NF_DROP; |
56 | memcpy(arpptr, mangle->tgt_devaddr, hln); | 48 | memcpy(arpptr, mangle->tgt_devaddr, hln); |
57 | } | 49 | } |
58 | arpptr += hln; | 50 | arpptr += hln; |
59 | if (mangle->flags & ARPT_MANGLE_TIP) { | 51 | if (mangle->flags & ARPT_MANGLE_TIP) { |
60 | if (ARPT_MANGLE_ADDR_LEN_MAX < pln || | 52 | if (ARPT_MANGLE_ADDR_LEN_MAX < pln || |
61 | (arpptr + pln > skb_tail_pointer(*pskb))) | 53 | (arpptr + pln > skb_tail_pointer(skb))) |
62 | return NF_DROP; | 54 | return NF_DROP; |
63 | memcpy(arpptr, &mangle->u_t.tgt_ip, pln); | 55 | memcpy(arpptr, &mangle->u_t.tgt_ip, pln); |
64 | } | 56 | } |
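One detail worth flagging in the arpt_mangle conversion: skb_make_writable() returns nonzero on success, so the target must drop only when the call fails, i.e. test !skb_make_writable(), matching the checks used throughout the rest of this patch. A minimal sketch of the corrected entry of the target, illustrative only:

	/* unshare/linearize the whole ARP packet before editing it */
	if (!skb_make_writable(skb, skb->len))
		return NF_DROP;		/* could not get a private copy */

	/* re-read header pointers, the data may have been reallocated */
	arp = arp_hdr(skb);
	arpptr = skb_network_header(skb) + sizeof(*arp);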
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 75c023062533..302d3da5f696 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c | |||
@@ -56,12 +56,12 @@ static struct arpt_table packet_filter = { | |||
56 | 56 | ||
57 | /* The work comes in here from netfilter.c */ | 57 | /* The work comes in here from netfilter.c */ |
58 | static unsigned int arpt_hook(unsigned int hook, | 58 | static unsigned int arpt_hook(unsigned int hook, |
59 | struct sk_buff **pskb, | 59 | struct sk_buff *skb, |
60 | const struct net_device *in, | 60 | const struct net_device *in, |
61 | const struct net_device *out, | 61 | const struct net_device *out, |
62 | int (*okfn)(struct sk_buff *)) | 62 | int (*okfn)(struct sk_buff *)) |
63 | { | 63 | { |
64 | return arpt_do_table(pskb, hook, in, out, &packet_filter); | 64 | return arpt_do_table(skb, hook, in, out, &packet_filter); |
65 | } | 65 | } |
66 | 66 | ||
67 | static struct nf_hook_ops arpt_ops[] = { | 67 | static struct nf_hook_ops arpt_ops[] = { |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 23cbfc7c80fd..10a2ce09fd8e 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -335,6 +335,7 @@ static int | |||
335 | ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | 335 | ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) |
336 | { | 336 | { |
337 | int diff; | 337 | int diff; |
338 | int err; | ||
338 | struct iphdr *user_iph = (struct iphdr *)v->payload; | 339 | struct iphdr *user_iph = (struct iphdr *)v->payload; |
339 | 340 | ||
340 | if (v->data_len < sizeof(*user_iph)) | 341 | if (v->data_len < sizeof(*user_iph)) |
@@ -347,25 +348,18 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | |||
347 | if (v->data_len > 0xFFFF) | 348 | if (v->data_len > 0xFFFF) |
348 | return -EINVAL; | 349 | return -EINVAL; |
349 | if (diff > skb_tailroom(e->skb)) { | 350 | if (diff > skb_tailroom(e->skb)) { |
350 | struct sk_buff *newskb; | 351 | err = pskb_expand_head(e->skb, 0, |
351 | 352 | diff - skb_tailroom(e->skb), | |
352 | newskb = skb_copy_expand(e->skb, | 353 | GFP_ATOMIC); |
353 | skb_headroom(e->skb), | 354 | if (err) { |
354 | diff, | 355 | printk(KERN_WARNING "ip_queue: error " |
355 | GFP_ATOMIC); | 356 | "in mangle, dropping packet: %d\n", -err); |
356 | if (newskb == NULL) { | 357 | return err; |
357 | printk(KERN_WARNING "ip_queue: OOM " | ||
358 | "in mangle, dropping packet\n"); | ||
359 | return -ENOMEM; | ||
360 | } | 358 | } |
361 | if (e->skb->sk) | ||
362 | skb_set_owner_w(newskb, e->skb->sk); | ||
363 | kfree_skb(e->skb); | ||
364 | e->skb = newskb; | ||
365 | } | 359 | } |
366 | skb_put(e->skb, diff); | 360 | skb_put(e->skb, diff); |
367 | } | 361 | } |
368 | if (!skb_make_writable(&e->skb, v->data_len)) | 362 | if (!skb_make_writable(e->skb, v->data_len)) |
369 | return -ENOMEM; | 363 | return -ENOMEM; |
370 | skb_copy_to_linear_data(e->skb, v->payload, v->data_len); | 364 | skb_copy_to_linear_data(e->skb, v->payload, v->data_len); |
371 | e->skb->ip_summed = CHECKSUM_NONE; | 365 | e->skb->ip_summed = CHECKSUM_NONE; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 6486894f450c..4b10b98640ac 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -169,7 +169,7 @@ ip_checkentry(const struct ipt_ip *ip) | |||
169 | } | 169 | } |
170 | 170 | ||
171 | static unsigned int | 171 | static unsigned int |
172 | ipt_error(struct sk_buff **pskb, | 172 | ipt_error(struct sk_buff *skb, |
173 | const struct net_device *in, | 173 | const struct net_device *in, |
174 | const struct net_device *out, | 174 | const struct net_device *out, |
175 | unsigned int hooknum, | 175 | unsigned int hooknum, |
@@ -312,7 +312,7 @@ static void trace_packet(struct sk_buff *skb, | |||
312 | 312 | ||
313 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ | 313 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
314 | unsigned int | 314 | unsigned int |
315 | ipt_do_table(struct sk_buff **pskb, | 315 | ipt_do_table(struct sk_buff *skb, |
316 | unsigned int hook, | 316 | unsigned int hook, |
317 | const struct net_device *in, | 317 | const struct net_device *in, |
318 | const struct net_device *out, | 318 | const struct net_device *out, |
@@ -331,8 +331,8 @@ ipt_do_table(struct sk_buff **pskb, | |||
331 | struct xt_table_info *private; | 331 | struct xt_table_info *private; |
332 | 332 | ||
333 | /* Initialization */ | 333 | /* Initialization */ |
334 | ip = ip_hdr(*pskb); | 334 | ip = ip_hdr(skb); |
335 | datalen = (*pskb)->len - ip->ihl * 4; | 335 | datalen = skb->len - ip->ihl * 4; |
336 | indev = in ? in->name : nulldevname; | 336 | indev = in ? in->name : nulldevname; |
337 | outdev = out ? out->name : nulldevname; | 337 | outdev = out ? out->name : nulldevname; |
338 | /* We handle fragments by dealing with the first fragment as | 338 | /* We handle fragments by dealing with the first fragment as |
@@ -359,7 +359,7 @@ ipt_do_table(struct sk_buff **pskb, | |||
359 | struct ipt_entry_target *t; | 359 | struct ipt_entry_target *t; |
360 | 360 | ||
361 | if (IPT_MATCH_ITERATE(e, do_match, | 361 | if (IPT_MATCH_ITERATE(e, do_match, |
362 | *pskb, in, out, | 362 | skb, in, out, |
363 | offset, &hotdrop) != 0) | 363 | offset, &hotdrop) != 0) |
364 | goto no_match; | 364 | goto no_match; |
365 | 365 | ||
@@ -371,8 +371,8 @@ ipt_do_table(struct sk_buff **pskb, | |||
371 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 371 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
372 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 372 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
373 | /* The packet is traced: log it */ | 373 | /* The packet is traced: log it */ |
374 | if (unlikely((*pskb)->nf_trace)) | 374 | if (unlikely(skb->nf_trace)) |
375 | trace_packet(*pskb, hook, in, out, | 375 | trace_packet(skb, hook, in, out, |
376 | table->name, private, e); | 376 | table->name, private, e); |
377 | #endif | 377 | #endif |
378 | /* Standard target? */ | 378 | /* Standard target? */ |
@@ -410,7 +410,7 @@ ipt_do_table(struct sk_buff **pskb, | |||
410 | ((struct ipt_entry *)table_base)->comefrom | 410 | ((struct ipt_entry *)table_base)->comefrom |
411 | = 0xeeeeeeec; | 411 | = 0xeeeeeeec; |
412 | #endif | 412 | #endif |
413 | verdict = t->u.kernel.target->target(pskb, | 413 | verdict = t->u.kernel.target->target(skb, |
414 | in, out, | 414 | in, out, |
415 | hook, | 415 | hook, |
416 | t->u.kernel.target, | 416 | t->u.kernel.target, |
@@ -428,8 +428,8 @@ ipt_do_table(struct sk_buff **pskb, | |||
428 | = 0x57acc001; | 428 | = 0x57acc001; |
429 | #endif | 429 | #endif |
430 | /* Target might have changed stuff. */ | 430 | /* Target might have changed stuff. */ |
431 | ip = ip_hdr(*pskb); | 431 | ip = ip_hdr(skb); |
432 | datalen = (*pskb)->len - ip->ihl * 4; | 432 | datalen = skb->len - ip->ihl * 4; |
433 | 433 | ||
434 | if (verdict == IPT_CONTINUE) | 434 | if (verdict == IPT_CONTINUE) |
435 | e = (void *)e + e->next_offset; | 435 | e = (void *)e + e->next_offset; |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 27f14e1ebd8b..2f544dac72df 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -289,7 +289,7 @@ clusterip_responsible(const struct clusterip_config *config, u_int32_t hash) | |||
289 | ***********************************************************************/ | 289 | ***********************************************************************/ |
290 | 290 | ||
291 | static unsigned int | 291 | static unsigned int |
292 | target(struct sk_buff **pskb, | 292 | target(struct sk_buff *skb, |
293 | const struct net_device *in, | 293 | const struct net_device *in, |
294 | const struct net_device *out, | 294 | const struct net_device *out, |
295 | unsigned int hooknum, | 295 | unsigned int hooknum, |
@@ -305,7 +305,7 @@ target(struct sk_buff **pskb, | |||
305 | * is only decremented by destroy() - and ip_tables guarantees | 305 | * is only decremented by destroy() - and ip_tables guarantees |
306 | * that the ->target() function isn't called after ->destroy() */ | 306 | * that the ->target() function isn't called after ->destroy() */ |
307 | 307 | ||
308 | ct = nf_ct_get(*pskb, &ctinfo); | 308 | ct = nf_ct_get(skb, &ctinfo); |
309 | if (ct == NULL) { | 309 | if (ct == NULL) { |
310 | printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); | 310 | printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); |
311 | /* FIXME: need to drop invalid ones, since replies | 311 | /* FIXME: need to drop invalid ones, since replies |
@@ -316,7 +316,7 @@ target(struct sk_buff **pskb, | |||
316 | 316 | ||
317 | /* special case: ICMP error handling. conntrack distinguishes between | 317 | /* special case: ICMP error handling. conntrack distinguishes between |
318 | * error messages (RELATED) and information requests (see below) */ | 318 | * error messages (RELATED) and information requests (see below) */ |
319 | if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP | 319 | if (ip_hdr(skb)->protocol == IPPROTO_ICMP |
320 | && (ctinfo == IP_CT_RELATED | 320 | && (ctinfo == IP_CT_RELATED |
321 | || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) | 321 | || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) |
322 | return XT_CONTINUE; | 322 | return XT_CONTINUE; |
@@ -325,7 +325,7 @@ target(struct sk_buff **pskb, | |||
325 | * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here | 325 | * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here |
326 | * on, which all have an ID field [relevant for hashing]. */ | 326 | * on, which all have an ID field [relevant for hashing]. */ |
327 | 327 | ||
328 | hash = clusterip_hashfn(*pskb, cipinfo->config); | 328 | hash = clusterip_hashfn(skb, cipinfo->config); |
329 | 329 | ||
330 | switch (ctinfo) { | 330 | switch (ctinfo) { |
331 | case IP_CT_NEW: | 331 | case IP_CT_NEW: |
@@ -355,7 +355,7 @@ target(struct sk_buff **pskb, | |||
355 | 355 | ||
356 | /* despite being received via linklayer multicast, this is | 356 | /* despite being received via linklayer multicast, this is |
357 | * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ | 357 | * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ |
358 | (*pskb)->pkt_type = PACKET_HOST; | 358 | skb->pkt_type = PACKET_HOST; |
359 | 359 | ||
360 | return XT_CONTINUE; | 360 | return XT_CONTINUE; |
361 | } | 361 | } |
@@ -505,12 +505,12 @@ static void arp_print(struct arp_payload *payload) | |||
505 | 505 | ||
506 | static unsigned int | 506 | static unsigned int |
507 | arp_mangle(unsigned int hook, | 507 | arp_mangle(unsigned int hook, |
508 | struct sk_buff **pskb, | 508 | struct sk_buff *skb, |
509 | const struct net_device *in, | 509 | const struct net_device *in, |
510 | const struct net_device *out, | 510 | const struct net_device *out, |
511 | int (*okfn)(struct sk_buff *)) | 511 | int (*okfn)(struct sk_buff *)) |
512 | { | 512 | { |
513 | struct arphdr *arp = arp_hdr(*pskb); | 513 | struct arphdr *arp = arp_hdr(skb); |
514 | struct arp_payload *payload; | 514 | struct arp_payload *payload; |
515 | struct clusterip_config *c; | 515 | struct clusterip_config *c; |
516 | 516 | ||
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index f1253bd3837f..add110060a22 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -26,15 +26,15 @@ MODULE_DESCRIPTION("iptables ECN modification module"); | |||
26 | /* set ECT codepoint from IP header. | 26 | /* set ECT codepoint from IP header. |
27 | * return false if there was an error. */ | 27 | * return false if there was an error. */ |
28 | static inline bool | 28 | static inline bool |
29 | set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | 29 | set_ect_ip(struct sk_buff *skb, const struct ipt_ECN_info *einfo) |
30 | { | 30 | { |
31 | struct iphdr *iph = ip_hdr(*pskb); | 31 | struct iphdr *iph = ip_hdr(skb); |
32 | 32 | ||
33 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { | 33 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { |
34 | __u8 oldtos; | 34 | __u8 oldtos; |
35 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) | 35 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
36 | return false; | 36 | return false; |
37 | iph = ip_hdr(*pskb); | 37 | iph = ip_hdr(skb); |
38 | oldtos = iph->tos; | 38 | oldtos = iph->tos; |
39 | iph->tos &= ~IPT_ECN_IP_MASK; | 39 | iph->tos &= ~IPT_ECN_IP_MASK; |
40 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); | 40 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); |
@@ -45,14 +45,13 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
45 | 45 | ||
46 | /* Return false if there was an error. */ | 46 | /* Return false if there was an error. */ |
47 | static inline bool | 47 | static inline bool |
48 | set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | 48 | set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo) |
49 | { | 49 | { |
50 | struct tcphdr _tcph, *tcph; | 50 | struct tcphdr _tcph, *tcph; |
51 | __be16 oldval; | 51 | __be16 oldval; |
52 | 52 | ||
53 | /* Not enought header? */ | 53 | /* Not enought header? */ |
54 | tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb), | 54 | tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); |
55 | sizeof(_tcph), &_tcph); | ||
56 | if (!tcph) | 55 | if (!tcph) |
57 | return false; | 56 | return false; |
58 | 57 | ||
@@ -62,9 +61,9 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
62 | tcph->cwr == einfo->proto.tcp.cwr)) | 61 | tcph->cwr == einfo->proto.tcp.cwr)) |
63 | return true; | 62 | return true; |
64 | 63 | ||
65 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) | 64 | if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph))) |
66 | return false; | 65 | return false; |
67 | tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb); | 66 | tcph = (void *)ip_hdr(skb) + ip_hdrlen(skb); |
68 | 67 | ||
69 | oldval = ((__be16 *)tcph)[6]; | 68 | oldval = ((__be16 *)tcph)[6]; |
70 | if (einfo->operation & IPT_ECN_OP_SET_ECE) | 69 | if (einfo->operation & IPT_ECN_OP_SET_ECE) |
@@ -72,13 +71,13 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
72 | if (einfo->operation & IPT_ECN_OP_SET_CWR) | 71 | if (einfo->operation & IPT_ECN_OP_SET_CWR) |
73 | tcph->cwr = einfo->proto.tcp.cwr; | 72 | tcph->cwr = einfo->proto.tcp.cwr; |
74 | 73 | ||
75 | nf_proto_csum_replace2(&tcph->check, *pskb, | 74 | nf_proto_csum_replace2(&tcph->check, skb, |
76 | oldval, ((__be16 *)tcph)[6], 0); | 75 | oldval, ((__be16 *)tcph)[6], 0); |
77 | return true; | 76 | return true; |
78 | } | 77 | } |
79 | 78 | ||
80 | static unsigned int | 79 | static unsigned int |
81 | target(struct sk_buff **pskb, | 80 | target(struct sk_buff *skb, |
82 | const struct net_device *in, | 81 | const struct net_device *in, |
83 | const struct net_device *out, | 82 | const struct net_device *out, |
84 | unsigned int hooknum, | 83 | unsigned int hooknum, |
@@ -88,12 +87,12 @@ target(struct sk_buff **pskb, | |||
88 | const struct ipt_ECN_info *einfo = targinfo; | 87 | const struct ipt_ECN_info *einfo = targinfo; |
89 | 88 | ||
90 | if (einfo->operation & IPT_ECN_OP_SET_IP) | 89 | if (einfo->operation & IPT_ECN_OP_SET_IP) |
91 | if (!set_ect_ip(pskb, einfo)) | 90 | if (!set_ect_ip(skb, einfo)) |
92 | return NF_DROP; | 91 | return NF_DROP; |
93 | 92 | ||
94 | if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR) | 93 | if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR) |
95 | && ip_hdr(*pskb)->protocol == IPPROTO_TCP) | 94 | && ip_hdr(skb)->protocol == IPPROTO_TCP) |
96 | if (!set_ect_tcp(pskb, einfo)) | 95 | if (!set_ect_tcp(skb, einfo)) |
97 | return NF_DROP; | 96 | return NF_DROP; |
98 | 97 | ||
99 | return XT_CONTINUE; | 98 | return XT_CONTINUE; |
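
The ECN and TOS targets share one sequence after the conversion: compare the current bits, make the header writable, re-read the header pointer (the data may have been copied), rewrite the bits, and patch the checksum incrementally. A condensed, hypothetical helper showing that sequence (set_tos_bits and its parameters are illustrative only):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>

/* Hypothetical helper: the set_ect_ip()/TOS-target sequence in one place. */
static bool set_tos_bits(struct sk_buff *skb, __u8 want, __u8 mask)
{
        struct iphdr *iph = ip_hdr(skb);
        __u8 oldtos;

        if ((iph->tos & mask) == (want & mask))
                return true;                    /* nothing to change */

        if (!skb_make_writable(skb, sizeof(struct iphdr)))
                return false;
        iph = ip_hdr(skb);                      /* data may have moved */

        oldtos = iph->tos;
        iph->tos = (iph->tos & ~mask) | (want & mask);
        nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
        return true;
}
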
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index 127a5e89bf14..4b5e8216a4e7 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -418,7 +418,7 @@ ipt_log_packet(unsigned int pf, | |||
418 | } | 418 | } |
419 | 419 | ||
420 | static unsigned int | 420 | static unsigned int |
421 | ipt_log_target(struct sk_buff **pskb, | 421 | ipt_log_target(struct sk_buff *skb, |
422 | const struct net_device *in, | 422 | const struct net_device *in, |
423 | const struct net_device *out, | 423 | const struct net_device *out, |
424 | unsigned int hooknum, | 424 | unsigned int hooknum, |
@@ -432,7 +432,7 @@ ipt_log_target(struct sk_buff **pskb, | |||
432 | li.u.log.level = loginfo->level; | 432 | li.u.log.level = loginfo->level; |
433 | li.u.log.logflags = loginfo->logflags; | 433 | li.u.log.logflags = loginfo->logflags; |
434 | 434 | ||
435 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | 435 | ipt_log_packet(PF_INET, hooknum, skb, in, out, &li, |
436 | loginfo->prefix); | 436 | loginfo->prefix); |
437 | return XT_CONTINUE; | 437 | return XT_CONTINUE; |
438 | } | 438 | } |
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 3e0b562b2db7..44b516e7cb79 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -52,7 +52,7 @@ masquerade_check(const char *tablename, | |||
52 | } | 52 | } |
53 | 53 | ||
54 | static unsigned int | 54 | static unsigned int |
55 | masquerade_target(struct sk_buff **pskb, | 55 | masquerade_target(struct sk_buff *skb, |
56 | const struct net_device *in, | 56 | const struct net_device *in, |
57 | const struct net_device *out, | 57 | const struct net_device *out, |
58 | unsigned int hooknum, | 58 | unsigned int hooknum, |
@@ -69,7 +69,7 @@ masquerade_target(struct sk_buff **pskb, | |||
69 | 69 | ||
70 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); | 70 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); |
71 | 71 | ||
72 | ct = nf_ct_get(*pskb, &ctinfo); | 72 | ct = nf_ct_get(skb, &ctinfo); |
73 | nat = nfct_nat(ct); | 73 | nat = nfct_nat(ct); |
74 | 74 | ||
75 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED | 75 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED |
@@ -82,7 +82,7 @@ masquerade_target(struct sk_buff **pskb, | |||
82 | return NF_ACCEPT; | 82 | return NF_ACCEPT; |
83 | 83 | ||
84 | mr = targinfo; | 84 | mr = targinfo; |
85 | rt = (struct rtable *)(*pskb)->dst; | 85 | rt = (struct rtable *)skb->dst; |
86 | newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE); | 86 | newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE); |
87 | if (!newsrc) { | 87 | if (!newsrc) { |
88 | printk("MASQUERADE: %s ate my IP address\n", out->name); | 88 | printk("MASQUERADE: %s ate my IP address\n", out->name); |
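
For MASQUERADE the interesting part is unchanged: the new source address comes from whatever route the stack already attached to the packet. A hypothetical helper restating that lookup with the plain-skb convention (skb->dst instead of (*pskb)->dst):

#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <net/route.h>

/* Hypothetical helper: how masquerade_target() above chooses its new source. */
static __be32 pick_masq_source(const struct sk_buff *skb,
                               const struct net_device *out)
{
        const struct rtable *rt = (struct rtable *)skb->dst;

        /* Pick an address on the outgoing device that can reach the gateway. */
        return inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
}

If no address is found, masquerade_target() logs the "ate my IP address" message and drops the packet.
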
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index 41a011d5a065..f8699291e33d 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c | |||
@@ -43,7 +43,7 @@ check(const char *tablename, | |||
43 | } | 43 | } |
44 | 44 | ||
45 | static unsigned int | 45 | static unsigned int |
46 | target(struct sk_buff **pskb, | 46 | target(struct sk_buff *skb, |
47 | const struct net_device *in, | 47 | const struct net_device *in, |
48 | const struct net_device *out, | 48 | const struct net_device *out, |
49 | unsigned int hooknum, | 49 | unsigned int hooknum, |
@@ -59,14 +59,14 @@ target(struct sk_buff **pskb, | |||
59 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING | 59 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING |
60 | || hooknum == NF_IP_POST_ROUTING | 60 | || hooknum == NF_IP_POST_ROUTING |
61 | || hooknum == NF_IP_LOCAL_OUT); | 61 | || hooknum == NF_IP_LOCAL_OUT); |
62 | ct = nf_ct_get(*pskb, &ctinfo); | 62 | ct = nf_ct_get(skb, &ctinfo); |
63 | 63 | ||
64 | netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); | 64 | netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); |
65 | 65 | ||
66 | if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) | 66 | if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) |
67 | new_ip = ip_hdr(*pskb)->daddr & ~netmask; | 67 | new_ip = ip_hdr(skb)->daddr & ~netmask; |
68 | else | 68 | else |
69 | new_ip = ip_hdr(*pskb)->saddr & ~netmask; | 69 | new_ip = ip_hdr(skb)->saddr & ~netmask; |
70 | new_ip |= mr->range[0].min_ip & netmask; | 70 | new_ip |= mr->range[0].min_ip & netmask; |
71 | 71 | ||
72 | newrange = ((struct nf_nat_range) | 72 | newrange = ((struct nf_nat_range) |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 6ac7a2373316..f7cf7d61a2d4 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c | |||
@@ -47,7 +47,7 @@ redirect_check(const char *tablename, | |||
47 | } | 47 | } |
48 | 48 | ||
49 | static unsigned int | 49 | static unsigned int |
50 | redirect_target(struct sk_buff **pskb, | 50 | redirect_target(struct sk_buff *skb, |
51 | const struct net_device *in, | 51 | const struct net_device *in, |
52 | const struct net_device *out, | 52 | const struct net_device *out, |
53 | unsigned int hooknum, | 53 | unsigned int hooknum, |
@@ -63,7 +63,7 @@ redirect_target(struct sk_buff **pskb, | |||
63 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING | 63 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING |
64 | || hooknum == NF_IP_LOCAL_OUT); | 64 | || hooknum == NF_IP_LOCAL_OUT); |
65 | 65 | ||
66 | ct = nf_ct_get(*pskb, &ctinfo); | 66 | ct = nf_ct_get(skb, &ctinfo); |
67 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); | 67 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); |
68 | 68 | ||
69 | /* Local packets: make them go to loopback */ | 69 | /* Local packets: make them go to loopback */ |
@@ -76,7 +76,7 @@ redirect_target(struct sk_buff **pskb, | |||
76 | newdst = 0; | 76 | newdst = 0; |
77 | 77 | ||
78 | rcu_read_lock(); | 78 | rcu_read_lock(); |
79 | indev = __in_dev_get_rcu((*pskb)->dev); | 79 | indev = __in_dev_get_rcu(skb->dev); |
80 | if (indev && (ifa = indev->ifa_list)) | 80 | if (indev && (ifa = indev->ifa_list)) |
81 | newdst = ifa->ifa_local; | 81 | newdst = ifa->ifa_local; |
82 | rcu_read_unlock(); | 82 | rcu_read_unlock(); |
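
REDIRECT derives the replacement destination the same way as before; only the skb access changes. A hypothetical helper condensing that choice (loopback for locally generated packets, otherwise the primary address of the inbound device, read under RCU):

#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/rcupdate.h>
#include <linux/netfilter_ipv4.h>

/* Hypothetical helper mirroring redirect_target()'s address selection. */
static __be32 redirect_newdst(const struct sk_buff *skb, unsigned int hooknum)
{
        const struct in_device *indev;
        const struct in_ifaddr *ifa;
        __be32 newdst = 0;

        if (hooknum == NF_IP_LOCAL_OUT)
                return htonl(0x7F000001);       /* local packets go to loopback */

        rcu_read_lock();
        indev = __in_dev_get_rcu(skb->dev);
        if (indev && (ifa = indev->ifa_list))
                newdst = ifa->ifa_local;
        rcu_read_unlock();

        return newdst;
}
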
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index cb038c8fbc9d..dcf4d21d5116 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -131,7 +131,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
131 | ) | 131 | ) |
132 | addr_type = RTN_LOCAL; | 132 | addr_type = RTN_LOCAL; |
133 | 133 | ||
134 | if (ip_route_me_harder(&nskb, addr_type)) | 134 | if (ip_route_me_harder(nskb, addr_type)) |
135 | goto free_nskb; | 135 | goto free_nskb; |
136 | 136 | ||
137 | nskb->ip_summed = CHECKSUM_NONE; | 137 | nskb->ip_summed = CHECKSUM_NONE; |
@@ -162,7 +162,7 @@ static inline void send_unreach(struct sk_buff *skb_in, int code) | |||
162 | icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); | 162 | icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); |
163 | } | 163 | } |
164 | 164 | ||
165 | static unsigned int reject(struct sk_buff **pskb, | 165 | static unsigned int reject(struct sk_buff *skb, |
166 | const struct net_device *in, | 166 | const struct net_device *in, |
167 | const struct net_device *out, | 167 | const struct net_device *out, |
168 | unsigned int hooknum, | 168 | unsigned int hooknum, |
@@ -173,7 +173,7 @@ static unsigned int reject(struct sk_buff **pskb, | |||
173 | 173 | ||
174 | /* Our naive response construction doesn't deal with IP | 174 | /* Our naive response construction doesn't deal with IP |
175 | options, and probably shouldn't try. */ | 175 | options, and probably shouldn't try. */ |
176 | if (ip_hdrlen(*pskb) != sizeof(struct iphdr)) | 176 | if (ip_hdrlen(skb) != sizeof(struct iphdr)) |
177 | return NF_DROP; | 177 | return NF_DROP; |
178 | 178 | ||
179 | /* WARNING: This code causes reentry within iptables. | 179 | /* WARNING: This code causes reentry within iptables. |
@@ -181,28 +181,28 @@ static unsigned int reject(struct sk_buff **pskb, | |||
181 | must return an absolute verdict. --RR */ | 181 | must return an absolute verdict. --RR */ |
182 | switch (reject->with) { | 182 | switch (reject->with) { |
183 | case IPT_ICMP_NET_UNREACHABLE: | 183 | case IPT_ICMP_NET_UNREACHABLE: |
184 | send_unreach(*pskb, ICMP_NET_UNREACH); | 184 | send_unreach(skb, ICMP_NET_UNREACH); |
185 | break; | 185 | break; |
186 | case IPT_ICMP_HOST_UNREACHABLE: | 186 | case IPT_ICMP_HOST_UNREACHABLE: |
187 | send_unreach(*pskb, ICMP_HOST_UNREACH); | 187 | send_unreach(skb, ICMP_HOST_UNREACH); |
188 | break; | 188 | break; |
189 | case IPT_ICMP_PROT_UNREACHABLE: | 189 | case IPT_ICMP_PROT_UNREACHABLE: |
190 | send_unreach(*pskb, ICMP_PROT_UNREACH); | 190 | send_unreach(skb, ICMP_PROT_UNREACH); |
191 | break; | 191 | break; |
192 | case IPT_ICMP_PORT_UNREACHABLE: | 192 | case IPT_ICMP_PORT_UNREACHABLE: |
193 | send_unreach(*pskb, ICMP_PORT_UNREACH); | 193 | send_unreach(skb, ICMP_PORT_UNREACH); |
194 | break; | 194 | break; |
195 | case IPT_ICMP_NET_PROHIBITED: | 195 | case IPT_ICMP_NET_PROHIBITED: |
196 | send_unreach(*pskb, ICMP_NET_ANO); | 196 | send_unreach(skb, ICMP_NET_ANO); |
197 | break; | 197 | break; |
198 | case IPT_ICMP_HOST_PROHIBITED: | 198 | case IPT_ICMP_HOST_PROHIBITED: |
199 | send_unreach(*pskb, ICMP_HOST_ANO); | 199 | send_unreach(skb, ICMP_HOST_ANO); |
200 | break; | 200 | break; |
201 | case IPT_ICMP_ADMIN_PROHIBITED: | 201 | case IPT_ICMP_ADMIN_PROHIBITED: |
202 | send_unreach(*pskb, ICMP_PKT_FILTERED); | 202 | send_unreach(skb, ICMP_PKT_FILTERED); |
203 | break; | 203 | break; |
204 | case IPT_TCP_RESET: | 204 | case IPT_TCP_RESET: |
205 | send_reset(*pskb, hooknum); | 205 | send_reset(skb, hooknum); |
206 | case IPT_ICMP_ECHOREPLY: | 206 | case IPT_ICMP_ECHOREPLY: |
207 | /* Doesn't happen. */ | 207 | /* Doesn't happen. */ |
208 | break; | 208 | break; |
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c index 97641f1a97f6..8988571436b8 100644 --- a/net/ipv4/netfilter/ipt_SAME.c +++ b/net/ipv4/netfilter/ipt_SAME.c | |||
@@ -104,7 +104,7 @@ same_destroy(const struct xt_target *target, void *targinfo) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static unsigned int | 106 | static unsigned int |
107 | same_target(struct sk_buff **pskb, | 107 | same_target(struct sk_buff *skb, |
108 | const struct net_device *in, | 108 | const struct net_device *in, |
109 | const struct net_device *out, | 109 | const struct net_device *out, |
110 | unsigned int hooknum, | 110 | unsigned int hooknum, |
@@ -121,7 +121,7 @@ same_target(struct sk_buff **pskb, | |||
121 | 121 | ||
122 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || | 122 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || |
123 | hooknum == NF_IP_POST_ROUTING); | 123 | hooknum == NF_IP_POST_ROUTING); |
124 | ct = nf_ct_get(*pskb, &ctinfo); | 124 | ct = nf_ct_get(skb, &ctinfo); |
125 | 125 | ||
126 | t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 126 | t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
127 | 127 | ||
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c index 25f5d0b39065..d4573baa7f27 100644 --- a/net/ipv4/netfilter/ipt_TOS.c +++ b/net/ipv4/netfilter/ipt_TOS.c | |||
@@ -21,7 +21,7 @@ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | |||
21 | MODULE_DESCRIPTION("iptables TOS mangling module"); | 21 | MODULE_DESCRIPTION("iptables TOS mangling module"); |
22 | 22 | ||
23 | static unsigned int | 23 | static unsigned int |
24 | target(struct sk_buff **pskb, | 24 | target(struct sk_buff *skb, |
25 | const struct net_device *in, | 25 | const struct net_device *in, |
26 | const struct net_device *out, | 26 | const struct net_device *out, |
27 | unsigned int hooknum, | 27 | unsigned int hooknum, |
@@ -29,13 +29,13 @@ target(struct sk_buff **pskb, | |||
29 | const void *targinfo) | 29 | const void *targinfo) |
30 | { | 30 | { |
31 | const struct ipt_tos_target_info *tosinfo = targinfo; | 31 | const struct ipt_tos_target_info *tosinfo = targinfo; |
32 | struct iphdr *iph = ip_hdr(*pskb); | 32 | struct iphdr *iph = ip_hdr(skb); |
33 | 33 | ||
34 | if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { | 34 | if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { |
35 | __u8 oldtos; | 35 | __u8 oldtos; |
36 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) | 36 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
37 | return NF_DROP; | 37 | return NF_DROP; |
38 | iph = ip_hdr(*pskb); | 38 | iph = ip_hdr(skb); |
39 | oldtos = iph->tos; | 39 | oldtos = iph->tos; |
40 | iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; | 40 | iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; |
41 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); | 41 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); |
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c index 2b54e7b0cfe8..c620a0527666 100644 --- a/net/ipv4/netfilter/ipt_TTL.c +++ b/net/ipv4/netfilter/ipt_TTL.c | |||
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("IP tables TTL modification module"); | |||
20 | MODULE_LICENSE("GPL"); | 20 | MODULE_LICENSE("GPL"); |
21 | 21 | ||
22 | static unsigned int | 22 | static unsigned int |
23 | ipt_ttl_target(struct sk_buff **pskb, | 23 | ipt_ttl_target(struct sk_buff *skb, |
24 | const struct net_device *in, const struct net_device *out, | 24 | const struct net_device *in, const struct net_device *out, |
25 | unsigned int hooknum, const struct xt_target *target, | 25 | unsigned int hooknum, const struct xt_target *target, |
26 | const void *targinfo) | 26 | const void *targinfo) |
@@ -29,10 +29,10 @@ ipt_ttl_target(struct sk_buff **pskb, | |||
29 | const struct ipt_TTL_info *info = targinfo; | 29 | const struct ipt_TTL_info *info = targinfo; |
30 | int new_ttl; | 30 | int new_ttl; |
31 | 31 | ||
32 | if (!skb_make_writable(pskb, (*pskb)->len)) | 32 | if (!skb_make_writable(skb, skb->len)) |
33 | return NF_DROP; | 33 | return NF_DROP; |
34 | 34 | ||
35 | iph = ip_hdr(*pskb); | 35 | iph = ip_hdr(skb); |
36 | 36 | ||
37 | switch (info->mode) { | 37 | switch (info->mode) { |
38 | case IPT_TTL_SET: | 38 | case IPT_TTL_SET: |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index c636d6d63574..212b830765a4 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -279,7 +279,7 @@ alloc_failure: | |||
279 | spin_unlock_bh(&ulog_lock); | 279 | spin_unlock_bh(&ulog_lock); |
280 | } | 280 | } |
281 | 281 | ||
282 | static unsigned int ipt_ulog_target(struct sk_buff **pskb, | 282 | static unsigned int ipt_ulog_target(struct sk_buff *skb, |
283 | const struct net_device *in, | 283 | const struct net_device *in, |
284 | const struct net_device *out, | 284 | const struct net_device *out, |
285 | unsigned int hooknum, | 285 | unsigned int hooknum, |
@@ -288,7 +288,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb, | |||
288 | { | 288 | { |
289 | struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; | 289 | struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; |
290 | 290 | ||
291 | ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL); | 291 | ipt_ulog_packet(hooknum, skb, in, out, loginfo, NULL); |
292 | 292 | ||
293 | return XT_CONTINUE; | 293 | return XT_CONTINUE; |
294 | } | 294 | } |
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 4f51c1d7d2d6..ba3262c60437 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -62,31 +62,31 @@ static struct xt_table packet_filter = { | |||
62 | /* The work comes in here from netfilter.c. */ | 62 | /* The work comes in here from netfilter.c. */ |
63 | static unsigned int | 63 | static unsigned int |
64 | ipt_hook(unsigned int hook, | 64 | ipt_hook(unsigned int hook, |
65 | struct sk_buff **pskb, | 65 | struct sk_buff *skb, |
66 | const struct net_device *in, | 66 | const struct net_device *in, |
67 | const struct net_device *out, | 67 | const struct net_device *out, |
68 | int (*okfn)(struct sk_buff *)) | 68 | int (*okfn)(struct sk_buff *)) |
69 | { | 69 | { |
70 | return ipt_do_table(pskb, hook, in, out, &packet_filter); | 70 | return ipt_do_table(skb, hook, in, out, &packet_filter); |
71 | } | 71 | } |
72 | 72 | ||
73 | static unsigned int | 73 | static unsigned int |
74 | ipt_local_out_hook(unsigned int hook, | 74 | ipt_local_out_hook(unsigned int hook, |
75 | struct sk_buff **pskb, | 75 | struct sk_buff *skb, |
76 | const struct net_device *in, | 76 | const struct net_device *in, |
77 | const struct net_device *out, | 77 | const struct net_device *out, |
78 | int (*okfn)(struct sk_buff *)) | 78 | int (*okfn)(struct sk_buff *)) |
79 | { | 79 | { |
80 | /* root is playing with raw sockets. */ | 80 | /* root is playing with raw sockets. */ |
81 | if ((*pskb)->len < sizeof(struct iphdr) | 81 | if (skb->len < sizeof(struct iphdr) || |
82 | || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 82 | ip_hdrlen(skb) < sizeof(struct iphdr)) { |
83 | if (net_ratelimit()) | 83 | if (net_ratelimit()) |
84 | printk("iptable_filter: ignoring short SOCK_RAW " | 84 | printk("iptable_filter: ignoring short SOCK_RAW " |
85 | "packet.\n"); | 85 | "packet.\n"); |
86 | return NF_ACCEPT; | 86 | return NF_ACCEPT; |
87 | } | 87 | } |
88 | 88 | ||
89 | return ipt_do_table(pskb, hook, in, out, &packet_filter); | 89 | return ipt_do_table(skb, hook, in, out, &packet_filter); |
90 | } | 90 | } |
91 | 91 | ||
92 | static struct nf_hook_ops ipt_ops[] = { | 92 | static struct nf_hook_ops ipt_ops[] = { |
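
The same "root is playing with raw sockets" guard is open-coded in iptable_filter, iptable_mangle, iptable_raw and the conntrack LOCAL_OUT hook below; it only detects packets too short to carry a full IP header, which are accepted unfiltered. Expressed as a hypothetical predicate:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>

/* Hypothetical predicate: true when a raw-socket packet is too short to
 * contain a complete IP header. */
static bool short_raw_packet(const struct sk_buff *skb)
{
        return skb->len < sizeof(struct iphdr) ||
               ip_hdrlen(skb) < sizeof(struct iphdr);
}

Each hook otherwise falls through to ipt_do_table(skb, hook, in, out, &table).
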
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 902446f7cbca..b4360a69d5ca 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -75,17 +75,17 @@ static struct xt_table packet_mangler = { | |||
75 | /* The work comes in here from netfilter.c. */ | 75 | /* The work comes in here from netfilter.c. */ |
76 | static unsigned int | 76 | static unsigned int |
77 | ipt_route_hook(unsigned int hook, | 77 | ipt_route_hook(unsigned int hook, |
78 | struct sk_buff **pskb, | 78 | struct sk_buff *skb, |
79 | const struct net_device *in, | 79 | const struct net_device *in, |
80 | const struct net_device *out, | 80 | const struct net_device *out, |
81 | int (*okfn)(struct sk_buff *)) | 81 | int (*okfn)(struct sk_buff *)) |
82 | { | 82 | { |
83 | return ipt_do_table(pskb, hook, in, out, &packet_mangler); | 83 | return ipt_do_table(skb, hook, in, out, &packet_mangler); |
84 | } | 84 | } |
85 | 85 | ||
86 | static unsigned int | 86 | static unsigned int |
87 | ipt_local_hook(unsigned int hook, | 87 | ipt_local_hook(unsigned int hook, |
88 | struct sk_buff **pskb, | 88 | struct sk_buff *skb, |
89 | const struct net_device *in, | 89 | const struct net_device *in, |
90 | const struct net_device *out, | 90 | const struct net_device *out, |
91 | int (*okfn)(struct sk_buff *)) | 91 | int (*okfn)(struct sk_buff *)) |
@@ -97,8 +97,8 @@ ipt_local_hook(unsigned int hook, | |||
97 | u_int32_t mark; | 97 | u_int32_t mark; |
98 | 98 | ||
99 | /* root is playing with raw sockets. */ | 99 | /* root is playing with raw sockets. */ |
100 | if ((*pskb)->len < sizeof(struct iphdr) | 100 | if (skb->len < sizeof(struct iphdr) |
101 | || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 101 | || ip_hdrlen(skb) < sizeof(struct iphdr)) { |
102 | if (net_ratelimit()) | 102 | if (net_ratelimit()) |
103 | printk("iptable_mangle: ignoring short SOCK_RAW " | 103 | printk("iptable_mangle: ignoring short SOCK_RAW " |
104 | "packet.\n"); | 104 | "packet.\n"); |
@@ -106,22 +106,22 @@ ipt_local_hook(unsigned int hook, | |||
106 | } | 106 | } |
107 | 107 | ||
108 | /* Save things which could affect route */ | 108 | /* Save things which could affect route */ |
109 | mark = (*pskb)->mark; | 109 | mark = skb->mark; |
110 | iph = ip_hdr(*pskb); | 110 | iph = ip_hdr(skb); |
111 | saddr = iph->saddr; | 111 | saddr = iph->saddr; |
112 | daddr = iph->daddr; | 112 | daddr = iph->daddr; |
113 | tos = iph->tos; | 113 | tos = iph->tos; |
114 | 114 | ||
115 | ret = ipt_do_table(pskb, hook, in, out, &packet_mangler); | 115 | ret = ipt_do_table(skb, hook, in, out, &packet_mangler); |
116 | /* Reroute for ANY change. */ | 116 | /* Reroute for ANY change. */ |
117 | if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { | 117 | if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { |
118 | iph = ip_hdr(*pskb); | 118 | iph = ip_hdr(skb); |
119 | 119 | ||
120 | if (iph->saddr != saddr || | 120 | if (iph->saddr != saddr || |
121 | iph->daddr != daddr || | 121 | iph->daddr != daddr || |
122 | (*pskb)->mark != mark || | 122 | skb->mark != mark || |
123 | iph->tos != tos) | 123 | iph->tos != tos) |
124 | if (ip_route_me_harder(pskb, RTN_UNSPEC)) | 124 | if (ip_route_me_harder(skb, RTN_UNSPEC)) |
125 | ret = NF_DROP; | 125 | ret = NF_DROP; |
126 | } | 126 | } |
127 | 127 | ||
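
The mangle table's LOCAL_OUT hook still has to re-route packets whose routing-relevant fields were rewritten; after the conversion, ip_route_me_harder() takes the skb directly because it no longer substitutes a new one. A hypothetical helper condensing the post-table check done in ipt_local_hook():

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv4.h>

/* Hypothetical helper: reroute only if a field that affects the routing
 * decision was changed by the table. Returns nonzero on failure. */
static int reroute_if_changed(struct sk_buff *skb, __be32 saddr, __be32 daddr,
                              __u8 tos, __u32 mark)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (iph->saddr != saddr || iph->daddr != daddr ||
            iph->tos != tos || skb->mark != mark)
                return ip_route_me_harder(skb, RTN_UNSPEC);
        return 0;
}

ipt_local_hook() saves saddr, daddr, tos and mark before ipt_do_table() and drops the packet if the reroute fails.
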
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index d6e503395684..5de6e57ac55c 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c | |||
@@ -47,30 +47,30 @@ static struct xt_table packet_raw = { | |||
47 | /* The work comes in here from netfilter.c. */ | 47 | /* The work comes in here from netfilter.c. */ |
48 | static unsigned int | 48 | static unsigned int |
49 | ipt_hook(unsigned int hook, | 49 | ipt_hook(unsigned int hook, |
50 | struct sk_buff **pskb, | 50 | struct sk_buff *skb, |
51 | const struct net_device *in, | 51 | const struct net_device *in, |
52 | const struct net_device *out, | 52 | const struct net_device *out, |
53 | int (*okfn)(struct sk_buff *)) | 53 | int (*okfn)(struct sk_buff *)) |
54 | { | 54 | { |
55 | return ipt_do_table(pskb, hook, in, out, &packet_raw); | 55 | return ipt_do_table(skb, hook, in, out, &packet_raw); |
56 | } | 56 | } |
57 | 57 | ||
58 | static unsigned int | 58 | static unsigned int |
59 | ipt_local_hook(unsigned int hook, | 59 | ipt_local_hook(unsigned int hook, |
60 | struct sk_buff **pskb, | 60 | struct sk_buff *skb, |
61 | const struct net_device *in, | 61 | const struct net_device *in, |
62 | const struct net_device *out, | 62 | const struct net_device *out, |
63 | int (*okfn)(struct sk_buff *)) | 63 | int (*okfn)(struct sk_buff *)) |
64 | { | 64 | { |
65 | /* root is playing with raw sockets. */ | 65 | /* root is playing with raw sockets. */ |
66 | if ((*pskb)->len < sizeof(struct iphdr) || | 66 | if (skb->len < sizeof(struct iphdr) || |
67 | ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 67 | ip_hdrlen(skb) < sizeof(struct iphdr)) { |
68 | if (net_ratelimit()) | 68 | if (net_ratelimit()) |
69 | printk("iptable_raw: ignoring short SOCK_RAW" | 69 | printk("iptable_raw: ignoring short SOCK_RAW" |
70 | "packet.\n"); | 70 | "packet.\n"); |
71 | return NF_ACCEPT; | 71 | return NF_ACCEPT; |
72 | } | 72 | } |
73 | return ipt_do_table(pskb, hook, in, out, &packet_raw); | 73 | return ipt_do_table(skb, hook, in, out, &packet_raw); |
74 | } | 74 | } |
75 | 75 | ||
76 | /* 'raw' is the very first table. */ | 76 | /* 'raw' is the very first table. */ |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 2fcb9249a8da..831e9b29806d 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -63,19 +63,20 @@ static int ipv4_print_conntrack(struct seq_file *s, | |||
63 | } | 63 | } |
64 | 64 | ||
65 | /* Returns new sk_buff, or NULL */ | 65 | /* Returns new sk_buff, or NULL */ |
66 | static struct sk_buff * | 66 | static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) |
67 | nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) | ||
68 | { | 67 | { |
68 | int err; | ||
69 | |||
69 | skb_orphan(skb); | 70 | skb_orphan(skb); |
70 | 71 | ||
71 | local_bh_disable(); | 72 | local_bh_disable(); |
72 | skb = ip_defrag(skb, user); | 73 | err = ip_defrag(skb, user); |
73 | local_bh_enable(); | 74 | local_bh_enable(); |
74 | 75 | ||
75 | if (skb) | 76 | if (!err) |
76 | ip_send_check(ip_hdr(skb)); | 77 | ip_send_check(ip_hdr(skb)); |
77 | 78 | ||
78 | return skb; | 79 | return err; |
79 | } | 80 | } |
80 | 81 | ||
81 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | 82 | static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, |
@@ -99,17 +100,17 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, | |||
99 | } | 100 | } |
100 | 101 | ||
101 | static unsigned int ipv4_confirm(unsigned int hooknum, | 102 | static unsigned int ipv4_confirm(unsigned int hooknum, |
102 | struct sk_buff **pskb, | 103 | struct sk_buff *skb, |
103 | const struct net_device *in, | 104 | const struct net_device *in, |
104 | const struct net_device *out, | 105 | const struct net_device *out, |
105 | int (*okfn)(struct sk_buff *)) | 106 | int (*okfn)(struct sk_buff *)) |
106 | { | 107 | { |
107 | /* We've seen it coming out the other side: confirm it */ | 108 | /* We've seen it coming out the other side: confirm it */ |
108 | return nf_conntrack_confirm(pskb); | 109 | return nf_conntrack_confirm(skb); |
109 | } | 110 | } |
110 | 111 | ||
111 | static unsigned int ipv4_conntrack_help(unsigned int hooknum, | 112 | static unsigned int ipv4_conntrack_help(unsigned int hooknum, |
112 | struct sk_buff **pskb, | 113 | struct sk_buff *skb, |
113 | const struct net_device *in, | 114 | const struct net_device *in, |
114 | const struct net_device *out, | 115 | const struct net_device *out, |
115 | int (*okfn)(struct sk_buff *)) | 116 | int (*okfn)(struct sk_buff *)) |
@@ -120,7 +121,7 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum, | |||
120 | struct nf_conntrack_helper *helper; | 121 | struct nf_conntrack_helper *helper; |
121 | 122 | ||
122 | /* This is where we call the helper: as the packet goes out. */ | 123 | /* This is where we call the helper: as the packet goes out. */ |
123 | ct = nf_ct_get(*pskb, &ctinfo); | 124 | ct = nf_ct_get(skb, &ctinfo); |
124 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) | 125 | if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) |
125 | return NF_ACCEPT; | 126 | return NF_ACCEPT; |
126 | 127 | ||
@@ -131,56 +132,55 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum, | |||
131 | helper = rcu_dereference(help->helper); | 132 | helper = rcu_dereference(help->helper); |
132 | if (!helper) | 133 | if (!helper) |
133 | return NF_ACCEPT; | 134 | return NF_ACCEPT; |
134 | return helper->help(pskb, skb_network_offset(*pskb) + ip_hdrlen(*pskb), | 135 | return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), |
135 | ct, ctinfo); | 136 | ct, ctinfo); |
136 | } | 137 | } |
137 | 138 | ||
138 | static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, | 139 | static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, |
139 | struct sk_buff **pskb, | 140 | struct sk_buff *skb, |
140 | const struct net_device *in, | 141 | const struct net_device *in, |
141 | const struct net_device *out, | 142 | const struct net_device *out, |
142 | int (*okfn)(struct sk_buff *)) | 143 | int (*okfn)(struct sk_buff *)) |
143 | { | 144 | { |
144 | /* Previously seen (loopback)? Ignore. Do this before | 145 | /* Previously seen (loopback)? Ignore. Do this before |
145 | fragment check. */ | 146 | fragment check. */ |
146 | if ((*pskb)->nfct) | 147 | if (skb->nfct) |
147 | return NF_ACCEPT; | 148 | return NF_ACCEPT; |
148 | 149 | ||
149 | /* Gather fragments. */ | 150 | /* Gather fragments. */ |
150 | if (ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 151 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
151 | *pskb = nf_ct_ipv4_gather_frags(*pskb, | 152 | if (nf_ct_ipv4_gather_frags(skb, |
152 | hooknum == NF_IP_PRE_ROUTING ? | 153 | hooknum == NF_IP_PRE_ROUTING ? |
153 | IP_DEFRAG_CONNTRACK_IN : | 154 | IP_DEFRAG_CONNTRACK_IN : |
154 | IP_DEFRAG_CONNTRACK_OUT); | 155 | IP_DEFRAG_CONNTRACK_OUT)) |
155 | if (!*pskb) | ||
156 | return NF_STOLEN; | 156 | return NF_STOLEN; |
157 | } | 157 | } |
158 | return NF_ACCEPT; | 158 | return NF_ACCEPT; |
159 | } | 159 | } |
160 | 160 | ||
161 | static unsigned int ipv4_conntrack_in(unsigned int hooknum, | 161 | static unsigned int ipv4_conntrack_in(unsigned int hooknum, |
162 | struct sk_buff **pskb, | 162 | struct sk_buff *skb, |
163 | const struct net_device *in, | 163 | const struct net_device *in, |
164 | const struct net_device *out, | 164 | const struct net_device *out, |
165 | int (*okfn)(struct sk_buff *)) | 165 | int (*okfn)(struct sk_buff *)) |
166 | { | 166 | { |
167 | return nf_conntrack_in(PF_INET, hooknum, pskb); | 167 | return nf_conntrack_in(PF_INET, hooknum, skb); |
168 | } | 168 | } |
169 | 169 | ||
170 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, | 170 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, |
171 | struct sk_buff **pskb, | 171 | struct sk_buff *skb, |
172 | const struct net_device *in, | 172 | const struct net_device *in, |
173 | const struct net_device *out, | 173 | const struct net_device *out, |
174 | int (*okfn)(struct sk_buff *)) | 174 | int (*okfn)(struct sk_buff *)) |
175 | { | 175 | { |
176 | /* root is playing with raw sockets. */ | 176 | /* root is playing with raw sockets. */ |
177 | if ((*pskb)->len < sizeof(struct iphdr) | 177 | if (skb->len < sizeof(struct iphdr) || |
178 | || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { | 178 | ip_hdrlen(skb) < sizeof(struct iphdr)) { |
179 | if (net_ratelimit()) | 179 | if (net_ratelimit()) |
180 | printk("ipt_hook: happy cracking.\n"); | 180 | printk("ipt_hook: happy cracking.\n"); |
181 | return NF_ACCEPT; | 181 | return NF_ACCEPT; |
182 | } | 182 | } |
183 | return nf_conntrack_in(PF_INET, hooknum, pskb); | 183 | return nf_conntrack_in(PF_INET, hooknum, skb); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Connection tracking may drop packets, but never alters them, so | 186 | /* Connection tracking may drop packets, but never alters them, so |
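
The one non-mechanical change in this file: ip_defrag() now reassembles in place and returns an error code instead of handing back a (possibly different) skb, so nf_ct_ipv4_gather_frags() becomes an int-returning wrapper and its caller keys NF_STOLEN off the return value. A simplified, hypothetical version of the caller, omitting the skb_orphan() and BH-disable details shown above:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>

/* Hypothetical, simplified defrag hook body. */
static unsigned int defrag_body(unsigned int hooknum, struct sk_buff *skb)
{
        if (skb->nfct)
                return NF_ACCEPT;               /* already tracked (loopback) */

        if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                if (ip_defrag(skb, hooknum == NF_IP_PRE_ROUTING ?
                                   IP_DEFRAG_CONNTRACK_IN :
                                   IP_DEFRAG_CONNTRACK_OUT))
                        return NF_STOLEN;       /* queued or freed internally */
                ip_send_check(ip_hdr(skb));     /* re-checksum the merged header */
        }
        return NF_ACCEPT;
}
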
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c index bd93a1d71052..35a5aa69cd92 100644 --- a/net/ipv4/netfilter/nf_nat_amanda.c +++ b/net/ipv4/netfilter/nf_nat_amanda.c | |||
@@ -24,7 +24,7 @@ MODULE_DESCRIPTION("Amanda NAT helper"); | |||
24 | MODULE_LICENSE("GPL"); | 24 | MODULE_LICENSE("GPL"); |
25 | MODULE_ALIAS("ip_nat_amanda"); | 25 | MODULE_ALIAS("ip_nat_amanda"); |
26 | 26 | ||
27 | static unsigned int help(struct sk_buff **pskb, | 27 | static unsigned int help(struct sk_buff *skb, |
28 | enum ip_conntrack_info ctinfo, | 28 | enum ip_conntrack_info ctinfo, |
29 | unsigned int matchoff, | 29 | unsigned int matchoff, |
30 | unsigned int matchlen, | 30 | unsigned int matchlen, |
@@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
53 | return NF_DROP; | 53 | return NF_DROP; |
54 | 54 | ||
55 | sprintf(buffer, "%u", port); | 55 | sprintf(buffer, "%u", port); |
56 | ret = nf_nat_mangle_udp_packet(pskb, exp->master, ctinfo, | 56 | ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, |
57 | matchoff, matchlen, | 57 | matchoff, matchlen, |
58 | buffer, strlen(buffer)); | 58 | buffer, strlen(buffer)); |
59 | if (ret != NF_ACCEPT) | 59 | if (ret != NF_ACCEPT) |
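
The Amanda helper shows the NAT-helper side of the conversion: nf_nat_mangle_udp_packet() also takes the skb itself and splices replacement text over the matched region. A hypothetical condensation of that rewrite (mangle_port_string and its buffer size are illustrative):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_nat_helper.h>

/* Hypothetical helper: print the NATed port as text and overwrite the
 * matched bytes of the UDP payload, as the Amanda helper above does. */
static unsigned int mangle_port_string(struct sk_buff *skb, struct nf_conn *ct,
                                       enum ip_conntrack_info ctinfo,
                                       unsigned int matchoff,
                                       unsigned int matchlen,
                                       u_int16_t port)
{
        char buffer[sizeof("65535")];
        unsigned int ret;

        sprintf(buffer, "%u", port);
        ret = nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen,
                                       buffer, strlen(buffer));
        if (ret != NF_ACCEPT)
                return NF_DROP;
        return NF_ACCEPT;
}
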
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 7221aa20e6ff..56e93f692e82 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -349,7 +349,7 @@ EXPORT_SYMBOL(nf_nat_setup_info); | |||
349 | /* Returns true if succeeded. */ | 349 | /* Returns true if succeeded. */ |
350 | static int | 350 | static int |
351 | manip_pkt(u_int16_t proto, | 351 | manip_pkt(u_int16_t proto, |
352 | struct sk_buff **pskb, | 352 | struct sk_buff *skb, |
353 | unsigned int iphdroff, | 353 | unsigned int iphdroff, |
354 | const struct nf_conntrack_tuple *target, | 354 | const struct nf_conntrack_tuple *target, |
355 | enum nf_nat_manip_type maniptype) | 355 | enum nf_nat_manip_type maniptype) |
@@ -357,19 +357,19 @@ manip_pkt(u_int16_t proto, | |||
357 | struct iphdr *iph; | 357 | struct iphdr *iph; |
358 | struct nf_nat_protocol *p; | 358 | struct nf_nat_protocol *p; |
359 | 359 | ||
360 | if (!skb_make_writable(pskb, iphdroff + sizeof(*iph))) | 360 | if (!skb_make_writable(skb, iphdroff + sizeof(*iph))) |
361 | return 0; | 361 | return 0; |
362 | 362 | ||
363 | iph = (void *)(*pskb)->data + iphdroff; | 363 | iph = (void *)skb->data + iphdroff; |
364 | 364 | ||
365 | /* Manipulate protocol part. */ | 365 | /* Manipulate protocol part. */ |
366 | 366 | ||
367 | /* rcu_read_lock()ed by nf_hook_slow */ | 367 | /* rcu_read_lock()ed by nf_hook_slow */ |
368 | p = __nf_nat_proto_find(proto); | 368 | p = __nf_nat_proto_find(proto); |
369 | if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) | 369 | if (!p->manip_pkt(skb, iphdroff, target, maniptype)) |
370 | return 0; | 370 | return 0; |
371 | 371 | ||
372 | iph = (void *)(*pskb)->data + iphdroff; | 372 | iph = (void *)skb->data + iphdroff; |
373 | 373 | ||
374 | if (maniptype == IP_NAT_MANIP_SRC) { | 374 | if (maniptype == IP_NAT_MANIP_SRC) { |
375 | nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); | 375 | nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); |
@@ -385,7 +385,7 @@ manip_pkt(u_int16_t proto, | |||
385 | unsigned int nf_nat_packet(struct nf_conn *ct, | 385 | unsigned int nf_nat_packet(struct nf_conn *ct, |
386 | enum ip_conntrack_info ctinfo, | 386 | enum ip_conntrack_info ctinfo, |
387 | unsigned int hooknum, | 387 | unsigned int hooknum, |
388 | struct sk_buff **pskb) | 388 | struct sk_buff *skb) |
389 | { | 389 | { |
390 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 390 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
391 | unsigned long statusbit; | 391 | unsigned long statusbit; |
@@ -407,7 +407,7 @@ unsigned int nf_nat_packet(struct nf_conn *ct, | |||
407 | /* We are aiming to look like inverse of other direction. */ | 407 | /* We are aiming to look like inverse of other direction. */ |
408 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 408 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
409 | 409 | ||
410 | if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype)) | 410 | if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype)) |
411 | return NF_DROP; | 411 | return NF_DROP; |
412 | } | 412 | } |
413 | return NF_ACCEPT; | 413 | return NF_ACCEPT; |
@@ -418,7 +418,7 @@ EXPORT_SYMBOL_GPL(nf_nat_packet); | |||
418 | int nf_nat_icmp_reply_translation(struct nf_conn *ct, | 418 | int nf_nat_icmp_reply_translation(struct nf_conn *ct, |
419 | enum ip_conntrack_info ctinfo, | 419 | enum ip_conntrack_info ctinfo, |
420 | unsigned int hooknum, | 420 | unsigned int hooknum, |
421 | struct sk_buff **pskb) | 421 | struct sk_buff *skb) |
422 | { | 422 | { |
423 | struct { | 423 | struct { |
424 | struct icmphdr icmp; | 424 | struct icmphdr icmp; |
@@ -426,24 +426,24 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
426 | } *inside; | 426 | } *inside; |
427 | struct nf_conntrack_l4proto *l4proto; | 427 | struct nf_conntrack_l4proto *l4proto; |
428 | struct nf_conntrack_tuple inner, target; | 428 | struct nf_conntrack_tuple inner, target; |
429 | int hdrlen = ip_hdrlen(*pskb); | 429 | int hdrlen = ip_hdrlen(skb); |
430 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 430 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
431 | unsigned long statusbit; | 431 | unsigned long statusbit; |
432 | enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); | 432 | enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); |
433 | 433 | ||
434 | if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) | 434 | if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) |
435 | return 0; | 435 | return 0; |
436 | 436 | ||
437 | inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); | 437 | inside = (void *)skb->data + ip_hdrlen(skb); |
438 | 438 | ||
439 | /* We're actually going to mangle it beyond trivial checksum | 439 | /* We're actually going to mangle it beyond trivial checksum |
440 | adjustment, so make sure the current checksum is correct. */ | 440 | adjustment, so make sure the current checksum is correct. */ |
441 | if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0)) | 441 | if (nf_ip_checksum(skb, hooknum, hdrlen, 0)) |
442 | return 0; | 442 | return 0; |
443 | 443 | ||
444 | /* Must be RELATED */ | 444 | /* Must be RELATED */ |
445 | NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED || | 445 | NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED || |
446 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); | 446 | skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); |
447 | 447 | ||
448 | /* Redirects on non-null nats must be dropped, else they'll | 448 | /* Redirects on non-null nats must be dropped, else they'll |
449 | start talking to each other without our translation, and be | 449 | start talking to each other without our translation, and be |
@@ -458,15 +458,15 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
458 | } | 458 | } |
459 | 459 | ||
460 | pr_debug("icmp_reply_translation: translating error %p manip %u " | 460 | pr_debug("icmp_reply_translation: translating error %p manip %u " |
461 | "dir %s\n", *pskb, manip, | 461 | "dir %s\n", skb, manip, |
462 | dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); | 462 | dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); |
463 | 463 | ||
464 | /* rcu_read_lock()ed by nf_hook_slow */ | 464 | /* rcu_read_lock()ed by nf_hook_slow */ |
465 | l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); | 465 | l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); |
466 | 466 | ||
467 | if (!nf_ct_get_tuple(*pskb, | 467 | if (!nf_ct_get_tuple(skb, |
468 | ip_hdrlen(*pskb) + sizeof(struct icmphdr), | 468 | ip_hdrlen(skb) + sizeof(struct icmphdr), |
469 | (ip_hdrlen(*pskb) + | 469 | (ip_hdrlen(skb) + |
470 | sizeof(struct icmphdr) + inside->ip.ihl * 4), | 470 | sizeof(struct icmphdr) + inside->ip.ihl * 4), |
471 | (u_int16_t)AF_INET, | 471 | (u_int16_t)AF_INET, |
472 | inside->ip.protocol, | 472 | inside->ip.protocol, |
@@ -478,19 +478,19 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
478 | pass all hooks (locally-generated ICMP). Consider incoming | 478 | pass all hooks (locally-generated ICMP). Consider incoming |
479 | packet: PREROUTING (DST manip), routing produces ICMP, goes | 479 | packet: PREROUTING (DST manip), routing produces ICMP, goes |
480 | through POSTROUTING (which must correct the DST manip). */ | 480 | through POSTROUTING (which must correct the DST manip). */ |
481 | if (!manip_pkt(inside->ip.protocol, pskb, | 481 | if (!manip_pkt(inside->ip.protocol, skb, |
482 | ip_hdrlen(*pskb) + sizeof(inside->icmp), | 482 | ip_hdrlen(skb) + sizeof(inside->icmp), |
483 | &ct->tuplehash[!dir].tuple, | 483 | &ct->tuplehash[!dir].tuple, |
484 | !manip)) | 484 | !manip)) |
485 | return 0; | 485 | return 0; |
486 | 486 | ||
487 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 487 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
488 | /* Reloading "inside" here since manip_pkt inner. */ | 488 | /* Reloading "inside" here since manip_pkt inner. */ |
489 | inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); | 489 | inside = (void *)skb->data + ip_hdrlen(skb); |
490 | inside->icmp.checksum = 0; | 490 | inside->icmp.checksum = 0; |
491 | inside->icmp.checksum = | 491 | inside->icmp.checksum = |
492 | csum_fold(skb_checksum(*pskb, hdrlen, | 492 | csum_fold(skb_checksum(skb, hdrlen, |
493 | (*pskb)->len - hdrlen, 0)); | 493 | skb->len - hdrlen, 0)); |
494 | } | 494 | } |
495 | 495 | ||
496 | /* Change outer to look the reply to an incoming packet | 496 | /* Change outer to look the reply to an incoming packet |
@@ -506,7 +506,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
506 | 506 | ||
507 | if (ct->status & statusbit) { | 507 | if (ct->status & statusbit) { |
508 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 508 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
509 | if (!manip_pkt(0, pskb, 0, &target, manip)) | 509 | if (!manip_pkt(0, skb, 0, &target, manip)) |
510 | return 0; | 510 | return 0; |
511 | } | 511 | } |
512 | 512 | ||
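
In the ICMP error translation path the remaining subtlety is the final checksum: once the embedded packet has been re-mangled, the ICMP checksum has to be recomputed over the whole payload unless checksumming is still being left to hardware. A hypothetical helper carrying just that step from nf_nat_icmp_reply_translation():

#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Hypothetical helper: recompute the ICMP checksum after the embedded
 * header has been rewritten; hdrlen is the outer IP header length. */
static void icmp_fix_checksum(struct sk_buff *skb, int hdrlen)
{
        struct icmphdr *icmp;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                return;                         /* hardware will checksum later */

        icmp = (void *)skb->data + hdrlen;
        icmp->checksum = 0;
        icmp->checksum = csum_fold(skb_checksum(skb, hdrlen,
                                                skb->len - hdrlen, 0));
}
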
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c index 3663bd879c39..e1a16d3ea4cb 100644 --- a/net/ipv4/netfilter/nf_nat_ftp.c +++ b/net/ipv4/netfilter/nf_nat_ftp.c | |||
@@ -28,7 +28,7 @@ MODULE_ALIAS("ip_nat_ftp"); | |||
28 | /* FIXME: Time out? --RR */ | 28 | /* FIXME: Time out? --RR */ |
29 | 29 | ||
30 | static int | 30 | static int |
31 | mangle_rfc959_packet(struct sk_buff **pskb, | 31 | mangle_rfc959_packet(struct sk_buff *skb, |
32 | __be32 newip, | 32 | __be32 newip, |
33 | u_int16_t port, | 33 | u_int16_t port, |
34 | unsigned int matchoff, | 34 | unsigned int matchoff, |
@@ -43,13 +43,13 @@ mangle_rfc959_packet(struct sk_buff **pskb, | |||
43 | 43 | ||
44 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); | 44 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
45 | 45 | ||
46 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 46 | return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, |
47 | matchlen, buffer, strlen(buffer)); | 47 | matchlen, buffer, strlen(buffer)); |
48 | } | 48 | } |
49 | 49 | ||
50 | /* |1|132.235.1.2|6275| */ | 50 | /* |1|132.235.1.2|6275| */ |
51 | static int | 51 | static int |
52 | mangle_eprt_packet(struct sk_buff **pskb, | 52 | mangle_eprt_packet(struct sk_buff *skb, |
53 | __be32 newip, | 53 | __be32 newip, |
54 | u_int16_t port, | 54 | u_int16_t port, |
55 | unsigned int matchoff, | 55 | unsigned int matchoff, |
@@ -63,13 +63,13 @@ mangle_eprt_packet(struct sk_buff **pskb, | |||
63 | 63 | ||
64 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); | 64 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
65 | 65 | ||
66 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 66 | return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, |
67 | matchlen, buffer, strlen(buffer)); | 67 | matchlen, buffer, strlen(buffer)); |
68 | } | 68 | } |
69 | 69 | ||
70 | /* |1|132.235.1.2|6275| */ | 70 | /* |1|132.235.1.2|6275| */ |
71 | static int | 71 | static int |
72 | mangle_epsv_packet(struct sk_buff **pskb, | 72 | mangle_epsv_packet(struct sk_buff *skb, |
73 | __be32 newip, | 73 | __be32 newip, |
74 | u_int16_t port, | 74 | u_int16_t port, |
75 | unsigned int matchoff, | 75 | unsigned int matchoff, |
@@ -83,11 +83,11 @@ mangle_epsv_packet(struct sk_buff **pskb, | |||
83 | 83 | ||
84 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); | 84 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
85 | 85 | ||
86 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 86 | return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, |
87 | matchlen, buffer, strlen(buffer)); | 87 | matchlen, buffer, strlen(buffer)); |
88 | } | 88 | } |
89 | 89 | ||
90 | static int (*mangle[])(struct sk_buff **, __be32, u_int16_t, | 90 | static int (*mangle[])(struct sk_buff *, __be32, u_int16_t, |
91 | unsigned int, unsigned int, struct nf_conn *, | 91 | unsigned int, unsigned int, struct nf_conn *, |
92 | enum ip_conntrack_info) | 92 | enum ip_conntrack_info) |
93 | = { | 93 | = { |
@@ -99,7 +99,7 @@ static int (*mangle[])(struct sk_buff **, __be32, u_int16_t, | |||
99 | 99 | ||
100 | /* So, this packet has hit the connection tracking matching code. | 100 | /* So, this packet has hit the connection tracking matching code. |
101 | Mangle it, and change the expectation to match the new version. */ | 101 | Mangle it, and change the expectation to match the new version. */ |
102 | static unsigned int nf_nat_ftp(struct sk_buff **pskb, | 102 | static unsigned int nf_nat_ftp(struct sk_buff *skb, |
103 | enum ip_conntrack_info ctinfo, | 103 | enum ip_conntrack_info ctinfo, |
104 | enum nf_ct_ftp_type type, | 104 | enum nf_ct_ftp_type type, |
105 | unsigned int matchoff, | 105 | unsigned int matchoff, |
@@ -132,7 +132,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb, | |||
132 | if (port == 0) | 132 | if (port == 0) |
133 | return NF_DROP; | 133 | return NF_DROP; |
134 | 134 | ||
135 | if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo)) { | 135 | if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) { |
136 | nf_ct_unexpect_related(exp); | 136 | nf_ct_unexpect_related(exp); |
137 | return NF_DROP; | 137 | return NF_DROP; |
138 | } | 138 | } |
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index c1b059a73708..a868c8c41328 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -22,12 +22,12 @@ | |||
22 | #include <linux/netfilter/nf_conntrack_h323.h> | 22 | #include <linux/netfilter/nf_conntrack_h323.h> |
23 | 23 | ||
24 | /****************************************************************************/ | 24 | /****************************************************************************/ |
25 | static int set_addr(struct sk_buff **pskb, | 25 | static int set_addr(struct sk_buff *skb, |
26 | unsigned char **data, int dataoff, | 26 | unsigned char **data, int dataoff, |
27 | unsigned int addroff, __be32 ip, __be16 port) | 27 | unsigned int addroff, __be32 ip, __be16 port) |
28 | { | 28 | { |
29 | enum ip_conntrack_info ctinfo; | 29 | enum ip_conntrack_info ctinfo; |
30 | struct nf_conn *ct = nf_ct_get(*pskb, &ctinfo); | 30 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
31 | struct { | 31 | struct { |
32 | __be32 ip; | 32 | __be32 ip; |
33 | __be16 port; | 33 | __be16 port; |
@@ -38,8 +38,8 @@ static int set_addr(struct sk_buff **pskb, | |||
38 | buf.port = port; | 38 | buf.port = port; |
39 | addroff += dataoff; | 39 | addroff += dataoff; |
40 | 40 | ||
41 | if (ip_hdr(*pskb)->protocol == IPPROTO_TCP) { | 41 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { |
42 | if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 42 | if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, |
43 | addroff, sizeof(buf), | 43 | addroff, sizeof(buf), |
44 | (char *) &buf, sizeof(buf))) { | 44 | (char *) &buf, sizeof(buf))) { |
45 | if (net_ratelimit()) | 45 | if (net_ratelimit()) |
@@ -49,14 +49,13 @@ static int set_addr(struct sk_buff **pskb, | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* Relocate data pointer */ | 51 | /* Relocate data pointer */ |
52 | th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), | 52 | th = skb_header_pointer(skb, ip_hdrlen(skb), |
53 | sizeof(_tcph), &_tcph); | 53 | sizeof(_tcph), &_tcph); |
54 | if (th == NULL) | 54 | if (th == NULL) |
55 | return -1; | 55 | return -1; |
56 | *data = (*pskb)->data + ip_hdrlen(*pskb) + | 56 | *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff; |
57 | th->doff * 4 + dataoff; | ||
58 | } else { | 57 | } else { |
59 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 58 | if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, |
60 | addroff, sizeof(buf), | 59 | addroff, sizeof(buf), |
61 | (char *) &buf, sizeof(buf))) { | 60 | (char *) &buf, sizeof(buf))) { |
62 | if (net_ratelimit()) | 61 | if (net_ratelimit()) |
@@ -67,36 +66,35 @@ static int set_addr(struct sk_buff **pskb, | |||
67 | /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy | 66 | /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy |
68 | * or pull everything in a linear buffer, so we can safely | 67 | * or pull everything in a linear buffer, so we can safely |
69 | * use the skb pointers now */ | 68 | * use the skb pointers now */ |
70 | *data = ((*pskb)->data + ip_hdrlen(*pskb) + | 69 | *data = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); |
71 | sizeof(struct udphdr)); | ||
72 | } | 70 | } |
73 | 71 | ||
74 | return 0; | 72 | return 0; |
75 | } | 73 | } |
76 | 74 | ||
77 | /****************************************************************************/ | 75 | /****************************************************************************/ |
78 | static int set_h225_addr(struct sk_buff **pskb, | 76 | static int set_h225_addr(struct sk_buff *skb, |
79 | unsigned char **data, int dataoff, | 77 | unsigned char **data, int dataoff, |
80 | TransportAddress *taddr, | 78 | TransportAddress *taddr, |
81 | union nf_conntrack_address *addr, __be16 port) | 79 | union nf_conntrack_address *addr, __be16 port) |
82 | { | 80 | { |
83 | return set_addr(pskb, data, dataoff, taddr->ipAddress.ip, | 81 | return set_addr(skb, data, dataoff, taddr->ipAddress.ip, |
84 | addr->ip, port); | 82 | addr->ip, port); |
85 | } | 83 | } |
86 | 84 | ||
87 | /****************************************************************************/ | 85 | /****************************************************************************/ |
88 | static int set_h245_addr(struct sk_buff **pskb, | 86 | static int set_h245_addr(struct sk_buff *skb, |
89 | unsigned char **data, int dataoff, | 87 | unsigned char **data, int dataoff, |
90 | H245_TransportAddress *taddr, | 88 | H245_TransportAddress *taddr, |
91 | union nf_conntrack_address *addr, __be16 port) | 89 | union nf_conntrack_address *addr, __be16 port) |
92 | { | 90 | { |
93 | return set_addr(pskb, data, dataoff, | 91 | return set_addr(skb, data, dataoff, |
94 | taddr->unicastAddress.iPAddress.network, | 92 | taddr->unicastAddress.iPAddress.network, |
95 | addr->ip, port); | 93 | addr->ip, port); |
96 | } | 94 | } |
97 | 95 | ||
98 | /****************************************************************************/ | 96 | /****************************************************************************/ |
99 | static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | 97 | static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, |
100 | enum ip_conntrack_info ctinfo, | 98 | enum ip_conntrack_info ctinfo, |
101 | unsigned char **data, | 99 | unsigned char **data, |
102 | TransportAddress *taddr, int count) | 100 | TransportAddress *taddr, int count) |
@@ -125,7 +123,7 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
125 | NIPQUAD(addr.ip), port, | 123 | NIPQUAD(addr.ip), port, |
126 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | 124 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), |
127 | info->sig_port[!dir]); | 125 | info->sig_port[!dir]); |
128 | return set_h225_addr(pskb, data, 0, &taddr[i], | 126 | return set_h225_addr(skb, data, 0, &taddr[i], |
129 | &ct->tuplehash[!dir]. | 127 | &ct->tuplehash[!dir]. |
130 | tuple.dst.u3, | 128 | tuple.dst.u3, |
131 | info->sig_port[!dir]); | 129 | info->sig_port[!dir]); |
@@ -137,7 +135,7 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
137 | NIPQUAD(addr.ip), port, | 135 | NIPQUAD(addr.ip), port, |
138 | NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip), | 136 | NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip), |
139 | info->sig_port[!dir]); | 137 | info->sig_port[!dir]); |
140 | return set_h225_addr(pskb, data, 0, &taddr[i], | 138 | return set_h225_addr(skb, data, 0, &taddr[i], |
141 | &ct->tuplehash[!dir]. | 139 | &ct->tuplehash[!dir]. |
142 | tuple.src.u3, | 140 | tuple.src.u3, |
143 | info->sig_port[!dir]); | 141 | info->sig_port[!dir]); |
@@ -149,7 +147,7 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
149 | } | 147 | } |
150 | 148 | ||
151 | /****************************************************************************/ | 149 | /****************************************************************************/ |
152 | static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct, | 150 | static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, |
153 | enum ip_conntrack_info ctinfo, | 151 | enum ip_conntrack_info ctinfo, |
154 | unsigned char **data, | 152 | unsigned char **data, |
155 | TransportAddress *taddr, int count) | 153 | TransportAddress *taddr, int count) |
@@ -168,7 +166,7 @@ static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
168 | NIPQUAD(addr.ip), ntohs(port), | 166 | NIPQUAD(addr.ip), ntohs(port), |
169 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | 167 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), |
170 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); | 168 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); |
171 | return set_h225_addr(pskb, data, 0, &taddr[i], | 169 | return set_h225_addr(skb, data, 0, &taddr[i], |
172 | &ct->tuplehash[!dir].tuple.dst.u3, | 170 | &ct->tuplehash[!dir].tuple.dst.u3, |
173 | ct->tuplehash[!dir].tuple. | 171 | ct->tuplehash[!dir].tuple. |
174 | dst.u.udp.port); | 172 | dst.u.udp.port); |
@@ -179,7 +177,7 @@ static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
179 | } | 177 | } |
180 | 178 | ||
181 | /****************************************************************************/ | 179 | /****************************************************************************/ |
182 | static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | 180 | static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, |
183 | enum ip_conntrack_info ctinfo, | 181 | enum ip_conntrack_info ctinfo, |
184 | unsigned char **data, int dataoff, | 182 | unsigned char **data, int dataoff, |
185 | H245_TransportAddress *taddr, | 183 | H245_TransportAddress *taddr, |
@@ -244,7 +242,7 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
244 | } | 242 | } |
245 | 243 | ||
246 | /* Modify signal */ | 244 | /* Modify signal */ |
247 | if (set_h245_addr(pskb, data, dataoff, taddr, | 245 | if (set_h245_addr(skb, data, dataoff, taddr, |
248 | &ct->tuplehash[!dir].tuple.dst.u3, | 246 | &ct->tuplehash[!dir].tuple.dst.u3, |
249 | htons((port & htons(1)) ? nated_port + 1 : | 247 | htons((port & htons(1)) ? nated_port + 1 : |
250 | nated_port)) == 0) { | 248 | nated_port)) == 0) { |
@@ -273,7 +271,7 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
273 | } | 271 | } |
274 | 272 | ||
275 | /****************************************************************************/ | 273 | /****************************************************************************/ |
276 | static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | 274 | static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, |
277 | enum ip_conntrack_info ctinfo, | 275 | enum ip_conntrack_info ctinfo, |
278 | unsigned char **data, int dataoff, | 276 | unsigned char **data, int dataoff, |
279 | H245_TransportAddress *taddr, __be16 port, | 277 | H245_TransportAddress *taddr, __be16 port, |
@@ -301,7 +299,7 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | |||
301 | } | 299 | } |
302 | 300 | ||
303 | /* Modify signal */ | 301 | /* Modify signal */ |
304 | if (set_h245_addr(pskb, data, dataoff, taddr, | 302 | if (set_h245_addr(skb, data, dataoff, taddr, |
305 | &ct->tuplehash[!dir].tuple.dst.u3, | 303 | &ct->tuplehash[!dir].tuple.dst.u3, |
306 | htons(nated_port)) < 0) { | 304 | htons(nated_port)) < 0) { |
307 | nf_ct_unexpect_related(exp); | 305 | nf_ct_unexpect_related(exp); |
@@ -318,7 +316,7 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | |||
318 | } | 316 | } |
319 | 317 | ||
320 | /****************************************************************************/ | 318 | /****************************************************************************/ |
321 | static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, | 319 | static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, |
322 | enum ip_conntrack_info ctinfo, | 320 | enum ip_conntrack_info ctinfo, |
323 | unsigned char **data, int dataoff, | 321 | unsigned char **data, int dataoff, |
324 | TransportAddress *taddr, __be16 port, | 322 | TransportAddress *taddr, __be16 port, |
@@ -351,7 +349,7 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, | |||
351 | } | 349 | } |
352 | 350 | ||
353 | /* Modify signal */ | 351 | /* Modify signal */ |
354 | if (set_h225_addr(pskb, data, dataoff, taddr, | 352 | if (set_h225_addr(skb, data, dataoff, taddr, |
355 | &ct->tuplehash[!dir].tuple.dst.u3, | 353 | &ct->tuplehash[!dir].tuple.dst.u3, |
356 | htons(nated_port)) == 0) { | 354 | htons(nated_port)) == 0) { |
357 | /* Save ports */ | 355 | /* Save ports */ |
@@ -406,7 +404,7 @@ static void ip_nat_q931_expect(struct nf_conn *new, | |||
406 | } | 404 | } |
407 | 405 | ||
408 | /****************************************************************************/ | 406 | /****************************************************************************/ |
409 | static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | 407 | static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, |
410 | enum ip_conntrack_info ctinfo, | 408 | enum ip_conntrack_info ctinfo, |
411 | unsigned char **data, TransportAddress *taddr, int idx, | 409 | unsigned char **data, TransportAddress *taddr, int idx, |
412 | __be16 port, struct nf_conntrack_expect *exp) | 410 | __be16 port, struct nf_conntrack_expect *exp) |
@@ -439,7 +437,7 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
439 | } | 437 | } |
440 | 438 | ||
441 | /* Modify signal */ | 439 | /* Modify signal */ |
442 | if (set_h225_addr(pskb, data, 0, &taddr[idx], | 440 | if (set_h225_addr(skb, data, 0, &taddr[idx], |
443 | &ct->tuplehash[!dir].tuple.dst.u3, | 441 | &ct->tuplehash[!dir].tuple.dst.u3, |
444 | htons(nated_port)) == 0) { | 442 | htons(nated_port)) == 0) { |
445 | /* Save ports */ | 443 | /* Save ports */ |
@@ -450,7 +448,7 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
450 | if (idx > 0 && | 448 | if (idx > 0 && |
451 | get_h225_addr(ct, *data, &taddr[0], &addr, &port) && | 449 | get_h225_addr(ct, *data, &taddr[0], &addr, &port) && |
452 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { | 450 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { |
453 | set_h225_addr(pskb, data, 0, &taddr[0], | 451 | set_h225_addr(skb, data, 0, &taddr[0], |
454 | &ct->tuplehash[!dir].tuple.dst.u3, | 452 | &ct->tuplehash[!dir].tuple.dst.u3, |
455 | info->sig_port[!dir]); | 453 | info->sig_port[!dir]); |
456 | } | 454 | } |
@@ -495,7 +493,7 @@ static void ip_nat_callforwarding_expect(struct nf_conn *new, | |||
495 | } | 493 | } |
496 | 494 | ||
497 | /****************************************************************************/ | 495 | /****************************************************************************/ |
498 | static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct, | 496 | static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, |
499 | enum ip_conntrack_info ctinfo, | 497 | enum ip_conntrack_info ctinfo, |
500 | unsigned char **data, int dataoff, | 498 | unsigned char **data, int dataoff, |
501 | TransportAddress *taddr, __be16 port, | 499 | TransportAddress *taddr, __be16 port, |
@@ -525,7 +523,7 @@ static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct, | |||
525 | } | 523 | } |
526 | 524 | ||
527 | /* Modify signal */ | 525 | /* Modify signal */ |
528 | if (!set_h225_addr(pskb, data, dataoff, taddr, | 526 | if (!set_h225_addr(skb, data, dataoff, taddr, |
529 | &ct->tuplehash[!dir].tuple.dst.u3, | 527 | &ct->tuplehash[!dir].tuple.dst.u3, |
530 | htons(nated_port)) == 0) { | 528 | htons(nated_port)) == 0) { |
531 | nf_ct_unexpect_related(exp); | 529 | nf_ct_unexpect_related(exp); |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 93d8a0a8f035..8718da00ef2a 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -111,22 +111,14 @@ static void mangle_contents(struct sk_buff *skb, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | /* Unusual, but possible case. */ | 113 | /* Unusual, but possible case. */ |
114 | static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) | 114 | static int enlarge_skb(struct sk_buff *skb, unsigned int extra) |
115 | { | 115 | { |
116 | struct sk_buff *nskb; | 116 | if (skb->len + extra > 65535) |
117 | |||
118 | if ((*pskb)->len + extra > 65535) | ||
119 | return 0; | 117 | return 0; |
120 | 118 | ||
121 | nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC); | 119 | if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC)) |
122 | if (!nskb) | ||
123 | return 0; | 120 | return 0; |
124 | 121 | ||
125 | /* Transfer socket to new skb. */ | ||
126 | if ((*pskb)->sk) | ||
127 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
128 | kfree_skb(*pskb); | ||
129 | *pskb = nskb; | ||
130 | return 1; | 122 | return 1; |
131 | } | 123 | } |
132 | 124 | ||
@@ -139,7 +131,7 @@ static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) | |||
139 | * | 131 | * |
140 | * */ | 132 | * */ |
141 | int | 133 | int |
142 | nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | 134 | nf_nat_mangle_tcp_packet(struct sk_buff *skb, |
143 | struct nf_conn *ct, | 135 | struct nf_conn *ct, |
144 | enum ip_conntrack_info ctinfo, | 136 | enum ip_conntrack_info ctinfo, |
145 | unsigned int match_offset, | 137 | unsigned int match_offset, |
@@ -147,37 +139,37 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
147 | const char *rep_buffer, | 139 | const char *rep_buffer, |
148 | unsigned int rep_len) | 140 | unsigned int rep_len) |
149 | { | 141 | { |
150 | struct rtable *rt = (struct rtable *)(*pskb)->dst; | 142 | struct rtable *rt = (struct rtable *)skb->dst; |
151 | struct iphdr *iph; | 143 | struct iphdr *iph; |
152 | struct tcphdr *tcph; | 144 | struct tcphdr *tcph; |
153 | int oldlen, datalen; | 145 | int oldlen, datalen; |
154 | 146 | ||
155 | if (!skb_make_writable(pskb, (*pskb)->len)) | 147 | if (!skb_make_writable(skb, skb->len)) |
156 | return 0; | 148 | return 0; |
157 | 149 | ||
158 | if (rep_len > match_len && | 150 | if (rep_len > match_len && |
159 | rep_len - match_len > skb_tailroom(*pskb) && | 151 | rep_len - match_len > skb_tailroom(skb) && |
160 | !enlarge_skb(pskb, rep_len - match_len)) | 152 | !enlarge_skb(skb, rep_len - match_len)) |
161 | return 0; | 153 | return 0; |
162 | 154 | ||
163 | SKB_LINEAR_ASSERT(*pskb); | 155 | SKB_LINEAR_ASSERT(skb); |
164 | 156 | ||
165 | iph = ip_hdr(*pskb); | 157 | iph = ip_hdr(skb); |
166 | tcph = (void *)iph + iph->ihl*4; | 158 | tcph = (void *)iph + iph->ihl*4; |
167 | 159 | ||
168 | oldlen = (*pskb)->len - iph->ihl*4; | 160 | oldlen = skb->len - iph->ihl*4; |
169 | mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4, | 161 | mangle_contents(skb, iph->ihl*4 + tcph->doff*4, |
170 | match_offset, match_len, rep_buffer, rep_len); | 162 | match_offset, match_len, rep_buffer, rep_len); |
171 | 163 | ||
172 | datalen = (*pskb)->len - iph->ihl*4; | 164 | datalen = skb->len - iph->ihl*4; |
173 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 165 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
174 | if (!(rt->rt_flags & RTCF_LOCAL) && | 166 | if (!(rt->rt_flags & RTCF_LOCAL) && |
175 | (*pskb)->dev->features & NETIF_F_V4_CSUM) { | 167 | skb->dev->features & NETIF_F_V4_CSUM) { |
176 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; | 168 | skb->ip_summed = CHECKSUM_PARTIAL; |
177 | (*pskb)->csum_start = skb_headroom(*pskb) + | 169 | skb->csum_start = skb_headroom(skb) + |
178 | skb_network_offset(*pskb) + | 170 | skb_network_offset(skb) + |
179 | iph->ihl * 4; | 171 | iph->ihl * 4; |
180 | (*pskb)->csum_offset = offsetof(struct tcphdr, check); | 172 | skb->csum_offset = offsetof(struct tcphdr, check); |
181 | tcph->check = ~tcp_v4_check(datalen, | 173 | tcph->check = ~tcp_v4_check(datalen, |
182 | iph->saddr, iph->daddr, 0); | 174 | iph->saddr, iph->daddr, 0); |
183 | } else { | 175 | } else { |
@@ -188,7 +180,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
188 | datalen, 0)); | 180 | datalen, 0)); |
189 | } | 181 | } |
190 | } else | 182 | } else |
191 | nf_proto_csum_replace2(&tcph->check, *pskb, | 183 | nf_proto_csum_replace2(&tcph->check, skb, |
192 | htons(oldlen), htons(datalen), 1); | 184 | htons(oldlen), htons(datalen), 1); |
193 | 185 | ||
194 | if (rep_len != match_len) { | 186 | if (rep_len != match_len) { |
@@ -197,7 +189,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
197 | (int)rep_len - (int)match_len, | 189 | (int)rep_len - (int)match_len, |
198 | ct, ctinfo); | 190 | ct, ctinfo); |
199 | /* Tell TCP window tracking about seq change */ | 191 | /* Tell TCP window tracking about seq change */ |
200 | nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), | 192 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), |
201 | ct, CTINFO2DIR(ctinfo)); | 193 | ct, CTINFO2DIR(ctinfo)); |
202 | } | 194 | } |
203 | return 1; | 195 | return 1; |
@@ -215,7 +207,7 @@ EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); | |||
215 | * should be fairly easy to do. | 207 | * should be fairly easy to do. |
216 | */ | 208 | */ |
217 | int | 209 | int |
218 | nf_nat_mangle_udp_packet(struct sk_buff **pskb, | 210 | nf_nat_mangle_udp_packet(struct sk_buff *skb, |
219 | struct nf_conn *ct, | 211 | struct nf_conn *ct, |
220 | enum ip_conntrack_info ctinfo, | 212 | enum ip_conntrack_info ctinfo, |
221 | unsigned int match_offset, | 213 | unsigned int match_offset, |
@@ -223,48 +215,48 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
223 | const char *rep_buffer, | 215 | const char *rep_buffer, |
224 | unsigned int rep_len) | 216 | unsigned int rep_len) |
225 | { | 217 | { |
226 | struct rtable *rt = (struct rtable *)(*pskb)->dst; | 218 | struct rtable *rt = (struct rtable *)skb->dst; |
227 | struct iphdr *iph; | 219 | struct iphdr *iph; |
228 | struct udphdr *udph; | 220 | struct udphdr *udph; |
229 | int datalen, oldlen; | 221 | int datalen, oldlen; |
230 | 222 | ||
231 | /* UDP helpers might accidentally mangle the wrong packet */ | 223 | /* UDP helpers might accidentally mangle the wrong packet */ |
232 | iph = ip_hdr(*pskb); | 224 | iph = ip_hdr(skb); |
233 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + | 225 | if (skb->len < iph->ihl*4 + sizeof(*udph) + |
234 | match_offset + match_len) | 226 | match_offset + match_len) |
235 | return 0; | 227 | return 0; |
236 | 228 | ||
237 | if (!skb_make_writable(pskb, (*pskb)->len)) | 229 | if (!skb_make_writable(skb, skb->len)) |
238 | return 0; | 230 | return 0; |
239 | 231 | ||
240 | if (rep_len > match_len && | 232 | if (rep_len > match_len && |
241 | rep_len - match_len > skb_tailroom(*pskb) && | 233 | rep_len - match_len > skb_tailroom(skb) && |
242 | !enlarge_skb(pskb, rep_len - match_len)) | 234 | !enlarge_skb(skb, rep_len - match_len)) |
243 | return 0; | 235 | return 0; |
244 | 236 | ||
245 | iph = ip_hdr(*pskb); | 237 | iph = ip_hdr(skb); |
246 | udph = (void *)iph + iph->ihl*4; | 238 | udph = (void *)iph + iph->ihl*4; |
247 | 239 | ||
248 | oldlen = (*pskb)->len - iph->ihl*4; | 240 | oldlen = skb->len - iph->ihl*4; |
249 | mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph), | 241 | mangle_contents(skb, iph->ihl*4 + sizeof(*udph), |
250 | match_offset, match_len, rep_buffer, rep_len); | 242 | match_offset, match_len, rep_buffer, rep_len); |
251 | 243 | ||
252 | /* update the length of the UDP packet */ | 244 | /* update the length of the UDP packet */ |
253 | datalen = (*pskb)->len - iph->ihl*4; | 245 | datalen = skb->len - iph->ihl*4; |
254 | udph->len = htons(datalen); | 246 | udph->len = htons(datalen); |
255 | 247 | ||
256 | /* fix udp checksum if udp checksum was previously calculated */ | 248 | /* fix udp checksum if udp checksum was previously calculated */ |
257 | if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL) | 249 | if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL) |
258 | return 1; | 250 | return 1; |
259 | 251 | ||
260 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 252 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
261 | if (!(rt->rt_flags & RTCF_LOCAL) && | 253 | if (!(rt->rt_flags & RTCF_LOCAL) && |
262 | (*pskb)->dev->features & NETIF_F_V4_CSUM) { | 254 | skb->dev->features & NETIF_F_V4_CSUM) { |
263 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; | 255 | skb->ip_summed = CHECKSUM_PARTIAL; |
264 | (*pskb)->csum_start = skb_headroom(*pskb) + | 256 | skb->csum_start = skb_headroom(skb) + |
265 | skb_network_offset(*pskb) + | 257 | skb_network_offset(skb) + |
266 | iph->ihl * 4; | 258 | iph->ihl * 4; |
267 | (*pskb)->csum_offset = offsetof(struct udphdr, check); | 259 | skb->csum_offset = offsetof(struct udphdr, check); |
268 | udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | 260 | udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, |
269 | datalen, IPPROTO_UDP, | 261 | datalen, IPPROTO_UDP, |
270 | 0); | 262 | 0); |
@@ -278,7 +270,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
278 | udph->check = CSUM_MANGLED_0; | 270 | udph->check = CSUM_MANGLED_0; |
279 | } | 271 | } |
280 | } else | 272 | } else |
281 | nf_proto_csum_replace2(&udph->check, *pskb, | 273 | nf_proto_csum_replace2(&udph->check, skb, |
282 | htons(oldlen), htons(datalen), 1); | 274 | htons(oldlen), htons(datalen), 1); |
283 | 275 | ||
284 | return 1; | 276 | return 1; |
@@ -330,7 +322,7 @@ sack_adjust(struct sk_buff *skb, | |||
330 | 322 | ||
331 | /* TCP SACK sequence number adjustment */ | 323 | /* TCP SACK sequence number adjustment */ |
332 | static inline unsigned int | 324 | static inline unsigned int |
333 | nf_nat_sack_adjust(struct sk_buff **pskb, | 325 | nf_nat_sack_adjust(struct sk_buff *skb, |
334 | struct tcphdr *tcph, | 326 | struct tcphdr *tcph, |
335 | struct nf_conn *ct, | 327 | struct nf_conn *ct, |
336 | enum ip_conntrack_info ctinfo) | 328 | enum ip_conntrack_info ctinfo) |
@@ -338,17 +330,17 @@ nf_nat_sack_adjust(struct sk_buff **pskb, | |||
338 | unsigned int dir, optoff, optend; | 330 | unsigned int dir, optoff, optend; |
339 | struct nf_conn_nat *nat = nfct_nat(ct); | 331 | struct nf_conn_nat *nat = nfct_nat(ct); |
340 | 332 | ||
341 | optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr); | 333 | optoff = ip_hdrlen(skb) + sizeof(struct tcphdr); |
342 | optend = ip_hdrlen(*pskb) + tcph->doff * 4; | 334 | optend = ip_hdrlen(skb) + tcph->doff * 4; |
343 | 335 | ||
344 | if (!skb_make_writable(pskb, optend)) | 336 | if (!skb_make_writable(skb, optend)) |
345 | return 0; | 337 | return 0; |
346 | 338 | ||
347 | dir = CTINFO2DIR(ctinfo); | 339 | dir = CTINFO2DIR(ctinfo); |
348 | 340 | ||
349 | while (optoff < optend) { | 341 | while (optoff < optend) { |
350 | /* Usually: option, length. */ | 342 | /* Usually: option, length. */ |
351 | unsigned char *op = (*pskb)->data + optoff; | 343 | unsigned char *op = skb->data + optoff; |
352 | 344 | ||
353 | switch (op[0]) { | 345 | switch (op[0]) { |
354 | case TCPOPT_EOL: | 346 | case TCPOPT_EOL: |
@@ -365,7 +357,7 @@ nf_nat_sack_adjust(struct sk_buff **pskb, | |||
365 | if (op[0] == TCPOPT_SACK && | 357 | if (op[0] == TCPOPT_SACK && |
366 | op[1] >= 2+TCPOLEN_SACK_PERBLOCK && | 358 | op[1] >= 2+TCPOLEN_SACK_PERBLOCK && |
367 | ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) | 359 | ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) |
368 | sack_adjust(*pskb, tcph, optoff+2, | 360 | sack_adjust(skb, tcph, optoff+2, |
369 | optoff+op[1], &nat->seq[!dir]); | 361 | optoff+op[1], &nat->seq[!dir]); |
370 | optoff += op[1]; | 362 | optoff += op[1]; |
371 | } | 363 | } |
@@ -375,7 +367,7 @@ nf_nat_sack_adjust(struct sk_buff **pskb, | |||
375 | 367 | ||
376 | /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ | 368 | /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ |
377 | int | 369 | int |
378 | nf_nat_seq_adjust(struct sk_buff **pskb, | 370 | nf_nat_seq_adjust(struct sk_buff *skb, |
379 | struct nf_conn *ct, | 371 | struct nf_conn *ct, |
380 | enum ip_conntrack_info ctinfo) | 372 | enum ip_conntrack_info ctinfo) |
381 | { | 373 | { |
@@ -390,10 +382,10 @@ nf_nat_seq_adjust(struct sk_buff **pskb, | |||
390 | this_way = &nat->seq[dir]; | 382 | this_way = &nat->seq[dir]; |
391 | other_way = &nat->seq[!dir]; | 383 | other_way = &nat->seq[!dir]; |
392 | 384 | ||
393 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) | 385 | if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph))) |
394 | return 0; | 386 | return 0; |
395 | 387 | ||
396 | tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb); | 388 | tcph = (void *)skb->data + ip_hdrlen(skb); |
397 | if (after(ntohl(tcph->seq), this_way->correction_pos)) | 389 | if (after(ntohl(tcph->seq), this_way->correction_pos)) |
398 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); | 390 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); |
399 | else | 391 | else |
@@ -405,8 +397,8 @@ nf_nat_seq_adjust(struct sk_buff **pskb, | |||
405 | else | 397 | else |
406 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); | 398 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); |
407 | 399 | ||
408 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); | 400 | nf_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); |
409 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); | 401 | nf_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); |
410 | 402 | ||
411 | pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", | 403 | pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", |
412 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), | 404 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), |
@@ -415,10 +407,10 @@ nf_nat_seq_adjust(struct sk_buff **pskb, | |||
415 | tcph->seq = newseq; | 407 | tcph->seq = newseq; |
416 | tcph->ack_seq = newack; | 408 | tcph->ack_seq = newack; |
417 | 409 | ||
418 | if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo)) | 410 | if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo)) |
419 | return 0; | 411 | return 0; |
420 | 412 | ||
421 | nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, dir); | 413 | nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir); |
422 | 414 | ||
423 | return 1; | 415 | return 1; |
424 | } | 416 | } |
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index bcf274bba602..766e2c16c6b9 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c | |||
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("IRC (DCC) NAT helper"); | |||
27 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
28 | MODULE_ALIAS("ip_nat_irc"); | 28 | MODULE_ALIAS("ip_nat_irc"); |
29 | 29 | ||
30 | static unsigned int help(struct sk_buff **pskb, | 30 | static unsigned int help(struct sk_buff *skb, |
31 | enum ip_conntrack_info ctinfo, | 31 | enum ip_conntrack_info ctinfo, |
32 | unsigned int matchoff, | 32 | unsigned int matchoff, |
33 | unsigned int matchlen, | 33 | unsigned int matchlen, |
@@ -58,7 +58,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
58 | pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", | 58 | pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", |
59 | buffer, NIPQUAD(ip), port); | 59 | buffer, NIPQUAD(ip), port); |
60 | 60 | ||
61 | ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, | 61 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, |
62 | matchoff, matchlen, buffer, | 62 | matchoff, matchlen, buffer, |
63 | strlen(buffer)); | 63 | strlen(buffer)); |
64 | if (ret != NF_ACCEPT) | 64 | if (ret != NF_ACCEPT) |
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 984ec8308b2e..e1385a099079 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
@@ -110,7 +110,7 @@ static void pptp_nat_expected(struct nf_conn *ct, | |||
110 | 110 | ||
111 | /* outbound packets == from PNS to PAC */ | 111 | /* outbound packets == from PNS to PAC */ |
112 | static int | 112 | static int |
113 | pptp_outbound_pkt(struct sk_buff **pskb, | 113 | pptp_outbound_pkt(struct sk_buff *skb, |
114 | struct nf_conn *ct, | 114 | struct nf_conn *ct, |
115 | enum ip_conntrack_info ctinfo, | 115 | enum ip_conntrack_info ctinfo, |
116 | struct PptpControlHeader *ctlh, | 116 | struct PptpControlHeader *ctlh, |
@@ -175,7 +175,7 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
175 | ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); | 175 | ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); |
176 | 176 | ||
177 | /* mangle packet */ | 177 | /* mangle packet */ |
178 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 178 | if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, |
179 | cid_off + sizeof(struct pptp_pkt_hdr) + | 179 | cid_off + sizeof(struct pptp_pkt_hdr) + |
180 | sizeof(struct PptpControlHeader), | 180 | sizeof(struct PptpControlHeader), |
181 | sizeof(new_callid), (char *)&new_callid, | 181 | sizeof(new_callid), (char *)&new_callid, |
@@ -213,7 +213,7 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig, | |||
213 | 213 | ||
214 | /* inbound packets == from PAC to PNS */ | 214 | /* inbound packets == from PAC to PNS */ |
215 | static int | 215 | static int |
216 | pptp_inbound_pkt(struct sk_buff **pskb, | 216 | pptp_inbound_pkt(struct sk_buff *skb, |
217 | struct nf_conn *ct, | 217 | struct nf_conn *ct, |
218 | enum ip_conntrack_info ctinfo, | 218 | enum ip_conntrack_info ctinfo, |
219 | struct PptpControlHeader *ctlh, | 219 | struct PptpControlHeader *ctlh, |
@@ -268,7 +268,7 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
268 | pr_debug("altering peer call id from 0x%04x to 0x%04x\n", | 268 | pr_debug("altering peer call id from 0x%04x to 0x%04x\n", |
269 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); | 269 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); |
270 | 270 | ||
271 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 271 | if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, |
272 | pcid_off + sizeof(struct pptp_pkt_hdr) + | 272 | pcid_off + sizeof(struct pptp_pkt_hdr) + |
273 | sizeof(struct PptpControlHeader), | 273 | sizeof(struct PptpControlHeader), |
274 | sizeof(new_pcid), (char *)&new_pcid, | 274 | sizeof(new_pcid), (char *)&new_pcid, |
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index d562290b1820..b820f9960356 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c | |||
@@ -98,21 +98,21 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
98 | 98 | ||
99 | /* manipulate a GRE packet according to maniptype */ | 99 | /* manipulate a GRE packet according to maniptype */ |
100 | static int | 100 | static int |
101 | gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff, | 101 | gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, |
102 | const struct nf_conntrack_tuple *tuple, | 102 | const struct nf_conntrack_tuple *tuple, |
103 | enum nf_nat_manip_type maniptype) | 103 | enum nf_nat_manip_type maniptype) |
104 | { | 104 | { |
105 | struct gre_hdr *greh; | 105 | struct gre_hdr *greh; |
106 | struct gre_hdr_pptp *pgreh; | 106 | struct gre_hdr_pptp *pgreh; |
107 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | 107 | struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
108 | unsigned int hdroff = iphdroff + iph->ihl * 4; | 108 | unsigned int hdroff = iphdroff + iph->ihl * 4; |
109 | 109 | ||
110 | /* pgreh includes two optional 32bit fields which are not required | 110 | /* pgreh includes two optional 32bit fields which are not required |
111 | * to be there. That's where the magic '8' comes from */ | 111 | * to be there. That's where the magic '8' comes from */ |
112 | if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh) - 8)) | 112 | if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8)) |
113 | return 0; | 113 | return 0; |
114 | 114 | ||
115 | greh = (void *)(*pskb)->data + hdroff; | 115 | greh = (void *)skb->data + hdroff; |
116 | pgreh = (struct gre_hdr_pptp *)greh; | 116 | pgreh = (struct gre_hdr_pptp *)greh; |
117 | 117 | ||
118 | /* we only have destination manip of a packet, since 'source key' | 118 | /* we only have destination manip of a packet, since 'source key' |
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index 898d73771155..b9fc724388fc 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c | |||
@@ -52,20 +52,20 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
52 | } | 52 | } |
53 | 53 | ||
54 | static int | 54 | static int |
55 | icmp_manip_pkt(struct sk_buff **pskb, | 55 | icmp_manip_pkt(struct sk_buff *skb, |
56 | unsigned int iphdroff, | 56 | unsigned int iphdroff, |
57 | const struct nf_conntrack_tuple *tuple, | 57 | const struct nf_conntrack_tuple *tuple, |
58 | enum nf_nat_manip_type maniptype) | 58 | enum nf_nat_manip_type maniptype) |
59 | { | 59 | { |
60 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | 60 | struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
61 | struct icmphdr *hdr; | 61 | struct icmphdr *hdr; |
62 | unsigned int hdroff = iphdroff + iph->ihl*4; | 62 | unsigned int hdroff = iphdroff + iph->ihl*4; |
63 | 63 | ||
64 | if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) | 64 | if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) |
65 | return 0; | 65 | return 0; |
66 | 66 | ||
67 | hdr = (struct icmphdr *)((*pskb)->data + hdroff); | 67 | hdr = (struct icmphdr *)(skb->data + hdroff); |
68 | nf_proto_csum_replace2(&hdr->checksum, *pskb, | 68 | nf_proto_csum_replace2(&hdr->checksum, skb, |
69 | hdr->un.echo.id, tuple->src.u.icmp.id, 0); | 69 | hdr->un.echo.id, tuple->src.u.icmp.id, 0); |
70 | hdr->un.echo.id = tuple->src.u.icmp.id; | 70 | hdr->un.echo.id = tuple->src.u.icmp.id; |
71 | return 1; | 71 | return 1; |
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c index 5bbbb2acdc70..6bab2e184455 100644 --- a/net/ipv4/netfilter/nf_nat_proto_tcp.c +++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c | |||
@@ -88,12 +88,12 @@ tcp_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
88 | } | 88 | } |
89 | 89 | ||
90 | static int | 90 | static int |
91 | tcp_manip_pkt(struct sk_buff **pskb, | 91 | tcp_manip_pkt(struct sk_buff *skb, |
92 | unsigned int iphdroff, | 92 | unsigned int iphdroff, |
93 | const struct nf_conntrack_tuple *tuple, | 93 | const struct nf_conntrack_tuple *tuple, |
94 | enum nf_nat_manip_type maniptype) | 94 | enum nf_nat_manip_type maniptype) |
95 | { | 95 | { |
96 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | 96 | struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
97 | struct tcphdr *hdr; | 97 | struct tcphdr *hdr; |
98 | unsigned int hdroff = iphdroff + iph->ihl*4; | 98 | unsigned int hdroff = iphdroff + iph->ihl*4; |
99 | __be32 oldip, newip; | 99 | __be32 oldip, newip; |
@@ -103,14 +103,14 @@ tcp_manip_pkt(struct sk_buff **pskb, | |||
103 | /* this could be a inner header returned in icmp packet; in such | 103 | /* this could be a inner header returned in icmp packet; in such |
104 | cases we cannot update the checksum field since it is outside of | 104 | cases we cannot update the checksum field since it is outside of |
105 | the 8 bytes of transport layer headers we are guaranteed */ | 105 | the 8 bytes of transport layer headers we are guaranteed */ |
106 | if ((*pskb)->len >= hdroff + sizeof(struct tcphdr)) | 106 | if (skb->len >= hdroff + sizeof(struct tcphdr)) |
107 | hdrsize = sizeof(struct tcphdr); | 107 | hdrsize = sizeof(struct tcphdr); |
108 | 108 | ||
109 | if (!skb_make_writable(pskb, hdroff + hdrsize)) | 109 | if (!skb_make_writable(skb, hdroff + hdrsize)) |
110 | return 0; | 110 | return 0; |
111 | 111 | ||
112 | iph = (struct iphdr *)((*pskb)->data + iphdroff); | 112 | iph = (struct iphdr *)(skb->data + iphdroff); |
113 | hdr = (struct tcphdr *)((*pskb)->data + hdroff); | 113 | hdr = (struct tcphdr *)(skb->data + hdroff); |
114 | 114 | ||
115 | if (maniptype == IP_NAT_MANIP_SRC) { | 115 | if (maniptype == IP_NAT_MANIP_SRC) { |
116 | /* Get rid of src ip and src pt */ | 116 | /* Get rid of src ip and src pt */ |
@@ -132,8 +132,8 @@ tcp_manip_pkt(struct sk_buff **pskb, | |||
132 | if (hdrsize < sizeof(*hdr)) | 132 | if (hdrsize < sizeof(*hdr)) |
133 | return 1; | 133 | return 1; |
134 | 134 | ||
135 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); | 135 | nf_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); |
136 | nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0); | 136 | nf_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); |
137 | return 1; | 137 | return 1; |
138 | } | 138 | } |
139 | 139 | ||
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c index a0af4fd95584..cbf1a61e2908 100644 --- a/net/ipv4/netfilter/nf_nat_proto_udp.c +++ b/net/ipv4/netfilter/nf_nat_proto_udp.c | |||
@@ -86,22 +86,22 @@ udp_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
86 | } | 86 | } |
87 | 87 | ||
88 | static int | 88 | static int |
89 | udp_manip_pkt(struct sk_buff **pskb, | 89 | udp_manip_pkt(struct sk_buff *skb, |
90 | unsigned int iphdroff, | 90 | unsigned int iphdroff, |
91 | const struct nf_conntrack_tuple *tuple, | 91 | const struct nf_conntrack_tuple *tuple, |
92 | enum nf_nat_manip_type maniptype) | 92 | enum nf_nat_manip_type maniptype) |
93 | { | 93 | { |
94 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | 94 | struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
95 | struct udphdr *hdr; | 95 | struct udphdr *hdr; |
96 | unsigned int hdroff = iphdroff + iph->ihl*4; | 96 | unsigned int hdroff = iphdroff + iph->ihl*4; |
97 | __be32 oldip, newip; | 97 | __be32 oldip, newip; |
98 | __be16 *portptr, newport; | 98 | __be16 *portptr, newport; |
99 | 99 | ||
100 | if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) | 100 | if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) |
101 | return 0; | 101 | return 0; |
102 | 102 | ||
103 | iph = (struct iphdr *)((*pskb)->data + iphdroff); | 103 | iph = (struct iphdr *)(skb->data + iphdroff); |
104 | hdr = (struct udphdr *)((*pskb)->data + hdroff); | 104 | hdr = (struct udphdr *)(skb->data + hdroff); |
105 | 105 | ||
106 | if (maniptype == IP_NAT_MANIP_SRC) { | 106 | if (maniptype == IP_NAT_MANIP_SRC) { |
107 | /* Get rid of src ip and src pt */ | 107 | /* Get rid of src ip and src pt */ |
@@ -116,9 +116,9 @@ udp_manip_pkt(struct sk_buff **pskb, | |||
116 | newport = tuple->dst.u.udp.port; | 116 | newport = tuple->dst.u.udp.port; |
117 | portptr = &hdr->dest; | 117 | portptr = &hdr->dest; |
118 | } | 118 | } |
119 | if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) { | 119 | if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) { |
120 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); | 120 | nf_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); |
121 | nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, | 121 | nf_proto_csum_replace2(&hdr->check, skb, *portptr, newport, |
122 | 0); | 122 | 0); |
123 | if (!hdr->check) | 123 | if (!hdr->check) |
124 | hdr->check = CSUM_MANGLED_0; | 124 | hdr->check = CSUM_MANGLED_0; |
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c index f50d0203f9c0..cfd2742e9706 100644 --- a/net/ipv4/netfilter/nf_nat_proto_unknown.c +++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c | |||
@@ -37,7 +37,7 @@ static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
37 | } | 37 | } |
38 | 38 | ||
39 | static int | 39 | static int |
40 | unknown_manip_pkt(struct sk_buff **pskb, | 40 | unknown_manip_pkt(struct sk_buff *skb, |
41 | unsigned int iphdroff, | 41 | unsigned int iphdroff, |
42 | const struct nf_conntrack_tuple *tuple, | 42 | const struct nf_conntrack_tuple *tuple, |
43 | enum nf_nat_manip_type maniptype) | 43 | enum nf_nat_manip_type maniptype) |
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 76ec59ae524d..46b25ab5f78b 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -65,7 +65,7 @@ static struct xt_table nat_table = { | |||
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* Source NAT */ | 67 | /* Source NAT */ |
68 | static unsigned int ipt_snat_target(struct sk_buff **pskb, | 68 | static unsigned int ipt_snat_target(struct sk_buff *skb, |
69 | const struct net_device *in, | 69 | const struct net_device *in, |
70 | const struct net_device *out, | 70 | const struct net_device *out, |
71 | unsigned int hooknum, | 71 | unsigned int hooknum, |
@@ -78,7 +78,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb, | |||
78 | 78 | ||
79 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); | 79 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); |
80 | 80 | ||
81 | ct = nf_ct_get(*pskb, &ctinfo); | 81 | ct = nf_ct_get(skb, &ctinfo); |
82 | 82 | ||
83 | /* Connection must be valid and new. */ | 83 | /* Connection must be valid and new. */ |
84 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || | 84 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || |
@@ -107,7 +107,7 @@ static void warn_if_extra_mangle(__be32 dstip, __be32 srcip) | |||
107 | ip_rt_put(rt); | 107 | ip_rt_put(rt); |
108 | } | 108 | } |
109 | 109 | ||
110 | static unsigned int ipt_dnat_target(struct sk_buff **pskb, | 110 | static unsigned int ipt_dnat_target(struct sk_buff *skb, |
111 | const struct net_device *in, | 111 | const struct net_device *in, |
112 | const struct net_device *out, | 112 | const struct net_device *out, |
113 | unsigned int hooknum, | 113 | unsigned int hooknum, |
@@ -121,14 +121,14 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb, | |||
121 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || | 121 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || |
122 | hooknum == NF_IP_LOCAL_OUT); | 122 | hooknum == NF_IP_LOCAL_OUT); |
123 | 123 | ||
124 | ct = nf_ct_get(*pskb, &ctinfo); | 124 | ct = nf_ct_get(skb, &ctinfo); |
125 | 125 | ||
126 | /* Connection must be valid and new. */ | 126 | /* Connection must be valid and new. */ |
127 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); | 127 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); |
128 | 128 | ||
129 | if (hooknum == NF_IP_LOCAL_OUT && | 129 | if (hooknum == NF_IP_LOCAL_OUT && |
130 | mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) | 130 | mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) |
131 | warn_if_extra_mangle(ip_hdr(*pskb)->daddr, | 131 | warn_if_extra_mangle(ip_hdr(skb)->daddr, |
132 | mr->range[0].min_ip); | 132 | mr->range[0].min_ip); |
133 | 133 | ||
134 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); | 134 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); |
@@ -204,7 +204,7 @@ alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum) | |||
204 | return nf_nat_setup_info(ct, &range, hooknum); | 204 | return nf_nat_setup_info(ct, &range, hooknum); |
205 | } | 205 | } |
206 | 206 | ||
207 | int nf_nat_rule_find(struct sk_buff **pskb, | 207 | int nf_nat_rule_find(struct sk_buff *skb, |
208 | unsigned int hooknum, | 208 | unsigned int hooknum, |
209 | const struct net_device *in, | 209 | const struct net_device *in, |
210 | const struct net_device *out, | 210 | const struct net_device *out, |
@@ -212,7 +212,7 @@ int nf_nat_rule_find(struct sk_buff **pskb, | |||
212 | { | 212 | { |
213 | int ret; | 213 | int ret; |
214 | 214 | ||
215 | ret = ipt_do_table(pskb, hooknum, in, out, &nat_table); | 215 | ret = ipt_do_table(skb, hooknum, in, out, &nat_table); |
216 | 216 | ||
217 | if (ret == NF_ACCEPT) { | 217 | if (ret == NF_ACCEPT) { |
218 | if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) | 218 | if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) |
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index e14d41976c27..ce9edbcc01e3 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -60,7 +60,7 @@ static void addr_map_init(struct nf_conn *ct, struct addr_map *map) | |||
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
63 | static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, | 63 | static int map_sip_addr(struct sk_buff *skb, enum ip_conntrack_info ctinfo, |
64 | struct nf_conn *ct, const char **dptr, size_t dlen, | 64 | struct nf_conn *ct, const char **dptr, size_t dlen, |
65 | enum sip_header_pos pos, struct addr_map *map) | 65 | enum sip_header_pos pos, struct addr_map *map) |
66 | { | 66 | { |
@@ -84,15 +84,15 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, | |||
84 | } else | 84 | } else |
85 | return 1; | 85 | return 1; |
86 | 86 | ||
87 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 87 | if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, |
88 | matchoff, matchlen, addr, addrlen)) | 88 | matchoff, matchlen, addr, addrlen)) |
89 | return 0; | 89 | return 0; |
90 | *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); | 90 | *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); |
91 | return 1; | 91 | return 1; |
92 | 92 | ||
93 | } | 93 | } |
94 | 94 | ||
95 | static unsigned int ip_nat_sip(struct sk_buff **pskb, | 95 | static unsigned int ip_nat_sip(struct sk_buff *skb, |
96 | enum ip_conntrack_info ctinfo, | 96 | enum ip_conntrack_info ctinfo, |
97 | struct nf_conn *ct, | 97 | struct nf_conn *ct, |
98 | const char **dptr) | 98 | const char **dptr) |
@@ -101,8 +101,8 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb, | |||
101 | struct addr_map map; | 101 | struct addr_map map; |
102 | int dataoff, datalen; | 102 | int dataoff, datalen; |
103 | 103 | ||
104 | dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); | 104 | dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); |
105 | datalen = (*pskb)->len - dataoff; | 105 | datalen = skb->len - dataoff; |
106 | if (datalen < sizeof("SIP/2.0") - 1) | 106 | if (datalen < sizeof("SIP/2.0") - 1) |
107 | return NF_ACCEPT; | 107 | return NF_ACCEPT; |
108 | 108 | ||
@@ -121,19 +121,19 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb, | |||
121 | else | 121 | else |
122 | pos = POS_REQ_URI; | 122 | pos = POS_REQ_URI; |
123 | 123 | ||
124 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map)) | 124 | if (!map_sip_addr(skb, ctinfo, ct, dptr, datalen, pos, &map)) |
125 | return NF_DROP; | 125 | return NF_DROP; |
126 | } | 126 | } |
127 | 127 | ||
128 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || | 128 | if (!map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || |
129 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) || | 129 | !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_TO, &map) || |
130 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || | 130 | !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || |
131 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) | 131 | !map_sip_addr(skb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) |
132 | return NF_DROP; | 132 | return NF_DROP; |
133 | return NF_ACCEPT; | 133 | return NF_ACCEPT; |
134 | } | 134 | } |
135 | 135 | ||
136 | static unsigned int mangle_sip_packet(struct sk_buff **pskb, | 136 | static unsigned int mangle_sip_packet(struct sk_buff *skb, |
137 | enum ip_conntrack_info ctinfo, | 137 | enum ip_conntrack_info ctinfo, |
138 | struct nf_conn *ct, | 138 | struct nf_conn *ct, |
139 | const char **dptr, size_t dlen, | 139 | const char **dptr, size_t dlen, |
@@ -145,16 +145,16 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb, | |||
145 | if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) | 145 | if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) |
146 | return 0; | 146 | return 0; |
147 | 147 | ||
148 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 148 | if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, |
149 | matchoff, matchlen, buffer, bufflen)) | 149 | matchoff, matchlen, buffer, bufflen)) |
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | /* We need to reload this. Thanks Patrick. */ | 152 | /* We need to reload this. Thanks Patrick. */ |
153 | *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); | 153 | *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); |
154 | return 1; | 154 | return 1; |
155 | } | 155 | } |
156 | 156 | ||
157 | static int mangle_content_len(struct sk_buff **pskb, | 157 | static int mangle_content_len(struct sk_buff *skb, |
158 | enum ip_conntrack_info ctinfo, | 158 | enum ip_conntrack_info ctinfo, |
159 | struct nf_conn *ct, | 159 | struct nf_conn *ct, |
160 | const char *dptr) | 160 | const char *dptr) |
@@ -163,22 +163,22 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
163 | char buffer[sizeof("65536")]; | 163 | char buffer[sizeof("65536")]; |
164 | int bufflen; | 164 | int bufflen; |
165 | 165 | ||
166 | dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); | 166 | dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); |
167 | 167 | ||
168 | /* Get actual SDP lenght */ | 168 | /* Get actual SDP lenght */ |
169 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | 169 | if (ct_sip_get_info(ct, dptr, skb->len - dataoff, &matchoff, |
170 | &matchlen, POS_SDP_HEADER) > 0) { | 170 | &matchlen, POS_SDP_HEADER) > 0) { |
171 | 171 | ||
172 | /* since ct_sip_get_info() give us a pointer passing 'v=' | 172 | /* since ct_sip_get_info() give us a pointer passing 'v=' |
173 | we need to add 2 bytes in this count. */ | 173 | we need to add 2 bytes in this count. */ |
174 | int c_len = (*pskb)->len - dataoff - matchoff + 2; | 174 | int c_len = skb->len - dataoff - matchoff + 2; |
175 | 175 | ||
176 | /* Now, update SDP length */ | 176 | /* Now, update SDP length */ |
177 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | 177 | if (ct_sip_get_info(ct, dptr, skb->len - dataoff, &matchoff, |
178 | &matchlen, POS_CONTENT) > 0) { | 178 | &matchlen, POS_CONTENT) > 0) { |
179 | 179 | ||
180 | bufflen = sprintf(buffer, "%u", c_len); | 180 | bufflen = sprintf(buffer, "%u", c_len); |
181 | return nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 181 | return nf_nat_mangle_udp_packet(skb, ct, ctinfo, |
182 | matchoff, matchlen, | 182 | matchoff, matchlen, |
183 | buffer, bufflen); | 183 | buffer, bufflen); |
184 | } | 184 | } |
@@ -186,7 +186,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | static unsigned int mangle_sdp(struct sk_buff **pskb, | 189 | static unsigned int mangle_sdp(struct sk_buff *skb, |
190 | enum ip_conntrack_info ctinfo, | 190 | enum ip_conntrack_info ctinfo, |
191 | struct nf_conn *ct, | 191 | struct nf_conn *ct, |
192 | __be32 newip, u_int16_t port, | 192 | __be32 newip, u_int16_t port, |
@@ -195,25 +195,25 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, | |||
195 | char buffer[sizeof("nnn.nnn.nnn.nnn")]; | 195 | char buffer[sizeof("nnn.nnn.nnn.nnn")]; |
196 | unsigned int dataoff, bufflen; | 196 | unsigned int dataoff, bufflen; |
197 | 197 | ||
198 | dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); | 198 | dataoff = ip_hdrlen(skb) + sizeof(struct udphdr); |
199 | 199 | ||
200 | /* Mangle owner and contact info. */ | 200 | /* Mangle owner and contact info. */ |
201 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); | 201 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); |
202 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 202 | if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, |
203 | buffer, bufflen, POS_OWNER_IP4)) | 203 | buffer, bufflen, POS_OWNER_IP4)) |
204 | return 0; | 204 | return 0; |
205 | 205 | ||
206 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 206 | if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, |
207 | buffer, bufflen, POS_CONNECTION_IP4)) | 207 | buffer, bufflen, POS_CONNECTION_IP4)) |
208 | return 0; | 208 | return 0; |
209 | 209 | ||
210 | /* Mangle media port. */ | 210 | /* Mangle media port. */ |
211 | bufflen = sprintf(buffer, "%u", port); | 211 | bufflen = sprintf(buffer, "%u", port); |
212 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 212 | if (!mangle_sip_packet(skb, ctinfo, ct, &dptr, skb->len - dataoff, |
213 | buffer, bufflen, POS_MEDIA)) | 213 | buffer, bufflen, POS_MEDIA)) |
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | return mangle_content_len(pskb, ctinfo, ct, dptr); | 216 | return mangle_content_len(skb, ctinfo, ct, dptr); |
217 | } | 217 | } |
218 | 218 | ||
219 | static void ip_nat_sdp_expect(struct nf_conn *ct, | 219 | static void ip_nat_sdp_expect(struct nf_conn *ct, |
@@ -241,7 +241,7 @@ static void ip_nat_sdp_expect(struct nf_conn *ct, | |||
241 | 241 | ||
242 | /* So, this packet has hit the connection tracking matching code. | 242 | /* So, this packet has hit the connection tracking matching code. |
243 | Mangle it, and change the expectation to match the new version. */ | 243 | Mangle it, and change the expectation to match the new version. */ |
244 | static unsigned int ip_nat_sdp(struct sk_buff **pskb, | 244 | static unsigned int ip_nat_sdp(struct sk_buff *skb, |
245 | enum ip_conntrack_info ctinfo, | 245 | enum ip_conntrack_info ctinfo, |
246 | struct nf_conntrack_expect *exp, | 246 | struct nf_conntrack_expect *exp, |
247 | const char *dptr) | 247 | const char *dptr) |
@@ -277,7 +277,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb, | |||
277 | if (port == 0) | 277 | if (port == 0) |
278 | return NF_DROP; | 278 | return NF_DROP; |
279 | 279 | ||
280 | if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { | 280 | if (!mangle_sdp(skb, ctinfo, ct, newip, port, dptr)) { |
281 | nf_ct_unexpect_related(exp); | 281 | nf_ct_unexpect_related(exp); |
282 | return NF_DROP; | 282 | return NF_DROP; |
283 | } | 283 | } |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 6bfcd3a90f08..03709d6b4b06 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -1188,9 +1188,9 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1188 | */ | 1188 | */ |
1189 | static int snmp_translate(struct nf_conn *ct, | 1189 | static int snmp_translate(struct nf_conn *ct, |
1190 | enum ip_conntrack_info ctinfo, | 1190 | enum ip_conntrack_info ctinfo, |
1191 | struct sk_buff **pskb) | 1191 | struct sk_buff *skb) |
1192 | { | 1192 | { |
1193 | struct iphdr *iph = ip_hdr(*pskb); | 1193 | struct iphdr *iph = ip_hdr(skb); |
1194 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); | 1194 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); |
1195 | u_int16_t udplen = ntohs(udph->len); | 1195 | u_int16_t udplen = ntohs(udph->len); |
1196 | u_int16_t paylen = udplen - sizeof(struct udphdr); | 1196 | u_int16_t paylen = udplen - sizeof(struct udphdr); |
@@ -1225,13 +1225,13 @@ static int snmp_translate(struct nf_conn *ct, | |||
1225 | 1225 | ||
1226 | /* We don't actually set up expectations, just adjust internal IP | 1226 | /* We don't actually set up expectations, just adjust internal IP |
1227 | * addresses if this is being NATted */ | 1227 | * addresses if this is being NATted */ |
1228 | static int help(struct sk_buff **pskb, unsigned int protoff, | 1228 | static int help(struct sk_buff *skb, unsigned int protoff, |
1229 | struct nf_conn *ct, | 1229 | struct nf_conn *ct, |
1230 | enum ip_conntrack_info ctinfo) | 1230 | enum ip_conntrack_info ctinfo) |
1231 | { | 1231 | { |
1232 | int dir = CTINFO2DIR(ctinfo); | 1232 | int dir = CTINFO2DIR(ctinfo); |
1233 | unsigned int ret; | 1233 | unsigned int ret; |
1234 | struct iphdr *iph = ip_hdr(*pskb); | 1234 | struct iphdr *iph = ip_hdr(skb); |
1235 | struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); | 1235 | struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); |
1236 | 1236 | ||
1237 | /* SNMP replies and originating SNMP traps get mangled */ | 1237 | /* SNMP replies and originating SNMP traps get mangled */ |
@@ -1250,7 +1250,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff, | |||
1250 | * enough room for a UDP header. Just verify the UDP length field so we | 1250 | * enough room for a UDP header. Just verify the UDP length field so we |
1251 | * can mess around with the payload. | 1251 | * can mess around with the payload. |
1252 | */ | 1252 | */ |
1253 | if (ntohs(udph->len) != (*pskb)->len - (iph->ihl << 2)) { | 1253 | if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { |
1254 | if (net_ratelimit()) | 1254 | if (net_ratelimit()) |
1255 | printk(KERN_WARNING "SNMP: dropping malformed packet " | 1255 | printk(KERN_WARNING "SNMP: dropping malformed packet " |
1256 | "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", | 1256 | "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", |
@@ -1258,11 +1258,11 @@ static int help(struct sk_buff **pskb, unsigned int protoff, | |||
1258 | return NF_DROP; | 1258 | return NF_DROP; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | if (!skb_make_writable(pskb, (*pskb)->len)) | 1261 | if (!skb_make_writable(skb, skb->len)) |
1262 | return NF_DROP; | 1262 | return NF_DROP; |
1263 | 1263 | ||
1264 | spin_lock_bh(&snmp_lock); | 1264 | spin_lock_bh(&snmp_lock); |
1265 | ret = snmp_translate(ct, ctinfo, pskb); | 1265 | ret = snmp_translate(ct, ctinfo, skb); |
1266 | spin_unlock_bh(&snmp_lock); | 1266 | spin_unlock_bh(&snmp_lock); |
1267 | return ret; | 1267 | return ret; |
1268 | } | 1268 | } |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 46cc99def165..7db76ea9af91 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -67,7 +67,7 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) | |||
67 | 67 | ||
68 | static unsigned int | 68 | static unsigned int |
69 | nf_nat_fn(unsigned int hooknum, | 69 | nf_nat_fn(unsigned int hooknum, |
70 | struct sk_buff **pskb, | 70 | struct sk_buff *skb, |
71 | const struct net_device *in, | 71 | const struct net_device *in, |
72 | const struct net_device *out, | 72 | const struct net_device *out, |
73 | int (*okfn)(struct sk_buff *)) | 73 | int (*okfn)(struct sk_buff *)) |
@@ -80,9 +80,9 @@ nf_nat_fn(unsigned int hooknum, | |||
80 | 80 | ||
81 | /* We never see fragments: conntrack defrags on pre-routing | 81 | /* We never see fragments: conntrack defrags on pre-routing |
82 | and local-out, and nf_nat_out protects post-routing. */ | 82 | and local-out, and nf_nat_out protects post-routing. */ |
83 | NF_CT_ASSERT(!(ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET))); | 83 | NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))); |
84 | 84 | ||
85 | ct = nf_ct_get(*pskb, &ctinfo); | 85 | ct = nf_ct_get(skb, &ctinfo); |
86 | /* Can't track? It's not due to stress, or conntrack would | 86 | /* Can't track? It's not due to stress, or conntrack would |
87 | have dropped it. Hence it's the user's responsibilty to | 87 | have dropped it. Hence it's the user's responsibilty to |
88 | packet filter it out, or implement conntrack/NAT for that | 88 | packet filter it out, or implement conntrack/NAT for that |
@@ -91,10 +91,10 @@ nf_nat_fn(unsigned int hooknum, | |||
91 | /* Exception: ICMP redirect to new connection (not in | 91 | /* Exception: ICMP redirect to new connection (not in |
92 | hash table yet). We must not let this through, in | 92 | hash table yet). We must not let this through, in |
93 | case we're doing NAT to the same network. */ | 93 | case we're doing NAT to the same network. */ |
94 | if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { | 94 | if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { |
95 | struct icmphdr _hdr, *hp; | 95 | struct icmphdr _hdr, *hp; |
96 | 96 | ||
97 | hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), | 97 | hp = skb_header_pointer(skb, ip_hdrlen(skb), |
98 | sizeof(_hdr), &_hdr); | 98 | sizeof(_hdr), &_hdr); |
99 | if (hp != NULL && | 99 | if (hp != NULL && |
100 | hp->type == ICMP_REDIRECT) | 100 | hp->type == ICMP_REDIRECT) |
@@ -119,9 +119,9 @@ nf_nat_fn(unsigned int hooknum, | |||
119 | switch (ctinfo) { | 119 | switch (ctinfo) { |
120 | case IP_CT_RELATED: | 120 | case IP_CT_RELATED: |
121 | case IP_CT_RELATED+IP_CT_IS_REPLY: | 121 | case IP_CT_RELATED+IP_CT_IS_REPLY: |
122 | if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { | 122 | if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { |
123 | if (!nf_nat_icmp_reply_translation(ct, ctinfo, | 123 | if (!nf_nat_icmp_reply_translation(ct, ctinfo, |
124 | hooknum, pskb)) | 124 | hooknum, skb)) |
125 | return NF_DROP; | 125 | return NF_DROP; |
126 | else | 126 | else |
127 | return NF_ACCEPT; | 127 | return NF_ACCEPT; |
@@ -141,7 +141,7 @@ nf_nat_fn(unsigned int hooknum, | |||
141 | /* LOCAL_IN hook doesn't have a chain! */ | 141 | /* LOCAL_IN hook doesn't have a chain! */ |
142 | ret = alloc_null_binding(ct, hooknum); | 142 | ret = alloc_null_binding(ct, hooknum); |
143 | else | 143 | else |
144 | ret = nf_nat_rule_find(pskb, hooknum, in, out, | 144 | ret = nf_nat_rule_find(skb, hooknum, in, out, |
145 | ct); | 145 | ct); |
146 | 146 | ||
147 | if (ret != NF_ACCEPT) { | 147 | if (ret != NF_ACCEPT) { |
@@ -159,31 +159,31 @@ nf_nat_fn(unsigned int hooknum, | |||
159 | ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); | 159 | ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); |
160 | } | 160 | } |
161 | 161 | ||
162 | return nf_nat_packet(ct, ctinfo, hooknum, pskb); | 162 | return nf_nat_packet(ct, ctinfo, hooknum, skb); |
163 | } | 163 | } |
164 | 164 | ||
165 | static unsigned int | 165 | static unsigned int |
166 | nf_nat_in(unsigned int hooknum, | 166 | nf_nat_in(unsigned int hooknum, |
167 | struct sk_buff **pskb, | 167 | struct sk_buff *skb, |
168 | const struct net_device *in, | 168 | const struct net_device *in, |
169 | const struct net_device *out, | 169 | const struct net_device *out, |
170 | int (*okfn)(struct sk_buff *)) | 170 | int (*okfn)(struct sk_buff *)) |
171 | { | 171 | { |
172 | unsigned int ret; | 172 | unsigned int ret; |
173 | __be32 daddr = ip_hdr(*pskb)->daddr; | 173 | __be32 daddr = ip_hdr(skb)->daddr; |
174 | 174 | ||
175 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | 175 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
176 | if (ret != NF_DROP && ret != NF_STOLEN && | 176 | if (ret != NF_DROP && ret != NF_STOLEN && |
177 | daddr != ip_hdr(*pskb)->daddr) { | 177 | daddr != ip_hdr(skb)->daddr) { |
178 | dst_release((*pskb)->dst); | 178 | dst_release(skb->dst); |
179 | (*pskb)->dst = NULL; | 179 | skb->dst = NULL; |
180 | } | 180 | } |
181 | return ret; | 181 | return ret; |
182 | } | 182 | } |
183 | 183 | ||
184 | static unsigned int | 184 | static unsigned int |
185 | nf_nat_out(unsigned int hooknum, | 185 | nf_nat_out(unsigned int hooknum, |
186 | struct sk_buff **pskb, | 186 | struct sk_buff *skb, |
187 | const struct net_device *in, | 187 | const struct net_device *in, |
188 | const struct net_device *out, | 188 | const struct net_device *out, |
189 | int (*okfn)(struct sk_buff *)) | 189 | int (*okfn)(struct sk_buff *)) |
@@ -195,14 +195,14 @@ nf_nat_out(unsigned int hooknum, | |||
195 | unsigned int ret; | 195 | unsigned int ret; |
196 | 196 | ||
197 | /* root is playing with raw sockets. */ | 197 | /* root is playing with raw sockets. */ |
198 | if ((*pskb)->len < sizeof(struct iphdr) || | 198 | if (skb->len < sizeof(struct iphdr) || |
199 | ip_hdrlen(*pskb) < sizeof(struct iphdr)) | 199 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
200 | return NF_ACCEPT; | 200 | return NF_ACCEPT; |
201 | 201 | ||
202 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | 202 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
203 | #ifdef CONFIG_XFRM | 203 | #ifdef CONFIG_XFRM |
204 | if (ret != NF_DROP && ret != NF_STOLEN && | 204 | if (ret != NF_DROP && ret != NF_STOLEN && |
205 | (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) { | 205 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { |
206 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 206 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
207 | 207 | ||
208 | if (ct->tuplehash[dir].tuple.src.u3.ip != | 208 | if (ct->tuplehash[dir].tuple.src.u3.ip != |
@@ -210,7 +210,7 @@ nf_nat_out(unsigned int hooknum, | |||
210 | || ct->tuplehash[dir].tuple.src.u.all != | 210 | || ct->tuplehash[dir].tuple.src.u.all != |
211 | ct->tuplehash[!dir].tuple.dst.u.all | 211 | ct->tuplehash[!dir].tuple.dst.u.all |
212 | ) | 212 | ) |
213 | return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; | 213 | return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP; |
214 | } | 214 | } |
215 | #endif | 215 | #endif |
216 | return ret; | 216 | return ret; |
@@ -218,7 +218,7 @@ nf_nat_out(unsigned int hooknum, | |||
218 | 218 | ||
219 | static unsigned int | 219 | static unsigned int |
220 | nf_nat_local_fn(unsigned int hooknum, | 220 | nf_nat_local_fn(unsigned int hooknum, |
221 | struct sk_buff **pskb, | 221 | struct sk_buff *skb, |
222 | const struct net_device *in, | 222 | const struct net_device *in, |
223 | const struct net_device *out, | 223 | const struct net_device *out, |
224 | int (*okfn)(struct sk_buff *)) | 224 | int (*okfn)(struct sk_buff *)) |
@@ -228,24 +228,24 @@ nf_nat_local_fn(unsigned int hooknum, | |||
228 | unsigned int ret; | 228 | unsigned int ret; |
229 | 229 | ||
230 | /* root is playing with raw sockets. */ | 230 | /* root is playing with raw sockets. */ |
231 | if ((*pskb)->len < sizeof(struct iphdr) || | 231 | if (skb->len < sizeof(struct iphdr) || |
232 | ip_hdrlen(*pskb) < sizeof(struct iphdr)) | 232 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
233 | return NF_ACCEPT; | 233 | return NF_ACCEPT; |
234 | 234 | ||
235 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | 235 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
236 | if (ret != NF_DROP && ret != NF_STOLEN && | 236 | if (ret != NF_DROP && ret != NF_STOLEN && |
237 | (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) { | 237 | (ct = nf_ct_get(skb, &ctinfo)) != NULL) { |
238 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 238 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
239 | 239 | ||
240 | if (ct->tuplehash[dir].tuple.dst.u3.ip != | 240 | if (ct->tuplehash[dir].tuple.dst.u3.ip != |
241 | ct->tuplehash[!dir].tuple.src.u3.ip) { | 241 | ct->tuplehash[!dir].tuple.src.u3.ip) { |
242 | if (ip_route_me_harder(pskb, RTN_UNSPEC)) | 242 | if (ip_route_me_harder(skb, RTN_UNSPEC)) |
243 | ret = NF_DROP; | 243 | ret = NF_DROP; |
244 | } | 244 | } |
245 | #ifdef CONFIG_XFRM | 245 | #ifdef CONFIG_XFRM |
246 | else if (ct->tuplehash[dir].tuple.dst.u.all != | 246 | else if (ct->tuplehash[dir].tuple.dst.u.all != |
247 | ct->tuplehash[!dir].tuple.src.u.all) | 247 | ct->tuplehash[!dir].tuple.src.u.all) |
248 | if (ip_xfrm_me_harder(pskb)) | 248 | if (ip_xfrm_me_harder(skb)) |
249 | ret = NF_DROP; | 249 | ret = NF_DROP; |
250 | #endif | 250 | #endif |
251 | } | 251 | } |
@@ -254,7 +254,7 @@ nf_nat_local_fn(unsigned int hooknum, | |||
254 | 254 | ||
255 | static unsigned int | 255 | static unsigned int |
256 | nf_nat_adjust(unsigned int hooknum, | 256 | nf_nat_adjust(unsigned int hooknum, |
257 | struct sk_buff **pskb, | 257 | struct sk_buff *skb, |
258 | const struct net_device *in, | 258 | const struct net_device *in, |
259 | const struct net_device *out, | 259 | const struct net_device *out, |
260 | int (*okfn)(struct sk_buff *)) | 260 | int (*okfn)(struct sk_buff *)) |
@@ -262,10 +262,10 @@ nf_nat_adjust(unsigned int hooknum, | |||
262 | struct nf_conn *ct; | 262 | struct nf_conn *ct; |
263 | enum ip_conntrack_info ctinfo; | 263 | enum ip_conntrack_info ctinfo; |
264 | 264 | ||
265 | ct = nf_ct_get(*pskb, &ctinfo); | 265 | ct = nf_ct_get(skb, &ctinfo); |
266 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 266 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
267 | pr_debug("nf_nat_standalone: adjusting sequence number\n"); | 267 | pr_debug("nf_nat_standalone: adjusting sequence number\n"); |
268 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) | 268 | if (!nf_nat_seq_adjust(skb, ct, ctinfo)) |
269 | return NF_DROP; | 269 | return NF_DROP; |
270 | } | 270 | } |
271 | return NF_ACCEPT; | 271 | return NF_ACCEPT; |
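
Editor's note: the nf_nat_standalone.c hunks above are part of the tree-wide switch from handing netfilter hooks a struct sk_buff **pskb to handing them the struct sk_buff *skb itself; every (*pskb) dereference becomes a plain use of skb, and (*pskb)->dst becomes skb->dst. A minimal sketch of the shape of that conversion, using stand-in types rather than the real kernel definitions (struct sk_buff here is a dummy, not the kernel's):

#include <stdio.h>

/* Stand-in for the kernel's socket buffer, for illustration only. */
struct sk_buff { unsigned int len; void *dst; };

#define NF_ACCEPT 1
#define NF_DROP   0

/* Old style: hooks received struct sk_buff **pskb, so a helper could
 * replace the buffer (e.g. after reallocation) behind the caller's back. */
static unsigned int old_hook(struct sk_buff **pskb)
{
	if ((*pskb)->len < 20)
		return NF_DROP;
	return NF_ACCEPT;
}

/* New style, as in the hunks above: the skb is passed directly and is
 * modified in place, so the extra level of indirection goes away. */
static unsigned int new_hook(struct sk_buff *skb)
{
	if (skb->len < 20)
		return NF_DROP;
	return NF_ACCEPT;
}

int main(void)
{
	struct sk_buff skb = { .len = 40, .dst = NULL };
	struct sk_buff *p = &skb;

	printf("old: %u new: %u\n", old_hook(&p), new_hook(&skb));
	return 0;
}

The same substitution is what turns nf_nat_icmp_reply_translation(..., pskb), nf_nat_rule_find(pskb, ...), ip_route_me_harder(pskb, ...) and ip_xfrm_me_harder(pskb) into calls that take skb directly, and it is also why the nf_nat_tftp.c helper below only changes its prototype.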
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c index 04dfeaefec02..0ecec701cb44 100644 --- a/net/ipv4/netfilter/nf_nat_tftp.c +++ b/net/ipv4/netfilter/nf_nat_tftp.c | |||
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("TFTP NAT helper"); | |||
20 | MODULE_LICENSE("GPL"); | 20 | MODULE_LICENSE("GPL"); |
21 | MODULE_ALIAS("ip_nat_tftp"); | 21 | MODULE_ALIAS("ip_nat_tftp"); |
22 | 22 | ||
23 | static unsigned int help(struct sk_buff **pskb, | 23 | static unsigned int help(struct sk_buff *skb, |
24 | enum ip_conntrack_info ctinfo, | 24 | enum ip_conntrack_info ctinfo, |
25 | struct nf_conntrack_expect *exp) | 25 | struct nf_conntrack_expect *exp) |
26 | { | 26 | { |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index e5b05b039101..fd16cb8f8abe 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -70,8 +70,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
70 | seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); | 70 | seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); |
71 | seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot)); | 71 | seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot)); |
72 | seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); | 72 | seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); |
73 | seq_printf(seq, "FRAG: inuse %d memory %d\n", ip_frag_nqueues, | 73 | seq_printf(seq, "FRAG: inuse %d memory %d\n", |
74 | atomic_read(&ip_frag_mem)); | 74 | ip_frag_nqueues(), ip_frag_mem()); |
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
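
Editor's note: before this series, proc.c read the exported globals ip_frag_nqueues and ip_frag_mem directly (the old left-hand column still shows atomic_read(&ip_frag_mem)); with fragment management consolidated into inet_fragment.c, the counters live in the shared fragment state and /proc reads them through small accessor functions instead. A sketch of the likely shape, with stand-in names and types; the real object and field names are assumptions here, not quoted from the patch:

#include <stdio.h>

/* Stand-in for the shared per-protocol fragment bookkeeping. */
struct inet_frags_stub {
	int nqueues;	/* fragment queues currently hashed            */
	int mem;	/* memory charged to queued fragments, in bytes */
};

static struct inet_frags_stub ip4_frags_stub = { .nqueues = 3, .mem = 4096 };

/* proc.c now calls functions rather than reading exported globals. */
static int ip_frag_nqueues_stub(void)
{
	return ip4_frags_stub.nqueues;
}

static int ip_frag_mem_stub(void)
{
	return ip4_frags_stub.mem;
}

int main(void)
{
	printf("FRAG: inuse %d memory %d\n",
	       ip_frag_nqueues_stub(), ip_frag_mem_stub());
	return 0;
}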
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index eb286abcf5dc..c98ef16effd2 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <net/route.h> | 19 | #include <net/route.h> |
20 | #include <net/tcp.h> | 20 | #include <net/tcp.h> |
21 | #include <net/cipso_ipv4.h> | 21 | #include <net/cipso_ipv4.h> |
22 | #include <net/inet_frag.h> | ||
22 | 23 | ||
23 | /* From af_inet.c */ | 24 | /* From af_inet.c */ |
24 | extern int sysctl_ip_nonlocal_bind; | 25 | extern int sysctl_ip_nonlocal_bind; |
@@ -357,7 +358,7 @@ ctl_table ipv4_table[] = { | |||
357 | { | 358 | { |
358 | .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, | 359 | .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, |
359 | .procname = "ipfrag_high_thresh", | 360 | .procname = "ipfrag_high_thresh", |
360 | .data = &sysctl_ipfrag_high_thresh, | 361 | .data = &ip4_frags_ctl.high_thresh, |
361 | .maxlen = sizeof(int), | 362 | .maxlen = sizeof(int), |
362 | .mode = 0644, | 363 | .mode = 0644, |
363 | .proc_handler = &proc_dointvec | 364 | .proc_handler = &proc_dointvec |
@@ -365,7 +366,7 @@ ctl_table ipv4_table[] = { | |||
365 | { | 366 | { |
366 | .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, | 367 | .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, |
367 | .procname = "ipfrag_low_thresh", | 368 | .procname = "ipfrag_low_thresh", |
368 | .data = &sysctl_ipfrag_low_thresh, | 369 | .data = &ip4_frags_ctl.low_thresh, |
369 | .maxlen = sizeof(int), | 370 | .maxlen = sizeof(int), |
370 | .mode = 0644, | 371 | .mode = 0644, |
371 | .proc_handler = &proc_dointvec | 372 | .proc_handler = &proc_dointvec |
@@ -381,7 +382,7 @@ ctl_table ipv4_table[] = { | |||
381 | { | 382 | { |
382 | .ctl_name = NET_IPV4_IPFRAG_TIME, | 383 | .ctl_name = NET_IPV4_IPFRAG_TIME, |
383 | .procname = "ipfrag_time", | 384 | .procname = "ipfrag_time", |
384 | .data = &sysctl_ipfrag_time, | 385 | .data = &ip4_frags_ctl.timeout, |
385 | .maxlen = sizeof(int), | 386 | .maxlen = sizeof(int), |
386 | .mode = 0644, | 387 | .mode = 0644, |
387 | .proc_handler = &proc_dointvec_jiffies, | 388 | .proc_handler = &proc_dointvec_jiffies, |
@@ -732,7 +733,7 @@ ctl_table ipv4_table[] = { | |||
732 | { | 733 | { |
733 | .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, | 734 | .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, |
734 | .procname = "ipfrag_secret_interval", | 735 | .procname = "ipfrag_secret_interval", |
735 | .data = &sysctl_ipfrag_secret_interval, | 736 | .data = &ip4_frags_ctl.secret_interval, |
736 | .maxlen = sizeof(int), | 737 | .maxlen = sizeof(int), |
737 | .mode = 0644, | 738 | .mode = 0644, |
738 | .proc_handler = &proc_dointvec_jiffies, | 739 | .proc_handler = &proc_dointvec_jiffies, |
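
Editor's note: the sysctl hunks above only retarget each entry's .data pointer, from the old standalone sysctl_ipfrag_* variables to fields of the new ip4_frags_ctl object; because proc_dointvec() and proc_dointvec_jiffies() just read and write through whatever .data points at, no handler changes are needed. A compact illustration of that idea with stand-in types (the struct name and the default values below are arbitrary; only the field and procname strings mirror the patch):

#include <stdio.h>

struct frags_ctl_stub {
	int high_thresh;
	int low_thresh;
	int timeout;
	int secret_interval;
};

static struct frags_ctl_stub ip4_frags_ctl_stub = {
	.high_thresh	 = 262144,	/* arbitrary example values */
	.low_thresh	 = 196608,
	.timeout	 = 30,
	.secret_interval = 600,
};

struct table_entry_stub {
	const char *procname;
	int *data;		/* where the proc handler would read/write */
};

static struct table_entry_stub table[] = {
	{ "ipfrag_high_thresh",	    &ip4_frags_ctl_stub.high_thresh },
	{ "ipfrag_low_thresh",	    &ip4_frags_ctl_stub.low_thresh },
	{ "ipfrag_time",	    &ip4_frags_ctl_stub.timeout },
	{ "ipfrag_secret_interval", &ip4_frags_ctl_stub.secret_interval },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%-24s -> %d\n", table[i].procname, *table[i].data);
	return 0;
}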
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0a42e9340346..0f00966b1784 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1995,8 +1995,7 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, | |||
1995 | } | 1995 | } |
1996 | 1996 | ||
1997 | /* Mark head of queue up as lost. */ | 1997 | /* Mark head of queue up as lost. */ |
1998 | static void tcp_mark_head_lost(struct sock *sk, | 1998 | static void tcp_mark_head_lost(struct sock *sk, int packets) |
1999 | int packets, u32 high_seq) | ||
2000 | { | 1999 | { |
2001 | struct tcp_sock *tp = tcp_sk(sk); | 2000 | struct tcp_sock *tp = tcp_sk(sk); |
2002 | struct sk_buff *skb; | 2001 | struct sk_buff *skb; |
@@ -2019,7 +2018,7 @@ static void tcp_mark_head_lost(struct sock *sk, | |||
2019 | tp->lost_skb_hint = skb; | 2018 | tp->lost_skb_hint = skb; |
2020 | tp->lost_cnt_hint = cnt; | 2019 | tp->lost_cnt_hint = cnt; |
2021 | cnt += tcp_skb_pcount(skb); | 2020 | cnt += tcp_skb_pcount(skb); |
2022 | if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq)) | 2021 | if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) |
2023 | break; | 2022 | break; |
2024 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { | 2023 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { |
2025 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 2024 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
@@ -2040,9 +2039,9 @@ static void tcp_update_scoreboard(struct sock *sk) | |||
2040 | int lost = tp->fackets_out - tp->reordering; | 2039 | int lost = tp->fackets_out - tp->reordering; |
2041 | if (lost <= 0) | 2040 | if (lost <= 0) |
2042 | lost = 1; | 2041 | lost = 1; |
2043 | tcp_mark_head_lost(sk, lost, tp->high_seq); | 2042 | tcp_mark_head_lost(sk, lost); |
2044 | } else { | 2043 | } else { |
2045 | tcp_mark_head_lost(sk, 1, tp->high_seq); | 2044 | tcp_mark_head_lost(sk, 1); |
2046 | } | 2045 | } |
2047 | 2046 | ||
2048 | /* New heuristics: it is possible only after we switched | 2047 | /* New heuristics: it is possible only after we switched |
@@ -2381,7 +2380,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) | |||
2381 | before(tp->snd_una, tp->high_seq) && | 2380 | before(tp->snd_una, tp->high_seq) && |
2382 | icsk->icsk_ca_state != TCP_CA_Open && | 2381 | icsk->icsk_ca_state != TCP_CA_Open && |
2383 | tp->fackets_out > tp->reordering) { | 2382 | tp->fackets_out > tp->reordering) { |
2384 | tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq); | 2383 | tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); |
2385 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); | 2384 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); |
2386 | } | 2385 | } |
2387 | 2386 | ||
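
Editor's note: the tcp_input.c hunks drop the high_seq argument from tcp_mark_head_lost() because both call sites shown here passed tp->high_seq, which the function can read from the socket it is already given; inside the loop, after(..., high_seq) simply becomes after(..., tp->high_seq). A tiny sketch of that refactoring pattern, with stand-in types rather than the real tcp_sock:

#include <stdio.h>

struct tcp_sock_stub { unsigned int high_seq; };

/* Before: every caller passed tp->high_seq explicitly. */
static void mark_head_lost_old(struct tcp_sock_stub *tp, int packets,
			       unsigned int high_seq)
{
	(void)tp;	/* unused in the old form */
	printf("old: packets=%d high_seq=%u\n", packets, high_seq);
}

/* After: the bound is read from state the function already has. */
static void mark_head_lost_new(struct tcp_sock_stub *tp, int packets)
{
	printf("new: packets=%d high_seq=%u\n", packets, tp->high_seq);
}

int main(void)
{
	struct tcp_sock_stub tp = { .high_seq = 1000 };

	mark_head_lost_old(&tp, 2, tp.high_seq);
	mark_head_lost_new(&tp, 2);
	return 0;
}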
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 434ef302ba83..a4edd666318b 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -78,7 +78,7 @@ static int xfrm4_output_finish2(struct sk_buff *skb) | |||
78 | while (likely((err = xfrm4_output_one(skb)) == 0)) { | 78 | while (likely((err = xfrm4_output_one(skb)) == 0)) { |
79 | nf_reset(skb); | 79 | nf_reset(skb); |
80 | 80 | ||
81 | err = nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL, | 81 | err = nf_hook(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, |
82 | skb->dst->dev, dst_output); | 82 | skb->dst->dev, dst_output); |
83 | if (unlikely(err != 1)) | 83 | if (unlikely(err != 1)) |
84 | break; | 84 | break; |
@@ -86,7 +86,7 @@ static int xfrm4_output_finish2(struct sk_buff *skb) | |||
86 | if (!skb->dst->xfrm) | 86 | if (!skb->dst->xfrm) |
87 | return dst_output(skb); | 87 | return dst_output(skb); |
88 | 88 | ||
89 | err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL, | 89 | err = nf_hook(PF_INET, NF_IP_POST_ROUTING, skb, NULL, |
90 | skb->dst->dev, xfrm4_output_finish2); | 90 | skb->dst->dev, xfrm4_output_finish2); |
91 | if (unlikely(err != 1)) | 91 | if (unlikely(err != 1)) |
92 | break; | 92 | break; |
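
Editor's note: the xfrm4_output.c hunks are the call-site half of the same skb conversion: since hooks no longer take a struct sk_buff **, nf_hook() itself takes the skb directly, so these loops pass skb instead of &skb and keep looping while the hook chain returns 1. A small stand-in illustration of that caller's view (dummy types, not the real nf_hook):

#include <stdio.h>

struct sk_buff { unsigned int len; };

static int nf_hook_stub(struct sk_buff *skb)
{
	/* Hooks modify the packet in place; the caller's pointer is stable. */
	skb->len += 8;		/* e.g. a header added by a transform */
	return 1;		/* 1 == continue, as in the loops above */
}

int main(void)
{
	struct sk_buff buf = { .len = 60 };
	struct sk_buff *skb = &buf;

	/* Old code passed &skb (the address of the pointer); now skb itself. */
	if (nf_hook_stub(skb) == 1)
		printf("len after hook: %u\n", skb->len);
	return 0;
}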