Diffstat (limited to 'drivers/net/ppp/ppp_generic.c')
-rw-r--r--  drivers/net/ppp/ppp_generic.c  2954
1 file changed, 2954 insertions(+), 0 deletions(-)

diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
new file mode 100644
index 000000000000..10e5d985afa3
--- /dev/null
+++ b/drivers/net/ppp/ppp_generic.c
@@ -0,0 +1,2954 @@
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

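/*
 * For illustration (not part of the original source): PF_TO_X() uses
 * container_of() to map a struct ppp_file pointer back to its enclosing
 * structure, so the mapping round-trips regardless of where `file' sits
 * in the containing struct:
 *
 *	struct ppp *ppp = ...;
 *	struct ppp_file *pf = &ppp->file;
 *	PF_TO_PPP(pf) == ppp;		// same pointer back
 *
 * and likewise PF_TO_CHANNEL() for a struct channel.
 */
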
/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you must take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and ensures that finding
	 * a channel and updating its file.refcnt field
	 * is atomic.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
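
/*
 * For illustration: the PPP protocol field is big-endian on the wire,
 * so for an uncompressed IPv4 frame skb->data[0..1] holds 0x00 0x21 and
 * PPP_PROTO(skb) evaluates to PPP_IP (0x0021).  get_unaligned_be16()
 * is used because skb->data need not be 2-byte aligned here.
 */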

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
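
/*
 * For illustration: casting the difference to s32 makes these
 * comparisons robust across 32-bit wraparound.  E.g.
 *
 *	seq_before(0xfffffffe, 1)	is true, because
 *	(s32)(0xfffffffe - 1) == (s32)0xfffffffd < 0
 *
 * i.e. 0xfffffffe is treated as 3 steps before 1 modulo 2^32,
 * not ~2^32 steps after it.
 */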

/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
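
/*
 * For illustration, a sketch of how this shorthand composes with the
 * lock ordering documented above (channel.upl -> ppp.wlock ->
 * ppp.rlock -> channel.downl):
 *
 *	read_lock_bh(&pch->upl);
 *	ppp_lock(ppp);		// takes wlock, then rlock
 *	... touch ppp->channels / ppp->n_channels ...
 *	ppp_unlock(ppp);	// drops rlock, then wlock
 *	read_unlock_bh(&pch->upl);
 *
 * pch->downl, when needed, is always taken innermost.
 */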

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
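
/*
 * For illustration, the userspace flow these states imply (a sketch
 * based only on the ioctls handled below, not a complete pppd recipe):
 *
 *	fd = open("/dev/ppp", O_RDWR);		// unattached
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	// create + attach to a unit
 * or	ioctl(fd, PPPIOCATTACH, &unit);		// attach to an existing unit
 * or	ioctl(fd, PPPIOCATTCHAN, &chan_idx);	// attach to a channel
 *
 * A channel fd can then be bound to a unit with PPPIOCCONNECT and
 * detached again with PPPIOCDISCONN.
 */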
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	skb_queue_tail(&pf->xq, skb);

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf));
		break;
	case CHANNEL:
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= POLLIN | POLLRDNORM;
	if (pf->dead)
		mask |= POLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = memdup_user(uprog.filter, len);
	if (IS_ERR(code))
		return PTR_ERR(code);

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */

static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (!pf)
		return ppp_unattached_ioctl(current->nsproxy->net_ns,
					pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH, userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		mutex_lock(&ppp_mutex);
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
				atomic_long_read(&file->f_count));
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		mutex_lock(&ppp_mutex);
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		pr_err("PPP: not interface or channel??\n");
		return -EINVAL;
	}

	mutex_lock(&ppp_mutex);
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			netdev_err(ppp->dev,
				   "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	mutex_lock(&ppp_mutex);
	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(net, unit, &err);
		if (!ppp)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release,
	.llseek		= noop_llseek,
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_destroy(&pn->units_idr);
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	pr_info("PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		pr_err("failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		pr_err("failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	put_unaligned_be16(proto, pp);

	netif_stop_queue(dev);
	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_start_xmit = ppp_start_xmit,
	.ndo_do_ioctl   = ppp_net_ioctl,
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MTU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;
	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		kfree_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		kfree_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		kfree_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter &&
		    sk_run_filter(skb, ppp->pass_filter) == 0) {
			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "PPP: outbound frame "
					      "not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter &&
		      sk_run_filter(skb, ppp->active_filter) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->dev->stats.tx_packets;
	ppp->dev->stats.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			kfree_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			kfree_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
	    proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				netdev_err(ppp->dev,
					   "ppp: compression required but "
					   "down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (!skb)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
static bool mp_protocol_compress __read_mostly = true;
module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mp_protocol_compress,
		 "compress protocol id in multilink fragments");

/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int len, totlen;
	int i, bits, hdrlen, mtu;
	int flen;
	int navail, nfree, nzero;
	int nbigger;
	int totspeed;
	int totfree;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	totspeed = 0;	/* total bitrate of the bundle */
	nfree = 0;	/* # channels which have no packet already queued */
	navail = 0;	/* total # of usable channels (not deregistered) */
	nzero = 0;	/* number of channels with zero speed associated */
	totfree = 0;	/* total # of channels available and
			 * having no queued packets before
			 * starting the fragmentation */

	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
	i = 0;
	list_for_each_entry(pch, &ppp->channels, clist) {
		if (pch->chan) {
			pch->avail = 1;
			navail++;
			pch->speed = pch->chan->speed;
		} else {
			pch->avail = 0;
		}
		if (pch->avail) {
			if (skb_queue_empty(&pch->file.xq) ||
			    !pch->had_frag) {
				if (pch->speed == 0)
					nzero++;
				else
					totspeed += pch->speed;

				pch->avail = 2;
				++nfree;
				++totfree;
			}
			if (!pch->had_frag && i < ppp->nxchan)
				ppp->nxchan = i;
		}
		++i;
	}
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
	if (nfree == 0 || nfree < navail / 2)
		return 0;	/* can't take now, leave it in xmit_pending */

	/* Do protocol field compression */
	p = skb->data;
	len = skb->len;
	if (*p == 0 && mp_protocol_compress) {
		++p;
		--len;
	}

	totlen = len;
	nbigger = len % nfree;

	/* skip to the channel after the one we last used
	   and start at that one */
	list = &ppp->channels;
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
	bits = B;
	while (len > 0) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/*
		 * Skip this channel if it has a fragment pending already and
		 * we haven't given a fragment to all of the free channels.
		 */
		if (pch->avail == 1) {
			if (nfree > 0)
				continue;
		} else {
			pch->avail = 1;
		}

		/* check the channel's mtu and whether it is still attached. */
		spin_lock_bh(&pch->downl);
		if (pch->chan == NULL) {
			/* can't use this channel, it's being deregistered */
			if (pch->speed == 0)
				nzero--;
			else
				totspeed -= pch->speed;

			spin_unlock_bh(&pch->downl);
			pch->avail = 0;
			totlen = len;
			totfree--;
			nfree--;
			if (--navail == 0)
				break;
			continue;
		}

		/*
		 * If the channel speed is not set, divide
		 * the packet evenly among the free channels;
		 * otherwise divide it according to the speed
		 * of the channel we are going to transmit on.
		 */
		flen = len;
		if (nfree > 0) {
			if (pch->speed == 0) {
				flen = len/nfree;
				if (nbigger > 0) {
					flen++;
					nbigger--;
				}
			} else {
				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
					((totspeed*totfree)/pch->speed)) - hdrlen;
				if (nbigger > 0) {
					flen += ((totfree - nzero)*pch->speed)/totspeed;
					nbigger -= ((totfree - nzero)*pch->speed)/
						   totspeed;
				}
			}
			nfree--;
		}

		/*
		 * Check whether we are on the last channel or
		 * have exceeded the length of the data to
		 * fragment.
		 */
		if ((nfree <= 0) || (flen > len))
			flen = len;
		/*
		 * It is not worth transmitting on slow channels:
		 * in that case the flen resulting from the above
		 * formula will be zero or less.  Skip the channel
		 * in that case.
		 */
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock_bh(&pch->downl);
			continue;
		}

		mtu = pch->chan->mtu - hdrlen;
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		put_unaligned_be16(PPP_MP, q);
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */
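
/*
 * For illustration, one reading of the speed-weighted fragment-size
 * formula in ppp_mp_explode() above.  In exact (real) arithmetic, with
 * nzero == 0, it reduces to
 *
 *	flen ~ (pch->speed / totspeed) * (totlen + hdrlen * totfree)
 *	       - hdrlen
 *
 * i.e. each free channel gets a share of the total bytes-on-the-wire
 * budget (payload plus one MP header per fragment) proportional to its
 * speed, minus its own header cost.  E.g. two channels at speeds 3000
 * and 1000, totlen = 1000, hdrlen = 6, totfree = 2: the budget is
 * 1000 + 12 = 1012 bytes, giving 1012*3/4 - 6 = 753 and
 * 1012*1/4 - 6 = 247 payload bytes, which sum back to 1000.  The
 * integer divisions in the code make the actual values coarser, and
 * the flen > len and mtu clamps bound any overshoot; nbigger spreads
 * the remainder bytes.
 */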

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

struct ppp_mp_skb_parm {
	u32		sequence;
	u8		BEbits;
};
#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))

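/*
 * For illustration: skb->cb is a scratch area private to the current
 * owner of the skb, so the multilink receive path can stash each
 * fragment's sequence number and B/E header bits there via PPP_MP_CB()
 * (presumably filled in by ppp_receive_mp_frame, prototyped above)
 * without any per-packet allocation.
 */
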
1564 | static inline void | ||
1565 | ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1566 | { | ||
1567 | ppp_recv_lock(ppp); | ||
1568 | if (!ppp->closing) | ||
1569 | ppp_receive_frame(ppp, skb, pch); | ||
1570 | else | ||
1571 | kfree_skb(skb); | ||
1572 | ppp_recv_unlock(ppp); | ||
1573 | } | ||
1574 | |||
1575 | void | ||
1576 | ppp_input(struct ppp_channel *chan, struct sk_buff *skb) | ||
1577 | { | ||
1578 | struct channel *pch = chan->ppp; | ||
1579 | int proto; | ||
1580 | |||
1581 | if (!pch) { | ||
1582 | kfree_skb(skb); | ||
1583 | return; | ||
1584 | } | ||
1585 | |||
1586 | read_lock_bh(&pch->upl); | ||
1587 | if (!pskb_may_pull(skb, 2)) { | ||
1588 | kfree_skb(skb); | ||
1589 | if (pch->ppp) { | ||
1590 | ++pch->ppp->dev->stats.rx_length_errors; | ||
1591 | ppp_receive_error(pch->ppp); | ||
1592 | } | ||
1593 | goto done; | ||
1594 | } | ||
1595 | |||
1596 | proto = PPP_PROTO(skb); | ||
1597 | if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { | ||
1598 | /* put it on the channel queue */ | ||
1599 | skb_queue_tail(&pch->file.rq, skb); | ||
1600 | /* drop old frames if queue too long */ | ||
1601 | while (pch->file.rq.qlen > PPP_MAX_RQLEN && | ||
1602 | (skb = skb_dequeue(&pch->file.rq))) | ||
1603 | kfree_skb(skb); | ||
1604 | wake_up_interruptible(&pch->file.rwait); | ||
1605 | } else { | ||
1606 | ppp_do_recv(pch->ppp, skb, pch); | ||
1607 | } | ||
1608 | |||
1609 | done: | ||
1610 | read_unlock_bh(&pch->upl); | ||
1611 | } | ||
1612 | |||
1613 | /* Put a 0-length skb in the receive queue as an error indication */ | ||
1614 | void | ||
1615 | ppp_input_error(struct ppp_channel *chan, int code) | ||
1616 | { | ||
1617 | struct channel *pch = chan->ppp; | ||
1618 | struct sk_buff *skb; | ||
1619 | |||
1620 | if (!pch) | ||
1621 | return; | ||
1622 | |||
1623 | read_lock_bh(&pch->upl); | ||
1624 | if (pch->ppp) { | ||
1625 | skb = alloc_skb(0, GFP_ATOMIC); | ||
1626 | if (skb) { | ||
1627 | skb->len = 0; /* probably unnecessary */ | ||
1628 | skb->cb[0] = code; | ||
1629 | ppp_do_recv(pch->ppp, skb, pch); | ||
1630 | } | ||
1631 | } | ||
1632 | read_unlock_bh(&pch->upl); | ||
1633 | } | ||
1634 | |||
1635 | /* | ||
1636 | * We come in here to process a received frame. | ||
1637 | * The receive side of the ppp unit is locked. | ||
1638 | */ | ||
1639 | static void | ||
1640 | ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1641 | { | ||
1642 | /* note: a 0-length skb is used as an error indication */ | ||
1643 | if (skb->len > 0) { | ||
1644 | #ifdef CONFIG_PPP_MULTILINK | ||
1645 | /* XXX do channel-level decompression here */ | ||
1646 | if (PPP_PROTO(skb) == PPP_MP) | ||
1647 | ppp_receive_mp_frame(ppp, skb, pch); | ||
1648 | else | ||
1649 | #endif /* CONFIG_PPP_MULTILINK */ | ||
1650 | ppp_receive_nonmp_frame(ppp, skb); | ||
1651 | } else { | ||
1652 | kfree_skb(skb); | ||
1653 | ppp_receive_error(ppp); | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | static void | ||
1658 | ppp_receive_error(struct ppp *ppp) | ||
1659 | { | ||
1660 | ++ppp->dev->stats.rx_errors; | ||
1661 | if (ppp->vj) | ||
1662 | slhc_toss(ppp->vj); | ||
1663 | } | ||
1664 | |||
1665 | static void | ||
1666 | ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) | ||
1667 | { | ||
1668 | struct sk_buff *ns; | ||
1669 | int proto, len, npi; | ||
1670 | |||
1671 | /* | ||
1672 | * Decompress the frame, if compressed. | ||
1673 | * Note that some decompressors need to see uncompressed frames | ||
1674 | * that come in as well as compressed frames. | ||
1675 | */ | ||
1676 | if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) && | ||
1677 | (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) | ||
1678 | skb = ppp_decompress_frame(ppp, skb); | ||
1679 | |||
1680 | if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) | ||
1681 | goto err; | ||
1682 | |||
1683 | proto = PPP_PROTO(skb); | ||
1684 | switch (proto) { | ||
1685 | case PPP_VJC_COMP: | ||
1686 | /* decompress VJ compressed packets */ | ||
1687 | if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) | ||
1688 | goto err; | ||
1689 | |||
1690 | if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { | ||
1691 | /* copy to a new sk_buff with more tailroom */ | ||
1692 | ns = dev_alloc_skb(skb->len + 128); | ||
1693 | if (!ns) { | ||
1694 | netdev_err(ppp->dev, "PPP: no memory " | ||
1695 | "(VJ decomp)\n"); | ||
1696 | goto err; | ||
1697 | } | ||
1698 | skb_reserve(ns, 2); | ||
1699 | skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); | ||
1700 | kfree_skb(skb); | ||
1701 | skb = ns; | ||
1702 | } else | ||
1704 | skb->ip_summed = CHECKSUM_NONE; | ||
1705 | |||
1706 | len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); | ||
1707 | if (len <= 0) { | ||
1708 | netdev_printk(KERN_DEBUG, ppp->dev, | ||
1709 | "PPP: VJ decompression error\n"); | ||
1710 | goto err; | ||
1711 | } | ||
1712 | len += 2; | ||
1713 | if (len > skb->len) | ||
1714 | skb_put(skb, len - skb->len); | ||
1715 | else if (len < skb->len) | ||
1716 | skb_trim(skb, len); | ||
1717 | proto = PPP_IP; | ||
1718 | break; | ||
1719 | |||
1720 | case PPP_VJC_UNCOMP: | ||
1721 | if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP)) | ||
1722 | goto err; | ||
1723 | |||
1724 | /* Until we fix the decompressor, we need to make sure | ||
1725 | * the data portion is linear. | ||
1726 | */ | ||
1727 | if (!pskb_may_pull(skb, skb->len)) | ||
1728 | goto err; | ||
1729 | |||
1730 | if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { | ||
1731 | netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); | ||
1732 | goto err; | ||
1733 | } | ||
1734 | proto = PPP_IP; | ||
1735 | break; | ||
1736 | |||
1737 | case PPP_CCP: | ||
1738 | ppp_ccp_peek(ppp, skb, 1); | ||
1739 | break; | ||
1740 | } | ||
1741 | |||
1742 | ++ppp->dev->stats.rx_packets; | ||
1743 | ppp->dev->stats.rx_bytes += skb->len - 2; | ||
1744 | |||
1745 | npi = proto_to_npindex(proto); | ||
1746 | if (npi < 0) { | ||
1747 | /* control or unknown frame - pass it to pppd */ | ||
1748 | skb_queue_tail(&ppp->file.rq, skb); | ||
1749 | /* limit queue length by dropping old frames */ | ||
1750 | while (ppp->file.rq.qlen > PPP_MAX_RQLEN && | ||
1751 | (skb = skb_dequeue(&ppp->file.rq))) | ||
1752 | kfree_skb(skb); | ||
1753 | /* wake up any process polling or blocking on read */ | ||
1754 | wake_up_interruptible(&ppp->file.rwait); | ||
1755 | |||
1756 | } else { | ||
1757 | /* network protocol frame - give it to the kernel */ | ||
1758 | |||
1759 | #ifdef CONFIG_PPP_FILTER | ||
1760 | /* check if the packet passes the pass and active filters */ | ||
1761 | /* the filter instructions are constructed assuming | ||
1762 | a four-byte PPP header on each packet */ | ||
1763 | if (ppp->pass_filter || ppp->active_filter) { | ||
1764 | if (skb_cloned(skb) && | ||
1765 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | ||
1766 | goto err; | ||
1767 | |||
1768 | *skb_push(skb, 2) = 0; | ||
1769 | if (ppp->pass_filter && | ||
1770 | sk_run_filter(skb, ppp->pass_filter) == 0) { | ||
1771 | if (ppp->debug & 1) | ||
1772 | netdev_printk(KERN_DEBUG, ppp->dev, | ||
1773 | "PPP: inbound frame " | ||
1774 | "not passed\n"); | ||
1775 | kfree_skb(skb); | ||
1776 | return; | ||
1777 | } | ||
1778 | if (!(ppp->active_filter && | ||
1779 | sk_run_filter(skb, ppp->active_filter) == 0)) | ||
1780 | ppp->last_recv = jiffies; | ||
1781 | __skb_pull(skb, 2); | ||
1782 | } else | ||
1783 | #endif /* CONFIG_PPP_FILTER */ | ||
1784 | ppp->last_recv = jiffies; | ||
1785 | |||
1786 | if ((ppp->dev->flags & IFF_UP) == 0 || | ||
1787 | ppp->npmode[npi] != NPMODE_PASS) { | ||
1788 | kfree_skb(skb); | ||
1789 | } else { | ||
1790 | /* chop off protocol */ | ||
1791 | skb_pull_rcsum(skb, 2); | ||
1792 | skb->dev = ppp->dev; | ||
1793 | skb->protocol = htons(npindex_to_ethertype[npi]); | ||
1794 | skb_reset_mac_header(skb); | ||
1795 | netif_rx(skb); | ||
1796 | } | ||
1797 | } | ||
1798 | return; | ||
1799 | |||
1800 | err: | ||
1801 | kfree_skb(skb); | ||
1802 | ppp_receive_error(ppp); | ||
1803 | } | ||
1804 | |||
1805 | static struct sk_buff * | ||
1806 | ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) | ||
1807 | { | ||
1808 | int proto = PPP_PROTO(skb); | ||
1809 | struct sk_buff *ns; | ||
1810 | int len; | ||
1811 | |||
1812 | /* Until we fix all the decompressors, we need to make sure | ||
1813 | * the data portion is linear. | ||
1814 | */ | ||
1815 | if (!pskb_may_pull(skb, skb->len)) | ||
1816 | goto err; | ||
1817 | |||
1818 | if (proto == PPP_COMP) { | ||
1819 | int obuff_size; | ||
1820 | |||
1821 | switch (ppp->rcomp->compress_proto) { | ||
1822 | case CI_MPPE: | ||
1823 | obuff_size = ppp->mru + PPP_HDRLEN + 1; | ||
1824 | break; | ||
1825 | default: | ||
1826 | obuff_size = ppp->mru + PPP_HDRLEN; | ||
1827 | break; | ||
1828 | } | ||
1829 | |||
1830 | ns = dev_alloc_skb(obuff_size); | ||
1831 | if (!ns) { | ||
1832 | netdev_err(ppp->dev, "ppp_decompress_frame: " | ||
1833 | "no memory\n"); | ||
1834 | goto err; | ||
1835 | } | ||
1836 | /* the decompressor still expects the A/C bytes in the hdr */ | ||
1837 | len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, | ||
1838 | skb->len + 2, ns->data, obuff_size); | ||
1839 | if (len < 0) { | ||
1840 | /* Pass the compressed frame to pppd as an | ||
1841 | error indication. */ | ||
1842 | if (len == DECOMP_FATALERROR) | ||
1843 | ppp->rstate |= SC_DC_FERROR; | ||
1844 | kfree_skb(ns); | ||
1845 | goto err; | ||
1846 | } | ||
1847 | |||
1848 | kfree_skb(skb); | ||
1849 | skb = ns; | ||
1850 | skb_put(skb, len); | ||
1851 | skb_pull(skb, 2); /* pull off the A/C bytes */ | ||
1852 | |||
1853 | } else { | ||
1854 | /* Uncompressed frame - pass to decompressor so it | ||
1855 | can update its dictionary if necessary. */ | ||
1856 | if (ppp->rcomp->incomp) | ||
1857 | ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, | ||
1858 | skb->len + 2); | ||
1859 | } | ||
1860 | |||
1861 | return skb; | ||
1862 | |||
1863 | err: | ||
1864 | ppp->rstate |= SC_DC_ERROR; | ||
1865 | ppp_receive_error(ppp); | ||
1866 | return skb; | ||
1867 | } | ||
1868 | |||
1869 | #ifdef CONFIG_PPP_MULTILINK | ||
1870 | /* | ||
1871 | * Receive a multilink frame. | ||
1872 | * We put it on the reconstruction queue and then pull off | ||
1873 | * as many completed frames as we can. | ||
1874 | */ | ||
1875 | static void | ||
1876 | ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1877 | { | ||
1878 | u32 mask, seq; | ||
1879 | struct channel *ch; | ||
1880 | int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN; | ||
1881 | |||
1882 | if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) | ||
1883 | goto err; /* no good, throw it away */ | ||
1884 | |||
1885 | /* Decode sequence number and begin/end bits */ | ||
1886 | if (ppp->flags & SC_MP_SHORTSEQ) { | ||
1887 | seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; | ||
1888 | mask = 0xfff; | ||
1889 | } else { | ||
1890 | seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5]; | ||
1891 | mask = 0xffffff; | ||
1892 | } | ||
1893 | PPP_MP_CB(skb)->BEbits = skb->data[2]; | ||
1894 | skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ | ||
1895 | |||
1896 | /* | ||
1897 | * Do protocol ID decompression on the first fragment of each packet. | ||
1898 | */ | ||
1899 | if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1)) | ||
1900 | *skb_push(skb, 1) = 0; | ||
1901 | |||
1902 | /* | ||
1903 | * Expand sequence number to 32 bits, making it as close | ||
1904 | * as possible to ppp->minseq. | ||
1905 | */ | ||
1906 | seq |= ppp->minseq & ~mask; | ||
1907 | if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) | ||
1908 | seq += mask + 1; | ||
1909 | else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) | ||
1910 | seq -= mask + 1; /* should never happen */ | ||
1911 | PPP_MP_CB(skb)->sequence = seq; | ||
1912 | pch->lastseq = seq; | ||
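/*
 * Editorial worked example of the expansion above (short-sequence
 * case, mask = 0xfff): suppose ppp->minseq = 0x12ffe and the 12-bit
 * value from the wire is 0x003.  Then
 *
 *	seq |= minseq & ~mask;			seq = 0x12003
 *	minseq - seq = 0xffb > mask >> 1	seq += mask + 1 = 0x13003
 *
 * so the wire value is read as the instance of 0x003 nearest to
 * minseq, handling the wrap from 0xfff back to 0x000.
 */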
1913 | |||
1914 | /* | ||
1915 | * If this packet comes before the next one we were expecting, | ||
1916 | * drop it. | ||
1917 | */ | ||
1918 | if (seq_before(seq, ppp->nextseq)) { | ||
1919 | kfree_skb(skb); | ||
1920 | ++ppp->dev->stats.rx_dropped; | ||
1921 | ppp_receive_error(ppp); | ||
1922 | return; | ||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Reevaluate minseq, the minimum over all channels of the | ||
1927 | * last sequence number received on each channel. Because of | ||
1928 | * the increasing sequence number rule, we know that any fragment | ||
1929 | * before `minseq' which hasn't arrived is never going to arrive. | ||
1930 | * The list of channels can't change because we have the receive | ||
1931 | * side of the ppp unit locked. | ||
1932 | */ | ||
1933 | list_for_each_entry(ch, &ppp->channels, clist) { | ||
1934 | if (seq_before(ch->lastseq, seq)) | ||
1935 | seq = ch->lastseq; | ||
1936 | } | ||
1937 | if (seq_before(ppp->minseq, seq)) | ||
1938 | ppp->minseq = seq; | ||
1939 | |||
1940 | /* Put the fragment on the reconstruction queue */ | ||
1941 | ppp_mp_insert(ppp, skb); | ||
1942 | |||
1943 | /* If the queue is getting long, don't wait any longer for packets | ||
1944 | before the start of the queue. */ | ||
1945 | if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { | ||
1946 | struct sk_buff *mskb = skb_peek(&ppp->mrq); | ||
1947 | if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence)) | ||
1948 | ppp->minseq = PPP_MP_CB(mskb)->sequence; | ||
1949 | } | ||
1950 | |||
1951 | /* Pull completed packets off the queue and receive them. */ | ||
1952 | while ((skb = ppp_mp_reconstruct(ppp))) { | ||
1953 | if (pskb_may_pull(skb, 2)) | ||
1954 | ppp_receive_nonmp_frame(ppp, skb); | ||
1955 | else { | ||
1956 | ++ppp->dev->stats.rx_length_errors; | ||
1957 | kfree_skb(skb); | ||
1958 | ppp_receive_error(ppp); | ||
1959 | } | ||
1960 | } | ||
1961 | |||
1962 | return; | ||
1963 | |||
1964 | err: | ||
1965 | kfree_skb(skb); | ||
1966 | ppp_receive_error(ppp); | ||
1967 | } | ||
1968 | |||
1969 | /* | ||
1970 | * Insert a fragment on the MP reconstruction queue. | ||
1971 | * The queue is ordered by increasing sequence number. | ||
1972 | */ | ||
1973 | static void | ||
1974 | ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) | ||
1975 | { | ||
1976 | struct sk_buff *p; | ||
1977 | struct sk_buff_head *list = &ppp->mrq; | ||
1978 | u32 seq = PPP_MP_CB(skb)->sequence; | ||
1979 | |||
1980 | /* N.B. we don't need to lock the list lock because we have the | ||
1981 | ppp unit receive-side lock. */ | ||
1982 | skb_queue_walk(list, p) { | ||
1983 | if (seq_before(seq, PPP_MP_CB(p)->sequence)) | ||
1984 | break; | ||
1985 | } | ||
1986 | __skb_queue_before(list, p, skb); | ||
1987 | } | ||
1988 | |||
1989 | /* | ||
1990 | * Reconstruct a packet from the MP fragment queue. | ||
1991 | * We go through increasing sequence numbers until we find a | ||
1992 | * complete packet, or we get to the sequence number for a fragment | ||
1993 | * which hasn't arrived but might still do so. | ||
1994 | */ | ||
1995 | static struct sk_buff * | ||
1996 | ppp_mp_reconstruct(struct ppp *ppp) | ||
1997 | { | ||
1998 | u32 seq = ppp->nextseq; | ||
1999 | u32 minseq = ppp->minseq; | ||
2000 | struct sk_buff_head *list = &ppp->mrq; | ||
2001 | struct sk_buff *p, *tmp; | ||
2002 | struct sk_buff *head, *tail; | ||
2003 | struct sk_buff *skb = NULL; | ||
2004 | int lost = 0, len = 0; | ||
2005 | |||
2006 | if (ppp->mrru == 0) /* do nothing until mrru is set */ | ||
2007 | return NULL; | ||
2008 | head = list->next; | ||
2009 | tail = NULL; | ||
2010 | skb_queue_walk_safe(list, p, tmp) { | ||
2011 | again: | ||
2012 | if (seq_before(PPP_MP_CB(p)->sequence, seq)) { | ||
2013 | /* this can't happen; ignore the skb anyway */ | ||
2014 | netdev_err(ppp->dev, "ppp_mp_reconstruct bad " | ||
2015 | "seq %u < %u\n", | ||
2016 | PPP_MP_CB(p)->sequence, seq); | ||
2017 | __skb_unlink(p, list); | ||
2018 | kfree_skb(p); | ||
2019 | continue; | ||
2020 | } | ||
2021 | if (PPP_MP_CB(p)->sequence != seq) { | ||
2022 | /* Fragment `seq' is missing. If it is after | ||
2023 | minseq, it might arrive later, so stop here. */ | ||
2024 | if (seq_after(seq, minseq)) | ||
2025 | break; | ||
2026 | /* Fragment `seq' is lost, keep going. */ | ||
2027 | lost = 1; | ||
2028 | seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ? | ||
2029 | minseq + 1 : PPP_MP_CB(p)->sequence; | ||
2030 | goto again; | ||
2031 | } | ||
2032 | |||
2033 | /* | ||
2034 | * At this point we know that all the fragments from | ||
2035 | * ppp->nextseq to seq are either present or lost. | ||
2036 | * Also, there are no complete packets in the queue | ||
2037 | * that have no missing fragments and end before this | ||
2038 | * fragment. | ||
2039 | */ | ||
2040 | |||
2041 | /* B bit set indicates this fragment starts a packet */ | ||
2042 | if (PPP_MP_CB(p)->BEbits & B) { | ||
2043 | head = p; | ||
2044 | lost = 0; | ||
2045 | len = 0; | ||
2046 | } | ||
2047 | |||
2048 | len += p->len; | ||
2049 | |||
2050 | /* Got a complete packet yet? */ | ||
2051 | if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) && | ||
2052 | (PPP_MP_CB(head)->BEbits & B)) { | ||
2053 | if (len > ppp->mrru + 2) { | ||
2054 | ++ppp->dev->stats.rx_length_errors; | ||
2055 | netdev_printk(KERN_DEBUG, ppp->dev, | ||
2056 | "PPP: reconstructed packet" | ||
2057 | " is too long (%d)\n", len); | ||
2058 | } else { | ||
2059 | tail = p; | ||
2060 | break; | ||
2061 | } | ||
2062 | ppp->nextseq = seq + 1; | ||
2063 | } | ||
2064 | |||
2065 | /* | ||
2066 | * If this is the ending fragment of a packet, | ||
2067 | * and we haven't found a complete valid packet yet, | ||
2068 | * we can discard up to and including this fragment. | ||
2069 | */ | ||
2070 | if (PPP_MP_CB(p)->BEbits & E) { | ||
2071 | struct sk_buff *tmp2; | ||
2072 | |||
2073 | skb_queue_reverse_walk_from_safe(list, p, tmp2) { | ||
2074 | __skb_unlink(p, list); | ||
2075 | kfree_skb(p); | ||
2076 | } | ||
2077 | head = skb_peek(list); | ||
2078 | if (!head) | ||
2079 | break; | ||
2080 | } | ||
2081 | ++seq; | ||
2082 | } | ||
2083 | |||
2084 | /* If we have a complete packet, splice its fragments into one skb. */ | ||
2085 | if (tail != NULL) { | ||
2086 | /* If we have discarded any fragments, | ||
2087 | signal a receive error. */ | ||
2088 | if (PPP_MP_CB(head)->sequence != ppp->nextseq) { | ||
2089 | if (ppp->debug & 1) | ||
2090 | netdev_printk(KERN_DEBUG, ppp->dev, | ||
2091 | " missed pkts %u..%u\n", | ||
2092 | ppp->nextseq, | ||
2093 | PPP_MP_CB(head)->sequence-1); | ||
2094 | ++ppp->dev->stats.rx_dropped; | ||
2095 | ppp_receive_error(ppp); | ||
2096 | } | ||
2097 | |||
2098 | skb = head; | ||
2099 | if (head != tail) { | ||
2100 | struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; | ||
2101 | p = skb_queue_next(list, head); | ||
2102 | __skb_unlink(skb, list); | ||
2103 | skb_queue_walk_from_safe(list, p, tmp) { | ||
2104 | __skb_unlink(p, list); | ||
2105 | *fragpp = p; | ||
2106 | p->next = NULL; | ||
2107 | fragpp = &p->next; | ||
2108 | |||
2109 | skb->len += p->len; | ||
2110 | skb->data_len += p->len; | ||
2111 | skb->truesize += p->len; | ||
2112 | |||
2113 | if (p == tail) | ||
2114 | break; | ||
2115 | } | ||
2116 | } else { | ||
2117 | __skb_unlink(skb, list); | ||
2118 | } | ||
2119 | |||
2120 | ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; | ||
2121 | } | ||
2122 | |||
2123 | return skb; | ||
2124 | } | ||
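/*
 * Editorial example of the rules above, with fragments written as
 * (sequence, B, E):
 *
 *	(5,B,-) (6,-,-) (7,-,E)		one packet from fragments 5..7
 *	(8,B,E)				one single-fragment packet
 *	(9,B,-) (11,-,E)		held until minseq passes 10, then
 *					discarded as an incomplete packet
 *
 * Note that completing a packet is a splice, not a copy: fragment 5
 * keeps its data and fragments 6..7 are chained onto its frag_list.
 */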
2125 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2126 | |||
2127 | /* | ||
2128 | * Channel interface. | ||
2129 | */ | ||
2130 | |||
2131 | /* Create a new, unattached ppp channel. */ | ||
2132 | int ppp_register_channel(struct ppp_channel *chan) | ||
2133 | { | ||
2134 | return ppp_register_net_channel(current->nsproxy->net_ns, chan); | ||
2135 | } | ||
2136 | |||
2137 | /* Create a new, unattached ppp channel for specified net. */ | ||
2138 | int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) | ||
2139 | { | ||
2140 | struct channel *pch; | ||
2141 | struct ppp_net *pn; | ||
2142 | |||
2143 | pch = kzalloc(sizeof(struct channel), GFP_KERNEL); | ||
2144 | if (!pch) | ||
2145 | return -ENOMEM; | ||
2146 | |||
2147 | pn = ppp_pernet(net); | ||
2148 | |||
2149 | pch->ppp = NULL; | ||
2150 | pch->chan = chan; | ||
2151 | pch->chan_net = net; | ||
2152 | chan->ppp = pch; | ||
2153 | init_ppp_file(&pch->file, CHANNEL); | ||
2154 | pch->file.hdrlen = chan->hdrlen; | ||
2155 | #ifdef CONFIG_PPP_MULTILINK | ||
2156 | pch->lastseq = -1; | ||
2157 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2158 | init_rwsem(&pch->chan_sem); | ||
2159 | spin_lock_init(&pch->downl); | ||
2160 | rwlock_init(&pch->upl); | ||
2161 | |||
2162 | spin_lock_bh(&pn->all_channels_lock); | ||
2163 | pch->file.index = ++pn->last_channel_index; | ||
2164 | list_add(&pch->list, &pn->new_channels); | ||
2165 | atomic_inc(&channel_count); | ||
2166 | spin_unlock_bh(&pn->all_channels_lock); | ||
2167 | |||
2168 | return 0; | ||
2169 | } | ||
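/*
 * Editorial sketch (not in the original file): minimal registration by
 * a channel driver.  The field and ops names follow
 * include/linux/ppp_channel.h; everything prefixed my_ is hypothetical,
 * and my_chan_start_xmit() is sketched near ppp_output_wakeup() below.
 */
static int my_chan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb);

static int my_chan_ioctl(struct ppp_channel *chan, unsigned int cmd,
			 unsigned long arg)
{
	return -ENOTTY;			/* no channel-specific ioctls */
}

static const struct ppp_channel_ops my_chan_ops = {
	.start_xmit = my_chan_start_xmit,
	.ioctl	    = my_chan_ioctl,
};

static int my_chan_setup(struct my_chan_priv *priv)
{
	priv->chan.private = priv;
	priv->chan.ops	   = &my_chan_ops;
	priv->chan.mtu	   = 1500;	/* largest payload the link takes */
	priv->chan.hdrlen  = 2;		/* headroom this driver needs */
	return ppp_register_channel(&priv->chan);
}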
2170 | |||
2171 | /* | ||
2172 | * Return the index of a channel. | ||
2173 | */ | ||
2174 | int ppp_channel_index(struct ppp_channel *chan) | ||
2175 | { | ||
2176 | struct channel *pch = chan->ppp; | ||
2177 | |||
2178 | if (pch) | ||
2179 | return pch->file.index; | ||
2180 | return -1; | ||
2181 | } | ||
2182 | |||
2183 | /* | ||
2184 | * Return the PPP unit number to which a channel is connected. | ||
2185 | */ | ||
2186 | int ppp_unit_number(struct ppp_channel *chan) | ||
2187 | { | ||
2188 | struct channel *pch = chan->ppp; | ||
2189 | int unit = -1; | ||
2190 | |||
2191 | if (pch) { | ||
2192 | read_lock_bh(&pch->upl); | ||
2193 | if (pch->ppp) | ||
2194 | unit = pch->ppp->file.index; | ||
2195 | read_unlock_bh(&pch->upl); | ||
2196 | } | ||
2197 | return unit; | ||
2198 | } | ||
2199 | |||
2200 | /* | ||
2201 | * Return the PPP device interface name of a channel. | ||
2202 | */ | ||
2203 | char *ppp_dev_name(struct ppp_channel *chan) | ||
2204 | { | ||
2205 | struct channel *pch = chan->ppp; | ||
2206 | char *name = NULL; | ||
2207 | |||
2208 | if (pch) { | ||
2209 | read_lock_bh(&pch->upl); | ||
2210 | if (pch->ppp && pch->ppp->dev) | ||
2211 | name = pch->ppp->dev->name; | ||
2212 | read_unlock_bh(&pch->upl); | ||
2213 | } | ||
2214 | return name; | ||
2215 | } | ||
2216 | |||
2217 | |||
2218 | /* | ||
2219 | * Disconnect a channel from the generic layer. | ||
2220 | * This must be called in process context. | ||
2221 | */ | ||
2222 | void | ||
2223 | ppp_unregister_channel(struct ppp_channel *chan) | ||
2224 | { | ||
2225 | struct channel *pch = chan->ppp; | ||
2226 | struct ppp_net *pn; | ||
2227 | |||
2228 | if (!pch) | ||
2229 | return; /* should never happen */ | ||
2230 | |||
2231 | chan->ppp = NULL; | ||
2232 | |||
2233 | /* | ||
2234 | * This ensures that we have returned from any calls into | ||
2235 | * the channel's start_xmit or ioctl routine before we proceed. | ||
2236 | */ | ||
2237 | down_write(&pch->chan_sem); | ||
2238 | spin_lock_bh(&pch->downl); | ||
2239 | pch->chan = NULL; | ||
2240 | spin_unlock_bh(&pch->downl); | ||
2241 | up_write(&pch->chan_sem); | ||
2242 | ppp_disconnect_channel(pch); | ||
2243 | |||
2244 | pn = ppp_pernet(pch->chan_net); | ||
2245 | spin_lock_bh(&pn->all_channels_lock); | ||
2246 | list_del(&pch->list); | ||
2247 | spin_unlock_bh(&pn->all_channels_lock); | ||
2248 | |||
2249 | pch->file.dead = 1; | ||
2250 | wake_up_interruptible(&pch->file.rwait); | ||
2251 | if (atomic_dec_and_test(&pch->file.refcnt)) | ||
2252 | ppp_destroy_channel(pch); | ||
2253 | } | ||
2254 | |||
2255 | /* | ||
2256 | * Callback from a channel when it can accept more to transmit. | ||
2257 | * This should be called at BH/softirq level, not interrupt level. | ||
2258 | */ | ||
2259 | void | ||
2260 | ppp_output_wakeup(struct ppp_channel *chan) | ||
2261 | { | ||
2262 | struct channel *pch = chan->ppp; | ||
2263 | |||
2264 | if (!pch) | ||
2265 | return; | ||
2266 | ppp_channel_push(pch); | ||
2267 | } | ||
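/*
 * Editorial sketch of the flow-control contract implied above: a
 * channel's start_xmit returns 1 once it owns the skb and 0 when the
 * channel is busy, in which case the generic layer keeps the packet
 * and retries after ppp_output_wakeup().  The my_hw_* hooks are
 * hypothetical stand-ins for real driver plumbing.
 */
static bool my_hw_queue_full(struct my_chan_priv *priv);
static void my_hw_queue(struct my_chan_priv *priv, struct sk_buff *skb);

static int my_chan_start_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct my_chan_priv *priv = chan->private;

	if (my_hw_queue_full(priv))
		return 0;		/* busy: generic layer will retry */
	my_hw_queue(priv, skb);		/* the skb is ours now */
	return 1;
}

/* called from the driver's TX-done path, at BH level */
static void my_chan_tx_done(struct my_chan_priv *priv)
{
	ppp_output_wakeup(&priv->chan);
}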
2268 | |||
2269 | /* | ||
2270 | * Compression control. | ||
2271 | */ | ||
2272 | |||
2273 | /* Process the PPPIOCSCOMPRESS ioctl. */ | ||
2274 | static int | ||
2275 | ppp_set_compress(struct ppp *ppp, unsigned long arg) | ||
2276 | { | ||
2277 | int err; | ||
2278 | struct compressor *cp, *ocomp; | ||
2279 | struct ppp_option_data data; | ||
2280 | void *state, *ostate; | ||
2281 | unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; | ||
2282 | |||
2283 | err = -EFAULT; | ||
2284 | if (copy_from_user(&data, (void __user *) arg, sizeof(data)) || | ||
2285 | (data.length <= CCP_MAX_OPTION_LENGTH && | ||
2286 | copy_from_user(ccp_option, (void __user *) data.ptr, data.length))) | ||
2287 | goto out; | ||
2288 | err = -EINVAL; | ||
2289 | if (data.length > CCP_MAX_OPTION_LENGTH || | ||
2290 | ccp_option[1] < 2 || ccp_option[1] > data.length) | ||
2291 | goto out; | ||
2292 | |||
2293 | cp = try_then_request_module( | ||
2294 | find_compressor(ccp_option[0]), | ||
2295 | "ppp-compress-%d", ccp_option[0]); | ||
2296 | if (!cp) | ||
2297 | goto out; | ||
2298 | |||
2299 | err = -ENOBUFS; | ||
2300 | if (data.transmit) { | ||
2301 | state = cp->comp_alloc(ccp_option, data.length); | ||
2302 | if (state) { | ||
2303 | ppp_xmit_lock(ppp); | ||
2304 | ppp->xstate &= ~SC_COMP_RUN; | ||
2305 | ocomp = ppp->xcomp; | ||
2306 | ostate = ppp->xc_state; | ||
2307 | ppp->xcomp = cp; | ||
2308 | ppp->xc_state = state; | ||
2309 | ppp_xmit_unlock(ppp); | ||
2310 | if (ostate) { | ||
2311 | ocomp->comp_free(ostate); | ||
2312 | module_put(ocomp->owner); | ||
2313 | } | ||
2314 | err = 0; | ||
2315 | } else | ||
2316 | module_put(cp->owner); | ||
2317 | |||
2318 | } else { | ||
2319 | state = cp->decomp_alloc(ccp_option, data.length); | ||
2320 | if (state) { | ||
2321 | ppp_recv_lock(ppp); | ||
2322 | ppp->rstate &= ~SC_DECOMP_RUN; | ||
2323 | ocomp = ppp->rcomp; | ||
2324 | ostate = ppp->rc_state; | ||
2325 | ppp->rcomp = cp; | ||
2326 | ppp->rc_state = state; | ||
2327 | ppp_recv_unlock(ppp); | ||
2328 | if (ostate) { | ||
2329 | ocomp->decomp_free(ostate); | ||
2330 | module_put(ocomp->owner); | ||
2331 | } | ||
2332 | err = 0; | ||
2333 | } else | ||
2334 | module_put(cp->owner); | ||
2335 | } | ||
2336 | |||
2337 | out: | ||
2338 | return err; | ||
2339 | } | ||
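/*
 * Editorial sketch of the matching userspace call (pppd issues the
 * equivalent once CCP negotiation settles).  The option bytes are
 * placeholders, not a real negotiated CCP option.
 */
#if 0	/* userspace illustration, not kernel code */
#include <sys/ioctl.h>
#include <linux/if_ppp.h>

static int set_tx_compressor(int unit_fd)
{
	unsigned char opt[] = { 0x1a, 0x04, 0x00, 0x01 };	/* placeholder */
	struct ppp_option_data data = {
		.ptr	  = opt,
		.length	  = sizeof(opt),
		.transmit = 1,		/* 0 configures the receive side */
	};

	return ioctl(unit_fd, PPPIOCSCOMPRESS, &data);
}
#endif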
2340 | |||
2341 | /* | ||
2342 | * Look at a CCP packet and update our state accordingly. | ||
2343 | * We assume the caller has the xmit or recv path locked. | ||
2344 | */ | ||
2345 | static void | ||
2346 | ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound) | ||
2347 | { | ||
2348 | unsigned char *dp; | ||
2349 | int len; | ||
2350 | |||
2351 | if (!pskb_may_pull(skb, CCP_HDRLEN + 2)) | ||
2352 | return; /* no header */ | ||
2353 | dp = skb->data + 2; | ||
2354 | |||
2355 | switch (CCP_CODE(dp)) { | ||
2356 | case CCP_CONFREQ: | ||
2357 | |||
2358 | /* A ConfReq starts negotiation of compression | ||
2359 | * in one direction of transmission, | ||
2360 | * and hence brings it down... but which way? | ||
2361 | * | ||
2362 | * Remember: | ||
2363 | * a ConfReq indicates what the sender would like to receive. | ||
2364 | */ | ||
2365 | if (inbound) | ||
2366 | /* the peer is proposing what we should send */ | ||
2367 | ppp->xstate &= ~SC_COMP_RUN; | ||
2368 | else | ||
2369 | /* we are proposing what the peer should send */ | ||
2370 | ppp->rstate &= ~SC_DECOMP_RUN; | ||
2371 | |||
2372 | break; | ||
2373 | |||
2374 | case CCP_TERMREQ: | ||
2375 | case CCP_TERMACK: | ||
2376 | /* | ||
2377 | * CCP is going down, both directions of transmission | ||
2378 | */ | ||
2379 | ppp->rstate &= ~SC_DECOMP_RUN; | ||
2380 | ppp->xstate &= ~SC_COMP_RUN; | ||
2381 | break; | ||
2382 | |||
2383 | case CCP_CONFACK: | ||
2384 | if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN) | ||
2385 | break; | ||
2386 | len = CCP_LENGTH(dp); | ||
2387 | if (!pskb_may_pull(skb, len + 2)) | ||
2388 | return; /* too short */ | ||
2389 | dp += CCP_HDRLEN; | ||
2390 | len -= CCP_HDRLEN; | ||
2391 | if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp)) | ||
2392 | break; | ||
2393 | if (inbound) { | ||
2394 | /* we will start receiving compressed packets */ | ||
2395 | if (!ppp->rc_state) | ||
2396 | break; | ||
2397 | if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len, | ||
2398 | ppp->file.index, 0, ppp->mru, ppp->debug)) { | ||
2399 | ppp->rstate |= SC_DECOMP_RUN; | ||
2400 | ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR); | ||
2401 | } | ||
2402 | } else { | ||
2403 | /* we will soon start sending compressed packets */ | ||
2404 | if (!ppp->xc_state) | ||
2405 | break; | ||
2406 | if (ppp->xcomp->comp_init(ppp->xc_state, dp, len, | ||
2407 | ppp->file.index, 0, ppp->debug)) | ||
2408 | ppp->xstate |= SC_COMP_RUN; | ||
2409 | } | ||
2410 | break; | ||
2411 | |||
2412 | case CCP_RESETACK: | ||
2413 | /* reset the [de]compressor */ | ||
2414 | if ((ppp->flags & SC_CCP_UP) == 0) | ||
2415 | break; | ||
2416 | if (inbound) { | ||
2417 | if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) { | ||
2418 | ppp->rcomp->decomp_reset(ppp->rc_state); | ||
2419 | ppp->rstate &= ~SC_DC_ERROR; | ||
2420 | } | ||
2421 | } else { | ||
2422 | if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN)) | ||
2423 | ppp->xcomp->comp_reset(ppp->xc_state); | ||
2424 | } | ||
2425 | break; | ||
2426 | } | ||
2427 | } | ||
2428 | |||
2429 | /* Free up compression resources. */ | ||
2430 | static void | ||
2431 | ppp_ccp_closed(struct ppp *ppp) | ||
2432 | { | ||
2433 | void *xstate, *rstate; | ||
2434 | struct compressor *xcomp, *rcomp; | ||
2435 | |||
2436 | ppp_lock(ppp); | ||
2437 | ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP); | ||
2438 | ppp->xstate = 0; | ||
2439 | xcomp = ppp->xcomp; | ||
2440 | xstate = ppp->xc_state; | ||
2441 | ppp->xc_state = NULL; | ||
2442 | ppp->rstate = 0; | ||
2443 | rcomp = ppp->rcomp; | ||
2444 | rstate = ppp->rc_state; | ||
2445 | ppp->rc_state = NULL; | ||
2446 | ppp_unlock(ppp); | ||
2447 | |||
2448 | if (xstate) { | ||
2449 | xcomp->comp_free(xstate); | ||
2450 | module_put(xcomp->owner); | ||
2451 | } | ||
2452 | if (rstate) { | ||
2453 | rcomp->decomp_free(rstate); | ||
2454 | module_put(rcomp->owner); | ||
2455 | } | ||
2456 | } | ||
2457 | |||
2458 | /* List of compressors. */ | ||
2459 | static LIST_HEAD(compressor_list); | ||
2460 | static DEFINE_SPINLOCK(compressor_list_lock); | ||
2461 | |||
2462 | struct compressor_entry { | ||
2463 | struct list_head list; | ||
2464 | struct compressor *comp; | ||
2465 | }; | ||
2466 | |||
2467 | static struct compressor_entry * | ||
2468 | find_comp_entry(int proto) | ||
2469 | { | ||
2470 | struct compressor_entry *ce; | ||
2471 | |||
2472 | list_for_each_entry(ce, &compressor_list, list) { | ||
2473 | if (ce->comp->compress_proto == proto) | ||
2474 | return ce; | ||
2475 | } | ||
2476 | return NULL; | ||
2477 | } | ||
2478 | |||
2479 | /* Register a compressor */ | ||
2480 | int | ||
2481 | ppp_register_compressor(struct compressor *cp) | ||
2482 | { | ||
2483 | struct compressor_entry *ce; | ||
2484 | int ret; | ||
2485 | spin_lock(&compressor_list_lock); | ||
2486 | ret = -EEXIST; | ||
2487 | if (find_comp_entry(cp->compress_proto)) | ||
2488 | goto out; | ||
2489 | ret = -ENOMEM; | ||
2490 | ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC); | ||
2491 | if (!ce) | ||
2492 | goto out; | ||
2493 | ret = 0; | ||
2494 | ce->comp = cp; | ||
2495 | list_add(&ce->list, &compressor_list); | ||
2496 | out: | ||
2497 | spin_unlock(&compressor_list_lock); | ||
2498 | return ret; | ||
2499 | } | ||
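/*
 * Editorial sketch (not in the original file): how a compressor module
 * typically hooks in.  Only compress_proto and owner are filled in
 * here; a real compressor also provides the alloc/init/[de]compress
 * hooks from struct compressor.  The my_ names are hypothetical.
 */
static struct compressor my_compressor = {
	.compress_proto	= CI_BSD_COMPRESS,	/* CCP config option */
	/* .comp_alloc, .comp_init, .compress, .decompress, ... */
	.owner		= THIS_MODULE,
};

static int __init my_comp_init(void)
{
	return ppp_register_compressor(&my_compressor);
}

static void __exit my_comp_exit(void)
{
	ppp_unregister_compressor(&my_compressor);
}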
2500 | |||
2501 | /* Unregister a compressor */ | ||
2502 | void | ||
2503 | ppp_unregister_compressor(struct compressor *cp) | ||
2504 | { | ||
2505 | struct compressor_entry *ce; | ||
2506 | |||
2507 | spin_lock(&compressor_list_lock); | ||
2508 | ce = find_comp_entry(cp->compress_proto); | ||
2509 | if (ce && ce->comp == cp) { | ||
2510 | list_del(&ce->list); | ||
2511 | kfree(ce); | ||
2512 | } | ||
2513 | spin_unlock(&compressor_list_lock); | ||
2514 | } | ||
2515 | |||
2516 | /* Find a compressor. */ | ||
2517 | static struct compressor * | ||
2518 | find_compressor(int type) | ||
2519 | { | ||
2520 | struct compressor_entry *ce; | ||
2521 | struct compressor *cp = NULL; | ||
2522 | |||
2523 | spin_lock(&compressor_list_lock); | ||
2524 | ce = find_comp_entry(type); | ||
2525 | if (ce) { | ||
2526 | cp = ce->comp; | ||
2527 | if (!try_module_get(cp->owner)) | ||
2528 | cp = NULL; | ||
2529 | } | ||
2530 | spin_unlock(&compressor_list_lock); | ||
2531 | return cp; | ||
2532 | } | ||
2533 | |||
2534 | /* | ||
2535 | * Miscellaneous stuff. | ||
2536 | */ | ||
2537 | |||
2538 | static void | ||
2539 | ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) | ||
2540 | { | ||
2541 | struct slcompress *vj = ppp->vj; | ||
2542 | |||
2543 | memset(st, 0, sizeof(*st)); | ||
2544 | st->p.ppp_ipackets = ppp->dev->stats.rx_packets; | ||
2545 | st->p.ppp_ierrors = ppp->dev->stats.rx_errors; | ||
2546 | st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; | ||
2547 | st->p.ppp_opackets = ppp->dev->stats.tx_packets; | ||
2548 | st->p.ppp_oerrors = ppp->dev->stats.tx_errors; | ||
2549 | st->p.ppp_obytes = ppp->dev->stats.tx_bytes; | ||
2550 | if (!vj) | ||
2551 | return; | ||
2552 | st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; | ||
2553 | st->vj.vjs_compressed = vj->sls_o_compressed; | ||
2554 | st->vj.vjs_searches = vj->sls_o_searches; | ||
2555 | st->vj.vjs_misses = vj->sls_o_misses; | ||
2556 | st->vj.vjs_errorin = vj->sls_i_error; | ||
2557 | st->vj.vjs_tossed = vj->sls_i_tossed; | ||
2558 | st->vj.vjs_uncompressedin = vj->sls_i_uncompressed; | ||
2559 | st->vj.vjs_compressedin = vj->sls_i_compressed; | ||
2560 | } | ||
2561 | |||
2562 | /* | ||
2563 | * Stuff for handling the lists of ppp units and channels | ||
2564 | * and for initialization. | ||
2565 | */ | ||
2566 | |||
2567 | /* | ||
2568 | * Create a new ppp interface unit. Fails if it can't allocate memory | ||
2569 | * or if there is already a unit with the requested number. | ||
2570 | * unit == -1 means allocate a new number. | ||
2571 | */ | ||
2572 | static struct ppp * | ||
2573 | ppp_create_interface(struct net *net, int unit, int *retp) | ||
2574 | { | ||
2575 | struct ppp *ppp; | ||
2576 | struct ppp_net *pn; | ||
2577 | struct net_device *dev = NULL; | ||
2578 | int ret = -ENOMEM; | ||
2579 | int i; | ||
2580 | |||
2581 | dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup); | ||
2582 | if (!dev) | ||
2583 | goto out1; | ||
2584 | |||
2585 | pn = ppp_pernet(net); | ||
2586 | |||
2587 | ppp = netdev_priv(dev); | ||
2588 | ppp->dev = dev; | ||
2589 | ppp->mru = PPP_MRU; | ||
2590 | init_ppp_file(&ppp->file, INTERFACE); | ||
2591 | ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ | ||
2592 | for (i = 0; i < NUM_NP; ++i) | ||
2593 | ppp->npmode[i] = NPMODE_PASS; | ||
2594 | INIT_LIST_HEAD(&ppp->channels); | ||
2595 | spin_lock_init(&ppp->rlock); | ||
2596 | spin_lock_init(&ppp->wlock); | ||
2597 | #ifdef CONFIG_PPP_MULTILINK | ||
2598 | ppp->minseq = -1; | ||
2599 | skb_queue_head_init(&ppp->mrq); | ||
2600 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2601 | |||
2602 | /* | ||
2603 | * Remember which network namespace this device | ||
2604 | * belongs to before registering it. | ||
2605 | */ | ||
2606 | dev_net_set(dev, net); | ||
2607 | |||
2608 | mutex_lock(&pn->all_ppp_mutex); | ||
2609 | |||
2610 | if (unit < 0) { | ||
2611 | unit = unit_get(&pn->units_idr, ppp); | ||
2612 | if (unit < 0) { | ||
2613 | ret = unit; | ||
2614 | goto out2; | ||
2615 | } | ||
2616 | } else { | ||
2617 | ret = -EEXIST; | ||
2618 | if (unit_find(&pn->units_idr, unit)) | ||
2619 | goto out2; /* unit already exists */ | ||
2620 | /* | ||
2621 | * The caller asked for a specific unit number; try to | ||
2622 | * satisfy the request. | ||
2623 | * | ||
2624 | * NOTE: returning -EEXIST is not strictly accurate here, | ||
2625 | * but pppd reacts by asking us to allocate a new unit, | ||
2626 | * so the caller still gets a usable interface. | ||
2627 | */ | ||
2629 | unit = unit_set(&pn->units_idr, ppp, unit); | ||
2630 | if (unit < 0) | ||
2631 | goto out2; | ||
2632 | } | ||
2633 | |||
2634 | /* Initialize the new ppp unit */ | ||
2635 | ppp->file.index = unit; | ||
2636 | sprintf(dev->name, "ppp%d", unit); | ||
2637 | |||
2638 | ret = register_netdev(dev); | ||
2639 | if (ret != 0) { | ||
2640 | unit_put(&pn->units_idr, unit); | ||
2641 | netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", | ||
2642 | dev->name, ret); | ||
2643 | goto out2; | ||
2644 | } | ||
2645 | |||
2646 | ppp->ppp_net = net; | ||
2647 | |||
2648 | atomic_inc(&ppp_unit_count); | ||
2649 | mutex_unlock(&pn->all_ppp_mutex); | ||
2650 | |||
2651 | *retp = 0; | ||
2652 | return ppp; | ||
2653 | |||
2654 | out2: | ||
2655 | mutex_unlock(&pn->all_ppp_mutex); | ||
2656 | free_netdev(dev); | ||
2657 | out1: | ||
2658 | *retp = ret; | ||
2659 | return NULL; | ||
2660 | } | ||
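/*
 * Editorial sketch of how this path is reached: pppd opens /dev/ppp
 * and issues PPPIOCNEWUNIT, which lands here via the unit ioctl
 * handler.
 */
#if 0	/* userspace illustration, not kernel code */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>

static int new_ppp_unit(int *unit)
{
	int fd = open("/dev/ppp", O_RDWR);

	*unit = -1;			/* -1: let the kernel pick */
	if (fd < 0 || ioctl(fd, PPPIOCNEWUNIT, unit) < 0)
		return -1;
	/* the interface ppp<unit> now exists, owned by this fd */
	return fd;
}
#endif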
2661 | |||
2662 | /* | ||
2663 | * Initialize a ppp_file structure. | ||
2664 | */ | ||
2665 | static void | ||
2666 | init_ppp_file(struct ppp_file *pf, int kind) | ||
2667 | { | ||
2668 | pf->kind = kind; | ||
2669 | skb_queue_head_init(&pf->xq); | ||
2670 | skb_queue_head_init(&pf->rq); | ||
2671 | atomic_set(&pf->refcnt, 1); | ||
2672 | init_waitqueue_head(&pf->rwait); | ||
2673 | } | ||
2674 | |||
2675 | /* | ||
2676 | * Take down a ppp interface unit - called when the owning file | ||
2677 | * (the one that created the unit) is closed or detached. | ||
2678 | */ | ||
2679 | static void ppp_shutdown_interface(struct ppp *ppp) | ||
2680 | { | ||
2681 | struct ppp_net *pn; | ||
2682 | |||
2683 | pn = ppp_pernet(ppp->ppp_net); | ||
2684 | mutex_lock(&pn->all_ppp_mutex); | ||
2685 | |||
2686 | /* This will call dev_close() for us. */ | ||
2687 | ppp_lock(ppp); | ||
2688 | if (!ppp->closing) { | ||
2689 | ppp->closing = 1; | ||
2690 | ppp_unlock(ppp); | ||
2691 | unregister_netdev(ppp->dev); | ||
2692 | unit_put(&pn->units_idr, ppp->file.index); | ||
2693 | } else | ||
2694 | ppp_unlock(ppp); | ||
2695 | |||
2696 | ppp->file.dead = 1; | ||
2697 | ppp->owner = NULL; | ||
2698 | wake_up_interruptible(&ppp->file.rwait); | ||
2699 | |||
2700 | mutex_unlock(&pn->all_ppp_mutex); | ||
2701 | } | ||
2702 | |||
2703 | /* | ||
2704 | * Free the memory used by a ppp unit. This is only called once | ||
2705 | * there are no channels connected to the unit and no file structs | ||
2706 | * that reference the unit. | ||
2707 | */ | ||
2708 | static void ppp_destroy_interface(struct ppp *ppp) | ||
2709 | { | ||
2710 | atomic_dec(&ppp_unit_count); | ||
2711 | |||
2712 | if (!ppp->file.dead || ppp->n_channels) { | ||
2713 | /* "can't happen" */ | ||
2714 | netdev_err(ppp->dev, "ppp: destroying ppp struct %p " | ||
2715 | "but dead=%d n_channels=%d !\n", | ||
2716 | ppp, ppp->file.dead, ppp->n_channels); | ||
2717 | return; | ||
2718 | } | ||
2719 | |||
2720 | ppp_ccp_closed(ppp); | ||
2721 | if (ppp->vj) { | ||
2722 | slhc_free(ppp->vj); | ||
2723 | ppp->vj = NULL; | ||
2724 | } | ||
2725 | skb_queue_purge(&ppp->file.xq); | ||
2726 | skb_queue_purge(&ppp->file.rq); | ||
2727 | #ifdef CONFIG_PPP_MULTILINK | ||
2728 | skb_queue_purge(&ppp->mrq); | ||
2729 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2730 | #ifdef CONFIG_PPP_FILTER | ||
2731 | kfree(ppp->pass_filter); | ||
2732 | ppp->pass_filter = NULL; | ||
2733 | kfree(ppp->active_filter); | ||
2734 | ppp->active_filter = NULL; | ||
2735 | #endif /* CONFIG_PPP_FILTER */ | ||
2736 | |||
2737 | kfree_skb(ppp->xmit_pending); | ||
2738 | |||
2739 | free_netdev(ppp->dev); | ||
2740 | } | ||
2741 | |||
2742 | /* | ||
2743 | * Locate an existing ppp unit. | ||
2744 | * The caller should have locked the all_ppp_mutex. | ||
2745 | */ | ||
2746 | static struct ppp * | ||
2747 | ppp_find_unit(struct ppp_net *pn, int unit) | ||
2748 | { | ||
2749 | return unit_find(&pn->units_idr, unit); | ||
2750 | } | ||
2751 | |||
2752 | /* | ||
2753 | * Locate an existing ppp channel. | ||
2754 | * The caller should have locked the all_channels_lock. | ||
2755 | * First we look in the new_channels list, then in the | ||
2756 | * all_channels list. If found in the new_channels list, | ||
2757 | * we move it to the all_channels list. This is for speed | ||
2758 | * when we have a lot of channels in use. | ||
2759 | */ | ||
2760 | static struct channel * | ||
2761 | ppp_find_channel(struct ppp_net *pn, int unit) | ||
2762 | { | ||
2763 | struct channel *pch; | ||
2764 | |||
2765 | list_for_each_entry(pch, &pn->new_channels, list) { | ||
2766 | if (pch->file.index == unit) { | ||
2767 | list_move(&pch->list, &pn->all_channels); | ||
2768 | return pch; | ||
2769 | } | ||
2770 | } | ||
2771 | |||
2772 | list_for_each_entry(pch, &pn->all_channels, list) { | ||
2773 | if (pch->file.index == unit) | ||
2774 | return pch; | ||
2775 | } | ||
2776 | |||
2777 | return NULL; | ||
2778 | } | ||
2779 | |||
2780 | /* | ||
2781 | * Connect a PPP channel to a PPP interface unit. | ||
2782 | */ | ||
2783 | static int | ||
2784 | ppp_connect_channel(struct channel *pch, int unit) | ||
2785 | { | ||
2786 | struct ppp *ppp; | ||
2787 | struct ppp_net *pn; | ||
2788 | int ret = -ENXIO; | ||
2789 | int hdrlen; | ||
2790 | |||
2791 | pn = ppp_pernet(pch->chan_net); | ||
2792 | |||
2793 | mutex_lock(&pn->all_ppp_mutex); | ||
2794 | ppp = ppp_find_unit(pn, unit); | ||
2795 | if (!ppp) | ||
2796 | goto out; | ||
2797 | write_lock_bh(&pch->upl); | ||
2798 | ret = -EINVAL; | ||
2799 | if (pch->ppp) | ||
2800 | goto outl; | ||
2801 | |||
2802 | ppp_lock(ppp); | ||
2803 | if (pch->file.hdrlen > ppp->file.hdrlen) | ||
2804 | ppp->file.hdrlen = pch->file.hdrlen; | ||
2805 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ | ||
2806 | if (hdrlen > ppp->dev->hard_header_len) | ||
2807 | ppp->dev->hard_header_len = hdrlen; | ||
2808 | list_add_tail(&pch->clist, &ppp->channels); | ||
2809 | ++ppp->n_channels; | ||
2810 | pch->ppp = ppp; | ||
2811 | atomic_inc(&ppp->file.refcnt); | ||
2812 | ppp_unlock(ppp); | ||
2813 | ret = 0; | ||
2814 | |||
2815 | outl: | ||
2816 | write_unlock_bh(&pch->upl); | ||
2817 | out: | ||
2818 | mutex_unlock(&pn->all_ppp_mutex); | ||
2819 | return ret; | ||
2820 | } | ||
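/*
 * Editorial sketch of the userspace side of this call: a channel fd is
 * bound with PPPIOCATTCHAN and then connected to a unit with
 * PPPIOCCONNECT, which is what ends up in ppp_connect_channel().
 */
#if 0	/* userspace illustration, not kernel code */
static int connect_channel(int chan_index, int unit)
{
	int fd = open("/dev/ppp", O_RDWR);

	if (fd < 0 ||
	    ioctl(fd, PPPIOCATTCHAN, &chan_index) < 0 ||
	    ioctl(fd, PPPIOCCONNECT, &unit) < 0)
		return -1;
	return fd;		/* keep open while the link is up */
}
#endif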
2821 | |||
2822 | /* | ||
2823 | * Disconnect a channel from its ppp unit. | ||
2824 | */ | ||
2825 | static int | ||
2826 | ppp_disconnect_channel(struct channel *pch) | ||
2827 | { | ||
2828 | struct ppp *ppp; | ||
2829 | int err = -EINVAL; | ||
2830 | |||
2831 | write_lock_bh(&pch->upl); | ||
2832 | ppp = pch->ppp; | ||
2833 | pch->ppp = NULL; | ||
2834 | write_unlock_bh(&pch->upl); | ||
2835 | if (ppp) { | ||
2836 | /* remove it from the ppp unit's list */ | ||
2837 | ppp_lock(ppp); | ||
2838 | list_del(&pch->clist); | ||
2839 | if (--ppp->n_channels == 0) | ||
2840 | wake_up_interruptible(&ppp->file.rwait); | ||
2841 | ppp_unlock(ppp); | ||
2842 | if (atomic_dec_and_test(&ppp->file.refcnt)) | ||
2843 | ppp_destroy_interface(ppp); | ||
2844 | err = 0; | ||
2845 | } | ||
2846 | return err; | ||
2847 | } | ||
2848 | |||
2849 | /* | ||
2850 | * Free up the resources used by a ppp channel. | ||
2851 | */ | ||
2852 | static void ppp_destroy_channel(struct channel *pch) | ||
2853 | { | ||
2854 | atomic_dec(&channel_count); | ||
2855 | |||
2856 | if (!pch->file.dead) { | ||
2857 | /* "can't happen" */ | ||
2858 | pr_err("ppp: destroying undead channel %p !\n", pch); | ||
2859 | return; | ||
2860 | } | ||
2861 | skb_queue_purge(&pch->file.xq); | ||
2862 | skb_queue_purge(&pch->file.rq); | ||
2863 | kfree(pch); | ||
2864 | } | ||
2865 | |||
2866 | static void __exit ppp_cleanup(void) | ||
2867 | { | ||
2868 | /* should never happen */ | ||
2869 | if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) | ||
2870 | pr_err("PPP: removing module but units remain!\n"); | ||
2871 | unregister_chrdev(PPP_MAJOR, "ppp"); | ||
2872 | device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); | ||
2873 | class_destroy(ppp_class); | ||
2874 | unregister_pernet_device(&ppp_net_ops); | ||
2875 | } | ||
2876 | |||
2877 | /* | ||
2878 | * Units handling. Caller must protect concurrent access | ||
2879 | * by holding all_ppp_mutex | ||
2880 | */ | ||
2881 | |||
2882 | static int __unit_alloc(struct idr *p, void *ptr, int n) | ||
2883 | { | ||
2884 | int unit, err; | ||
2885 | |||
2886 | again: | ||
2887 | if (!idr_pre_get(p, GFP_KERNEL)) { | ||
2888 | pr_err("PPP: No free memory for idr\n"); | ||
2889 | return -ENOMEM; | ||
2890 | } | ||
2891 | |||
2892 | err = idr_get_new_above(p, ptr, n, &unit); | ||
2893 | if (err < 0) { | ||
2894 | if (err == -EAGAIN) | ||
2895 | goto again; | ||
2896 | return err; | ||
2897 | } | ||
2898 | |||
2899 | return unit; | ||
2900 | } | ||
2901 | |||
2902 | /* associate pointer with specified number */ | ||
2903 | static int unit_set(struct idr *p, void *ptr, int n) | ||
2904 | { | ||
2905 | int unit; | ||
2906 | |||
2907 | unit = __unit_alloc(p, ptr, n); | ||
2908 | if (unit < 0) | ||
2909 | return unit; | ||
2910 | else if (unit != n) { | ||
2911 | idr_remove(p, unit); | ||
2912 | return -EINVAL; | ||
2913 | } | ||
2914 | |||
2915 | return unit; | ||
2916 | } | ||
2917 | |||
2918 | /* get new free unit number and associate pointer with it */ | ||
2919 | static int unit_get(struct idr *p, void *ptr) | ||
2920 | { | ||
2921 | return __unit_alloc(p, ptr, 0); | ||
2922 | } | ||
2923 | |||
2924 | /* put a unit number back into the pool */ | ||
2925 | static void unit_put(struct idr *p, int n) | ||
2926 | { | ||
2927 | idr_remove(p, n); | ||
2928 | } | ||
2929 | |||
2930 | /* get pointer associated with the number */ | ||
2931 | static void *unit_find(struct idr *p, int n) | ||
2932 | { | ||
2933 | return idr_find(p, n); | ||
2934 | } | ||
2935 | |||
2936 | /* Module/initialization stuff */ | ||
2937 | |||
2938 | module_init(ppp_init); | ||
2939 | module_exit(ppp_cleanup); | ||
2940 | |||
2941 | EXPORT_SYMBOL(ppp_register_net_channel); | ||
2942 | EXPORT_SYMBOL(ppp_register_channel); | ||
2943 | EXPORT_SYMBOL(ppp_unregister_channel); | ||
2944 | EXPORT_SYMBOL(ppp_channel_index); | ||
2945 | EXPORT_SYMBOL(ppp_unit_number); | ||
2946 | EXPORT_SYMBOL(ppp_dev_name); | ||
2947 | EXPORT_SYMBOL(ppp_input); | ||
2948 | EXPORT_SYMBOL(ppp_input_error); | ||
2949 | EXPORT_SYMBOL(ppp_output_wakeup); | ||
2950 | EXPORT_SYMBOL(ppp_register_compressor); | ||
2951 | EXPORT_SYMBOL(ppp_unregister_compressor); | ||
2952 | MODULE_LICENSE("GPL"); | ||
2953 | MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); | ||
2954 | MODULE_ALIAS("devname:ppp"); | ||