author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/ppp_generic.c
tags		Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/net/ppp_generic.c')
-rw-r--r--	drivers/net/ppp_generic.c	2746
1 file changed, 2746 insertions(+), 0 deletions(-)

diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
new file mode 100644
index 000000000000..c456dc81b873
--- /dev/null
+++ b/drivers/net/ppp_generic.c
@@ -0,0 +1,2746 @@
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
#define MIN_FRAG_SIZE	64

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		((X *)((char *)(pf) - offsetof(X, file)))

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

#define ROUNDUP(n, x)		(((n) + (x) - 1) / (x))
/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
	struct net_device_stats stats;	/* statistics */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP.
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
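
/*
 * Editor's note (illustrative sketch, not part of the original commit):
 * the ordering above means a path that needs both the unit and a channel
 * must take locks outside-in, e.g.:
 *
 *	read_lock_bh(&pch->upl);	pins pch->ppp
 *	ppp_xmit_lock(pch->ppp);	ppp.wlock
 *	spin_lock_bh(&pch->downl);	channel.downl last
 *
 * Acquiring them in any other order risks an ABBA deadlock against a
 * thread following the documented order.
 */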

/*
 * A cardmap represents a mapping from unsigned integers to pointers,
 * and provides a fast "find lowest unused number" operation.
 * It uses a broad (32-way) tree with a bitmap at each level.
 * It is designed to be space-efficient for small numbers of entries
 * and time-efficient for large numbers of entries.
 */
#define CARDMAP_ORDER	5
#define CARDMAP_WIDTH	(1U << CARDMAP_ORDER)
#define CARDMAP_MASK	(CARDMAP_WIDTH - 1)

struct cardmap {
	int shift;
	unsigned long inuse;
	struct cardmap *parent;
	void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
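
/*
 * Editor's note (illustrative sketch, not part of the original commit):
 * each tree level consumes CARDMAP_ORDER == 5 bits of the unit number,
 * so a leaf node (shift 0) covers indices 0..31 and its parent (shift 5)
 * covers 0..1023.  A lookup just walks down the levels (NULL checks
 * omitted here for brevity):
 *
 *	while (map->shift > 0)
 *		map = map->ptr[(nr >> map->shift) & CARDMAP_MASK];
 *	return map->ptr[nr & CARDMAP_MASK];
 *
 * cardmap_find_first_free() uses the per-node `inuse' bitmap to skip
 * fully-populated subtrees, which is what makes "lowest unused unit
 * number" cheap even with many units.
 */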

/*
 * all_ppp_sem protects the all_ppp_units mapping.
 * It also ensures that finding a ppp unit in the all_ppp_units map
 * and updating its file.refcnt field is atomic.
 */
static DECLARE_MUTEX(all_ppp_sem);
static struct cardmap *all_ppp_units;
static atomic_t ppp_unit_count = ATOMIC_INIT(0);

/*
 * all_channels_lock protects all_channels and last_channel_index,
 * and the atomicity of finding a channel and updating its file.refcnt
 * field.
 */
static DEFINE_SPINLOCK(all_channels_lock);
static LIST_HEAD(all_channels);
static LIST_HEAD(new_channels);
static int last_channel_index;
static atomic_t channel_count = ATOMIC_INIT(0);

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
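
/*
 * Editor's note (illustrative, not part of the original commit): casting
 * the difference to s32 makes these comparisons wrap-safe, the same trick
 * time_before()/time_after() use for jiffies.  For example,
 * seq_before(0xfffffffeU, 1) is true because
 * (s32)(0xfffffffe - 1) == (s32)0xfffffffd is negative, so a sequence
 * number just past the 32-bit wrap point still compares as "after" one
 * just before it.
 */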

/* Prototypes. */
static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
				unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(int unit);
static struct channel *ppp_find_channel(int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);

static struct class_simple *ppp_class;

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
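
/*
 * Editor's note (not part of the original commit): ppp_lock() takes
 * wlock before rlock and ppp_unlock() releases in reverse, matching the
 * lock ordering documented above; paths that mutate ppp->channels or
 * ppp->n_channels use this pair so the transmit and receive sides both
 * see a consistent channel list.
 */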

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
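
/*
 * Editor's note (illustrative userspace sketch, not part of the original
 * commit): a freshly opened fd is unattached and only accepts the
 * PPPIOCNEWUNIT / PPPIOCATTACH / PPPIOCATTCHAN ioctls handled in
 * ppp_unattached_ioctl() below.  Roughly what pppd does to create a unit:
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;				any free unit number
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	fd now owns the new unit
 *	read(fd, buf, sizeof(buf));		receive control frames
 *
 * After PPPIOCNEWUNIT the fd reads and writes whole PPP frames for that
 * unit; a second fd can instead attach to an existing unit or channel.
 */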
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *inode, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf != 0) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;

	ret = count;

	if (pf == 0)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0
			    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (skb == 0)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	if (copy_to_user(buf, skb->data, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (pf == 0)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (skb == 0)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	skb_queue_tail(&pf->xq, skb);

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf));
		break;
	case CHANNEL:
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask;

	if (pf == 0)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq) != 0)
		mask |= POLLIN | POLLRDNORM;
	if (pf->dead)
		mask |= POLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);
		if (ppp->n_channels == 0
		    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (uprog.len > BPF_MAXINSNS)
		return -EINVAL;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = kmalloc(len, GFP_KERNEL);
	if (code == NULL)
		return -ENOMEM;

	if (copy_from_user(code, uprog.filter, len)) {
		kfree(code);
		return -EFAULT;
	}

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */
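
/*
 * Editor's note (hypothetical usage sketch, not part of the original
 * commit): get_filter() returns the instruction count on success, which
 * is why the PPPIOCSPASS / PPPIOCSACTIVE handlers below treat err >= 0
 * as success and store err into pass_len / active_len.  Userspace hands
 * in a classic BPF program, e.g.:
 *
 *	struct sock_filter insns[] = { BPF_STMT(BPF_RET | BPF_K, ~0U) };
 *	struct sock_fprog fp = { .len = 1, .filter = insns };
 *	ioctl(fd, PPPIOCSPASS, &fp);		pass every packet
 */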

static int ppp_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (pf == 0)
		return ppp_unattached_ioctl(pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH, userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_read(&file->f_count) <= 2) {
			ppp_release(inode, file);
			err = 0;
		} else
			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
			       atomic_read(&file->f_count));
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch = PF_TO_CHANNEL(pf);
		struct ppp_channel *chan;

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		printk(KERN_ERR "PPP: not interface or channel??\n");
		return -EINVAL;
	}

	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (vj == 0) {
			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj != 0)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}

	return err;
}

static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
				unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(unit, &err);
		if (ppp == 0)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		down(&all_ppp_sem);
		err = -ENXIO;
		ppp = ppp_find_unit(unit);
		if (ppp != 0) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		up(&all_ppp_sem);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		spin_lock_bh(&all_channels_lock);
		err = -ENXIO;
		chan = ppp_find_channel(unit);
		if (chan != 0) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	return err;
}

static struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.ioctl		= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (!err) {
		ppp_class = class_simple_create(THIS_MODULE, "ppp");
		if (IS_ERR(ppp_class)) {
			err = PTR_ERR(ppp_class);
			goto out_chrdev;
		}
		class_simple_device_add(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
		err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
				    S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
		if (err)
			goto out_class;
	}

 out:
	if (err)
		printk(KERN_ERR "failed to register PPP device (%d)\n", err);
	return err;

 out_class:
	class_simple_device_remove(MKDEV(PPP_MAJOR,0));
	class_simple_destroy(ppp_class);
 out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
	goto out;
}

/*
 * Network interface unit routines.
 */
static int
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = (struct ppp *) dev->priv;
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_headroom(skb) < PPP_HDRLEN) {
		struct sk_buff *ns;

		ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC);
		if (ns == 0)
			goto outf;
		skb_reserve(ns, dev->hard_header_len);
		skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
		kfree_skb(skb);
		skb = ns;
	}
	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	pp[0] = proto >> 8;
	pp[1] = proto;

	netif_stop_queue(dev);
	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return 0;

 outf:
	kfree_skb(skb);
	++ppp->stats.tx_dropped;
	return 0;
}

static struct net_device_stats *
ppp_net_stats(struct net_device *dev)
{
	struct ppp *ppp = (struct ppp *) dev->priv;

	return &ppp->stats;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = dev->priv;
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state != 0)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state != 0)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static void ppp_setup(struct net_device *dev)
{
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MTU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}

/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (ppp->dev != 0) {
		ppp_push(ppp);
		while (ppp->xmit_pending == 0
		       && (skb = skb_dequeue(&ppp->file.xq)) != 0)
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0)
			netif_wake_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter
		    && sk_run_filter(skb, ppp->pass_filter,
				     ppp->pass_len) == 0) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter
		      && sk_run_filter(skb, ppp->active_filter,
				       ppp->active_len) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->stats.tx_packets;
	ppp->stats.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (new_skb == 0) {
			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			kfree_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			kfree_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
	    && proto != PPP_LCP && proto != PPP_CCP) {
		new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len,
				    GFP_ATOMIC);
		if (new_skb == 0) {
			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
			goto drop;
		}
		if (ppp->dev->hard_header_len > PPP_HDRLEN)
			skb_reserve(new_skb,
				    ppp->dev->hard_header_len - PPP_HDRLEN);

		/* compressor still expects A/C bytes in hdr */
		len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
					   new_skb->data, skb->len + 2,
					   ppp->dev->mtu + PPP_HDRLEN);
		if (len > 0 && (ppp->flags & SC_CCP_UP)) {
			kfree_skb(skb);
			skb = new_skb;
			skb_put(skb, len);
			skb_pull(skb, 2);	/* pull off A/C bytes */
		} else {
			/* didn't compress, or CCP not up yet */
			kfree_skb(new_skb);
		}
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (skb == 0)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
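/*
 * Editor's note (worked example, not part of the original commit): with
 * len == 1000 bytes and 3 idle channels, maxch = ROUNDUP(1000, 64) == 16,
 * so nch stays 3 and fragsize = ROUNDUP(1000, 3) == 334; each channel
 * gets roughly a third of the packet, while MIN_FRAG_SIZE stops a short
 * packet from being shredded across many links.
 */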
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int nch, len, fragsize;
	int i, bits, hdrlen, mtu;
	int flen, fnb;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	nch = 0;
	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
	list = &ppp->channels;
	while ((list = list->next) != &ppp->channels) {
		pch = list_entry(list, struct channel, clist);
		nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
		/*
		 * If a channel hasn't had a fragment yet, it has to get
		 * one before we send any fragments on later channels.
		 * If it can't take a fragment now, don't give any
		 * to subsequent channels.
		 */
		if (!pch->had_frag && !pch->avail) {
			while ((list = list->next) != &ppp->channels) {
				pch = list_entry(list, struct channel, clist);
				pch->avail = 0;
			}
			break;
		}
	}
	if (nch == 0)
		return 0;	/* can't take now, leave it in xmit_pending */

	/* Do protocol field compression (XXX this should be optional) */
	p = skb->data;
	len = skb->len;
	if (*p == 0) {
		++p;
		--len;
	}

	/* decide on fragment size */
	fragsize = len;
	if (nch > 1) {
		int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
		if (nch > maxch)
			nch = maxch;
		fragsize = ROUNDUP(fragsize, nch);
	}

	/* skip to the channel after the one we last used
	   and start at that one */
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
	bits = B;
	do {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/* check the channel's mtu and whether it is still attached. */
		spin_lock_bh(&pch->downl);
		if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
			/* can't use this channel */
			spin_unlock_bh(&pch->downl);
			pch->avail = 0;
			if (--nch == 0)
				break;
			continue;
		}

		/*
		 * We have to create multiple fragments for this channel
		 * if fragsize is greater than the channel's mtu.
		 */
		if (fragsize > len)
			fragsize = len;
		for (flen = fragsize; flen > 0; flen -= fnb) {
			fnb = flen;
			if (fnb > mtu + 2 - hdrlen)
				fnb = mtu + 2 - hdrlen;
			if (fnb >= len)
				bits |= E;
			frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
			if (frag == 0)
				goto noskb;
			q = skb_put(frag, fnb + hdrlen);
			/* make the MP header */
			q[0] = PPP_MP >> 8;
			q[1] = PPP_MP;
			if (ppp->flags & SC_MP_XSHORTSEQ) {
				q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
				q[3] = ppp->nxseq;
			} else {
				q[2] = bits;
				q[3] = ppp->nxseq >> 16;
				q[4] = ppp->nxseq >> 8;
				q[5] = ppp->nxseq;
			}

			/* copy the data in */
			memcpy(q + hdrlen, p, fnb);

			/* try to send it down the channel */
			chan = pch->chan;
			if (!chan->ops->start_xmit(chan, frag))
				skb_queue_tail(&pch->file.xq, frag);
			pch->had_frag = 1;
			p += fnb;
			len -= fnb;
			++ppp->nxseq;
			bits = 0;
		}
		spin_unlock_bh(&pch->downl);
	} while (len > 0);
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		printk(KERN_ERR "PPP: no memory (fragment)\n");
	++ppp->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan != 0) {
		while (skb_queue_len(&pch->file.xq) > 0) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_len(&pch->file.xq) == 0) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp != 0)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

/* misuse a few fields of the skb for MP reconstruction */
#define sequence priority
#define BEbits cb[0]
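
/*
 * Editor's note (not part of the original commit): this reuse works
 * because priority and cb[] are otherwise unused while a fragment sits
 * on the reconstruction queue, so skb->sequence below is really
 * skb->priority and skb->BEbits is really skb->cb[0].
 */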
1402 | |||
1403 | static inline void | ||
1404 | ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1405 | { | ||
1406 | ppp_recv_lock(ppp); | ||
1407 | /* ppp->dev == 0 means interface is closing down */ | ||
1408 | if (ppp->dev != 0) | ||
1409 | ppp_receive_frame(ppp, skb, pch); | ||
1410 | else | ||
1411 | kfree_skb(skb); | ||
1412 | ppp_recv_unlock(ppp); | ||
1413 | } | ||
1414 | |||
1415 | void | ||
1416 | ppp_input(struct ppp_channel *chan, struct sk_buff *skb) | ||
1417 | { | ||
1418 | struct channel *pch = chan->ppp; | ||
1419 | int proto; | ||
1420 | |||
1421 | if (pch == 0 || skb->len == 0) { | ||
1422 | kfree_skb(skb); | ||
1423 | return; | ||
1424 | } | ||
1425 | |||
1426 | proto = PPP_PROTO(skb); | ||
1427 | read_lock_bh(&pch->upl); | ||
1428 | if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) { | ||
1429 | /* put it on the channel queue */ | ||
1430 | skb_queue_tail(&pch->file.rq, skb); | ||
1431 | /* drop old frames if queue too long */ | ||
1432 | while (pch->file.rq.qlen > PPP_MAX_RQLEN | ||
1433 | && (skb = skb_dequeue(&pch->file.rq)) != 0) | ||
1434 | kfree_skb(skb); | ||
1435 | wake_up_interruptible(&pch->file.rwait); | ||
1436 | } else { | ||
1437 | ppp_do_recv(pch->ppp, skb, pch); | ||
1438 | } | ||
1439 | read_unlock_bh(&pch->upl); | ||
1440 | } | ||
1441 | |||
1442 | /* Put a 0-length skb in the receive queue as an error indication */ | ||
1443 | void | ||
1444 | ppp_input_error(struct ppp_channel *chan, int code) | ||
1445 | { | ||
1446 | struct channel *pch = chan->ppp; | ||
1447 | struct sk_buff *skb; | ||
1448 | |||
1449 | if (pch == 0) | ||
1450 | return; | ||
1451 | |||
1452 | read_lock_bh(&pch->upl); | ||
1453 | if (pch->ppp != 0) { | ||
1454 | skb = alloc_skb(0, GFP_ATOMIC); | ||
1455 | if (skb != 0) { | ||
1456 | skb->len = 0; /* probably unnecessary */ | ||
1457 | skb->cb[0] = code; | ||
1458 | ppp_do_recv(pch->ppp, skb, pch); | ||
1459 | } | ||
1460 | } | ||
1461 | read_unlock_bh(&pch->upl); | ||
1462 | } | ||
1463 | |||
1464 | /* | ||
1465 | * We come in here to process a received frame. | ||
1466 | * The receive side of the ppp unit is locked. | ||
1467 | */ | ||
1468 | static void | ||
1469 | ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1470 | { | ||
1471 | if (skb->len >= 2) { | ||
1472 | #ifdef CONFIG_PPP_MULTILINK | ||
1473 | /* XXX do channel-level decompression here */ | ||
1474 | if (PPP_PROTO(skb) == PPP_MP) | ||
1475 | ppp_receive_mp_frame(ppp, skb, pch); | ||
1476 | else | ||
1477 | #endif /* CONFIG_PPP_MULTILINK */ | ||
1478 | ppp_receive_nonmp_frame(ppp, skb); | ||
1479 | return; | ||
1480 | } | ||
1481 | |||
1482 | if (skb->len > 0) | ||
1483 | /* note: a 0-length skb is used as an error indication */ | ||
1484 | ++ppp->stats.rx_length_errors; | ||
1485 | |||
1486 | kfree_skb(skb); | ||
1487 | ppp_receive_error(ppp); | ||
1488 | } | ||
1489 | |||
1490 | static void | ||
1491 | ppp_receive_error(struct ppp *ppp) | ||
1492 | { | ||
1493 | ++ppp->stats.rx_errors; | ||
1494 | if (ppp->vj != 0) | ||
1495 | slhc_toss(ppp->vj); | ||
1496 | } | ||
1497 | |||
1498 | static void | ||
1499 | ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) | ||
1500 | { | ||
1501 | struct sk_buff *ns; | ||
1502 | int proto, len, npi; | ||
1503 | |||
1504 | /* | ||
1505 | * Decompress the frame, if compressed. | ||
1506 | * Note that some decompressors need to see uncompressed frames | ||
1507 | * that come in as well as compressed frames. | ||
1508 | */ | ||
1509 | if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN) | ||
1510 | && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0) | ||
1511 | skb = ppp_decompress_frame(ppp, skb); | ||
1512 | |||
1513 | proto = PPP_PROTO(skb); | ||
1514 | switch (proto) { | ||
1515 | case PPP_VJC_COMP: | ||
1516 | /* decompress VJ compressed packets */ | ||
1517 | if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) | ||
1518 | goto err; | ||
1519 | |||
1520 | if (skb_tailroom(skb) < 124) { | ||
1521 | /* copy to a new sk_buff with more tailroom */ | ||
1522 | ns = dev_alloc_skb(skb->len + 128); | ||
1523 | if (ns == 0) { | ||
1524 | printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); | ||
1525 | goto err; | ||
1526 | } | ||
1527 | skb_reserve(ns, 2); | ||
1528 | skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); | ||
1529 | kfree_skb(skb); | ||
1530 | skb = ns; | ||
1531 | } | ||
1532 | else if (!pskb_may_pull(skb, skb->len)) | ||
1533 | goto err; | ||
1534 | |||
1535 | len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); | ||
1536 | if (len <= 0) { | ||
1537 | printk(KERN_DEBUG "PPP: VJ decompression error\n"); | ||
1538 | goto err; | ||
1539 | } | ||
1540 | len += 2; | ||
1541 | if (len > skb->len) | ||
1542 | skb_put(skb, len - skb->len); | ||
1543 | else if (len < skb->len) | ||
1544 | skb_trim(skb, len); | ||
1545 | proto = PPP_IP; | ||
1546 | break; | ||
1547 | |||
1548 | case PPP_VJC_UNCOMP: | ||
1549 | if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) | ||
1550 | goto err; | ||
1551 | |||
1552 | /* Until we fix the decompressor need to make sure | ||
1553 | * data portion is linear. | ||
1554 | */ | ||
1555 | if (!pskb_may_pull(skb, skb->len)) | ||
1556 | goto err; | ||
1557 | |||
1558 | if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { | ||
1559 | printk(KERN_ERR "PPP: VJ uncompressed error\n"); | ||
1560 | goto err; | ||
1561 | } | ||
1562 | proto = PPP_IP; | ||
1563 | break; | ||
1564 | |||
1565 | case PPP_CCP: | ||
1566 | ppp_ccp_peek(ppp, skb, 1); | ||
1567 | break; | ||
1568 | } | ||
1569 | |||
1570 | ++ppp->stats.rx_packets; | ||
1571 | ppp->stats.rx_bytes += skb->len - 2; | ||
1572 | |||
1573 | npi = proto_to_npindex(proto); | ||
1574 | if (npi < 0) { | ||
1575 | /* control or unknown frame - pass it to pppd */ | ||
1576 | skb_queue_tail(&ppp->file.rq, skb); | ||
1577 | /* limit queue length by dropping old frames */ | ||
1578 | while (ppp->file.rq.qlen > PPP_MAX_RQLEN | ||
1579 | && (skb = skb_dequeue(&ppp->file.rq)) != 0) | ||
1580 | kfree_skb(skb); | ||
1581 | /* wake up any process polling or blocking on read */ | ||
1582 | wake_up_interruptible(&ppp->file.rwait); | ||
1583 | |||
1584 | } else { | ||
1585 | /* network protocol frame - give it to the kernel */ | ||
1586 | |||
1587 | #ifdef CONFIG_PPP_FILTER | ||
1588 | /* check if the packet passes the pass and active filters */ | ||
1589 | /* the filter instructions are constructed assuming | ||
1590 | a four-byte PPP header on each packet */ | ||
1591 | *skb_push(skb, 2) = 0; | ||
1592 | if (ppp->pass_filter | ||
1593 | && sk_run_filter(skb, ppp->pass_filter, | ||
1594 | ppp->pass_len) == 0) { | ||
1595 | if (ppp->debug & 1) | ||
1596 | printk(KERN_DEBUG "PPP: inbound frame not passed\n"); | ||
1597 | kfree_skb(skb); | ||
1598 | return; | ||
1599 | } | ||
1600 | if (!(ppp->active_filter | ||
1601 | && sk_run_filter(skb, ppp->active_filter, | ||
1602 | ppp->active_len) == 0)) | ||
1603 | ppp->last_recv = jiffies; | ||
1604 | skb_pull(skb, 2); | ||
1605 | #else | ||
1606 | ppp->last_recv = jiffies; | ||
1607 | #endif /* CONFIG_PPP_FILTER */ | ||
1608 | |||
1609 | if ((ppp->dev->flags & IFF_UP) == 0 | ||
1610 | || ppp->npmode[npi] != NPMODE_PASS) { | ||
1611 | kfree_skb(skb); | ||
1612 | } else { | ||
1613 | skb_pull(skb, 2); /* chop off protocol */ | ||
1614 | skb->dev = ppp->dev; | ||
1615 | skb->protocol = htons(npindex_to_ethertype[npi]); | ||
1616 | skb->mac.raw = skb->data; | ||
1617 | skb->input_dev = ppp->dev; | ||
1618 | netif_rx(skb); | ||
1619 | ppp->dev->last_rx = jiffies; | ||
1620 | } | ||
1621 | } | ||
1622 | return; | ||
1623 | |||
1624 | err: | ||
1625 | kfree_skb(skb); | ||
1626 | ppp_receive_error(ppp); | ||
1627 | } | ||
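/*
 * Illustrative userspace sketch (editor's addition, not part of the
 * driver): the pass/active filters checked above are installed with the
 * PPPIOCSPASS and PPPIOCSACTIVE ioctls on a /dev/ppp unit fd, passing a
 * classic BPF program.  As the comment in the code notes, the filter
 * runs against the packet prefixed with a dummy four-byte PPP header.
 * The accept-all program below is only a placeholder for a real
 * compiled filter; install_pass_filter and unit_fd are hypothetical.
 */
#if 0	/* userspace example, assuming an open /dev/ppp unit fd */
#include <sys/ioctl.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>

static int install_pass_filter(int unit_fd)
{
	static struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, ~0U),	/* accept every packet */
	};
	struct sock_fprog prog = { .len = 1, .filter = insns };

	return ioctl(unit_fd, PPPIOCSPASS, &prog);	/* 0 on success */
}
#endif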
1628 | |||
1629 | static struct sk_buff * | ||
1630 | ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) | ||
1631 | { | ||
1632 | int proto = PPP_PROTO(skb); | ||
1633 | struct sk_buff *ns; | ||
1634 | int len; | ||
1635 | |||
1636 | 	/* Until we fix all the decompressors, we need to make sure | ||
1637 | 	 * the data portion is linear. | ||
1638 | 	 */ | ||
1639 | if (!pskb_may_pull(skb, skb->len)) | ||
1640 | goto err; | ||
1641 | |||
1642 | if (proto == PPP_COMP) { | ||
1643 | ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN); | ||
1644 | if (ns == 0) { | ||
1645 | printk(KERN_ERR "ppp_decompress_frame: no memory\n"); | ||
1646 | goto err; | ||
1647 | } | ||
1648 | /* the decompressor still expects the A/C bytes in the hdr */ | ||
1649 | len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, | ||
1650 | skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); | ||
1651 | if (len < 0) { | ||
1652 | /* Pass the compressed frame to pppd as an | ||
1653 | error indication. */ | ||
1654 | if (len == DECOMP_FATALERROR) | ||
1655 | ppp->rstate |= SC_DC_FERROR; | ||
1656 | kfree_skb(ns); | ||
1657 | goto err; | ||
1658 | } | ||
1659 | |||
1660 | kfree_skb(skb); | ||
1661 | skb = ns; | ||
1662 | skb_put(skb, len); | ||
1663 | skb_pull(skb, 2); /* pull off the A/C bytes */ | ||
1664 | |||
1665 | } else { | ||
1666 | /* Uncompressed frame - pass to decompressor so it | ||
1667 | can update its dictionary if necessary. */ | ||
1668 | if (ppp->rcomp->incomp) | ||
1669 | ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, | ||
1670 | skb->len + 2); | ||
1671 | } | ||
1672 | |||
1673 | return skb; | ||
1674 | |||
1675 | err: | ||
1676 | ppp->rstate |= SC_DC_ERROR; | ||
1677 | ppp_receive_error(ppp); | ||
1678 | return skb; | ||
1679 | } | ||
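/*
 * Editor's note on the buffer layout assumed by the decompress call
 * above: skb->data points at the two-byte protocol field, and the two
 * bytes of headroom in front of it stand in for the HDLC address and
 * control bytes (conventionally 0xff 0x03), giving the decompressor
 * the full PPP_HDRLEN-byte header it expects:
 *
 *	skb->data - 2:	ff 03 pp pp dd dd ...
 *			(addr, ctrl, proto, payload)
 */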
1680 | |||
1681 | #ifdef CONFIG_PPP_MULTILINK | ||
1682 | /* | ||
1683 | * Receive a multilink frame. | ||
1684 | * We put it on the reconstruction queue and then pull off | ||
1685 | * as many completed frames as we can. | ||
1686 | */ | ||
1687 | static void | ||
1688 | ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) | ||
1689 | { | ||
1690 | u32 mask, seq; | ||
1691 | struct list_head *l; | ||
1692 | int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; | ||
1693 | |||
1694 | if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) | ||
1695 | goto err; /* no good, throw it away */ | ||
1696 | |||
1697 | /* Decode sequence number and begin/end bits */ | ||
1698 | if (ppp->flags & SC_MP_SHORTSEQ) { | ||
1699 | seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; | ||
1700 | mask = 0xfff; | ||
1701 | } else { | ||
1702 | 		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5]; | ||
1703 | mask = 0xffffff; | ||
1704 | } | ||
1705 | skb->BEbits = skb->data[2]; | ||
1706 | skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ | ||
1707 | |||
1708 | /* | ||
1709 | * Do protocol ID decompression on the first fragment of each packet. | ||
1710 | */ | ||
1711 | if ((skb->BEbits & B) && (skb->data[0] & 1)) | ||
1712 | *skb_push(skb, 1) = 0; | ||
1713 | |||
1714 | /* | ||
1715 | * Expand sequence number to 32 bits, making it as close | ||
1716 | * as possible to ppp->minseq. | ||
1717 | */ | ||
1718 | seq |= ppp->minseq & ~mask; | ||
1719 | if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) | ||
1720 | seq += mask + 1; | ||
1721 | else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) | ||
1722 | seq -= mask + 1; /* should never happen */ | ||
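/*
 * Worked example of the expansion above (editor's note), using the
 * 12-bit short-sequence format, so mask == 0xfff: suppose
 * ppp->minseq == 0x10ffe and the fragment carries low bits 0x002.
 * Then seq = 0x002 | (0x10ffe & ~0xfff) = 0x10002, and since
 * minseq - seq == 0xffc exceeds mask >> 1 == 0x7ff, the wrap is
 * detected and seq becomes 0x11002 - just past minseq, as intended.
 */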
1723 | skb->sequence = seq; | ||
1724 | pch->lastseq = seq; | ||
1725 | |||
1726 | /* | ||
1727 | * If this packet comes before the next one we were expecting, | ||
1728 | * drop it. | ||
1729 | */ | ||
1730 | if (seq_before(seq, ppp->nextseq)) { | ||
1731 | kfree_skb(skb); | ||
1732 | ++ppp->stats.rx_dropped; | ||
1733 | ppp_receive_error(ppp); | ||
1734 | return; | ||
1735 | } | ||
1736 | |||
1737 | /* | ||
1738 | * Reevaluate minseq, the minimum over all channels of the | ||
1739 | * last sequence number received on each channel. Because of | ||
1740 | * the increasing sequence number rule, we know that any fragment | ||
1741 | * before `minseq' which hasn't arrived is never going to arrive. | ||
1742 | * The list of channels can't change because we have the receive | ||
1743 | * side of the ppp unit locked. | ||
1744 | */ | ||
1745 | for (l = ppp->channels.next; l != &ppp->channels; l = l->next) { | ||
1746 | struct channel *ch = list_entry(l, struct channel, clist); | ||
1747 | if (seq_before(ch->lastseq, seq)) | ||
1748 | seq = ch->lastseq; | ||
1749 | } | ||
1750 | if (seq_before(ppp->minseq, seq)) | ||
1751 | ppp->minseq = seq; | ||
1752 | |||
1753 | /* Put the fragment on the reconstruction queue */ | ||
1754 | ppp_mp_insert(ppp, skb); | ||
1755 | |||
1756 | /* If the queue is getting long, don't wait any longer for packets | ||
1757 | before the start of the queue. */ | ||
1758 | if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN | ||
1759 | && seq_before(ppp->minseq, ppp->mrq.next->sequence)) | ||
1760 | ppp->minseq = ppp->mrq.next->sequence; | ||
1761 | |||
1762 | /* Pull completed packets off the queue and receive them. */ | ||
1763 | while ((skb = ppp_mp_reconstruct(ppp)) != 0) | ||
1764 | ppp_receive_nonmp_frame(ppp, skb); | ||
1765 | |||
1766 | return; | ||
1767 | |||
1768 | err: | ||
1769 | kfree_skb(skb); | ||
1770 | ppp_receive_error(ppp); | ||
1771 | } | ||
1772 | |||
1773 | /* | ||
1774 | * Insert a fragment on the MP reconstruction queue. | ||
1775 | * The queue is ordered by increasing sequence number. | ||
1776 | */ | ||
1777 | static void | ||
1778 | ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) | ||
1779 | { | ||
1780 | struct sk_buff *p; | ||
1781 | struct sk_buff_head *list = &ppp->mrq; | ||
1782 | u32 seq = skb->sequence; | ||
1783 | |||
1784 | /* N.B. we don't need to lock the list lock because we have the | ||
1785 | ppp unit receive-side lock. */ | ||
1786 | for (p = list->next; p != (struct sk_buff *)list; p = p->next) | ||
1787 | if (seq_before(seq, p->sequence)) | ||
1788 | break; | ||
1789 | __skb_insert(skb, p->prev, p, list); | ||
1790 | } | ||
1791 | |||
1792 | /* | ||
1793 | * Reconstruct a packet from the MP fragment queue. | ||
1794 | * We go through increasing sequence numbers until we find a | ||
1795 | * complete packet, or we get to the sequence number for a fragment | ||
1796 | * which hasn't arrived but might still do so. | ||
1797 | */ | ||
1798 | struct sk_buff * | ||
1799 | ppp_mp_reconstruct(struct ppp *ppp) | ||
1800 | { | ||
1801 | u32 seq = ppp->nextseq; | ||
1802 | u32 minseq = ppp->minseq; | ||
1803 | struct sk_buff_head *list = &ppp->mrq; | ||
1804 | struct sk_buff *p, *next; | ||
1805 | struct sk_buff *head, *tail; | ||
1806 | struct sk_buff *skb = NULL; | ||
1807 | int lost = 0, len = 0; | ||
1808 | |||
1809 | if (ppp->mrru == 0) /* do nothing until mrru is set */ | ||
1810 | return NULL; | ||
1811 | head = list->next; | ||
1812 | tail = NULL; | ||
1813 | for (p = head; p != (struct sk_buff *) list; p = next) { | ||
1814 | next = p->next; | ||
1815 | if (seq_before(p->sequence, seq)) { | ||
1816 | 			/* this can't happen; ignore the skb anyway */ | ||
1817 | printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", | ||
1818 | p->sequence, seq); | ||
1819 | head = next; | ||
1820 | continue; | ||
1821 | } | ||
1822 | if (p->sequence != seq) { | ||
1823 | /* Fragment `seq' is missing. If it is after | ||
1824 | minseq, it might arrive later, so stop here. */ | ||
1825 | if (seq_after(seq, minseq)) | ||
1826 | break; | ||
1827 | /* Fragment `seq' is lost, keep going. */ | ||
1828 | lost = 1; | ||
1829 | seq = seq_before(minseq, p->sequence)? | ||
1830 | minseq + 1: p->sequence; | ||
1831 | next = p; | ||
1832 | continue; | ||
1833 | } | ||
1834 | |||
1835 | /* | ||
1836 | * At this point we know that all the fragments from | ||
1837 | * ppp->nextseq to seq are either present or lost. | ||
1838 | * Also, there are no complete packets in the queue | ||
1839 | * that have no missing fragments and end before this | ||
1840 | * fragment. | ||
1841 | */ | ||
1842 | |||
1843 | /* B bit set indicates this fragment starts a packet */ | ||
1844 | if (p->BEbits & B) { | ||
1845 | head = p; | ||
1846 | lost = 0; | ||
1847 | len = 0; | ||
1848 | } | ||
1849 | |||
1850 | len += p->len; | ||
1851 | |||
1852 | /* Got a complete packet yet? */ | ||
1853 | if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) { | ||
1854 | if (len > ppp->mrru + 2) { | ||
1855 | ++ppp->stats.rx_length_errors; | ||
1856 | printk(KERN_DEBUG "PPP: reconstructed packet" | ||
1857 | " is too long (%d)\n", len); | ||
1858 | } else if (p == head) { | ||
1859 | /* fragment is complete packet - reuse skb */ | ||
1860 | tail = p; | ||
1861 | skb = skb_get(p); | ||
1862 | break; | ||
1863 | } else if ((skb = dev_alloc_skb(len)) == NULL) { | ||
1864 | ++ppp->stats.rx_missed_errors; | ||
1865 | 				printk(KERN_DEBUG "PPP: no memory for " | ||
1866 | 				       "reconstructed packet\n"); | ||
1867 | } else { | ||
1868 | tail = p; | ||
1869 | break; | ||
1870 | } | ||
1871 | ppp->nextseq = seq + 1; | ||
1872 | } | ||
1873 | |||
1874 | /* | ||
1875 | * If this is the ending fragment of a packet, | ||
1876 | * and we haven't found a complete valid packet yet, | ||
1877 | * we can discard up to and including this fragment. | ||
1878 | */ | ||
1879 | if (p->BEbits & E) | ||
1880 | head = next; | ||
1881 | |||
1882 | ++seq; | ||
1883 | } | ||
1884 | |||
1885 | /* If we have a complete packet, copy it all into one skb. */ | ||
1886 | if (tail != NULL) { | ||
1887 | /* If we have discarded any fragments, | ||
1888 | signal a receive error. */ | ||
1889 | if (head->sequence != ppp->nextseq) { | ||
1890 | if (ppp->debug & 1) | ||
1891 | printk(KERN_DEBUG " missed pkts %u..%u\n", | ||
1892 | ppp->nextseq, head->sequence-1); | ||
1893 | ++ppp->stats.rx_dropped; | ||
1894 | ppp_receive_error(ppp); | ||
1895 | } | ||
1896 | |||
1897 | if (head != tail) | ||
1898 | /* copy to a single skb */ | ||
1899 | for (p = head; p != tail->next; p = p->next) | ||
1900 | skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); | ||
1901 | ppp->nextseq = tail->sequence + 1; | ||
1902 | head = tail->next; | ||
1903 | } | ||
1904 | |||
1905 | /* Discard all the skbuffs that we have copied the data out of | ||
1906 | or that we can't use. */ | ||
1907 | while ((p = list->next) != head) { | ||
1908 | __skb_unlink(p, list); | ||
1909 | kfree_skb(p); | ||
1910 | } | ||
1911 | |||
1912 | return skb; | ||
1913 | } | ||
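/*
 * Editor's example of the walk above: with fragments queued as
 *	seq 5 (B), seq 6 (-), seq 7 (E), seq 8 (BE)
 * the loop finds head == 5, accumulates len over 5..7, sees E on seq 7
 * with nothing lost, allocates one skb and copies the three fragments
 * into it; nextseq becomes 8.  The next call returns the
 * single-fragment packet 8 by reusing its skb (the p == head case).
 */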
1914 | #endif /* CONFIG_PPP_MULTILINK */ | ||
1915 | |||
1916 | /* | ||
1917 | * Channel interface. | ||
1918 | */ | ||
1919 | |||
1920 | /* | ||
1921 | * Create a new, unattached ppp channel. | ||
1922 | */ | ||
1923 | int | ||
1924 | ppp_register_channel(struct ppp_channel *chan) | ||
1925 | { | ||
1926 | struct channel *pch; | ||
1927 | |||
1928 | pch = kmalloc(sizeof(struct channel), GFP_KERNEL); | ||
1929 | if (pch == 0) | ||
1930 | return -ENOMEM; | ||
1931 | memset(pch, 0, sizeof(struct channel)); | ||
1932 | pch->ppp = NULL; | ||
1933 | pch->chan = chan; | ||
1934 | chan->ppp = pch; | ||
1935 | init_ppp_file(&pch->file, CHANNEL); | ||
1936 | pch->file.hdrlen = chan->hdrlen; | ||
1937 | #ifdef CONFIG_PPP_MULTILINK | ||
1938 | pch->lastseq = -1; | ||
1939 | #endif /* CONFIG_PPP_MULTILINK */ | ||
1940 | init_rwsem(&pch->chan_sem); | ||
1941 | spin_lock_init(&pch->downl); | ||
1942 | rwlock_init(&pch->upl); | ||
1943 | spin_lock_bh(&all_channels_lock); | ||
1944 | pch->file.index = ++last_channel_index; | ||
1945 | list_add(&pch->list, &new_channels); | ||
1946 | atomic_inc(&channel_count); | ||
1947 | spin_unlock_bh(&all_channels_lock); | ||
1948 | return 0; | ||
1949 | } | ||
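/*
 * Editor's sketch (hypothetical names, not from this file) of how a
 * channel driver uses the call above: it embeds a struct ppp_channel,
 * fills in ops/mtu/hdrlen, and registers.  See the note at
 * ppp_output_wakeup() below for the transmit flow-control contract.
 */
#if 0	/* illustrative only */
static int my_chan_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int my_chan_ioctl(struct ppp_channel *chan, unsigned int cmd,
			 unsigned long arg);

static struct ppp_channel_ops my_chan_ops = {
	.start_xmit = my_chan_xmit,
	.ioctl      = my_chan_ioctl,
};

static int my_chan_attach(struct my_dev *dev)
{
	dev->chan.private = dev;	/* driver state */
	dev->chan.ops     = &my_chan_ops;
	dev->chan.mtu     = 1500;
	dev->chan.hdrlen  = 2;		/* link-level header space needed */
	return ppp_register_channel(&dev->chan);
}
#endif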
1950 | |||
1951 | /* | ||
1952 | * Return the index of a channel. | ||
1953 | */ | ||
1954 | int ppp_channel_index(struct ppp_channel *chan) | ||
1955 | { | ||
1956 | struct channel *pch = chan->ppp; | ||
1957 | |||
1958 | if (pch != 0) | ||
1959 | return pch->file.index; | ||
1960 | return -1; | ||
1961 | } | ||
1962 | |||
1963 | /* | ||
1964 | * Return the PPP unit number to which a channel is connected. | ||
1965 | */ | ||
1966 | int ppp_unit_number(struct ppp_channel *chan) | ||
1967 | { | ||
1968 | struct channel *pch = chan->ppp; | ||
1969 | int unit = -1; | ||
1970 | |||
1971 | if (pch != 0) { | ||
1972 | read_lock_bh(&pch->upl); | ||
1973 | if (pch->ppp != 0) | ||
1974 | unit = pch->ppp->file.index; | ||
1975 | read_unlock_bh(&pch->upl); | ||
1976 | } | ||
1977 | return unit; | ||
1978 | } | ||
1979 | |||
1980 | /* | ||
1981 | * Disconnect a channel from the generic layer. | ||
1982 | * This must be called in process context. | ||
1983 | */ | ||
1984 | void | ||
1985 | ppp_unregister_channel(struct ppp_channel *chan) | ||
1986 | { | ||
1987 | struct channel *pch = chan->ppp; | ||
1988 | |||
1989 | if (pch == 0) | ||
1990 | return; /* should never happen */ | ||
1991 | chan->ppp = NULL; | ||
1992 | |||
1993 | /* | ||
1994 | 	 * This ensures that we have returned from any calls into the | ||
1995 | 	 * channel's start_xmit or ioctl routine before we proceed. | ||
1996 | */ | ||
1997 | down_write(&pch->chan_sem); | ||
1998 | spin_lock_bh(&pch->downl); | ||
1999 | pch->chan = NULL; | ||
2000 | spin_unlock_bh(&pch->downl); | ||
2001 | up_write(&pch->chan_sem); | ||
2002 | ppp_disconnect_channel(pch); | ||
2003 | spin_lock_bh(&all_channels_lock); | ||
2004 | list_del(&pch->list); | ||
2005 | spin_unlock_bh(&all_channels_lock); | ||
2006 | pch->file.dead = 1; | ||
2007 | wake_up_interruptible(&pch->file.rwait); | ||
2008 | if (atomic_dec_and_test(&pch->file.refcnt)) | ||
2009 | ppp_destroy_channel(pch); | ||
2010 | } | ||
2011 | |||
2012 | /* | ||
2013 | * Callback from a channel when it can accept more to transmit. | ||
2014 | * This should be called at BH/softirq level, not interrupt level. | ||
2015 | */ | ||
2016 | void | ||
2017 | ppp_output_wakeup(struct ppp_channel *chan) | ||
2018 | { | ||
2019 | struct channel *pch = chan->ppp; | ||
2020 | |||
2021 | if (pch == 0) | ||
2022 | return; | ||
2023 | ppp_channel_push(pch); | ||
2024 | } | ||
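/*
 * Editor's note on the flow-control contract implied above: a channel's
 * start_xmit op returns nonzero when it has consumed the skb, and 0
 * when it is congested; in the latter case the generic layer keeps the
 * packet pending, and the channel driver calls ppp_output_wakeup()
 * once it can take more data.
 */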
2025 | |||
2026 | /* | ||
2027 | * Compression control. | ||
2028 | */ | ||
2029 | |||
2030 | /* Process the PPPIOCSCOMPRESS ioctl. */ | ||
2031 | static int | ||
2032 | ppp_set_compress(struct ppp *ppp, unsigned long arg) | ||
2033 | { | ||
2034 | int err; | ||
2035 | struct compressor *cp, *ocomp; | ||
2036 | struct ppp_option_data data; | ||
2037 | void *state, *ostate; | ||
2038 | unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; | ||
2039 | |||
2040 | err = -EFAULT; | ||
2041 | if (copy_from_user(&data, (void __user *) arg, sizeof(data)) | ||
2042 | || (data.length <= CCP_MAX_OPTION_LENGTH | ||
2043 | && copy_from_user(ccp_option, (void __user *) data.ptr, data.length))) | ||
2044 | goto out; | ||
2045 | err = -EINVAL; | ||
2046 | if (data.length > CCP_MAX_OPTION_LENGTH | ||
2047 | || ccp_option[1] < 2 || ccp_option[1] > data.length) | ||
2048 | goto out; | ||
2049 | |||
2050 | cp = find_compressor(ccp_option[0]); | ||
2051 | #ifdef CONFIG_KMOD | ||
2052 | if (cp == 0) { | ||
2053 | request_module("ppp-compress-%d", ccp_option[0]); | ||
2054 | cp = find_compressor(ccp_option[0]); | ||
2055 | } | ||
2056 | #endif /* CONFIG_KMOD */ | ||
2057 | if (cp == 0) | ||
2058 | goto out; | ||
2059 | |||
2060 | err = -ENOBUFS; | ||
2061 | if (data.transmit) { | ||
2062 | state = cp->comp_alloc(ccp_option, data.length); | ||
2063 | if (state != 0) { | ||
2064 | ppp_xmit_lock(ppp); | ||
2065 | ppp->xstate &= ~SC_COMP_RUN; | ||
2066 | ocomp = ppp->xcomp; | ||
2067 | ostate = ppp->xc_state; | ||
2068 | ppp->xcomp = cp; | ||
2069 | ppp->xc_state = state; | ||
2070 | ppp_xmit_unlock(ppp); | ||
2071 | if (ostate != 0) { | ||
2072 | ocomp->comp_free(ostate); | ||
2073 | module_put(ocomp->owner); | ||
2074 | } | ||
2075 | err = 0; | ||
2076 | } else | ||
2077 | module_put(cp->owner); | ||
2078 | |||
2079 | } else { | ||
2080 | state = cp->decomp_alloc(ccp_option, data.length); | ||
2081 | if (state != 0) { | ||
2082 | ppp_recv_lock(ppp); | ||
2083 | ppp->rstate &= ~SC_DECOMP_RUN; | ||
2084 | ocomp = ppp->rcomp; | ||
2085 | ostate = ppp->rc_state; | ||
2086 | ppp->rcomp = cp; | ||
2087 | ppp->rc_state = state; | ||
2088 | ppp_recv_unlock(ppp); | ||
2089 | if (ostate != 0) { | ||
2090 | ocomp->decomp_free(ostate); | ||
2091 | module_put(ocomp->owner); | ||
2092 | } | ||
2093 | err = 0; | ||
2094 | } else | ||
2095 | module_put(cp->owner); | ||
2096 | } | ||
2097 | |||
2098 | out: | ||
2099 | return err; | ||
2100 | } | ||
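/*
 * Illustrative userspace sketch (editor's addition): pppd drives the
 * ioctl above once CCP negotiation settles on a method.  struct
 * ppp_option_data carries the raw CCP option (type, length, method
 * parameters) plus the direction flag.  The Deflate option bytes below
 * follow RFC 1979 and are only an example; enable_deflate_tx and
 * unit_fd are hypothetical.
 */
#if 0	/* userspace example, assuming an open /dev/ppp unit fd */
#include <sys/ioctl.h>
#include <linux/if_ppp.h>

static int enable_deflate_tx(int unit_fd)
{
	/* CI_DEFLATE(26), len 4, 15-bit window, method 8, check 0 */
	unsigned char opt[4] = { 26, 4, ((15 - 8) << 4) | 8, 0 };
	struct ppp_option_data data = {
		.ptr      = opt,
		.length   = sizeof(opt),
		.transmit = 1,		/* configure the compressor side */
	};

	return ioctl(unit_fd, PPPIOCSCOMPRESS, &data);
}
#endif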
2101 | |||
2102 | /* | ||
2103 | * Look at a CCP packet and update our state accordingly. | ||
2104 | * We assume the caller has the xmit or recv path locked. | ||
2105 | */ | ||
2106 | static void | ||
2107 | ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound) | ||
2108 | { | ||
2109 | unsigned char *dp; | ||
2110 | int len; | ||
2111 | |||
2112 | if (!pskb_may_pull(skb, CCP_HDRLEN + 2)) | ||
2113 | return; /* no header */ | ||
2114 | dp = skb->data + 2; | ||
2115 | |||
2116 | switch (CCP_CODE(dp)) { | ||
2117 | case CCP_CONFREQ: | ||
2118 | |||
2119 | /* A ConfReq starts negotiation of compression | ||
2120 | * in one direction of transmission, | ||
2121 | * and hence brings it down...but which way? | ||
2122 | * | ||
2123 | * Remember: | ||
2124 | * A ConfReq indicates what the sender would like to receive | ||
2125 | */ | ||
2126 | 		if (inbound) | ||
2127 | 			/* He is proposing what I should send */ | ||
2128 | 			ppp->xstate &= ~SC_COMP_RUN; | ||
2129 | 		else | ||
2130 | 			/* I am proposing what he should send */ | ||
2131 | 			ppp->rstate &= ~SC_DECOMP_RUN; | ||
2132 | |||
2133 | break; | ||
2134 | |||
2135 | case CCP_TERMREQ: | ||
2136 | case CCP_TERMACK: | ||
2137 | /* | ||
2138 | * CCP is going down, both directions of transmission | ||
2139 | */ | ||
2140 | ppp->rstate &= ~SC_DECOMP_RUN; | ||
2141 | ppp->xstate &= ~SC_COMP_RUN; | ||
2142 | break; | ||
2143 | |||
2144 | case CCP_CONFACK: | ||
2145 | if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN) | ||
2146 | break; | ||
2147 | len = CCP_LENGTH(dp); | ||
2148 | if (!pskb_may_pull(skb, len + 2)) | ||
2149 | return; /* too short */ | ||
2150 | dp += CCP_HDRLEN; | ||
2151 | len -= CCP_HDRLEN; | ||
2152 | if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp)) | ||
2153 | break; | ||
2154 | if (inbound) { | ||
2155 | /* we will start receiving compressed packets */ | ||
2156 | if (ppp->rc_state == 0) | ||
2157 | break; | ||
2158 | if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len, | ||
2159 | ppp->file.index, 0, ppp->mru, ppp->debug)) { | ||
2160 | ppp->rstate |= SC_DECOMP_RUN; | ||
2161 | ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR); | ||
2162 | } | ||
2163 | } else { | ||
2164 | /* we will soon start sending compressed packets */ | ||
2165 | if (ppp->xc_state == 0) | ||
2166 | break; | ||
2167 | if (ppp->xcomp->comp_init(ppp->xc_state, dp, len, | ||
2168 | ppp->file.index, 0, ppp->debug)) | ||
2169 | ppp->xstate |= SC_COMP_RUN; | ||
2170 | } | ||
2171 | break; | ||
2172 | |||
2173 | case CCP_RESETACK: | ||
2174 | /* reset the [de]compressor */ | ||
2175 | if ((ppp->flags & SC_CCP_UP) == 0) | ||
2176 | break; | ||
2177 | if (inbound) { | ||
2178 | if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) { | ||
2179 | ppp->rcomp->decomp_reset(ppp->rc_state); | ||
2180 | ppp->rstate &= ~SC_DC_ERROR; | ||
2181 | } | ||
2182 | } else { | ||
2183 | if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN)) | ||
2184 | ppp->xcomp->comp_reset(ppp->xc_state); | ||
2185 | } | ||
2186 | break; | ||
2187 | } | ||
2188 | } | ||
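/*
 * Editor's summary of the direction logic above:
 *
 *	packet		inbound?  side reset / (re)started
 *	CCP ConfReq	yes	  transmit (peer asks what it will receive)
 *	CCP ConfReq	no	  receive  (we ask what we will receive)
 *	CCP ConfAck	yes	  receive  (decomp_init, SC_DECOMP_RUN)
 *	CCP ConfAck	no	  transmit (comp_init, SC_COMP_RUN)
 *	TermReq/TermAck	either	  both directions go down
 */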
2189 | |||
2190 | /* Free up compression resources. */ | ||
2191 | static void | ||
2192 | ppp_ccp_closed(struct ppp *ppp) | ||
2193 | { | ||
2194 | void *xstate, *rstate; | ||
2195 | struct compressor *xcomp, *rcomp; | ||
2196 | |||
2197 | ppp_lock(ppp); | ||
2198 | ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP); | ||
2199 | ppp->xstate = 0; | ||
2200 | xcomp = ppp->xcomp; | ||
2201 | xstate = ppp->xc_state; | ||
2202 | ppp->xc_state = NULL; | ||
2203 | ppp->rstate = 0; | ||
2204 | rcomp = ppp->rcomp; | ||
2205 | rstate = ppp->rc_state; | ||
2206 | ppp->rc_state = NULL; | ||
2207 | ppp_unlock(ppp); | ||
2208 | |||
2209 | if (xstate) { | ||
2210 | xcomp->comp_free(xstate); | ||
2211 | module_put(xcomp->owner); | ||
2212 | } | ||
2213 | if (rstate) { | ||
2214 | rcomp->decomp_free(rstate); | ||
2215 | module_put(rcomp->owner); | ||
2216 | } | ||
2217 | } | ||
2218 | |||
2219 | /* List of compressors. */ | ||
2220 | static LIST_HEAD(compressor_list); | ||
2221 | static DEFINE_SPINLOCK(compressor_list_lock); | ||
2222 | |||
2223 | struct compressor_entry { | ||
2224 | struct list_head list; | ||
2225 | struct compressor *comp; | ||
2226 | }; | ||
2227 | |||
2228 | static struct compressor_entry * | ||
2229 | find_comp_entry(int proto) | ||
2230 | { | ||
2231 | struct compressor_entry *ce; | ||
2232 | struct list_head *list = &compressor_list; | ||
2233 | |||
2234 | while ((list = list->next) != &compressor_list) { | ||
2235 | ce = list_entry(list, struct compressor_entry, list); | ||
2236 | if (ce->comp->compress_proto == proto) | ||
2237 | return ce; | ||
2238 | } | ||
2239 | return NULL; | ||
2240 | } | ||
2241 | |||
2242 | /* Register a compressor */ | ||
2243 | int | ||
2244 | ppp_register_compressor(struct compressor *cp) | ||
2245 | { | ||
2246 | struct compressor_entry *ce; | ||
2247 | int ret; | ||
2248 | spin_lock(&compressor_list_lock); | ||
2249 | ret = -EEXIST; | ||
2250 | if (find_comp_entry(cp->compress_proto) != 0) | ||
2251 | goto out; | ||
2252 | ret = -ENOMEM; | ||
2253 | ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC); | ||
2254 | if (ce == 0) | ||
2255 | goto out; | ||
2256 | ret = 0; | ||
2257 | ce->comp = cp; | ||
2258 | list_add(&ce->list, &compressor_list); | ||
2259 | out: | ||
2260 | spin_unlock(&compressor_list_lock); | ||
2261 | return ret; | ||
2262 | } | ||
2263 | |||
2264 | /* Unregister a compressor */ | ||
2265 | void | ||
2266 | ppp_unregister_compressor(struct compressor *cp) | ||
2267 | { | ||
2268 | struct compressor_entry *ce; | ||
2269 | |||
2270 | spin_lock(&compressor_list_lock); | ||
2271 | ce = find_comp_entry(cp->compress_proto); | ||
2272 | if (ce != 0 && ce->comp == cp) { | ||
2273 | list_del(&ce->list); | ||
2274 | kfree(ce); | ||
2275 | } | ||
2276 | spin_unlock(&compressor_list_lock); | ||
2277 | } | ||
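/*
 * Editor's sketch of a compressor module using the two calls above
 * (field list abridged; see struct compressor in <linux/ppp-comp.h>;
 * the my_* names are hypothetical).  A "ppp-compress-<proto>" module
 * alias lets the request_module() call in ppp_set_compress() autoload
 * the module on demand.
 */
#if 0	/* illustrative only */
static struct compressor my_compressor = {
	.compress_proto	= CI_DEFLATE,
	.comp_alloc	= my_comp_alloc,
	.comp_free	= my_comp_free,
	.comp_init	= my_comp_init,
	.compress	= my_compress,
	/* ... decompressor ops, reset and stat hooks ... */
	.owner		= THIS_MODULE,
};

static int __init my_comp_init_module(void)
{
	return ppp_register_compressor(&my_compressor);
}

static void __exit my_comp_cleanup(void)
{
	ppp_unregister_compressor(&my_compressor);
}

module_init(my_comp_init_module);
module_exit(my_comp_cleanup);
MODULE_ALIAS("ppp-compress-26");	/* CI_DEFLATE */
#endif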
2278 | |||
2279 | /* Find a compressor. */ | ||
2280 | static struct compressor * | ||
2281 | find_compressor(int type) | ||
2282 | { | ||
2283 | struct compressor_entry *ce; | ||
2284 | struct compressor *cp = NULL; | ||
2285 | |||
2286 | spin_lock(&compressor_list_lock); | ||
2287 | ce = find_comp_entry(type); | ||
2288 | if (ce != 0) { | ||
2289 | cp = ce->comp; | ||
2290 | if (!try_module_get(cp->owner)) | ||
2291 | cp = NULL; | ||
2292 | } | ||
2293 | spin_unlock(&compressor_list_lock); | ||
2294 | return cp; | ||
2295 | } | ||
2296 | |||
2297 | /* | ||
2298 |  * Miscellaneous stuff. | ||
2299 | */ | ||
2300 | |||
2301 | static void | ||
2302 | ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) | ||
2303 | { | ||
2304 | struct slcompress *vj = ppp->vj; | ||
2305 | |||
2306 | memset(st, 0, sizeof(*st)); | ||
2307 | st->p.ppp_ipackets = ppp->stats.rx_packets; | ||
2308 | st->p.ppp_ierrors = ppp->stats.rx_errors; | ||
2309 | st->p.ppp_ibytes = ppp->stats.rx_bytes; | ||
2310 | st->p.ppp_opackets = ppp->stats.tx_packets; | ||
2311 | st->p.ppp_oerrors = ppp->stats.tx_errors; | ||
2312 | st->p.ppp_obytes = ppp->stats.tx_bytes; | ||
2313 | if (vj == 0) | ||
2314 | return; | ||
2315 | st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; | ||
2316 | st->vj.vjs_compressed = vj->sls_o_compressed; | ||
2317 | st->vj.vjs_searches = vj->sls_o_searches; | ||
2318 | st->vj.vjs_misses = vj->sls_o_misses; | ||
2319 | st->vj.vjs_errorin = vj->sls_i_error; | ||
2320 | st->vj.vjs_tossed = vj->sls_i_tossed; | ||
2321 | st->vj.vjs_uncompressedin = vj->sls_i_uncompressed; | ||
2322 | st->vj.vjs_compressedin = vj->sls_i_compressed; | ||
2323 | } | ||
2324 | |||
2325 | /* | ||
2326 | * Stuff for handling the lists of ppp units and channels | ||
2327 | * and for initialization. | ||
2328 | */ | ||
2329 | |||
2330 | /* | ||
2331 | * Create a new ppp interface unit. Fails if it can't allocate memory | ||
2332 | * or if there is already a unit with the requested number. | ||
2333 | * unit == -1 means allocate a new number. | ||
2334 | */ | ||
2335 | static struct ppp * | ||
2336 | ppp_create_interface(int unit, int *retp) | ||
2337 | { | ||
2338 | struct ppp *ppp; | ||
2339 | struct net_device *dev = NULL; | ||
2340 | int ret = -ENOMEM; | ||
2341 | int i; | ||
2342 | |||
2343 | ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); | ||
2344 | if (!ppp) | ||
2345 | goto out; | ||
2346 | dev = alloc_netdev(0, "", ppp_setup); | ||
2347 | if (!dev) | ||
2348 | goto out1; | ||
2349 | memset(ppp, 0, sizeof(struct ppp)); | ||
2350 | |||
2351 | ppp->mru = PPP_MRU; | ||
2352 | init_ppp_file(&ppp->file, INTERFACE); | ||
2353 | ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ | ||
2354 | for (i = 0; i < NUM_NP; ++i) | ||
2355 | ppp->npmode[i] = NPMODE_PASS; | ||
2356 | INIT_LIST_HEAD(&ppp->channels); | ||
2357 | spin_lock_init(&ppp->rlock); | ||
2358 | spin_lock_init(&ppp->wlock); | ||
2359 | #ifdef CONFIG_PPP_MULTILINK | ||
2360 | ppp->minseq = -1; | ||
2361 | skb_queue_head_init(&ppp->mrq); | ||
2362 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2363 | ppp->dev = dev; | ||
2364 | dev->priv = ppp; | ||
2365 | |||
2366 | dev->hard_start_xmit = ppp_start_xmit; | ||
2367 | dev->get_stats = ppp_net_stats; | ||
2368 | dev->do_ioctl = ppp_net_ioctl; | ||
2369 | |||
2370 | ret = -EEXIST; | ||
2371 | down(&all_ppp_sem); | ||
2372 | if (unit < 0) | ||
2373 | unit = cardmap_find_first_free(all_ppp_units); | ||
2374 | else if (cardmap_get(all_ppp_units, unit) != NULL) | ||
2375 | goto out2; /* unit already exists */ | ||
2376 | |||
2377 | /* Initialize the new ppp unit */ | ||
2378 | ppp->file.index = unit; | ||
2379 | sprintf(dev->name, "ppp%d", unit); | ||
2380 | |||
2381 | ret = register_netdev(dev); | ||
2382 | if (ret != 0) { | ||
2383 | printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", | ||
2384 | dev->name, ret); | ||
2385 | goto out2; | ||
2386 | } | ||
2387 | |||
2388 | atomic_inc(&ppp_unit_count); | ||
2389 | cardmap_set(&all_ppp_units, unit, ppp); | ||
2390 | up(&all_ppp_sem); | ||
2391 | *retp = 0; | ||
2392 | return ppp; | ||
2393 | |||
2394 | out2: | ||
2395 | up(&all_ppp_sem); | ||
2396 | free_netdev(dev); | ||
2397 | out1: | ||
2398 | kfree(ppp); | ||
2399 | out: | ||
2400 | *retp = ret; | ||
2401 | return NULL; | ||
2402 | } | ||
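/*
 * Illustrative userspace sketch (editor's addition):
 * ppp_create_interface() is reached via the PPPIOCNEWUNIT ioctl on
 * /dev/ppp.  Passing -1 asks the cardmap below for the first free unit
 * number; the chosen number is written back.  new_ppp_unit is a
 * hypothetical name.
 */
#if 0	/* userspace example */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>

static int new_ppp_unit(void)
{
	int fd = open("/dev/ppp", O_RDWR);
	int unit = -1;			/* -1: allocate a new number */

	if (fd >= 0 && ioctl(fd, PPPIOCNEWUNIT, &unit) < 0) {
		close(fd);
		return -1;
	}
	return fd;			/* fd now refers to unit "unit" */
}
#endif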
2403 | |||
2404 | /* | ||
2405 | * Initialize a ppp_file structure. | ||
2406 | */ | ||
2407 | static void | ||
2408 | init_ppp_file(struct ppp_file *pf, int kind) | ||
2409 | { | ||
2410 | pf->kind = kind; | ||
2411 | skb_queue_head_init(&pf->xq); | ||
2412 | skb_queue_head_init(&pf->rq); | ||
2413 | atomic_set(&pf->refcnt, 1); | ||
2414 | init_waitqueue_head(&pf->rwait); | ||
2415 | } | ||
2416 | |||
2417 | /* | ||
2418 | * Take down a ppp interface unit - called when the owning file | ||
2419 | * (the one that created the unit) is closed or detached. | ||
2420 | */ | ||
2421 | static void ppp_shutdown_interface(struct ppp *ppp) | ||
2422 | { | ||
2423 | struct net_device *dev; | ||
2424 | |||
2425 | down(&all_ppp_sem); | ||
2426 | ppp_lock(ppp); | ||
2427 | dev = ppp->dev; | ||
2428 | ppp->dev = NULL; | ||
2429 | ppp_unlock(ppp); | ||
2430 | /* This will call dev_close() for us. */ | ||
2431 | if (dev) { | ||
2432 | unregister_netdev(dev); | ||
2433 | free_netdev(dev); | ||
2434 | } | ||
2435 | cardmap_set(&all_ppp_units, ppp->file.index, NULL); | ||
2436 | ppp->file.dead = 1; | ||
2437 | ppp->owner = NULL; | ||
2438 | wake_up_interruptible(&ppp->file.rwait); | ||
2439 | up(&all_ppp_sem); | ||
2440 | } | ||
2441 | |||
2442 | /* | ||
2443 | * Free the memory used by a ppp unit. This is only called once | ||
2444 | * there are no channels connected to the unit and no file structs | ||
2445 | * that reference the unit. | ||
2446 | */ | ||
2447 | static void ppp_destroy_interface(struct ppp *ppp) | ||
2448 | { | ||
2449 | atomic_dec(&ppp_unit_count); | ||
2450 | |||
2451 | if (!ppp->file.dead || ppp->n_channels) { | ||
2452 | /* "can't happen" */ | ||
2453 | printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " | ||
2454 | "n_channels=%d !\n", ppp, ppp->file.dead, | ||
2455 | ppp->n_channels); | ||
2456 | return; | ||
2457 | } | ||
2458 | |||
2459 | ppp_ccp_closed(ppp); | ||
2460 | if (ppp->vj) { | ||
2461 | slhc_free(ppp->vj); | ||
2462 | ppp->vj = NULL; | ||
2463 | } | ||
2464 | skb_queue_purge(&ppp->file.xq); | ||
2465 | skb_queue_purge(&ppp->file.rq); | ||
2466 | #ifdef CONFIG_PPP_MULTILINK | ||
2467 | skb_queue_purge(&ppp->mrq); | ||
2468 | #endif /* CONFIG_PPP_MULTILINK */ | ||
2469 | #ifdef CONFIG_PPP_FILTER | ||
2470 | if (ppp->pass_filter) { | ||
2471 | kfree(ppp->pass_filter); | ||
2472 | ppp->pass_filter = NULL; | ||
2473 | } | ||
2474 | if (ppp->active_filter) { | ||
2475 | kfree(ppp->active_filter); | ||
2476 | ppp->active_filter = NULL; | ||
2477 | } | ||
2478 | #endif /* CONFIG_PPP_FILTER */ | ||
2479 | |||
2480 | kfree(ppp); | ||
2481 | } | ||
2482 | |||
2483 | /* | ||
2484 | * Locate an existing ppp unit. | ||
2485 | * The caller should have locked the all_ppp_sem. | ||
2486 | */ | ||
2487 | static struct ppp * | ||
2488 | ppp_find_unit(int unit) | ||
2489 | { | ||
2490 | return cardmap_get(all_ppp_units, unit); | ||
2491 | } | ||
2492 | |||
2493 | /* | ||
2494 | * Locate an existing ppp channel. | ||
2495 | * The caller should have locked the all_channels_lock. | ||
2496 | * First we look in the new_channels list, then in the | ||
2497 | * all_channels list. If found in the new_channels list, | ||
2498 | * we move it to the all_channels list. This is for speed | ||
2499 | * when we have a lot of channels in use. | ||
2500 | */ | ||
2501 | static struct channel * | ||
2502 | ppp_find_channel(int unit) | ||
2503 | { | ||
2504 | struct channel *pch; | ||
2505 | struct list_head *list; | ||
2506 | |||
2507 | list = &new_channels; | ||
2508 | while ((list = list->next) != &new_channels) { | ||
2509 | pch = list_entry(list, struct channel, list); | ||
2510 | if (pch->file.index == unit) { | ||
2511 | list_del(&pch->list); | ||
2512 | list_add(&pch->list, &all_channels); | ||
2513 | return pch; | ||
2514 | } | ||
2515 | } | ||
2516 | list = &all_channels; | ||
2517 | while ((list = list->next) != &all_channels) { | ||
2518 | pch = list_entry(list, struct channel, list); | ||
2519 | if (pch->file.index == unit) | ||
2520 | return pch; | ||
2521 | } | ||
2522 | return NULL; | ||
2523 | } | ||
2524 | |||
2525 | /* | ||
2526 | * Connect a PPP channel to a PPP interface unit. | ||
2527 | */ | ||
2528 | static int | ||
2529 | ppp_connect_channel(struct channel *pch, int unit) | ||
2530 | { | ||
2531 | struct ppp *ppp; | ||
2532 | int ret = -ENXIO; | ||
2533 | int hdrlen; | ||
2534 | |||
2535 | down(&all_ppp_sem); | ||
2536 | ppp = ppp_find_unit(unit); | ||
2537 | if (ppp == 0) | ||
2538 | goto out; | ||
2539 | write_lock_bh(&pch->upl); | ||
2540 | ret = -EINVAL; | ||
2541 | if (pch->ppp != 0) | ||
2542 | goto outl; | ||
2543 | |||
2544 | ppp_lock(ppp); | ||
2545 | if (pch->file.hdrlen > ppp->file.hdrlen) | ||
2546 | ppp->file.hdrlen = pch->file.hdrlen; | ||
2547 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ | ||
2548 | if (ppp->dev && hdrlen > ppp->dev->hard_header_len) | ||
2549 | ppp->dev->hard_header_len = hdrlen; | ||
2550 | list_add_tail(&pch->clist, &ppp->channels); | ||
2551 | ++ppp->n_channels; | ||
2552 | pch->ppp = ppp; | ||
2553 | atomic_inc(&ppp->file.refcnt); | ||
2554 | ppp_unlock(ppp); | ||
2555 | ret = 0; | ||
2556 | |||
2557 | outl: | ||
2558 | write_unlock_bh(&pch->upl); | ||
2559 | out: | ||
2560 | up(&all_ppp_sem); | ||
2561 | return ret; | ||
2562 | } | ||
2563 | |||
2564 | /* | ||
2565 | * Disconnect a channel from its ppp unit. | ||
2566 | */ | ||
2567 | static int | ||
2568 | ppp_disconnect_channel(struct channel *pch) | ||
2569 | { | ||
2570 | struct ppp *ppp; | ||
2571 | int err = -EINVAL; | ||
2572 | |||
2573 | write_lock_bh(&pch->upl); | ||
2574 | ppp = pch->ppp; | ||
2575 | pch->ppp = NULL; | ||
2576 | write_unlock_bh(&pch->upl); | ||
2577 | if (ppp != 0) { | ||
2578 | /* remove it from the ppp unit's list */ | ||
2579 | ppp_lock(ppp); | ||
2580 | list_del(&pch->clist); | ||
2581 | if (--ppp->n_channels == 0) | ||
2582 | wake_up_interruptible(&ppp->file.rwait); | ||
2583 | ppp_unlock(ppp); | ||
2584 | if (atomic_dec_and_test(&ppp->file.refcnt)) | ||
2585 | ppp_destroy_interface(ppp); | ||
2586 | err = 0; | ||
2587 | } | ||
2588 | return err; | ||
2589 | } | ||
2590 | |||
2591 | /* | ||
2592 | * Free up the resources used by a ppp channel. | ||
2593 | */ | ||
2594 | static void ppp_destroy_channel(struct channel *pch) | ||
2595 | { | ||
2596 | atomic_dec(&channel_count); | ||
2597 | |||
2598 | if (!pch->file.dead) { | ||
2599 | /* "can't happen" */ | ||
2600 | printk(KERN_ERR "ppp: destroying undead channel %p !\n", | ||
2601 | pch); | ||
2602 | return; | ||
2603 | } | ||
2604 | skb_queue_purge(&pch->file.xq); | ||
2605 | skb_queue_purge(&pch->file.rq); | ||
2606 | kfree(pch); | ||
2607 | } | ||
2608 | |||
2609 | static void __exit ppp_cleanup(void) | ||
2610 | { | ||
2611 | /* should never happen */ | ||
2612 | if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) | ||
2613 | printk(KERN_ERR "PPP: removing module but units remain!\n"); | ||
2614 | cardmap_destroy(&all_ppp_units); | ||
2615 | if (unregister_chrdev(PPP_MAJOR, "ppp") != 0) | ||
2616 | printk(KERN_ERR "PPP: failed to unregister PPP device\n"); | ||
2617 | devfs_remove("ppp"); | ||
2618 | class_simple_device_remove(MKDEV(PPP_MAJOR, 0)); | ||
2619 | class_simple_destroy(ppp_class); | ||
2620 | } | ||
2621 | |||
2622 | /* | ||
2623 | * Cardmap implementation. | ||
2624 | */ | ||
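/*
 * Editor's note: the cardmap is a small radix tree mapping unit numbers
 * to ppp units, CARDMAP_ORDER bits of the number per level, with an
 * inuse bitmap in each node so the first free unit number can be found
 * quickly.  For example, with 6-bit levels (width 64), unit 200 in a
 * two-level map lives at top-level slot 200 >> 6 == 3, then leaf slot
 * 200 & 63 == 8.
 */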
2625 | static void *cardmap_get(struct cardmap *map, unsigned int nr) | ||
2626 | { | ||
2627 | struct cardmap *p; | ||
2628 | int i; | ||
2629 | |||
2630 | for (p = map; p != NULL; ) { | ||
2631 | if ((i = nr >> p->shift) >= CARDMAP_WIDTH) | ||
2632 | return NULL; | ||
2633 | if (p->shift == 0) | ||
2634 | return p->ptr[i]; | ||
2635 | nr &= ~(CARDMAP_MASK << p->shift); | ||
2636 | p = p->ptr[i]; | ||
2637 | } | ||
2638 | return NULL; | ||
2639 | } | ||
2640 | |||
2641 | static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) | ||
2642 | { | ||
2643 | struct cardmap *p; | ||
2644 | int i; | ||
2645 | |||
2646 | p = *pmap; | ||
2647 | if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { | ||
2648 | do { | ||
2649 | /* need a new top level */ | ||
2650 | struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); | ||
2651 | memset(np, 0, sizeof(*np)); | ||
2652 | np->ptr[0] = p; | ||
2653 | if (p != NULL) { | ||
2654 | np->shift = p->shift + CARDMAP_ORDER; | ||
2655 | p->parent = np; | ||
2656 | } else | ||
2657 | np->shift = 0; | ||
2658 | p = np; | ||
2659 | } while ((nr >> p->shift) >= CARDMAP_WIDTH); | ||
2660 | *pmap = p; | ||
2661 | } | ||
2662 | while (p->shift > 0) { | ||
2663 | i = (nr >> p->shift) & CARDMAP_MASK; | ||
2664 | if (p->ptr[i] == NULL) { | ||
2665 | struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); | ||
2666 | memset(np, 0, sizeof(*np)); | ||
2667 | np->shift = p->shift - CARDMAP_ORDER; | ||
2668 | np->parent = p; | ||
2669 | p->ptr[i] = np; | ||
2670 | } | ||
2671 | if (ptr == NULL) | ||
2672 | clear_bit(i, &p->inuse); | ||
2673 | p = p->ptr[i]; | ||
2674 | } | ||
2675 | i = nr & CARDMAP_MASK; | ||
2676 | p->ptr[i] = ptr; | ||
2677 | if (ptr != NULL) | ||
2678 | set_bit(i, &p->inuse); | ||
2679 | else | ||
2680 | clear_bit(i, &p->inuse); | ||
2681 | } | ||
2682 | |||
2683 | static unsigned int cardmap_find_first_free(struct cardmap *map) | ||
2684 | { | ||
2685 | struct cardmap *p; | ||
2686 | unsigned int nr = 0; | ||
2687 | int i; | ||
2688 | |||
2689 | if ((p = map) == NULL) | ||
2690 | return 0; | ||
2691 | for (;;) { | ||
2692 | i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH); | ||
2693 | if (i >= CARDMAP_WIDTH) { | ||
2694 | if (p->parent == NULL) | ||
2695 | return CARDMAP_WIDTH << p->shift; | ||
2696 | p = p->parent; | ||
2697 | i = (nr >> p->shift) & CARDMAP_MASK; | ||
2698 | set_bit(i, &p->inuse); | ||
2699 | continue; | ||
2700 | } | ||
2701 | nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift); | ||
2702 | if (p->shift == 0 || p->ptr[i] == NULL) | ||
2703 | return nr; | ||
2704 | p = p->ptr[i]; | ||
2705 | } | ||
2706 | } | ||
2707 | |||
2708 | static void cardmap_destroy(struct cardmap **pmap) | ||
2709 | { | ||
2710 | struct cardmap *p, *np; | ||
2711 | int i; | ||
2712 | |||
2713 | for (p = *pmap; p != NULL; p = np) { | ||
2714 | if (p->shift != 0) { | ||
2715 | for (i = 0; i < CARDMAP_WIDTH; ++i) | ||
2716 | if (p->ptr[i] != NULL) | ||
2717 | break; | ||
2718 | if (i < CARDMAP_WIDTH) { | ||
2719 | np = p->ptr[i]; | ||
2720 | p->ptr[i] = NULL; | ||
2721 | continue; | ||
2722 | } | ||
2723 | } | ||
2724 | np = p->parent; | ||
2725 | kfree(p); | ||
2726 | } | ||
2727 | *pmap = NULL; | ||
2728 | } | ||
2729 | |||
2730 | /* Module/initialization stuff */ | ||
2731 | |||
2732 | module_init(ppp_init); | ||
2733 | module_exit(ppp_cleanup); | ||
2734 | |||
2735 | EXPORT_SYMBOL(ppp_register_channel); | ||
2736 | EXPORT_SYMBOL(ppp_unregister_channel); | ||
2737 | EXPORT_SYMBOL(ppp_channel_index); | ||
2738 | EXPORT_SYMBOL(ppp_unit_number); | ||
2739 | EXPORT_SYMBOL(ppp_input); | ||
2740 | EXPORT_SYMBOL(ppp_input_error); | ||
2741 | EXPORT_SYMBOL(ppp_output_wakeup); | ||
2742 | EXPORT_SYMBOL(ppp_register_compressor); | ||
2743 | EXPORT_SYMBOL(ppp_unregister_compressor); | ||
2744 | MODULE_LICENSE("GPL"); | ||
2745 | MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); | ||
2746 | MODULE_ALIAS("/dev/ppp"); | ||