author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-07-31 05:38:19 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-08-27 03:58:26 -0400
commit     224cf5ad14c038b13c119dff29422f178a306f54 (patch)
tree       89bf411ea743e9d7bbd3c609eeb7220270a97fc5 /drivers/net/ppp
parent     aab3ac26108642eaa06efa4697dab595c7de2bbd (diff)
ppp: Move the PPP drivers
Move the PPP drivers into drivers/net/ppp/ and make the necessary
Kconfig and Makefile changes.

CC: Paul Mackerras <paulus@samba.org>
CC: Frank Cusack <fcusack@fcusack.com>
CC: Michal Ostrowski <mostrows@speakeasy.net>
CC: Michal Ostrowski <mostrows@earthlink.net>
CC: Dmitry Kozlov <xeb@mail.ru>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ppp')
-rw-r--r--  drivers/net/ppp/Kconfig          175
-rw-r--r--  drivers/net/ppp/Makefile          13
-rw-r--r--  drivers/net/ppp/bsd_comp.c      1170
-rw-r--r--  drivers/net/ppp/ppp_async.c     1028
-rw-r--r--  drivers/net/ppp/ppp_deflate.c    653
-rw-r--r--  drivers/net/ppp/ppp_generic.c   2954
-rw-r--r--  drivers/net/ppp/ppp_mppe.c       740
-rw-r--r--  drivers/net/ppp/ppp_mppe.h        86
-rw-r--r--  drivers/net/ppp/ppp_synctty.c    790
-rw-r--r--  drivers/net/ppp/pppoe.c         1208
-rw-r--r--  drivers/net/ppp/pppox.c          149
-rw-r--r--  drivers/net/ppp/pptp.c           717
12 files changed, 9683 insertions, 0 deletions
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
new file mode 100644
index 000000000000..872df3ef07a6
--- /dev/null
+++ b/drivers/net/ppp/Kconfig
@@ -0,0 +1,175 @@
1#
2# PPP network device configuration
3#
4
5config PPP
6 tristate "PPP (point-to-point protocol) support"
7 select SLHC
8 ---help---
9 PPP (Point to Point Protocol) is a newer and better SLIP. It serves
10 the same purpose: sending Internet traffic over telephone (and other
11 serial) lines. Ask your access provider if they support it, because
12 otherwise you can't use it; most Internet access providers these
13 days support PPP rather than SLIP.
14
15 To use PPP, you need an additional program called pppd as described
16 in the PPP-HOWTO, available at
17 <http://www.tldp.org/docs.html#howto>. Make sure that you have
18 the version of pppd recommended in <file:Documentation/Changes>.
19 The PPP option enlarges your kernel by about 16 KB.
20
21 There are actually two versions of PPP: the traditional PPP for
22 asynchronous lines, such as regular analog phone lines, and
23 synchronous PPP which can be used over digital ISDN lines for
24 example. If you want to use PPP over phone lines or other
25 asynchronous serial lines, you need to say Y (or M) here and also to
26 the next option, "PPP support for async serial ports". For PPP over
27 synchronous lines, you should say Y (or M) here and to "Support
28 synchronous PPP", below.
29
30 If you said Y to "Version information on all symbols" above, then
31 you cannot compile the PPP driver into the kernel; you can then only
32 compile it as a module. To compile this driver as a module, choose M
33 here. The module will be called ppp_generic.
34
35if PPP
36
37config PPP_BSDCOMP
38 tristate "PPP BSD-Compress compression"
39 depends on PPP
40 ---help---
41 Support for the BSD-Compress compression method for PPP, which uses
42 the LZW compression method to compress each PPP packet before it is
43 sent over the wire. The machine at the other end of the PPP link
44 (usually your ISP) has to support the BSD-Compress compression
45 method as well for this to be useful. Even if they don't support it,
46 it is safe to say Y here.
47
48 The PPP Deflate compression method ("PPP Deflate compression",
49 above) is preferable to BSD-Compress, because it compresses better
50 and is patent-free.
51
52 Note that the BSD compression code will always be compiled as a
53 module; it is called bsd_comp and will show up in the directory
54 modules once you have said "make modules". If unsure, say N.
55
56config PPP_DEFLATE
57 tristate "PPP Deflate compression"
58 depends on PPP
59 select ZLIB_INFLATE
60 select ZLIB_DEFLATE
61 ---help---
62 Support for the Deflate compression method for PPP, which uses the
63 Deflate algorithm (the same algorithm that gzip uses) to compress
64 each PPP packet before it is sent over the wire. The machine at the
65 other end of the PPP link (usually your ISP) has to support the
66 Deflate compression method as well for this to be useful. Even if
67 they don't support it, it is safe to say Y here.
68
69 To compile this driver as a module, choose M here.
70
71config PPP_FILTER
72 bool "PPP filtering"
73 depends on PPP
74 ---help---
75 Say Y here if you want to be able to filter the packets passing over
76 PPP interfaces. This allows you to control which packets count as
77 activity (i.e. which packets will reset the idle timer or bring up
78 a demand-dialed link) and which packets are to be dropped entirely.
79 You need to say Y here if you wish to use the pass-filter and
80 active-filter options to pppd.
81
82 If unsure, say N.
83
84config PPP_MPPE
85 tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
86 depends on PPP && EXPERIMENTAL
87 select CRYPTO
88 select CRYPTO_SHA1
89 select CRYPTO_ARC4
90 select CRYPTO_ECB
91 ---help---
92 Support for the MPPE Encryption protocol, as employed by the
93 Microsoft Point-to-Point Tunneling Protocol.
94
95 See http://pptpclient.sourceforge.net/ for information on
96 configuring PPTP clients and servers to utilize this method.
97
98config PPP_MULTILINK
99 bool "PPP multilink support (EXPERIMENTAL)"
100 depends on PPP && EXPERIMENTAL
101 ---help---
102 PPP multilink is a protocol (defined in RFC 1990) which allows you
103 to combine several (logical or physical) lines into one logical PPP
104 connection, so that you can utilize your full bandwidth.
105
106 This has to be supported at the other end as well and you need a
107 version of the pppd daemon which understands the multilink protocol.
108
109 If unsure, say N.
110
111config PPPOATM
112 tristate "PPP over ATM"
113 depends on ATM && PPP
114 ---help---
115 Support PPP (Point to Point Protocol) encapsulated in ATM frames.
116 This implementation does not yet comply with section 8 of RFC2364,
117 which can lead to bad results if the ATM peer loses state and
118 changes its encapsulation unilaterally.
119
120config PPPOE
121 tristate "PPP over Ethernet (EXPERIMENTAL)"
122 depends on EXPERIMENTAL && PPP
123 ---help---
124 Support for PPP over Ethernet.
125
126 This driver requires the latest version of pppd from the CVS
127 repository at cvs.samba.org. Alternatively, see the
128 RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
129 which contains instructions on how to use this driver (under
130 the heading "Kernel mode PPPoE").
131
132config PPTP
133 tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
134 depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
135 ---help---
136 Support for PPP over IPv4 (Point-to-Point Tunneling Protocol).
137
138 This driver requires a pppd plugin to work in client mode or a
139 modified pptpd (poptop) to work in server mode.
140 See http://accel-pptp.sourceforge.net/ for information on how to
141 utilize this module.
142
143config PPPOL2TP
144 tristate "PPP over L2TP (EXPERIMENTAL)"
145 depends on EXPERIMENTAL && L2TP && PPP
146 ---help---
147 Support for PPP-over-L2TP socket family. L2TP is a protocol
148 used by ISPs and enterprises to tunnel PPP traffic over UDP
149 tunnels. L2TP is replacing PPTP for VPN uses.
150
151config PPP_ASYNC
152 tristate "PPP support for async serial ports"
153 depends on PPP
154 select CRC_CCITT
155 ---help---
156 Say Y (or M) here if you want to be able to use PPP over standard
157 asynchronous serial ports, such as COM1 or COM2 on a PC. If you use
158 a modem (not a synchronous or ISDN modem) to contact your ISP, you
159 need this option.
160
161 To compile this driver as a module, choose M here.
162
163 If unsure, say Y.
164
165config PPP_SYNC_TTY
166 tristate "PPP support for sync tty ports"
167 depends on PPP
168 ---help---
169 Say Y (or M) here if you want to be able to use PPP over synchronous
170 (HDLC) tty devices, such as the SyncLink adapter. These devices
171 are often used for high-speed leased lines like T1/E1.
172
173 To compile this driver as a module, choose M here.
174
175endif # PPP
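
Note: as an illustrative usage sketch (the option names come from the file
above, the selection is only an example), a typical modular configuration
for PPP over an ordinary asynchronous serial modem, with Deflate
compression and pppd packet filtering, would be:

    CONFIG_PPP=m
    CONFIG_PPP_ASYNC=m
    CONFIG_PPP_DEFLATE=m
    CONFIG_PPP_FILTER=y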
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
new file mode 100644
index 000000000000..a6b6297b0066
--- /dev/null
+++ b/drivers/net/ppp/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Linux PPP network device drivers.
3#
4
5obj-$(CONFIG_PPP) += ppp_generic.o
6obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
7obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
8obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
9obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
10obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
11obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
12obj-$(CONFIG_PPPOL2TP) += pppox.o
13obj-$(CONFIG_PPTP) += pppox.o pptp.o
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
new file mode 100644
index 000000000000..a9b759add187
--- /dev/null
+++ b/drivers/net/ppp/bsd_comp.c
@@ -0,0 +1,1170 @@
1/*
2 * Update: The Berkeley copyright was changed, and the change
3 * is retroactive to all "true" BSD software (i.e. everything
4 * from UCB as opposed to other people's code that just carried
5 * the same license). The new copyright doesn't clash with the
6 * GPL, so the module-only restriction has been removed.
7 */
8
9/* Because this code is derived from the 4.3BSD compress source:
10 *
11 * Copyright (c) 1985, 1986 The Regents of the University of California.
12 * All rights reserved.
13 *
14 * This code is derived from software contributed to Berkeley by
15 * James A. Woods, derived from original work by Spencer Thomas
16 * and Joseph Orost.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. All advertising materials mentioning features or use of this software
27 * must display the following acknowledgement:
28 * This product includes software developed by the University of
29 * California, Berkeley and its contributors.
30 * 4. Neither the name of the University nor the names of its contributors
31 * may be used to endorse or promote products derived from this software
32 * without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
35 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 * SUCH DAMAGE.
45 */
46
47/*
48 * This version is for use with contiguous buffers on Linux-derived systems.
49 *
50 * ==FILEVERSION 20000226==
51 *
52 * NOTE TO MAINTAINERS:
53 * If you modify this file at all, please set the number above to the
54 * date of the modification as YYMMDD (year month day).
55 * bsd_comp.c is shipped with a PPP distribution as well as with
56 * the kernel; if everyone increases the FILEVERSION number above,
57 * then scripts can do the right thing when deciding whether to
58 * install a new bsd_comp.c file. Don't change the format of that
59 * line otherwise, so the installation script can recognize it.
60 *
61 * From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp
62 */
63
64#include <linux/module.h>
65#include <linux/init.h>
66#include <linux/slab.h>
67#include <linux/vmalloc.h>
68#include <linux/string.h>
69
70#include <linux/ppp_defs.h>
71
72#undef PACKETPTR
73#define PACKETPTR 1
74#include <linux/ppp-comp.h>
75#undef PACKETPTR
76
77#include <asm/byteorder.h>
78
79/*
80 * PPP "BSD compress" compression
81 * The differences between this compression and the classic BSD LZW
82 * source are obvious from the requirement that the classic code worked
83 * with files while this handles arbitrarily long streams that
84 * are broken into packets. They are:
85 *
86 * When the code size expands, a block of junk is not emitted by
87 * the compressor and not expected by the decompressor.
88 *
89 * New codes are not necessarily assigned every time an old
90 * code is output by the compressor. This is because a packet
91 * end forces a code to be emitted, but does not imply that a
92 * new sequence has been seen.
93 *
94 * The compression ratio is checked at the first end of a packet
95 * after the appropriate gap. Besides simplifying and speeding
96 * things up, this makes it more likely that the transmitter
97 * and receiver will agree when the dictionary is cleared when
98 * compression is not going well.
99 */
100
101/*
102 * Macros to extract protocol version and number of bits
103 * from the third byte of the BSD Compress CCP configuration option.
104 */
105
106#define BSD_VERSION(x) ((x) >> 5)
107#define BSD_NBITS(x) ((x) & 0x1F)
108
109#define BSD_CURRENT_VERSION 1
110
111/*
112 * A dictionary for doing BSD compress.
113 */
114
115struct bsd_dict {
116 union { /* hash value */
117 unsigned long fcode;
118 struct {
119#if defined(__LITTLE_ENDIAN) /* Little endian order */
120 unsigned short prefix; /* preceding code */
121 unsigned char suffix; /* last character of new code */
122 unsigned char pad;
123#elif defined(__BIG_ENDIAN) /* Big endian order */
124 unsigned char pad;
125 unsigned char suffix; /* last character of new code */
126 unsigned short prefix; /* preceding code */
127#else
128#error Endianness not defined...
129#endif
130 } hs;
131 } f;
132 unsigned short codem1; /* output of hash table -1 */
133 unsigned short cptr; /* map code to hash table entry */
134};
135
136struct bsd_db {
137 int totlen; /* length of this structure */
138 unsigned int hsize; /* size of the hash table */
139 unsigned char hshift; /* used in hash function */
140 unsigned char n_bits; /* current bits/code */
141 unsigned char maxbits; /* maximum bits/code */
142 unsigned char debug; /* non-zero if debug desired */
143 unsigned char unit; /* ppp unit number */
144 unsigned short seqno; /* sequence # of next packet */
145 unsigned int mru; /* size of receive (decompress) bufr */
146 unsigned int maxmaxcode; /* largest valid code */
147 unsigned int max_ent; /* largest code in use */
148 unsigned int in_count; /* uncompressed bytes, aged */
149 unsigned int bytes_out; /* compressed bytes, aged */
150 unsigned int ratio; /* recent compression ratio */
151 unsigned int checkpoint; /* when to next check the ratio */
152 unsigned int clear_count; /* times dictionary cleared */
153 unsigned int incomp_count; /* incompressible packets */
154 unsigned int incomp_bytes; /* incompressible bytes */
155 unsigned int uncomp_count; /* uncompressed packets */
156 unsigned int uncomp_bytes; /* uncompressed bytes */
157 unsigned int comp_count; /* compressed packets */
158 unsigned int comp_bytes; /* compressed bytes */
159 unsigned short *lens; /* array of lengths of codes */
160 struct bsd_dict *dict; /* dictionary */
161};
162
163#define BSD_OVHD 2 /* BSD compress overhead/packet */
164#define MIN_BSD_BITS 9
165#define BSD_INIT_BITS MIN_BSD_BITS
166#define MAX_BSD_BITS 15
167
168static void bsd_free (void *state);
169static void *bsd_alloc(unsigned char *options, int opt_len, int decomp);
170static void *bsd_comp_alloc (unsigned char *options, int opt_len);
171static void *bsd_decomp_alloc (unsigned char *options, int opt_len);
172
173static int bsd_init (void *db, unsigned char *options,
174 int opt_len, int unit, int debug, int decomp);
175static int bsd_comp_init (void *state, unsigned char *options,
176 int opt_len, int unit, int opthdr, int debug);
177static int bsd_decomp_init (void *state, unsigned char *options,
178 int opt_len, int unit, int opthdr, int mru,
179 int debug);
180
181static void bsd_reset (void *state);
182static void bsd_comp_stats (void *state, struct compstat *stats);
183
184static int bsd_compress (void *state, unsigned char *rptr,
185 unsigned char *obuf, int isize, int osize);
186static void bsd_incomp (void *state, unsigned char *ibuf, int icnt);
187
188static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
189 unsigned char *obuf, int osize);
190
191/* These are in ppp_generic.c */
192extern int ppp_register_compressor (struct compressor *cp);
193extern void ppp_unregister_compressor (struct compressor *cp);
194
195/*
196 * the next two codes should not be changed lightly, as they must not
197 * lie within the contiguous general code space.
198 */
199#define CLEAR 256 /* table clear output code */
200#define FIRST 257 /* first free entry */
201#define LAST 255
202
203#define MAXCODE(b) ((1 << (b)) - 1)
204#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
205
206#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
207 ^ (unsigned long)(prefix))
208#define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \
209 + (unsigned long)(prefix))
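
/*
 * Worked example of the two macros above: with prefix code 0x41,
 * suffix byte 0x42 and hshift == 4, BSD_KEY gives
 * (0x42 << 16) + 0x41 == 0x420041 and BSD_HASH gives
 * (0x42 << 4) ^ 0x41 == 0x461.
 */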
210
211#define CHECK_GAP 10000 /* Ratio check interval */
212
213#define RATIO_SCALE_LOG 8
214#define RATIO_SCALE (1<<RATIO_SCALE_LOG)
215#define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG)
216
217/*
218 * clear the dictionary
219 */
220
221static void
222bsd_clear(struct bsd_db *db)
223{
224 db->clear_count++;
225 db->max_ent = FIRST-1;
226 db->n_bits = BSD_INIT_BITS;
227 db->bytes_out = 0;
228 db->in_count = 0;
229 db->ratio = 0;
230 db->checkpoint = CHECK_GAP;
231}
232
233/*
234 * If the dictionary is full, then see if it is time to reset it.
235 *
236 * Compute the compression ratio using fixed-point arithmetic
237 * with 8 fractional bits.
238 *
239 * Since we have an infinite stream instead of a single file,
240 * watch only the local compression ratio.
241 *
242 * Since both peers must reset the dictionary at the same time even in
243 * the absence of CLEAR codes (while packets are incompressible), they
244 * must compute the same ratio.
245 */
246
247static int bsd_check (struct bsd_db *db) /* 1=output CLEAR */
248 {
249 unsigned int new_ratio;
250
251 if (db->in_count >= db->checkpoint)
252 {
253 /* age the ratio by limiting the size of the counts */
254 if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
255 {
256 db->in_count -= (db->in_count >> 2);
257 db->bytes_out -= (db->bytes_out >> 2);
258 }
259
260 db->checkpoint = db->in_count + CHECK_GAP;
261
262 if (db->max_ent >= db->maxmaxcode)
263 {
264 /* Reset the dictionary only if the ratio is worse,
265 * or if it looks as if it has been poisoned
266 * by incompressible data.
267 *
268 * This does not overflow, because
269 * db->in_count <= RATIO_MAX.
270 */
271
272 new_ratio = db->in_count << RATIO_SCALE_LOG;
273 if (db->bytes_out != 0)
274 {
275 new_ratio /= db->bytes_out;
276 }
277
278 if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
279 {
280 bsd_clear (db);
281 return 1;
282 }
283 db->ratio = new_ratio;
284 }
285 }
286 return 0;
287 }
288
289/*
290 * Return statistics.
291 */
292
293static void bsd_comp_stats (void *state, struct compstat *stats)
294 {
295 struct bsd_db *db = (struct bsd_db *) state;
296
297 stats->unc_bytes = db->uncomp_bytes;
298 stats->unc_packets = db->uncomp_count;
299 stats->comp_bytes = db->comp_bytes;
300 stats->comp_packets = db->comp_count;
301 stats->inc_bytes = db->incomp_bytes;
302 stats->inc_packets = db->incomp_count;
303 stats->in_count = db->in_count;
304 stats->bytes_out = db->bytes_out;
305 }
306
307/*
308 * Reset state, as on a CCP ResetReq.
309 */
310
311static void bsd_reset (void *state)
312 {
313 struct bsd_db *db = (struct bsd_db *) state;
314
315 bsd_clear(db);
316
317 db->seqno = 0;
318 db->clear_count = 0;
319 }
320
321/*
322 * Release the compression structure
323 */
324
325static void bsd_free (void *state)
326{
327 struct bsd_db *db = state;
328
329 if (!db)
330 return;
331
332/*
333 * Release the dictionary
334 */
335 vfree(db->dict);
336 db->dict = NULL;
337/*
338 * Release the string buffer
339 */
340 vfree(db->lens);
341 db->lens = NULL;
342/*
343 * Finally release the structure itself.
344 */
345 kfree(db);
346}
347
348/*
349 * Allocate space for a (de) compressor.
350 */
351
352static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
353 {
354 int bits;
355 unsigned int hsize, hshift, maxmaxcode;
356 struct bsd_db *db;
357
358 if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
359 || BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
360 {
361 return NULL;
362 }
363
364 bits = BSD_NBITS(options[2]);
365
366 switch (bits)
367 {
368 case 9: /* needs 82152 for both directions */
369 case 10: /* needs 84144 */
370 case 11: /* needs 88240 */
371 case 12: /* needs 96432 */
372 hsize = 5003;
373 hshift = 4;
374 break;
375 case 13: /* needs 176784 */
376 hsize = 9001;
377 hshift = 5;
378 break;
379 case 14: /* needs 353744 */
380 hsize = 18013;
381 hshift = 6;
382 break;
383 case 15: /* needs 691440 */
384 hsize = 35023;
385 hshift = 7;
386 break;
387 case 16: /* needs 1366160--far too much, */
388 /* hsize = 69001; */ /* and 69001 is too big for cptr */
389 /* hshift = 8; */ /* in struct bsd_db */
390 /* break; */
391 default:
392 return NULL;
393 }
394/*
395 * Allocate the main control structure for this instance.
396 */
397 maxmaxcode = MAXCODE(bits);
398 db = kzalloc(sizeof (struct bsd_db),
399 GFP_KERNEL);
400 if (!db)
401 {
402 return NULL;
403 }
404
405/*
406 * Allocate space for the dictionary. This may be more than one page in
407 * length.
408 */
409 db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
410 if (!db->dict)
411 {
412 bsd_free (db);
413 return NULL;
414 }
415
416/*
417 * If this is the compression buffer then there is no length data.
418 */
419 if (!decomp)
420 {
421 db->lens = NULL;
422 }
423/*
424 * For decompression, the length information is needed as well.
425 */
426 else
427 {
428 db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
429 if (!db->lens)
430 {
431 bsd_free (db);
432 return NULL;
433 }
434 }
435/*
436 * Initialize the data information for the compression code
437 */
438 db->totlen = sizeof (struct bsd_db) +
439 (sizeof (struct bsd_dict) * hsize);
440
441 db->hsize = hsize;
442 db->hshift = hshift;
443 db->maxmaxcode = maxmaxcode;
444 db->maxbits = bits;
445
446 return (void *) db;
447 }
448
449static void *bsd_comp_alloc (unsigned char *options, int opt_len)
450 {
451 return bsd_alloc (options, opt_len, 0);
452 }
453
454static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
455 {
456 return bsd_alloc (options, opt_len, 1);
457 }
458
459/*
460 * Initialize the database.
461 */
462
463static int bsd_init (void *state, unsigned char *options,
464 int opt_len, int unit, int debug, int decomp)
465 {
466 struct bsd_db *db = state;
467 int indx;
468
469 if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
470 || (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
471 || (BSD_NBITS(options[2]) != db->maxbits)
472 || (decomp && db->lens == NULL))
473 {
474 return 0;
475 }
476
477 if (decomp)
478 {
479 indx = LAST;
480 do
481 {
482 db->lens[indx] = 1;
483 }
484 while (indx-- > 0);
485 }
486
487 indx = db->hsize;
488 while (indx-- != 0)
489 {
490 db->dict[indx].codem1 = BADCODEM1;
491 db->dict[indx].cptr = 0;
492 }
493
494 db->unit = unit;
495 db->mru = 0;
496#ifndef DEBUG
497 if (debug)
498#endif
499 db->debug = 1;
500
501 bsd_reset(db);
502
503 return 1;
504 }
505
506static int bsd_comp_init (void *state, unsigned char *options,
507 int opt_len, int unit, int opthdr, int debug)
508 {
509 return bsd_init (state, options, opt_len, unit, debug, 0);
510 }
511
512static int bsd_decomp_init (void *state, unsigned char *options,
513 int opt_len, int unit, int opthdr, int mru,
514 int debug)
515 {
516 return bsd_init (state, options, opt_len, unit, debug, 1);
517 }
518
519/*
520 * Obtain pointers to the various structures in the compression tables
521 */
522
523#define dict_ptrx(p,idx) &(p->dict[idx])
524#define lens_ptrx(p,idx) &(p->lens[idx])
525
526#ifdef DEBUG
527static unsigned short *lens_ptr(struct bsd_db *db, int idx)
528 {
529 if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
530 {
531 printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
532 idx = 0;
533 }
534 return lens_ptrx (db, idx);
535 }
536
537static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
538 {
539 if ((unsigned int) idx >= (unsigned int) db->hsize)
540 {
541 printk ("<9>ppp: dict_ptr(%d) > max\n", idx);
542 idx = 0;
543 }
544 return dict_ptrx (db, idx);
545 }
546
547#else
548#define lens_ptr(db,idx) lens_ptrx(db,idx)
549#define dict_ptr(db,idx) dict_ptrx(db,idx)
550#endif
551
552/*
553 * compress a packet
554 *
555 * The result of this function is the size of the compressed
556 * packet. A zero is returned if the packet was not compressed
557 * for some reason, such as the size being larger than uncompressed.
558 *
559 * One change from the BSD compress command is that when the
560 * code size expands, we do not output a bunch of padding.
561 */
562
563static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf,
564 int isize, int osize)
565 {
566 struct bsd_db *db;
567 int hshift;
568 unsigned int max_ent;
569 unsigned int n_bits;
570 unsigned int bitno;
571 unsigned long accm;
572 int ent;
573 unsigned long fcode;
574 struct bsd_dict *dictp;
575 unsigned char c;
576 int hval;
577 int disp;
578 int ilen;
579 int mxcode;
580 unsigned char *wptr;
581 int olen;
582
583#define PUTBYTE(v) \
584 { \
585 ++olen; \
586 if (wptr) \
587 { \
588 *wptr++ = (unsigned char) (v); \
589 if (olen >= osize) \
590 { \
591 wptr = NULL; \
592 } \
593 } \
594 }
595
596#define OUTPUT(ent) \
597 { \
598 bitno -= n_bits; \
599 accm |= ((ent) << bitno); \
600 do \
601 { \
602 PUTBYTE(accm >> 24); \
603 accm <<= 8; \
604 bitno += 8; \
605 } \
606 while (bitno <= 24); \
607 }
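
/*
 * Illustration of OUTPUT: with n_bits == 9 and an empty accumulator
 * (bitno == 32), OUTPUT(ent) drops bitno to 23, the do-while flushes
 * one byte (the top 8 bits of accm) and leaves bitno at 31, with one
 * bit of the 9-bit code still pending in accm.
 */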
608
609 /*
610 * If the protocol is not in the range we're interested in,
611 * just return without compressing the packet. If it is,
612 * the protocol becomes the first byte to compress.
613 */
614
615 ent = PPP_PROTOCOL(rptr);
616 if (ent < 0x21 || ent > 0xf9)
617 {
618 return 0;
619 }
620
621 db = (struct bsd_db *) state;
622 hshift = db->hshift;
623 max_ent = db->max_ent;
624 n_bits = db->n_bits;
625 bitno = 32;
626 accm = 0;
627 mxcode = MAXCODE (n_bits);
628
629 /* Initialize the output pointers */
630 wptr = obuf;
631 olen = PPP_HDRLEN + BSD_OVHD;
632
633 if (osize > isize)
634 {
635 osize = isize;
636 }
637
638 /* This is the PPP header information */
639 if (wptr)
640 {
641 *wptr++ = PPP_ADDRESS(rptr);
642 *wptr++ = PPP_CONTROL(rptr);
643 *wptr++ = 0;
644 *wptr++ = PPP_COMP;
645 *wptr++ = db->seqno >> 8;
646 *wptr++ = db->seqno;
647 }
648
649 /* Skip the input header */
650 rptr += PPP_HDRLEN;
651 isize -= PPP_HDRLEN;
652 ilen = ++isize; /* Low byte of protocol is counted as input */
653
654 while (--ilen > 0)
655 {
656 c = *rptr++;
657 fcode = BSD_KEY (ent, c);
658 hval = BSD_HASH (ent, c, hshift);
659 dictp = dict_ptr (db, hval);
660
661 /* Validate and then check the entry. */
662 if (dictp->codem1 >= max_ent)
663 {
664 goto nomatch;
665 }
666
667 if (dictp->f.fcode == fcode)
668 {
669 ent = dictp->codem1 + 1;
670 continue; /* found (prefix,suffix) */
671 }
672
673 /* continue probing until a match or invalid entry */
674 disp = (hval == 0) ? 1 : hval;
675
676 do
677 {
678 hval += disp;
679 if (hval >= db->hsize)
680 {
681 hval -= db->hsize;
682 }
683 dictp = dict_ptr (db, hval);
684 if (dictp->codem1 >= max_ent)
685 {
686 goto nomatch;
687 }
688 }
689 while (dictp->f.fcode != fcode);
690
691 ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
692 continue;
693
694nomatch:
695 OUTPUT(ent); /* output the prefix */
696
697 /* code -> hashtable */
698 if (max_ent < db->maxmaxcode)
699 {
700 struct bsd_dict *dictp2;
701 struct bsd_dict *dictp3;
702 int indx;
703
704 /* expand code size if needed */
705 if (max_ent >= mxcode)
706 {
707 db->n_bits = ++n_bits;
708 mxcode = MAXCODE (n_bits);
709 }
710
711 /* Invalidate old hash table entry using
712 * this code, and then take it over.
713 */
714
715 dictp2 = dict_ptr (db, max_ent + 1);
716 indx = dictp2->cptr;
717 dictp3 = dict_ptr (db, indx);
718
719 if (dictp3->codem1 == max_ent)
720 {
721 dictp3->codem1 = BADCODEM1;
722 }
723
724 dictp2->cptr = hval;
725 dictp->codem1 = max_ent;
726 dictp->f.fcode = fcode;
727 db->max_ent = ++max_ent;
728
729 if (db->lens)
730 {
731 unsigned short *len1 = lens_ptr (db, max_ent);
732 unsigned short *len2 = lens_ptr (db, ent);
733 *len1 = *len2 + 1;
734 }
735 }
736 ent = c;
737 }
738
739 OUTPUT(ent); /* output the last code */
740
741 db->bytes_out += olen - PPP_HDRLEN - BSD_OVHD;
742 db->uncomp_bytes += isize;
743 db->in_count += isize;
744 ++db->uncomp_count;
745 ++db->seqno;
746
747 if (bitno < 32)
748 {
749 ++db->bytes_out; /* must be set before calling bsd_check */
750 }
751
752 /*
753 * Generate the clear command if needed
754 */
755
756 if (bsd_check(db))
757 {
758 OUTPUT (CLEAR);
759 }
760
761 /*
762 * Pad dribble bits of last code with ones.
763 * Do not emit a completely useless byte of ones.
764 */
765
766 if (bitno != 32)
767 {
768 PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
769 }
770
771 /*
772 * Increase code size if we would have without the packet
773 * boundary because the decompressor will do so.
774 */
775
776 if (max_ent >= mxcode && max_ent < db->maxmaxcode)
777 {
778 db->n_bits++;
779 }
780
781 /* If output length is too large then this is an incomplete frame. */
782 if (wptr == NULL)
783 {
784 ++db->incomp_count;
785 db->incomp_bytes += isize;
786 olen = 0;
787 }
788 else /* Count the number of compressed frames */
789 {
790 ++db->comp_count;
791 db->comp_bytes += olen;
792 }
793
794 /* Return the resulting output length */
795 return olen;
796#undef OUTPUT
797#undef PUTBYTE
798 }
799
800/*
801 * Update the "BSD Compress" dictionary on the receiver for
802 * incompressible data by pretending to compress the incoming data.
803 */
804
805static void bsd_incomp (void *state, unsigned char *ibuf, int icnt)
806 {
807 (void) bsd_compress (state, ibuf, (char *) 0, icnt, 0);
808 }
809
810/*
811 * Decompress "BSD Compress".
812 *
813 * Because of patent problems, we return DECOMP_ERROR for errors
814 * found by inspecting the input data and for system problems, but
815 * DECOMP_FATALERROR for any errors which could possibly be said to
816 * be being detected "after" decompression. For DECOMP_ERROR,
817 * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
818 * infringing a patent of Motorola's if we do, so we take CCP down
819 * instead.
820 *
821 * Given that the frame has the correct sequence number and a good FCS,
822 * errors such as invalid codes in the input most likely indicate a
823 * bug, so we return DECOMP_FATALERROR for them in order to turn off
824 * compression, even though they are detected by inspecting the input.
825 */
826
827static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
828 unsigned char *obuf, int osize)
829 {
830 struct bsd_db *db;
831 unsigned int max_ent;
832 unsigned long accm;
833 unsigned int bitno; /* 1st valid bit in accm */
834 unsigned int n_bits;
835 unsigned int tgtbitno; /* bitno when we have a code */
836 struct bsd_dict *dictp;
837 int explen;
838 int seq;
839 unsigned int incode;
840 unsigned int oldcode;
841 unsigned int finchar;
842 unsigned char *p;
843 unsigned char *wptr;
844 int adrs;
845 int ctrl;
846 int ilen;
847 int codelen;
848 int extra;
849
850 db = (struct bsd_db *) state;
851 max_ent = db->max_ent;
852 accm = 0;
853 bitno = 32; /* 1st valid bit in accm */
854 n_bits = db->n_bits;
855 tgtbitno = 32 - n_bits; /* bitno when we have a code */
856
857 /*
858 * Save the address/control from the PPP header
859 * and then get the sequence number.
860 */
861
862 adrs = PPP_ADDRESS (ibuf);
863 ctrl = PPP_CONTROL (ibuf);
864
865 seq = (ibuf[4] << 8) + ibuf[5];
866
867 ibuf += (PPP_HDRLEN + 2);
868 ilen = isize - (PPP_HDRLEN + 2);
869
870 /*
871 * Check the sequence number and give up if it differs from
872 * the value we're expecting.
873 */
874
875 if (seq != db->seqno)
876 {
877 if (db->debug)
878 {
879 printk("bsd_decomp%d: bad sequence # %d, expected %d\n",
880 db->unit, seq, db->seqno - 1);
881 }
882 return DECOMP_ERROR;
883 }
884
885 ++db->seqno;
886 db->bytes_out += ilen;
887
888 /*
889 * Fill in the ppp header, but not the last byte of the protocol
890 * (that comes from the decompressed data).
891 */
892
893 wptr = obuf;
894 *wptr++ = adrs;
895 *wptr++ = ctrl;
896 *wptr++ = 0;
897
898 oldcode = CLEAR;
899 explen = 3;
900
901 /*
902 * Keep the checkpoint correctly so that incompressible packets
903 * clear the dictionary at the proper times.
904 */
905
906 for (;;)
907 {
908 if (ilen-- <= 0)
909 {
910 db->in_count += (explen - 3); /* don't count the header */
911 break;
912 }
913
914 /*
915 * Accumulate bytes until we have a complete code.
916 * Then get the next code, relying on the 32-bit,
917 * unsigned accm to mask the result.
918 */
919
920 bitno -= 8;
921 accm |= *ibuf++ << bitno;
922 if (tgtbitno < bitno)
923 {
924 continue;
925 }
926
927 incode = accm >> tgtbitno;
928 accm <<= n_bits;
929 bitno += n_bits;
930
931 /*
932 * The dictionary must only be cleared at the end of a packet.
933 */
934
935 if (incode == CLEAR)
936 {
937 if (ilen > 0)
938 {
939 if (db->debug)
940 {
941 printk("bsd_decomp%d: bad CLEAR\n", db->unit);
942 }
943 return DECOMP_FATALERROR; /* probably a bug */
944 }
945
946 bsd_clear(db);
947 break;
948 }
949
950 if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
951 || (incode > max_ent && oldcode == CLEAR))
952 {
953 if (db->debug)
954 {
955 printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
956 db->unit, incode, oldcode);
957 printk("max_ent=0x%x explen=%d seqno=%d\n",
958 max_ent, explen, db->seqno);
959 }
960 return DECOMP_FATALERROR; /* probably a bug */
961 }
962
963 /* Special case for KwKwK string. */
964 if (incode > max_ent)
965 {
966 finchar = oldcode;
967 extra = 1;
968 }
969 else
970 {
971 finchar = incode;
972 extra = 0;
973 }
974
975 codelen = *(lens_ptr (db, finchar));
976 explen += codelen + extra;
977 if (explen > osize)
978 {
979 if (db->debug)
980 {
981 printk("bsd_decomp%d: ran out of mru\n", db->unit);
982#ifdef DEBUG
983 printk(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
984 ilen, finchar, codelen, explen);
985#endif
986 }
987 return DECOMP_FATALERROR;
988 }
989
990 /*
991 * Decode this code and install it in the decompressed buffer.
992 */
993
994 wptr += codelen;
995 p = wptr;
996 while (finchar > LAST)
997 {
998 struct bsd_dict *dictp2 = dict_ptr (db, finchar);
999
1000 dictp = dict_ptr (db, dictp2->cptr);
1001#ifdef DEBUG
1002 if (--codelen <= 0 || dictp->codem1 != finchar-1)
1003 {
1004 if (codelen <= 0)
1005 {
1006 printk("bsd_decomp%d: fell off end of chain ", db->unit);
1007 printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
1008 incode, finchar, dictp2->cptr, max_ent);
1009 }
1010 else
1011 {
1012 if (dictp->codem1 != finchar-1)
1013 {
1014 printk("bsd_decomp%d: bad code chain 0x%x "
1015 "finchar=0x%x ",
1016 db->unit, incode, finchar);
1017
1018 printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n",
1019 oldcode, dictp2->cptr, dictp->codem1);
1020 }
1021 }
1022 return DECOMP_FATALERROR;
1023 }
1024#endif
1025 *--p = dictp->f.hs.suffix;
1026 finchar = dictp->f.hs.prefix;
1027 }
1028 *--p = finchar;
1029
1030#ifdef DEBUG
1031 if (--codelen != 0)
1032 {
1033 printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
1034 db->unit, codelen, incode, max_ent);
1035 }
1036#endif
1037
1038 if (extra) /* the KwKwK case again */
1039 {
1040 *wptr++ = finchar;
1041 }
1042
1043 /*
1044 * If not first code in a packet, and
1045 * if not out of code space, then allocate a new code.
1046 *
1047 * Keep the hash table correct so it can be used
1048 * with uncompressed packets.
1049 */
1050
1051 if (oldcode != CLEAR && max_ent < db->maxmaxcode)
1052 {
1053 struct bsd_dict *dictp2, *dictp3;
1054 unsigned short *lens1, *lens2;
1055 unsigned long fcode;
1056 int hval, disp, indx;
1057
1058 fcode = BSD_KEY(oldcode,finchar);
1059 hval = BSD_HASH(oldcode,finchar,db->hshift);
1060 dictp = dict_ptr (db, hval);
1061
1062 /* look for a free hash table entry */
1063 if (dictp->codem1 < max_ent)
1064 {
1065 disp = (hval == 0) ? 1 : hval;
1066 do
1067 {
1068 hval += disp;
1069 if (hval >= db->hsize)
1070 {
1071 hval -= db->hsize;
1072 }
1073 dictp = dict_ptr (db, hval);
1074 }
1075 while (dictp->codem1 < max_ent);
1076 }
1077
1078 /*
1079 * Invalidate previous hash table entry
1080 * assigned this code, and then take it over
1081 */
1082
1083 dictp2 = dict_ptr (db, max_ent + 1);
1084 indx = dictp2->cptr;
1085 dictp3 = dict_ptr (db, indx);
1086
1087 if (dictp3->codem1 == max_ent)
1088 {
1089 dictp3->codem1 = BADCODEM1;
1090 }
1091
1092 dictp2->cptr = hval;
1093 dictp->codem1 = max_ent;
1094 dictp->f.fcode = fcode;
1095 db->max_ent = ++max_ent;
1096
1097 /* Update the length of this string. */
1098 lens1 = lens_ptr (db, max_ent);
1099 lens2 = lens_ptr (db, oldcode);
1100 *lens1 = *lens2 + 1;
1101
1102 /* Expand code size if needed. */
1103 if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
1104 {
1105 db->n_bits = ++n_bits;
1106 tgtbitno = 32-n_bits;
1107 }
1108 }
1109 oldcode = incode;
1110 }
1111
1112 ++db->comp_count;
1113 ++db->uncomp_count;
1114 db->comp_bytes += isize - BSD_OVHD - PPP_HDRLEN;
1115 db->uncomp_bytes += explen;
1116
1117 if (bsd_check(db))
1118 {
1119 if (db->debug)
1120 {
1121 printk("bsd_decomp%d: peer should have cleared dictionary on %d\n",
1122 db->unit, db->seqno - 1);
1123 }
1124 }
1125 return explen;
1126 }
1127
1128/*************************************************************
1129 * Table of addresses for the BSD compression module
1130 *************************************************************/
1131
1132static struct compressor ppp_bsd_compress = {
1133 .compress_proto = CI_BSD_COMPRESS,
1134 .comp_alloc = bsd_comp_alloc,
1135 .comp_free = bsd_free,
1136 .comp_init = bsd_comp_init,
1137 .comp_reset = bsd_reset,
1138 .compress = bsd_compress,
1139 .comp_stat = bsd_comp_stats,
1140 .decomp_alloc = bsd_decomp_alloc,
1141 .decomp_free = bsd_free,
1142 .decomp_init = bsd_decomp_init,
1143 .decomp_reset = bsd_reset,
1144 .decompress = bsd_decompress,
1145 .incomp = bsd_incomp,
1146 .decomp_stat = bsd_comp_stats,
1147 .owner = THIS_MODULE
1148};
1149
1150/*************************************************************
1151 * Module support routines
1152 *************************************************************/
1153
1154static int __init bsdcomp_init(void)
1155{
1156 int answer = ppp_register_compressor(&ppp_bsd_compress);
1157 if (answer == 0)
1158 printk(KERN_INFO "PPP BSD Compression module registered\n");
1159 return answer;
1160}
1161
1162static void __exit bsdcomp_cleanup(void)
1163{
1164 ppp_unregister_compressor(&ppp_bsd_compress);
1165}
1166
1167module_init(bsdcomp_init);
1168module_exit(bsdcomp_cleanup);
1169MODULE_LICENSE("Dual BSD/GPL");
1170MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
new file mode 100644
index 000000000000..c6ba64380829
--- /dev/null
+++ b/drivers/net/ppp/ppp_async.c
@@ -0,0 +1,1028 @@
1/*
2 * PPP async serial channel driver for Linux.
3 *
4 * Copyright 1999 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * This driver provides the encapsulation and framing for sending
12 * and receiving PPP frames over async serial lines. It relies on
13 * the generic PPP layer to give it frames to send and to process
14 * received frames. It implements the PPP line discipline.
15 *
16 * Part of the code in this driver was inspired by the old async-only
17 * PPP driver, written by Michael Callahan and Al Longyear, and
18 * subsequently hacked by Paul Mackerras.
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/skbuff.h>
24#include <linux/tty.h>
25#include <linux/netdevice.h>
26#include <linux/poll.h>
27#include <linux/crc-ccitt.h>
28#include <linux/ppp_defs.h>
29#include <linux/if_ppp.h>
30#include <linux/ppp_channel.h>
31#include <linux/spinlock.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <asm/unaligned.h>
37#include <asm/uaccess.h>
38#include <asm/string.h>
39
40#define PPP_VERSION "2.4.2"
41
42#define OBUFSIZE 4096
43
44/* Structure for storing local state. */
45struct asyncppp {
46 struct tty_struct *tty;
47 unsigned int flags;
48 unsigned int state;
49 unsigned int rbits;
50 int mru;
51 spinlock_t xmit_lock;
52 spinlock_t recv_lock;
53 unsigned long xmit_flags;
54 u32 xaccm[8];
55 u32 raccm;
56 unsigned int bytes_sent;
57 unsigned int bytes_rcvd;
58
59 struct sk_buff *tpkt;
60 int tpkt_pos;
61 u16 tfcs;
62 unsigned char *optr;
63 unsigned char *olim;
64 unsigned long last_xmit;
65
66 struct sk_buff *rpkt;
67 int lcp_fcs;
68 struct sk_buff_head rqueue;
69
70 struct tasklet_struct tsk;
71
72 atomic_t refcnt;
73 struct semaphore dead_sem;
74 struct ppp_channel chan; /* interface to generic ppp layer */
75 unsigned char obuf[OBUFSIZE];
76};
77
78/* Bit numbers in xmit_flags */
79#define XMIT_WAKEUP 0
80#define XMIT_FULL 1
81#define XMIT_BUSY 2
82
83/* State bits */
84#define SC_TOSS 1
85#define SC_ESCAPE 2
86#define SC_PREV_ERROR 4
87
88/* Bits in rbits */
89#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
90
91static int flag_time = HZ;
92module_param(flag_time, int, 0);
93MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
94MODULE_LICENSE("GPL");
95MODULE_ALIAS_LDISC(N_PPP);
96
97/*
98 * Prototypes.
99 */
100static int ppp_async_encode(struct asyncppp *ap);
101static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
102static int ppp_async_push(struct asyncppp *ap);
103static void ppp_async_flush_output(struct asyncppp *ap);
104static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
105 char *flags, int count);
106static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
107 unsigned long arg);
108static void ppp_async_process(unsigned long arg);
109
110static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
111 int len, int inbound);
112
113static const struct ppp_channel_ops async_ops = {
114 .start_xmit = ppp_async_send,
115 .ioctl = ppp_async_ioctl,
116};
117
118/*
119 * Routines implementing the PPP line discipline.
120 */
121
122/*
123 * We have a potential race on dereferencing tty->disc_data,
124 * because the tty layer provides no locking at all - thus one
125 * cpu could be running ppp_asynctty_receive while another
126 * calls ppp_asynctty_close, which zeroes tty->disc_data and
127 * frees the memory that ppp_asynctty_receive is using. The best
128 * way to fix this is to use a rwlock in the tty struct, but for now
129 * we use a single global rwlock for all ttys in ppp line discipline.
130 *
131 * FIXME: this is no longer true. The _close path for the ldisc is
132 * now guaranteed to be sane.
133 */
134static DEFINE_RWLOCK(disc_data_lock);
135
136static struct asyncppp *ap_get(struct tty_struct *tty)
137{
138 struct asyncppp *ap;
139
140 read_lock(&disc_data_lock);
141 ap = tty->disc_data;
142 if (ap != NULL)
143 atomic_inc(&ap->refcnt);
144 read_unlock(&disc_data_lock);
145 return ap;
146}
147
148static void ap_put(struct asyncppp *ap)
149{
150 if (atomic_dec_and_test(&ap->refcnt))
151 up(&ap->dead_sem);
152}
153
154/*
155 * Called when a tty is put into PPP line discipline. Called in process
156 * context.
157 */
158static int
159ppp_asynctty_open(struct tty_struct *tty)
160{
161 struct asyncppp *ap;
162 int err;
163 int speed;
164
165 if (tty->ops->write == NULL)
166 return -EOPNOTSUPP;
167
168 err = -ENOMEM;
169 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
170 if (!ap)
171 goto out;
172
173 /* initialize the asyncppp structure */
174 ap->tty = tty;
175 ap->mru = PPP_MRU;
176 spin_lock_init(&ap->xmit_lock);
177 spin_lock_init(&ap->recv_lock);
178 ap->xaccm[0] = ~0U;
179 ap->xaccm[3] = 0x60000000U;
180 ap->raccm = ~0U;
181 ap->optr = ap->obuf;
182 ap->olim = ap->obuf;
183 ap->lcp_fcs = -1;
184
185 skb_queue_head_init(&ap->rqueue);
186 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
187
188 atomic_set(&ap->refcnt, 1);
189 sema_init(&ap->dead_sem, 0);
190
191 ap->chan.private = ap;
192 ap->chan.ops = &async_ops;
193 ap->chan.mtu = PPP_MRU;
194 speed = tty_get_baud_rate(tty);
195 ap->chan.speed = speed;
196 err = ppp_register_channel(&ap->chan);
197 if (err)
198 goto out_free;
199
200 tty->disc_data = ap;
201 tty->receive_room = 65536;
202 return 0;
203
204 out_free:
205 kfree(ap);
206 out:
207 return err;
208}
209
210/*
211 * Called when the tty is put into another line discipline
212 * or it hangs up. We have to wait for any cpu currently
213 * executing in any of the other ppp_asynctty_* routines to
214 * finish before we can call ppp_unregister_channel and free
215 * the asyncppp struct. This routine must be called from
216 * process context, not interrupt or softirq context.
217 */
218static void
219ppp_asynctty_close(struct tty_struct *tty)
220{
221 struct asyncppp *ap;
222
223 write_lock_irq(&disc_data_lock);
224 ap = tty->disc_data;
225 tty->disc_data = NULL;
226 write_unlock_irq(&disc_data_lock);
227 if (!ap)
228 return;
229
230 /*
231 * We have now ensured that nobody can start using ap from now
232 * on, but we have to wait for all existing users to finish.
233 * Note that ppp_unregister_channel ensures that no calls to
234 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
235 * by the time it returns.
236 */
237 if (!atomic_dec_and_test(&ap->refcnt))
238 down(&ap->dead_sem);
239 tasklet_kill(&ap->tsk);
240
241 ppp_unregister_channel(&ap->chan);
242 kfree_skb(ap->rpkt);
243 skb_queue_purge(&ap->rqueue);
244 kfree_skb(ap->tpkt);
245 kfree(ap);
246}
247
248/*
249 * Called on tty hangup in process context.
250 *
251 * Wait for I/O to driver to complete and unregister PPP channel.
252 * This is already done by the close routine, so just call that.
253 */
254static int ppp_asynctty_hangup(struct tty_struct *tty)
255{
256 ppp_asynctty_close(tty);
257 return 0;
258}
259
260/*
261 * Read does nothing - no data is ever available this way.
262 * Pppd reads and writes packets via /dev/ppp instead.
263 */
264static ssize_t
265ppp_asynctty_read(struct tty_struct *tty, struct file *file,
266 unsigned char __user *buf, size_t count)
267{
268 return -EAGAIN;
269}
270
271/*
272 * Write on the tty does nothing, the packets all come in
273 * from the ppp generic stuff.
274 */
275static ssize_t
276ppp_asynctty_write(struct tty_struct *tty, struct file *file,
277 const unsigned char *buf, size_t count)
278{
279 return -EAGAIN;
280}
281
282/*
283 * Called in process context only. May be re-entered by multiple
284 * ioctl calling threads.
285 */
286
287static int
288ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
289 unsigned int cmd, unsigned long arg)
290{
291 struct asyncppp *ap = ap_get(tty);
292 int err, val;
293 int __user *p = (int __user *)arg;
294
295 if (!ap)
296 return -ENXIO;
297 err = -EFAULT;
298 switch (cmd) {
299 case PPPIOCGCHAN:
300 err = -EFAULT;
301 if (put_user(ppp_channel_index(&ap->chan), p))
302 break;
303 err = 0;
304 break;
305
306 case PPPIOCGUNIT:
307 err = -EFAULT;
308 if (put_user(ppp_unit_number(&ap->chan), p))
309 break;
310 err = 0;
311 break;
312
313 case TCFLSH:
314 /* flush our buffers and the serial port's buffer */
315 if (arg == TCIOFLUSH || arg == TCOFLUSH)
316 ppp_async_flush_output(ap);
317 err = tty_perform_flush(tty, arg);
318 break;
319
320 case FIONREAD:
321 val = 0;
322 if (put_user(val, p))
323 break;
324 err = 0;
325 break;
326
327 default:
328 /* Try the various mode ioctls */
329 err = tty_mode_ioctl(tty, file, cmd, arg);
330 }
331
332 ap_put(ap);
333 return err;
334}
335
336/* No kernel lock - fine */
337static unsigned int
338ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
339{
340 return 0;
341}
342
343/* May sleep, don't call from interrupt level or with interrupts disabled */
344static void
345ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
346 char *cflags, int count)
347{
348 struct asyncppp *ap = ap_get(tty);
349 unsigned long flags;
350
351 if (!ap)
352 return;
353 spin_lock_irqsave(&ap->recv_lock, flags);
354 ppp_async_input(ap, buf, cflags, count);
355 spin_unlock_irqrestore(&ap->recv_lock, flags);
356 if (!skb_queue_empty(&ap->rqueue))
357 tasklet_schedule(&ap->tsk);
358 ap_put(ap);
359 tty_unthrottle(tty);
360}
361
362static void
363ppp_asynctty_wakeup(struct tty_struct *tty)
364{
365 struct asyncppp *ap = ap_get(tty);
366
367 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
368 if (!ap)
369 return;
370 set_bit(XMIT_WAKEUP, &ap->xmit_flags);
371 tasklet_schedule(&ap->tsk);
372 ap_put(ap);
373}
374
375
376static struct tty_ldisc_ops ppp_ldisc = {
377 .owner = THIS_MODULE,
378 .magic = TTY_LDISC_MAGIC,
379 .name = "ppp",
380 .open = ppp_asynctty_open,
381 .close = ppp_asynctty_close,
382 .hangup = ppp_asynctty_hangup,
383 .read = ppp_asynctty_read,
384 .write = ppp_asynctty_write,
385 .ioctl = ppp_asynctty_ioctl,
386 .poll = ppp_asynctty_poll,
387 .receive_buf = ppp_asynctty_receive,
388 .write_wakeup = ppp_asynctty_wakeup,
389};
390
391static int __init
392ppp_async_init(void)
393{
394 int err;
395
396 err = tty_register_ldisc(N_PPP, &ppp_ldisc);
397 if (err != 0)
398 printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
399 err);
400 return err;
401}
402
403/*
404 * The following routines provide the PPP channel interface.
405 */
406static int
407ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
408{
409 struct asyncppp *ap = chan->private;
410 void __user *argp = (void __user *)arg;
411 int __user *p = argp;
412 int err, val;
413 u32 accm[8];
414
415 err = -EFAULT;
416 switch (cmd) {
417 case PPPIOCGFLAGS:
418 val = ap->flags | ap->rbits;
419 if (put_user(val, p))
420 break;
421 err = 0;
422 break;
423 case PPPIOCSFLAGS:
424 if (get_user(val, p))
425 break;
426 ap->flags = val & ~SC_RCV_BITS;
427 spin_lock_irq(&ap->recv_lock);
428 ap->rbits = val & SC_RCV_BITS;
429 spin_unlock_irq(&ap->recv_lock);
430 err = 0;
431 break;
432
433 case PPPIOCGASYNCMAP:
434 if (put_user(ap->xaccm[0], (u32 __user *)argp))
435 break;
436 err = 0;
437 break;
438 case PPPIOCSASYNCMAP:
439 if (get_user(ap->xaccm[0], (u32 __user *)argp))
440 break;
441 err = 0;
442 break;
443
444 case PPPIOCGRASYNCMAP:
445 if (put_user(ap->raccm, (u32 __user *)argp))
446 break;
447 err = 0;
448 break;
449 case PPPIOCSRASYNCMAP:
450 if (get_user(ap->raccm, (u32 __user *)argp))
451 break;
452 err = 0;
453 break;
454
455 case PPPIOCGXASYNCMAP:
456 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
457 break;
458 err = 0;
459 break;
460 case PPPIOCSXASYNCMAP:
461 if (copy_from_user(accm, argp, sizeof(accm)))
462 break;
463 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
464 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
465 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
466 err = 0;
467 break;
468
469 case PPPIOCGMRU:
470 if (put_user(ap->mru, p))
471 break;
472 err = 0;
473 break;
474 case PPPIOCSMRU:
475 if (get_user(val, p))
476 break;
477 if (val < PPP_MRU)
478 val = PPP_MRU;
479 ap->mru = val;
480 err = 0;
481 break;
482
483 default:
484 err = -ENOTTY;
485 }
486
487 return err;
488}
489
490/*
491 * This is called at softirq level to deliver received packets
492 * to the ppp_generic code, and to tell the ppp_generic code
493 * if we can accept more output now.
494 */
495static void ppp_async_process(unsigned long arg)
496{
497 struct asyncppp *ap = (struct asyncppp *) arg;
498 struct sk_buff *skb;
499
500 /* process received packets */
501 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
502 if (skb->cb[0])
503 ppp_input_error(&ap->chan, 0);
504 ppp_input(&ap->chan, skb);
505 }
506
507 /* try to push more stuff out */
508 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
509 ppp_output_wakeup(&ap->chan);
510}
511
512/*
513 * Procedures for encapsulation and framing.
514 */
515
516/*
517 * Procedure to encode the data for async serial transmission.
518 * Does octet stuffing (escaping), puts the address/control bytes
519 * on if A/C compression is disabled, and does protocol compression.
520 * Assumes ap->tpkt != 0 on entry.
521 * Returns 1 if we finished the current frame, 0 otherwise.
522 */
523
524#define PUT_BYTE(ap, buf, c, islcp) do { \
525 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
526 *buf++ = PPP_ESCAPE; \
527 *buf++ = c ^ PPP_TRANS; \
528 } else \
529 *buf++ = c; \
530} while (0)
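
/*
 * For example, a flag byte 0x7e appearing in the data is sent as the
 * pair 0x7d 0x5e (0x7e ^ PPP_TRANS), and for an LCP packet every
 * control character below 0x20 is escaped the same way regardless of
 * the negotiated async control character map.
 */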
531
532static int
533ppp_async_encode(struct asyncppp *ap)
534{
535 int fcs, i, count, c, proto;
536 unsigned char *buf, *buflim;
537 unsigned char *data;
538 int islcp;
539
540 buf = ap->obuf;
541 ap->olim = buf;
542 ap->optr = buf;
543 i = ap->tpkt_pos;
544 data = ap->tpkt->data;
545 count = ap->tpkt->len;
546 fcs = ap->tfcs;
547 proto = get_unaligned_be16(data);
548
549 /*
550 * LCP packets with code values between 1 (configure-request)
551 * and 7 (code-reject) must be sent as though no options
552 * had been negotiated.
553 */
554 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
555
556 if (i == 0) {
557 if (islcp)
558 async_lcp_peek(ap, data, count, 0);
559
560 /*
561 * Start of a new packet - insert the leading FLAG
562 * character if necessary.
563 */
564 if (islcp || flag_time == 0 ||
565 time_after_eq(jiffies, ap->last_xmit + flag_time))
566 *buf++ = PPP_FLAG;
567 ap->last_xmit = jiffies;
568 fcs = PPP_INITFCS;
569
570 /*
571 * Put in the address/control bytes if necessary
572 */
573 if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
574 PUT_BYTE(ap, buf, 0xff, islcp);
575 fcs = PPP_FCS(fcs, 0xff);
576 PUT_BYTE(ap, buf, 0x03, islcp);
577 fcs = PPP_FCS(fcs, 0x03);
578 }
579 }
580
581 /*
582 * Once we put in the last byte, we need to put in the FCS
583 * and closing flag, so make sure there are at least 7 bytes
584 * of free space in the output buffer.
585 */
586 buflim = ap->obuf + OBUFSIZE - 6;
587 while (i < count && buf < buflim) {
588 c = data[i++];
589 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
590 continue; /* compress protocol field */
591 fcs = PPP_FCS(fcs, c);
592 PUT_BYTE(ap, buf, c, islcp);
593 }
594
595 if (i < count) {
596 /*
597 * Remember where we are up to in this packet.
598 */
599 ap->olim = buf;
600 ap->tpkt_pos = i;
601 ap->tfcs = fcs;
602 return 0;
603 }
604
605 /*
606 * We have finished the packet. Add the FCS and flag.
607 */
608 fcs = ~fcs;
609 c = fcs & 0xff;
610 PUT_BYTE(ap, buf, c, islcp);
611 c = (fcs >> 8) & 0xff;
612 PUT_BYTE(ap, buf, c, islcp);
613 *buf++ = PPP_FLAG;
614 ap->olim = buf;
615
616 kfree_skb(ap->tpkt);
617 ap->tpkt = NULL;
618 return 1;
619}
620
621/*
622 * Transmit-side routines.
623 */
624
625/*
626 * Send a packet to the peer over an async tty line.
627 * Returns 1 iff the packet was accepted.
628 * If the packet was not accepted, we will call ppp_output_wakeup
629 * at some later time.
630 */
631static int
632ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
633{
634 struct asyncppp *ap = chan->private;
635
636 ppp_async_push(ap);
637
638 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
639 return 0; /* already full */
640 ap->tpkt = skb;
641 ap->tpkt_pos = 0;
642
643 ppp_async_push(ap);
644 return 1;
645}
646
647/*
648 * Push as much data as possible out to the tty.
649 */
650static int
651ppp_async_push(struct asyncppp *ap)
652{
653 int avail, sent, done = 0;
654 struct tty_struct *tty = ap->tty;
655 int tty_stuffed = 0;
656
657 /*
658 * We can get called recursively here if the tty write
659 * function calls our wakeup function. This can happen
660 * for example on a pty with both the master and slave
661 * set to PPP line discipline.
662 * We use the XMIT_BUSY bit to detect this and get out,
663 * leaving the XMIT_WAKEUP bit set to tell the other
664 * instance that it may now be able to write more.
665 */
666 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
667 return 0;
668 spin_lock_bh(&ap->xmit_lock);
669 for (;;) {
670 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
671 tty_stuffed = 0;
672 if (!tty_stuffed && ap->optr < ap->olim) {
673 avail = ap->olim - ap->optr;
674 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
675 sent = tty->ops->write(tty, ap->optr, avail);
676 if (sent < 0)
677 goto flush; /* error, e.g. loss of CD */
678 ap->optr += sent;
679 if (sent < avail)
680 tty_stuffed = 1;
681 continue;
682 }
683 if (ap->optr >= ap->olim && ap->tpkt) {
684 if (ppp_async_encode(ap)) {
685 /* finished processing ap->tpkt */
686 clear_bit(XMIT_FULL, &ap->xmit_flags);
687 done = 1;
688 }
689 continue;
690 }
691 /*
692 * We haven't made any progress this time around.
693 * Clear XMIT_BUSY to let other callers in, but
694 * after doing so we have to check if anyone set
695 * XMIT_WAKEUP since we last checked it. If they
696 * did, we should try again to set XMIT_BUSY and go
697 * around again in case XMIT_BUSY was still set when
698 * the other caller tried.
699 */
700 clear_bit(XMIT_BUSY, &ap->xmit_flags);
701 /* any more work to do? if not, exit the loop */
702 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
703 (!tty_stuffed && ap->tpkt)))
704 break;
705 /* more work to do, see if we can do it now */
706 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
707 break;
708 }
709 spin_unlock_bh(&ap->xmit_lock);
710 return done;
711
712flush:
713 clear_bit(XMIT_BUSY, &ap->xmit_flags);
714 if (ap->tpkt) {
715 kfree_skb(ap->tpkt);
716 ap->tpkt = NULL;
717 clear_bit(XMIT_FULL, &ap->xmit_flags);
718 done = 1;
719 }
720 ap->optr = ap->olim;
721 spin_unlock_bh(&ap->xmit_lock);
722 return done;
723}
724
725/*
726 * Flush output from our internal buffers.
727 * Called for the TCFLSH ioctl. Can be entered in parallel
728 * but this is covered by the xmit_lock.
729 */
730static void
731ppp_async_flush_output(struct asyncppp *ap)
732{
733 int done = 0;
734
735 spin_lock_bh(&ap->xmit_lock);
736 ap->optr = ap->olim;
737 if (ap->tpkt != NULL) {
738 kfree_skb(ap->tpkt);
739 ap->tpkt = NULL;
740 clear_bit(XMIT_FULL, &ap->xmit_flags);
741 done = 1;
742 }
743 spin_unlock_bh(&ap->xmit_lock);
744 if (done)
745 ppp_output_wakeup(&ap->chan);
746}
747
748/*
749 * Receive-side routines.
750 */
751
752/* see how many ordinary chars there are at the start of buf */
753static inline int
754scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
755{
756 int i, c;
757
758 for (i = 0; i < count; ++i) {
759 c = buf[i];
760 if (c == PPP_ESCAPE || c == PPP_FLAG ||
761 (c < 0x20 && (ap->raccm & (1 << c)) != 0))
762 break;
763 }
764 return i;
765}
766
767/* called when a flag is seen - do end-of-packet processing */
768static void
769process_input_packet(struct asyncppp *ap)
770{
771 struct sk_buff *skb;
772 unsigned char *p;
773 unsigned int len, fcs, proto;
774
775 skb = ap->rpkt;
776 if (ap->state & (SC_TOSS | SC_ESCAPE))
777 goto err;
778
779 if (skb == NULL)
780 return; /* 0-length packet */
781
782 /* check the FCS */
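	/* (The loop below runs the CRC over the payload and the two
	 * trailing FCS bytes together; per RFC 1662 an intact frame
	 * leaves the register at the constant residue PPP_GOODFCS
	 * (0xf0b8), so no separate compare of the received FCS is
	 * needed.) */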
783 p = skb->data;
784 len = skb->len;
785 if (len < 3)
786 goto err; /* too short */
787 fcs = PPP_INITFCS;
788 for (; len > 0; --len)
789 fcs = PPP_FCS(fcs, *p++);
790 if (fcs != PPP_GOODFCS)
791 goto err; /* bad FCS */
792 skb_trim(skb, skb->len - 2);
793
794 /* check for address/control and protocol compression */
795 p = skb->data;
796 if (p[0] == PPP_ALLSTATIONS) {
797 /* chop off address/control */
798 if (p[1] != PPP_UI || skb->len < 3)
799 goto err;
800 p = skb_pull(skb, 2);
801 }
802 proto = p[0];
803 if (proto & 1) {
804 /* protocol is compressed */
805 skb_push(skb, 1)[0] = 0;
806 } else {
807 if (skb->len < 2)
808 goto err;
809 proto = (proto << 8) + p[1];
810 if (proto == PPP_LCP)
811 async_lcp_peek(ap, p, skb->len, 1);
812 }
813
814 /* queue the frame to be processed */
815 skb->cb[0] = ap->state;
816 skb_queue_tail(&ap->rqueue, skb);
817 ap->rpkt = NULL;
818 ap->state = 0;
819 return;
820
821 err:
822	/* frame had an error; remember that and reset SC_TOSS & SC_ESCAPE */
823 ap->state = SC_PREV_ERROR;
824 if (skb) {
825 /* make skb appear as freshly allocated */
826 skb_trim(skb, 0);
827		skb_reserve(skb, -skb_headroom(skb));
828 }
829}
830
831/* Called when the tty driver has data for us. Runs parallel with the
832 other ldisc functions but will not be re-entered */
833
834static void
835ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
836 char *flags, int count)
837{
838 struct sk_buff *skb;
839 int c, i, j, n, s, f;
840 unsigned char *sp;
841
842 /* update bits used for 8-bit cleanness detection */
843 if (~ap->rbits & SC_RCV_BITS) {
844 s = 0;
845 for (i = 0; i < count; ++i) {
846 c = buf[i];
847 if (flags && flags[i] != 0)
848 continue;
849 s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
850 c = ((c >> 4) ^ c) & 0xf;
851 s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
852 }
853 ap->rbits |= s;
854 }
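	/* (How the parity probe works: c = ((c >> 4) ^ c) & 0xf folds
	 * the byte to 4 bits while preserving parity, and 0x6996 is a
	 * 16-bit lookup table whose bit i is the parity of i.  Purely
	 * illustrative example: 0x41 folds to 0x5, and bit 5 of 0x6996
	 * is 0, so 0x41 is recorded as even parity.) */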
855
856 while (count > 0) {
857 /* scan through and see how many chars we can do in bulk */
858 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
859 n = 1;
860 else
861 n = scan_ordinary(ap, buf, count);
862
863 f = 0;
864 if (flags && (ap->state & SC_TOSS) == 0) {
865 /* check the flags to see if any char had an error */
866 for (j = 0; j < n; ++j)
867 if ((f = flags[j]) != 0)
868 break;
869 }
870 if (f != 0) {
871 /* start tossing */
872 ap->state |= SC_TOSS;
873
874 } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
875 /* stuff the chars in the skb */
876 skb = ap->rpkt;
877 if (!skb) {
878 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
879 if (!skb)
880 goto nomem;
881 ap->rpkt = skb;
882 }
883 if (skb->len == 0) {
884 /* Try to get the payload 4-byte aligned.
885 * This should match the
886 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
887 * process_input_packet, but we do not have
888 * enough chars here to test buf[1] and buf[2].
889 */
890 if (buf[0] != PPP_ALLSTATIONS)
891 skb_reserve(skb, 2 + (buf[0] & 1));
892 }
893 if (n > skb_tailroom(skb)) {
894 /* packet overflowed MRU */
895 ap->state |= SC_TOSS;
896 } else {
897 sp = skb_put(skb, n);
898 memcpy(sp, buf, n);
899 if (ap->state & SC_ESCAPE) {
900 sp[0] ^= PPP_TRANS;
901 ap->state &= ~SC_ESCAPE;
902 }
903 }
904 }
905
906 if (n >= count)
907 break;
908
909 c = buf[n];
910 if (flags != NULL && flags[n] != 0) {
911 ap->state |= SC_TOSS;
912 } else if (c == PPP_FLAG) {
913 process_input_packet(ap);
914 } else if (c == PPP_ESCAPE) {
915 ap->state |= SC_ESCAPE;
916 } else if (I_IXON(ap->tty)) {
917 if (c == START_CHAR(ap->tty))
918 start_tty(ap->tty);
919 else if (c == STOP_CHAR(ap->tty))
920 stop_tty(ap->tty);
921 }
922 /* otherwise it's a char in the recv ACCM */
923 ++n;
924
925 buf += n;
926 if (flags)
927 flags += n;
928 count -= n;
929 }
930 return;
931
932 nomem:
933 printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
934 ap->state |= SC_TOSS;
935}
936
937/*
938 * We look at LCP frames going past so that we can notice
939 * and react to the LCP configure-ack from the peer.
940 * In the situation where the peer has been sent a configure-ack
941 * already, LCP is up as soon as the peer has sent its configure-ack,
942 * so the immediately following packet can be sent with the
943 * configured LCP options. This allows us to process the following
944 * packet correctly without pppd needing to respond quickly.
945 *
946 * We only respond to the received configure-ack if we have just
947 * sent a configure-request, and the configure-ack contains the
948 * same data (this is checked using a 16-bit crc of the data).
949 */
950#define CONFREQ 1 /* LCP code field values */
951#define CONFACK 2
952#define LCP_MRU 1 /* LCP option numbers */
953#define LCP_ASYNCMAP 2
954
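/*
 * Illustrative frame body as seen here (the id byte is hypothetical):
 * a configure-ack carrying MRU 1500 and asyncmap 0x000a0000 arrives,
 * once the two protocol bytes are skipped, as
 *   02 xx 00 0e  01 04 05 dc  02 06 00 0a 00 00
 *   code/id/len  MRU option   ASYNCMAP option
 * and the option walk below consumes the two (type, length, value)
 * triples in turn.
 */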
955static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
956 int len, int inbound)
957{
958 int dlen, fcs, i, code;
959 u32 val;
960
961 data += 2; /* skip protocol bytes */
962 len -= 2;
963 if (len < 4) /* 4 = code, ID, length */
964 return;
965 code = data[0];
966 if (code != CONFACK && code != CONFREQ)
967 return;
968 dlen = get_unaligned_be16(data + 2);
969 if (len < dlen)
970 return; /* packet got truncated or length is bogus */
971
972 if (code == (inbound? CONFACK: CONFREQ)) {
973 /*
974 * sent confreq or received confack:
975 * calculate the crc of the data from the ID field on.
976 */
977 fcs = PPP_INITFCS;
978 for (i = 1; i < dlen; ++i)
979 fcs = PPP_FCS(fcs, data[i]);
980
981 if (!inbound) {
982 /* outbound confreq - remember the crc for later */
983 ap->lcp_fcs = fcs;
984 return;
985 }
986
987 /* received confack, check the crc */
988 fcs ^= ap->lcp_fcs;
989 ap->lcp_fcs = -1;
990 if (fcs != 0)
991 return;
992 } else if (inbound)
993 return; /* not interested in received confreq */
994
995 /* process the options in the confack */
996 data += 4;
997 dlen -= 4;
998 /* data[0] is code, data[1] is length */
999 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
1000 switch (data[0]) {
1001 case LCP_MRU:
1002 val = get_unaligned_be16(data + 2);
1003 if (inbound)
1004 ap->mru = val;
1005 else
1006 ap->chan.mtu = val;
1007 break;
1008 case LCP_ASYNCMAP:
1009 val = get_unaligned_be32(data + 2);
1010 if (inbound)
1011 ap->raccm = val;
1012 else
1013 ap->xaccm[0] = val;
1014 break;
1015 }
1016 dlen -= data[1];
1017 data += data[1];
1018 }
1019}
1020
1021static void __exit ppp_async_cleanup(void)
1022{
1023 if (tty_unregister_ldisc(N_PPP) != 0)
1024 printk(KERN_ERR "failed to unregister PPP line discipline\n");
1025}
1026
1027module_init(ppp_async_init);
1028module_exit(ppp_async_cleanup);
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
new file mode 100644
index 000000000000..1dbdf82a6dfd
--- /dev/null
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -0,0 +1,653 @@
1/*
2 * ==FILEVERSION 980319==
3 *
4 * ppp_deflate.c - interface the zlib procedures for Deflate compression
5 * and decompression (as used by gzip) to the PPP code.
6 * This version is for use with Linux kernel 1.3.X.
7 *
8 * Copyright (c) 1994 The Australian National University.
9 * All rights reserved.
10 *
11 * Permission to use, copy, modify, and distribute this software and its
12 * documentation is hereby granted, provided that the above copyright
13 * notice appears in all copies. This software is provided without any
14 * warranty, express or implied. The Australian National University
15 * makes no representations about the suitability of this software for
16 * any purpose.
17 *
18 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
19 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
20 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
21 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
22 * OF SUCH DAMAGE.
23 *
24 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
25 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
27 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
28 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
29 * OR MODIFICATIONS.
30 *
31 * From: deflate.c,v 1.1 1996/01/18 03:17:48 paulus Exp
32 */
33
34#include <linux/module.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/string.h>
39
40#include <linux/ppp_defs.h>
41#include <linux/ppp-comp.h>
42
43#include <linux/zlib.h>
44#include <asm/unaligned.h>
45
46/*
47 * State for a Deflate (de)compressor.
48 */
49struct ppp_deflate_state {
50 int seqno;
51 int w_size;
52 int unit;
53 int mru;
54 int debug;
55 z_stream strm;
56 struct compstat stats;
57};
58
59#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
60
61static void *z_comp_alloc(unsigned char *options, int opt_len);
62static void *z_decomp_alloc(unsigned char *options, int opt_len);
63static void z_comp_free(void *state);
64static void z_decomp_free(void *state);
65static int z_comp_init(void *state, unsigned char *options,
66 int opt_len,
67 int unit, int hdrlen, int debug);
68static int z_decomp_init(void *state, unsigned char *options,
69 int opt_len,
70 int unit, int hdrlen, int mru, int debug);
71static int z_compress(void *state, unsigned char *rptr,
72 unsigned char *obuf,
73 int isize, int osize);
74static void z_incomp(void *state, unsigned char *ibuf, int icnt);
75static int z_decompress(void *state, unsigned char *ibuf,
76 int isize, unsigned char *obuf, int osize);
77static void z_comp_reset(void *state);
78static void z_decomp_reset(void *state);
79static void z_comp_stats(void *state, struct compstat *stats);
80
81/**
82 * z_comp_free - free the memory used by a compressor
83 * @arg: pointer to the private state for the compressor.
84 */
85static void z_comp_free(void *arg)
86{
87 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
88
89 if (state) {
90 zlib_deflateEnd(&state->strm);
91 vfree(state->strm.workspace);
92 kfree(state);
93 }
94}
95
96/**
97 * z_comp_alloc - allocate space for a compressor.
98 * @options: pointer to CCP option data
99 * @opt_len: length of the CCP option at @options.
100 *
101 * The @options pointer points to a buffer containing the
102 * CCP option data for the compression being negotiated. It is
103 * formatted according to RFC1979, and describes the window
104 * size that the peer is requesting that we use in compressing
105 * data to be sent to it.
106 *
107 * Returns the pointer to the private state for the compressor,
108 * or NULL if we could not allocate enough memory.
109 */
110static void *z_comp_alloc(unsigned char *options, int opt_len)
111{
112 struct ppp_deflate_state *state;
113 int w_size;
114
115 if (opt_len != CILEN_DEFLATE ||
116 (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
117 options[1] != CILEN_DEFLATE ||
118 DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
119 options[3] != DEFLATE_CHK_SEQUENCE)
120 return NULL;
121 w_size = DEFLATE_SIZE(options[2]);
122 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
123 return NULL;
124
125 state = kzalloc(sizeof(*state),
126 GFP_KERNEL);
127 if (state == NULL)
128 return NULL;
129
130 state->strm.next_in = NULL;
131 state->w_size = w_size;
132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
133 if (state->strm.workspace == NULL)
134 goto out_free;
135
136 if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
137 DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
138 != Z_OK)
139 goto out_free;
140 return (void *) state;
141
142out_free:
143 z_comp_free(state);
144 return NULL;
145}
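/*
 * For orientation (illustrative values, using the ppp-comp.h
 * encodings): a Deflate CCP option negotiating a 2^15-byte window is
 * the four bytes 1a 04 78 01 -- CI_DEFLATE, CILEN_DEFLATE,
 * DEFLATE_MAKE_OPT(15) = ((15 - 8) << 4) | DEFLATE_METHOD_VAL, and
 * DEFLATE_CHK_SEQUENCE.  The checks above accept exactly this shape
 * and recover w_size = 15 from the third byte.
 */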
146
147/**
148 * z_comp_init - initialize a previously-allocated compressor.
149 * @arg: pointer to the private state for the compressor
150 * @options: pointer to the CCP option data describing the
151 * compression that was negotiated with the peer
152 * @opt_len: length of the CCP option data at @options
153 * @unit: PPP unit number for diagnostic messages
154 * @hdrlen: ignored (present for backwards compatibility)
155 * @debug: debug flag; if non-zero, debug messages are printed.
156 *
157 * The CCP options described by @options must match the options
158 * specified when the compressor was allocated. The compressor
159 * history is reset. Returns 0 for failure (CCP options don't
160 * match) or 1 for success.
161 */
162static int z_comp_init(void *arg, unsigned char *options, int opt_len,
163 int unit, int hdrlen, int debug)
164{
165 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
166
167 if (opt_len < CILEN_DEFLATE ||
168 (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
169 options[1] != CILEN_DEFLATE ||
170 DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
171 DEFLATE_SIZE(options[2]) != state->w_size ||
172 options[3] != DEFLATE_CHK_SEQUENCE)
173 return 0;
174
175 state->seqno = 0;
176 state->unit = unit;
177 state->debug = debug;
178
179 zlib_deflateReset(&state->strm);
180
181 return 1;
182}
183
184/**
185 * z_comp_reset - reset a previously-allocated compressor.
186 * @arg: pointer to private state for the compressor.
187 *
188 * This clears the history for the compressor and makes it
189 * ready to start emitting a new compressed stream.
190 */
191static void z_comp_reset(void *arg)
192{
193 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
194
195 state->seqno = 0;
196 zlib_deflateReset(&state->strm);
197}
198
199/**
200 * z_compress - compress a PPP packet with Deflate compression.
201 * @arg: pointer to private state for the compressor
202 * @rptr: uncompressed packet (input)
203 * @obuf: compressed packet (output)
204 * @isize: size of uncompressed packet
205 * @osize: space available at @obuf
206 *
207 * Returns the length of the compressed packet, or 0 if the
208 * packet is incompressible.
209 */
210static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
211 int isize, int osize)
212{
213 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
214 int r, proto, off, olen, oavail;
215 unsigned char *wptr;
216
217 /*
218 * Check that the protocol is in the range we handle.
219 */
220 proto = PPP_PROTOCOL(rptr);
221 if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
222 return 0;
223
224 /* Don't generate compressed packets which are larger than
225 the uncompressed packet. */
226 if (osize > isize)
227 osize = isize;
228
229 wptr = obuf;
230
231 /*
232 * Copy over the PPP header and store the 2-byte sequence number.
233 */
234 wptr[0] = PPP_ADDRESS(rptr);
235 wptr[1] = PPP_CONTROL(rptr);
236 put_unaligned_be16(PPP_COMP, wptr + 2);
237 wptr += PPP_HDRLEN;
238 put_unaligned_be16(state->seqno, wptr);
239 wptr += DEFLATE_OVHD;
240 olen = PPP_HDRLEN + DEFLATE_OVHD;
241 state->strm.next_out = wptr;
242 state->strm.avail_out = oavail = osize - olen;
243 ++state->seqno;
244
245 off = (proto > 0xff) ? 2 : 3; /* skip 1st proto byte if 0 */
246 rptr += off;
247 state->strm.next_in = rptr;
248 state->strm.avail_in = (isize - off);
249
250 for (;;) {
251 r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
252 if (r != Z_OK) {
253 if (state->debug)
254 printk(KERN_ERR
255 "z_compress: deflate returned %d\n", r);
256 break;
257 }
258 if (state->strm.avail_out == 0) {
259 olen += oavail;
260 state->strm.next_out = NULL;
261 state->strm.avail_out = oavail = 1000000;
262 } else {
263 break; /* all done */
264 }
265 }
266 olen += oavail - state->strm.avail_out;
267
268 /*
269 * See if we managed to reduce the size of the packet.
270 */
271 if (olen < isize) {
272 state->stats.comp_bytes += olen;
273 state->stats.comp_packets++;
274 } else {
275 state->stats.inc_bytes += isize;
276 state->stats.inc_packets++;
277 olen = 0;
278 }
279 state->stats.unc_bytes += isize;
280 state->stats.unc_packets++;
281
282 return olen;
283}
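/*
 * Resulting wire format (sketch): the address/control bytes copied
 * from the input (normally ff 03), the PPP_COMP protocol number
 * 00 fd, the 16-bit sequence number, then the raw-deflate stream --
 * PPP_HDRLEN + DEFLATE_OVHD accounts for the six bytes ahead of the
 * compressed data.
 */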
284
285/**
286 * z_comp_stats - return compression statistics for a compressor
287 * or decompressor.
288 * @arg: pointer to private space for the (de)compressor
289 * @stats: pointer to a struct compstat to receive the result.
290 */
291static void z_comp_stats(void *arg, struct compstat *stats)
292{
293 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
294
295 *stats = state->stats;
296}
297
298/**
299 * z_decomp_free - Free the memory used by a decompressor.
300 * @arg: pointer to private space for the decompressor.
301 */
302static void z_decomp_free(void *arg)
303{
304 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
305
306 if (state) {
307 zlib_inflateEnd(&state->strm);
308 vfree(state->strm.workspace);
309 kfree(state);
310 }
311}
312
313/**
314 * z_decomp_alloc - allocate space for a decompressor.
315 * @options: pointer to CCP option data
316 * @opt_len: length of the CCP option at @options.
317 *
318 * The @options pointer points to a buffer containing the
319 * CCP option data for the compression being negotiated. It is
320 * formatted according to RFC1979, and describes the window
321 * size that we are requesting the peer to use in compressing
322 * data to be sent to us.
323 *
324 * Returns the pointer to the private state for the decompressor,
325 * or NULL if we could not allocate enough memory.
326 */
327static void *z_decomp_alloc(unsigned char *options, int opt_len)
328{
329 struct ppp_deflate_state *state;
330 int w_size;
331
332 if (opt_len != CILEN_DEFLATE ||
333 (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
334 options[1] != CILEN_DEFLATE ||
335 DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
336 options[3] != DEFLATE_CHK_SEQUENCE)
337 return NULL;
338 w_size = DEFLATE_SIZE(options[2]);
339 if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
340 return NULL;
341
342 state = kzalloc(sizeof(*state), GFP_KERNEL);
343 if (state == NULL)
344 return NULL;
345
346 state->w_size = w_size;
347 state->strm.next_out = NULL;
348 state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
349 if (state->strm.workspace == NULL)
350 goto out_free;
351
352 if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
353 goto out_free;
354 return (void *) state;
355
356out_free:
357 z_decomp_free(state);
358 return NULL;
359}
360
361/**
362 * z_decomp_init - initialize a previously-allocated decompressor.
363 * @arg: pointer to the private state for the decompressor
364 * @options: pointer to the CCP option data describing the
365 * compression that was negotiated with the peer
366 * @opt_len: length of the CCP option data at @options
367 * @unit: PPP unit number for diagnostic messages
368 * @hdrlen: ignored (present for backwards compatibility)
369 * @mru: maximum length of decompressed packets
370 * @debug: debug flag; if non-zero, debug messages are printed.
371 *
372 * The CCP options described by @options must match the options
373 * specified when the decompressor was allocated. The decompressor
374 * history is reset. Returns 0 for failure (CCP options don't
375 * match) or 1 for success.
376 */
377static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
378 int unit, int hdrlen, int mru, int debug)
379{
380 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
381
382 if (opt_len < CILEN_DEFLATE ||
383 (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
384 options[1] != CILEN_DEFLATE ||
385 DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
386 DEFLATE_SIZE(options[2]) != state->w_size ||
387 options[3] != DEFLATE_CHK_SEQUENCE)
388 return 0;
389
390 state->seqno = 0;
391 state->unit = unit;
392 state->debug = debug;
393 state->mru = mru;
394
395 zlib_inflateReset(&state->strm);
396
397 return 1;
398}
399
400/**
401 * z_decomp_reset - reset a previously-allocated decompressor.
402 * @arg: pointer to private state for the decompressor.
403 *
404 * This clears the history for the decompressor and makes it
405 * ready to receive a new compressed stream.
406 */
407static void z_decomp_reset(void *arg)
408{
409 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
410
411 state->seqno = 0;
412 zlib_inflateReset(&state->strm);
413}
414
415/**
416 * z_decompress - decompress a Deflate-compressed packet.
417 * @arg: pointer to private state for the decompressor
418 * @ibuf: pointer to input (compressed) packet data
419 * @isize: length of input packet
420 * @obuf: pointer to space for output (decompressed) packet
421 * @osize: amount of space available at @obuf
422 *
423 * Because of patent problems, we return DECOMP_ERROR for errors
424 * found by inspecting the input data and for system problems, but
425 * DECOMP_FATALERROR for any errors which could possibly be said
426 * to be detected "after" decompression. For DECOMP_ERROR,
427 * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
428 * infringing a patent of Motorola's if we do, so we take CCP down
429 * instead.
430 *
431 * Given that the frame has the correct sequence number and a good FCS,
432 * errors such as invalid codes in the input most likely indicate a
433 * bug, so we return DECOMP_FATALERROR for them in order to turn off
434 * compression, even though they are detected by inspecting the input.
435 */
436static int z_decompress(void *arg, unsigned char *ibuf, int isize,
437 unsigned char *obuf, int osize)
438{
439 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
440 int olen, seq, r;
441 int decode_proto, overflow;
442 unsigned char overflow_buf[1];
443
444 if (isize <= PPP_HDRLEN + DEFLATE_OVHD) {
445 if (state->debug)
446 printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n",
447 state->unit, isize);
448 return DECOMP_ERROR;
449 }
450
451 /* Check the sequence number. */
452 seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
453 if (seq != (state->seqno & 0xffff)) {
454 if (state->debug)
455 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
456 state->unit, seq, state->seqno & 0xffff);
457 return DECOMP_ERROR;
458 }
459 ++state->seqno;
460
461 /*
462 * Fill in the first part of the PPP header. The protocol field
463 * comes from the decompressed data.
464 */
465 obuf[0] = PPP_ADDRESS(ibuf);
466 obuf[1] = PPP_CONTROL(ibuf);
467 obuf[2] = 0;
468
469 /*
470 * Set up to call inflate. We set avail_out to 1 initially so we can
471 * look at the first byte of the output and decide whether we have
472 * a 1-byte or 2-byte protocol field.
473 */
474 state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD;
475 state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD);
476 state->strm.next_out = obuf + 3;
477 state->strm.avail_out = 1;
478 decode_proto = 1;
479 overflow = 0;
480
481 /*
482 * Call inflate, supplying more input or output as needed.
483 */
484 for (;;) {
485 r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
486 if (r != Z_OK) {
487 if (state->debug)
488 printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
489 state->unit, r, (state->strm.msg? state->strm.msg: ""));
490 return DECOMP_FATALERROR;
491 }
492 if (state->strm.avail_out != 0)
493 break; /* all done */
494 if (decode_proto) {
495 state->strm.avail_out = osize - PPP_HDRLEN;
496 if ((obuf[3] & 1) == 0) {
497 /* 2-byte protocol field */
498 obuf[2] = obuf[3];
499 --state->strm.next_out;
500 ++state->strm.avail_out;
501 }
502 decode_proto = 0;
503 } else if (!overflow) {
504 /*
505 * We've filled up the output buffer; the only way to
506 * find out whether inflate has any more characters
507 * left is to give it another byte of output space.
508 */
509 state->strm.next_out = overflow_buf;
510 state->strm.avail_out = 1;
511 overflow = 1;
512 } else {
513 if (state->debug)
514 printk(KERN_DEBUG "z_decompress%d: ran out of mru\n",
515 state->unit);
516 return DECOMP_FATALERROR;
517 }
518 }
519
520 if (decode_proto) {
521 if (state->debug)
522 printk(KERN_DEBUG "z_decompress%d: didn't get proto\n",
523 state->unit);
524 return DECOMP_ERROR;
525 }
526
527 olen = osize + overflow - state->strm.avail_out;
528 state->stats.unc_bytes += olen;
529 state->stats.unc_packets++;
530 state->stats.comp_bytes += isize;
531 state->stats.comp_packets++;
532
533 return olen;
534}
535
536/**
537 * z_incomp - add incompressible input data to the history.
538 * @arg: pointer to private state for the decompressor
539 * @ibuf: pointer to input packet data
540 * @icnt: length of input data.
541 */
542static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
543{
544 struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
545 int proto, r;
546
547 /*
548 * Check that the protocol is one we handle.
549 */
550 proto = PPP_PROTOCOL(ibuf);
551 if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
552 return;
553
554 ++state->seqno;
555
556 /*
557 * We start at either the 1st or 2nd byte of the protocol field,
558 * depending on whether the protocol value is compressible.
559 */
560 state->strm.next_in = ibuf + 3;
561 state->strm.avail_in = icnt - 3;
562 if (proto > 0xff) {
563 --state->strm.next_in;
564 ++state->strm.avail_in;
565 }
566
567 r = zlib_inflateIncomp(&state->strm);
568 if (r != Z_OK) {
569 /* gak! */
570 if (state->debug) {
571 printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
572 state->unit, r, (state->strm.msg? state->strm.msg: ""));
573 }
574 return;
575 }
576
577 /*
578 * Update stats.
579 */
580 state->stats.inc_bytes += icnt;
581 state->stats.inc_packets++;
582 state->stats.unc_bytes += icnt;
583 state->stats.unc_packets++;
584}
585
586/*************************************************************
587 * Module interface table
588 *************************************************************/
589
590/* These are in ppp_generic.c */
591extern int ppp_register_compressor (struct compressor *cp);
592extern void ppp_unregister_compressor (struct compressor *cp);
593
594/*
595 * Procedures exported to if_ppp.c.
596 */
597static struct compressor ppp_deflate = {
598 .compress_proto = CI_DEFLATE,
599 .comp_alloc = z_comp_alloc,
600 .comp_free = z_comp_free,
601 .comp_init = z_comp_init,
602 .comp_reset = z_comp_reset,
603 .compress = z_compress,
604 .comp_stat = z_comp_stats,
605 .decomp_alloc = z_decomp_alloc,
606 .decomp_free = z_decomp_free,
607 .decomp_init = z_decomp_init,
608 .decomp_reset = z_decomp_reset,
609 .decompress = z_decompress,
610 .incomp = z_incomp,
611 .decomp_stat = z_comp_stats,
612 .owner = THIS_MODULE
613};
614
615static struct compressor ppp_deflate_draft = {
616 .compress_proto = CI_DEFLATE_DRAFT,
617 .comp_alloc = z_comp_alloc,
618 .comp_free = z_comp_free,
619 .comp_init = z_comp_init,
620 .comp_reset = z_comp_reset,
621 .compress = z_compress,
622 .comp_stat = z_comp_stats,
623 .decomp_alloc = z_decomp_alloc,
624 .decomp_free = z_decomp_free,
625 .decomp_init = z_decomp_init,
626 .decomp_reset = z_decomp_reset,
627 .decompress = z_decompress,
628 .incomp = z_incomp,
629 .decomp_stat = z_comp_stats,
630 .owner = THIS_MODULE
631};
632
633static int __init deflate_init(void)
634{
635 int answer = ppp_register_compressor(&ppp_deflate);
636 if (answer == 0)
637 printk(KERN_INFO
638 "PPP Deflate Compression module registered\n");
639 ppp_register_compressor(&ppp_deflate_draft);
640 return answer;
641}
642
643static void __exit deflate_cleanup(void)
644{
645 ppp_unregister_compressor(&ppp_deflate);
646 ppp_unregister_compressor(&ppp_deflate_draft);
647}
648
649module_init(deflate_init);
650module_exit(deflate_cleanup);
651MODULE_LICENSE("Dual BSD/GPL");
652MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
653MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
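/*
 * (The MODULE_ALIAS lines above let the generic layer demand-load
 * this module: when CCP negotiates an algorithm that has no
 * registered compressor, ppp_generic falls back to
 * request_module("ppp-compress-%d", type), which these aliases
 * satisfy for CI_DEFLATE and CI_DEFLATE_DRAFT.)
 */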
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
new file mode 100644
index 000000000000..10e5d985afa3
--- /dev/null
+++ b/drivers/net/ppp/ppp_generic.c
@@ -0,0 +1,2954 @@
1/*
2 * Generic PPP layer for Linux.
3 *
4 * Copyright 1999-2002 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * The generic PPP layer handles the PPP network interfaces, the
12 * /dev/ppp device, packet and VJ compression, and multilink.
13 * It talks to PPP `channels' via the interface defined in
14 * include/linux/ppp_channel.h. Channels provide the basic means for
15 * sending and receiving PPP frames on some kind of communications
16 * channel.
17 *
18 * Part of the code in this driver was inspired by the old async-only
19 * PPP driver, written by Michael Callahan and Al Longyear, and
20 * subsequently hacked by Paul Mackerras.
21 *
22 * ==FILEVERSION 20041108==
23 */
24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/kmod.h>
28#include <linux/init.h>
29#include <linux/list.h>
30#include <linux/idr.h>
31#include <linux/netdevice.h>
32#include <linux/poll.h>
33#include <linux/ppp_defs.h>
34#include <linux/filter.h>
35#include <linux/if_ppp.h>
36#include <linux/ppp_channel.h>
37#include <linux/ppp-comp.h>
38#include <linux/skbuff.h>
39#include <linux/rtnetlink.h>
40#include <linux/if_arp.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/spinlock.h>
44#include <linux/rwsem.h>
45#include <linux/stddef.h>
46#include <linux/device.h>
47#include <linux/mutex.h>
48#include <linux/slab.h>
49#include <asm/unaligned.h>
50#include <net/slhc_vj.h>
51#include <linux/atomic.h>
52
53#include <linux/nsproxy.h>
54#include <net/net_namespace.h>
55#include <net/netns/generic.h>
56
57#define PPP_VERSION "2.4.2"
58
59/*
60 * Network protocols we support.
61 */
62#define NP_IP 0 /* Internet Protocol V4 */
63#define NP_IPV6 1 /* Internet Protocol V6 */
64#define NP_IPX 2 /* IPX protocol */
65#define NP_AT 3 /* Appletalk protocol */
66#define NP_MPLS_UC 4 /* MPLS unicast */
67#define NP_MPLS_MC 5 /* MPLS multicast */
68#define NUM_NP 6 /* Number of NPs. */
69
70#define MPHDRLEN 6 /* multilink protocol header length */
71#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
72
73/*
74 * An instance of /dev/ppp can be associated with either a ppp
75 * interface unit or a ppp channel. In both cases, file->private_data
76 * points to one of these.
77 */
78struct ppp_file {
79 enum {
80 INTERFACE=1, CHANNEL
81 } kind;
82 struct sk_buff_head xq; /* pppd transmit queue */
83 struct sk_buff_head rq; /* receive queue for pppd */
84 wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
85 atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
86 int hdrlen; /* space to leave for headers */
87 int index; /* interface unit / channel number */
88 int dead; /* unit/channel has been shut down */
89};
90
91#define PF_TO_X(pf, X) container_of(pf, X, file)
92
93#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
95
96/*
97 * Data structure describing one ppp unit.
98 * A ppp unit corresponds to a ppp network interface device
99 * and represents a multilink bundle.
100 * It can have 0 or more ppp channels connected to it.
101 */
102struct ppp {
103 struct ppp_file file; /* stuff for read/write/poll 0 */
104 struct file *owner; /* file that owns this unit 48 */
105 struct list_head channels; /* list of attached channels 4c */
106 int n_channels; /* how many channels are attached 54 */
107 spinlock_t rlock; /* lock for receive side 58 */
108 spinlock_t wlock; /* lock for transmit side 5c */
109 int mru; /* max receive unit 60 */
110 unsigned int flags; /* control bits 64 */
111 unsigned int xstate; /* transmit state bits 68 */
112 unsigned int rstate; /* receive state bits 6c */
113 int debug; /* debug flags 70 */
114 struct slcompress *vj; /* state for VJ header compression */
115 enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
116 struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
117 struct compressor *xcomp; /* transmit packet compressor 8c */
118 void *xc_state; /* its internal state 90 */
119 struct compressor *rcomp; /* receive decompressor 94 */
120 void *rc_state; /* its internal state 98 */
121 unsigned long last_xmit; /* jiffies when last pkt sent 9c */
122 unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
123 struct net_device *dev; /* network interface device a4 */
124 int closing; /* is device closing down? a8 */
125#ifdef CONFIG_PPP_MULTILINK
126 int nxchan; /* next channel to send something on */
127 u32 nxseq; /* next sequence number to send */
128 int mrru; /* MP: max reconst. receive unit */
129 u32 nextseq; /* MP: seq no of next packet */
130 u32 minseq; /* MP: min of most recent seqnos */
131 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
132#endif /* CONFIG_PPP_MULTILINK */
133#ifdef CONFIG_PPP_FILTER
134 struct sock_filter *pass_filter; /* filter for packets to pass */
135 struct sock_filter *active_filter;/* filter for pkts to reset idle */
136 unsigned pass_len, active_len;
137#endif /* CONFIG_PPP_FILTER */
138 struct net *ppp_net; /* the net we belong to */
139};
140
141/*
142 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
143 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
144 * SC_MUST_COMP
145 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
146 * Bits in xstate: SC_COMP_RUN
147 */
148#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
149 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
150 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
151
152/*
153 * Private data structure for each channel.
154 * This includes the data structure used for multilink.
155 */
156struct channel {
157 struct ppp_file file; /* stuff for read/write/poll */
158 struct list_head list; /* link in all/new_channels list */
159 struct ppp_channel *chan; /* public channel data structure */
160 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
161 spinlock_t downl; /* protects `chan', file.xq dequeue */
162 struct ppp *ppp; /* ppp unit we're connected to */
163 struct net *chan_net; /* the net channel belongs to */
164 struct list_head clist; /* link in list of channels per unit */
165 rwlock_t upl; /* protects `ppp' */
166#ifdef CONFIG_PPP_MULTILINK
167 u8 avail; /* flag used in multilink stuff */
168 u8 had_frag; /* >= 1 fragments have been sent */
169 u32 lastseq; /* MP: last sequence # received */
170 int speed; /* speed of the corresponding ppp channel*/
171#endif /* CONFIG_PPP_MULTILINK */
172};
173
174/*
175 * SMP locking issues:
176 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
177 * list and the ppp.n_channels field, you need to take both locks
178 * before you modify them.
179 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
180 * channel.downl.
181 */
182
183static DEFINE_MUTEX(ppp_mutex);
184static atomic_t ppp_unit_count = ATOMIC_INIT(0);
185static atomic_t channel_count = ATOMIC_INIT(0);
186
187/* per-net private data for this module */
188static int ppp_net_id __read_mostly;
189struct ppp_net {
190 /* units to ppp mapping */
191 struct idr units_idr;
192
193 /*
194 * all_ppp_mutex protects the units_idr mapping.
195 * It also ensures that finding a ppp unit in the units_idr
196 * map and updating its file.refcnt field is atomic.
197 */
198 struct mutex all_ppp_mutex;
199
200 /* channels */
201 struct list_head all_channels;
202 struct list_head new_channels;
203 int last_channel_index;
204
205 /*
206 * all_channels_lock protects all_channels and
207 * last_channel_index, and the atomicity of finding
208 * a channel and updating its file.refcnt field.
209 */
210 spinlock_t all_channels_lock;
211};
212
213/* Get the PPP protocol number from a skb */
214#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
215
216/* We limit the length of ppp->file.rq to this (arbitrary) value */
217#define PPP_MAX_RQLEN 32
218
219/*
220 * Maximum number of multilink fragments queued up.
221 * This has to be large enough to cope with the maximum latency of
222 * the slowest channel relative to the others. Strictly it should
223 * depend on the number of channels and their characteristics.
224 */
225#define PPP_MP_MAX_QLEN 128
226
227/* Multilink header bits. */
228#define B 0x80 /* this fragment begins a packet */
229#define E 0x40 /* this fragment ends a packet */
230
231/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
232#define seq_before(a, b) ((s32)((a) - (b)) < 0)
233#define seq_after(a, b) ((s32)((a) - (b)) > 0)
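/* Wraparound example (illustrative): seq_before(0xfffffff0, 0x10)
 * is true because (s32)(0xfffffff0 - 0x10) == (s32)0xffffffe0 < 0,
 * so the comparison stays correct across 32-bit rollover as long as
 * the two sequence numbers are within 2^31 of each other. */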
234
235/* Prototypes. */
236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
237 struct file *file, unsigned int cmd, unsigned long arg);
238static void ppp_xmit_process(struct ppp *ppp);
239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
240static void ppp_push(struct ppp *ppp);
241static void ppp_channel_push(struct channel *pch);
242static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
243 struct channel *pch);
244static void ppp_receive_error(struct ppp *ppp);
245static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
246static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
247 struct sk_buff *skb);
248#ifdef CONFIG_PPP_MULTILINK
249static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
250 struct channel *pch);
251static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
252static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
253static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
254#endif /* CONFIG_PPP_MULTILINK */
255static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
256static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
257static void ppp_ccp_closed(struct ppp *ppp);
258static struct compressor *find_compressor(int type);
259static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
260static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
261static void init_ppp_file(struct ppp_file *pf, int kind);
262static void ppp_shutdown_interface(struct ppp *ppp);
263static void ppp_destroy_interface(struct ppp *ppp);
264static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
265static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
266static int ppp_connect_channel(struct channel *pch, int unit);
267static int ppp_disconnect_channel(struct channel *pch);
268static void ppp_destroy_channel(struct channel *pch);
269static int unit_get(struct idr *p, void *ptr);
270static int unit_set(struct idr *p, void *ptr, int n);
271static void unit_put(struct idr *p, int n);
272static void *unit_find(struct idr *p, int n);
273
274static struct class *ppp_class;
275
276/* per net-namespace data */
277static inline struct ppp_net *ppp_pernet(struct net *net)
278{
279 BUG_ON(!net);
280
281 return net_generic(net, ppp_net_id);
282}
283
284/* Translates a PPP protocol number to a NP index (NP == network protocol) */
285static inline int proto_to_npindex(int proto)
286{
287 switch (proto) {
288 case PPP_IP:
289 return NP_IP;
290 case PPP_IPV6:
291 return NP_IPV6;
292 case PPP_IPX:
293 return NP_IPX;
294 case PPP_AT:
295 return NP_AT;
296 case PPP_MPLS_UC:
297 return NP_MPLS_UC;
298 case PPP_MPLS_MC:
299 return NP_MPLS_MC;
300 }
301 return -EINVAL;
302}
303
304/* Translates an NP index into a PPP protocol number */
305static const int npindex_to_proto[NUM_NP] = {
306 PPP_IP,
307 PPP_IPV6,
308 PPP_IPX,
309 PPP_AT,
310 PPP_MPLS_UC,
311 PPP_MPLS_MC,
312};
313
314/* Translates an ethertype into an NP index */
315static inline int ethertype_to_npindex(int ethertype)
316{
317 switch (ethertype) {
318 case ETH_P_IP:
319 return NP_IP;
320 case ETH_P_IPV6:
321 return NP_IPV6;
322 case ETH_P_IPX:
323 return NP_IPX;
324 case ETH_P_PPPTALK:
325 case ETH_P_ATALK:
326 return NP_AT;
327 case ETH_P_MPLS_UC:
328 return NP_MPLS_UC;
329 case ETH_P_MPLS_MC:
330 return NP_MPLS_MC;
331 }
332 return -1;
333}
334
335/* Translates an NP index into an ethertype */
336static const int npindex_to_ethertype[NUM_NP] = {
337 ETH_P_IP,
338 ETH_P_IPV6,
339 ETH_P_IPX,
340 ETH_P_PPPTALK,
341 ETH_P_MPLS_UC,
342 ETH_P_MPLS_MC,
343};
344
345/*
346 * Locking shorthand.
347 */
348#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
349#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
350#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
351#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
352#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
353 ppp_recv_lock(ppp); } while (0)
354#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
355 ppp_xmit_unlock(ppp); } while (0)
356
357/*
358 * /dev/ppp device routines.
359 * The /dev/ppp device is used by pppd to control the ppp unit.
360 * It supports the read, write, ioctl and poll functions.
361 * Open instances of /dev/ppp can be in one of three states:
362 * unattached, attached to a ppp unit, or attached to a ppp channel.
363 */
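/*
 * (Typical pppd sequence, for orientation: open /dev/ppp, then
 * PPPIOCNEWUNIT or PPPIOCATTACH to bind the fd to a unit, or
 * PPPIOCATTCHAN to bind it to a channel; thereafter read() and
 * write() carry whole PPP frames for that unit or channel.)
 */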
364static int ppp_open(struct inode *inode, struct file *file)
365{
366 /*
367 * This could (should?) be enforced by the permissions on /dev/ppp.
368 */
369 if (!capable(CAP_NET_ADMIN))
370 return -EPERM;
371 return 0;
372}
373
374static int ppp_release(struct inode *unused, struct file *file)
375{
376 struct ppp_file *pf = file->private_data;
377 struct ppp *ppp;
378
379 if (pf) {
380 file->private_data = NULL;
381 if (pf->kind == INTERFACE) {
382 ppp = PF_TO_PPP(pf);
383 if (file == ppp->owner)
384 ppp_shutdown_interface(ppp);
385 }
386 if (atomic_dec_and_test(&pf->refcnt)) {
387 switch (pf->kind) {
388 case INTERFACE:
389 ppp_destroy_interface(PF_TO_PPP(pf));
390 break;
391 case CHANNEL:
392 ppp_destroy_channel(PF_TO_CHANNEL(pf));
393 break;
394 }
395 }
396 }
397 return 0;
398}
399
400static ssize_t ppp_read(struct file *file, char __user *buf,
401 size_t count, loff_t *ppos)
402{
403 struct ppp_file *pf = file->private_data;
404 DECLARE_WAITQUEUE(wait, current);
405 ssize_t ret;
406 struct sk_buff *skb = NULL;
407 struct iovec iov;
408
409 ret = count;
410
411 if (!pf)
412 return -ENXIO;
413 add_wait_queue(&pf->rwait, &wait);
414 for (;;) {
415 set_current_state(TASK_INTERRUPTIBLE);
416 skb = skb_dequeue(&pf->rq);
417 if (skb)
418 break;
419 ret = 0;
420 if (pf->dead)
421 break;
422 if (pf->kind == INTERFACE) {
423 /*
424 * Return 0 (EOF) on an interface that has no
425 * channels connected, unless it is looping
426 * network traffic (demand mode).
427 */
428 struct ppp *ppp = PF_TO_PPP(pf);
429 if (ppp->n_channels == 0 &&
430 (ppp->flags & SC_LOOP_TRAFFIC) == 0)
431 break;
432 }
433 ret = -EAGAIN;
434 if (file->f_flags & O_NONBLOCK)
435 break;
436 ret = -ERESTARTSYS;
437 if (signal_pending(current))
438 break;
439 schedule();
440 }
441 set_current_state(TASK_RUNNING);
442 remove_wait_queue(&pf->rwait, &wait);
443
444 if (!skb)
445 goto out;
446
447 ret = -EOVERFLOW;
448 if (skb->len > count)
449 goto outf;
450 ret = -EFAULT;
451 iov.iov_base = buf;
452 iov.iov_len = count;
453 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
454 goto outf;
455 ret = skb->len;
456
457 outf:
458 kfree_skb(skb);
459 out:
460 return ret;
461}
462
463static ssize_t ppp_write(struct file *file, const char __user *buf,
464 size_t count, loff_t *ppos)
465{
466 struct ppp_file *pf = file->private_data;
467 struct sk_buff *skb;
468 ssize_t ret;
469
470 if (!pf)
471 return -ENXIO;
472 ret = -ENOMEM;
473 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
474 if (!skb)
475 goto out;
476 skb_reserve(skb, pf->hdrlen);
477 ret = -EFAULT;
478 if (copy_from_user(skb_put(skb, count), buf, count)) {
479 kfree_skb(skb);
480 goto out;
481 }
482
483 skb_queue_tail(&pf->xq, skb);
484
485 switch (pf->kind) {
486 case INTERFACE:
487 ppp_xmit_process(PF_TO_PPP(pf));
488 break;
489 case CHANNEL:
490 ppp_channel_push(PF_TO_CHANNEL(pf));
491 break;
492 }
493
494 ret = count;
495
496 out:
497 return ret;
498}
499
500/* No kernel lock - fine */
501static unsigned int ppp_poll(struct file *file, poll_table *wait)
502{
503 struct ppp_file *pf = file->private_data;
504 unsigned int mask;
505
506 if (!pf)
507 return 0;
508 poll_wait(file, &pf->rwait, wait);
509 mask = POLLOUT | POLLWRNORM;
510 if (skb_peek(&pf->rq))
511 mask |= POLLIN | POLLRDNORM;
512 if (pf->dead)
513 mask |= POLLHUP;
514 else if (pf->kind == INTERFACE) {
515 /* see comment in ppp_read */
516 struct ppp *ppp = PF_TO_PPP(pf);
517 if (ppp->n_channels == 0 &&
518 (ppp->flags & SC_LOOP_TRAFFIC) == 0)
519 mask |= POLLIN | POLLRDNORM;
520 }
521
522 return mask;
523}
524
525#ifdef CONFIG_PPP_FILTER
526static int get_filter(void __user *arg, struct sock_filter **p)
527{
528 struct sock_fprog uprog;
529 struct sock_filter *code = NULL;
530 int len, err;
531
532 if (copy_from_user(&uprog, arg, sizeof(uprog)))
533 return -EFAULT;
534
535 if (!uprog.len) {
536 *p = NULL;
537 return 0;
538 }
539
540 len = uprog.len * sizeof(struct sock_filter);
541 code = memdup_user(uprog.filter, len);
542 if (IS_ERR(code))
543 return PTR_ERR(code);
544
545 err = sk_chk_filter(code, uprog.len);
546 if (err) {
547 kfree(code);
548 return err;
549 }
550
551 *p = code;
552 return uprog.len;
553}
554#endif /* CONFIG_PPP_FILTER */
555
556static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
557{
558 struct ppp_file *pf = file->private_data;
559 struct ppp *ppp;
560 int err = -EFAULT, val, val2, i;
561 struct ppp_idle idle;
562 struct npioctl npi;
563 int unit, cflags;
564 struct slcompress *vj;
565 void __user *argp = (void __user *)arg;
566 int __user *p = argp;
567
568 if (!pf)
569 return ppp_unattached_ioctl(current->nsproxy->net_ns,
570 pf, file, cmd, arg);
571
572 if (cmd == PPPIOCDETACH) {
573 /*
574 * We have to be careful here... if the file descriptor
575 * has been dup'd, we could have another process in the
576 * middle of a poll using the same file *, so we had
577 * better not free the interface data structures -
578 * instead we fail the ioctl. Even in this case, we
579 * shut down the interface if we are the owner of it.
580 * Actually, we should get rid of PPPIOCDETACH, userland
581 * (i.e. pppd) could achieve the same effect by closing
582 * this fd and reopening /dev/ppp.
583 */
584 err = -EINVAL;
585 mutex_lock(&ppp_mutex);
586 if (pf->kind == INTERFACE) {
587 ppp = PF_TO_PPP(pf);
588 if (file == ppp->owner)
589 ppp_shutdown_interface(ppp);
590 }
591 if (atomic_long_read(&file->f_count) <= 2) {
592 ppp_release(NULL, file);
593 err = 0;
594 } else
595 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
596 atomic_long_read(&file->f_count));
597 mutex_unlock(&ppp_mutex);
598 return err;
599 }
600
601 if (pf->kind == CHANNEL) {
602 struct channel *pch;
603 struct ppp_channel *chan;
604
605 mutex_lock(&ppp_mutex);
606 pch = PF_TO_CHANNEL(pf);
607
608 switch (cmd) {
609 case PPPIOCCONNECT:
610 if (get_user(unit, p))
611 break;
612 err = ppp_connect_channel(pch, unit);
613 break;
614
615 case PPPIOCDISCONN:
616 err = ppp_disconnect_channel(pch);
617 break;
618
619 default:
620 down_read(&pch->chan_sem);
621 chan = pch->chan;
622 err = -ENOTTY;
623 if (chan && chan->ops->ioctl)
624 err = chan->ops->ioctl(chan, cmd, arg);
625 up_read(&pch->chan_sem);
626 }
627 mutex_unlock(&ppp_mutex);
628 return err;
629 }
630
631 if (pf->kind != INTERFACE) {
632 /* can't happen */
633 pr_err("PPP: not interface or channel??\n");
634 return -EINVAL;
635 }
636
637 mutex_lock(&ppp_mutex);
638 ppp = PF_TO_PPP(pf);
639 switch (cmd) {
640 case PPPIOCSMRU:
641 if (get_user(val, p))
642 break;
643 ppp->mru = val;
644 err = 0;
645 break;
646
647 case PPPIOCSFLAGS:
648 if (get_user(val, p))
649 break;
650 ppp_lock(ppp);
651 cflags = ppp->flags & ~val;
652 ppp->flags = val & SC_FLAG_BITS;
653 ppp_unlock(ppp);
654 if (cflags & SC_CCP_OPEN)
655 ppp_ccp_closed(ppp);
656 err = 0;
657 break;
658
659 case PPPIOCGFLAGS:
660 val = ppp->flags | ppp->xstate | ppp->rstate;
661 if (put_user(val, p))
662 break;
663 err = 0;
664 break;
665
666 case PPPIOCSCOMPRESS:
667 err = ppp_set_compress(ppp, arg);
668 break;
669
670 case PPPIOCGUNIT:
671 if (put_user(ppp->file.index, p))
672 break;
673 err = 0;
674 break;
675
676 case PPPIOCSDEBUG:
677 if (get_user(val, p))
678 break;
679 ppp->debug = val;
680 err = 0;
681 break;
682
683 case PPPIOCGDEBUG:
684 if (put_user(ppp->debug, p))
685 break;
686 err = 0;
687 break;
688
689 case PPPIOCGIDLE:
690 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
691 idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
692 if (copy_to_user(argp, &idle, sizeof(idle)))
693 break;
694 err = 0;
695 break;
696
697 case PPPIOCSMAXCID:
698 if (get_user(val, p))
699 break;
700 val2 = 15;
701 if ((val >> 16) != 0) {
702 val2 = val >> 16;
703 val &= 0xffff;
704 }
705 vj = slhc_init(val2+1, val+1);
706 if (!vj) {
707 netdev_err(ppp->dev,
708 "PPP: no memory (VJ compressor)\n");
709 err = -ENOMEM;
710 break;
711 }
712 ppp_lock(ppp);
713 if (ppp->vj)
714 slhc_free(ppp->vj);
715 ppp->vj = vj;
716 ppp_unlock(ppp);
717 err = 0;
718 break;
719
720 case PPPIOCGNPMODE:
721 case PPPIOCSNPMODE:
722 if (copy_from_user(&npi, argp, sizeof(npi)))
723 break;
724 err = proto_to_npindex(npi.protocol);
725 if (err < 0)
726 break;
727 i = err;
728 if (cmd == PPPIOCGNPMODE) {
729 err = -EFAULT;
730 npi.mode = ppp->npmode[i];
731 if (copy_to_user(argp, &npi, sizeof(npi)))
732 break;
733 } else {
734 ppp->npmode[i] = npi.mode;
735 /* we may be able to transmit more packets now (??) */
736 netif_wake_queue(ppp->dev);
737 }
738 err = 0;
739 break;
740
741#ifdef CONFIG_PPP_FILTER
742 case PPPIOCSPASS:
743 {
744 struct sock_filter *code;
745 err = get_filter(argp, &code);
746 if (err >= 0) {
747 ppp_lock(ppp);
748 kfree(ppp->pass_filter);
749 ppp->pass_filter = code;
750 ppp->pass_len = err;
751 ppp_unlock(ppp);
752 err = 0;
753 }
754 break;
755 }
756 case PPPIOCSACTIVE:
757 {
758 struct sock_filter *code;
759 err = get_filter(argp, &code);
760 if (err >= 0) {
761 ppp_lock(ppp);
762 kfree(ppp->active_filter);
763 ppp->active_filter = code;
764 ppp->active_len = err;
765 ppp_unlock(ppp);
766 err = 0;
767 }
768 break;
769 }
770#endif /* CONFIG_PPP_FILTER */
771
772#ifdef CONFIG_PPP_MULTILINK
773 case PPPIOCSMRRU:
774 if (get_user(val, p))
775 break;
776 ppp_recv_lock(ppp);
777 ppp->mrru = val;
778 ppp_recv_unlock(ppp);
779 err = 0;
780 break;
781#endif /* CONFIG_PPP_MULTILINK */
782
783 default:
784 err = -ENOTTY;
785 }
786 mutex_unlock(&ppp_mutex);
787 return err;
788}
789
790static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
791 struct file *file, unsigned int cmd, unsigned long arg)
792{
793 int unit, err = -EFAULT;
794 struct ppp *ppp;
795 struct channel *chan;
796 struct ppp_net *pn;
797 int __user *p = (int __user *)arg;
798
799 mutex_lock(&ppp_mutex);
800 switch (cmd) {
801 case PPPIOCNEWUNIT:
802 /* Create a new ppp unit */
803 if (get_user(unit, p))
804 break;
805 ppp = ppp_create_interface(net, unit, &err);
806 if (!ppp)
807 break;
808 file->private_data = &ppp->file;
809 ppp->owner = file;
810 err = -EFAULT;
811 if (put_user(ppp->file.index, p))
812 break;
813 err = 0;
814 break;
815
816 case PPPIOCATTACH:
817 /* Attach to an existing ppp unit */
818 if (get_user(unit, p))
819 break;
820 err = -ENXIO;
821 pn = ppp_pernet(net);
822 mutex_lock(&pn->all_ppp_mutex);
823 ppp = ppp_find_unit(pn, unit);
824 if (ppp) {
825 atomic_inc(&ppp->file.refcnt);
826 file->private_data = &ppp->file;
827 err = 0;
828 }
829 mutex_unlock(&pn->all_ppp_mutex);
830 break;
831
832 case PPPIOCATTCHAN:
833 if (get_user(unit, p))
834 break;
835 err = -ENXIO;
836 pn = ppp_pernet(net);
837 spin_lock_bh(&pn->all_channels_lock);
838 chan = ppp_find_channel(pn, unit);
839 if (chan) {
840 atomic_inc(&chan->file.refcnt);
841 file->private_data = &chan->file;
842 err = 0;
843 }
844 spin_unlock_bh(&pn->all_channels_lock);
845 break;
846
847 default:
848 err = -ENOTTY;
849 }
850 mutex_unlock(&ppp_mutex);
851 return err;
852}
853
854static const struct file_operations ppp_device_fops = {
855 .owner = THIS_MODULE,
856 .read = ppp_read,
857 .write = ppp_write,
858 .poll = ppp_poll,
859 .unlocked_ioctl = ppp_ioctl,
860 .open = ppp_open,
861 .release = ppp_release,
862 .llseek = noop_llseek,
863};
864
865static __net_init int ppp_init_net(struct net *net)
866{
867 struct ppp_net *pn = net_generic(net, ppp_net_id);
868
869 idr_init(&pn->units_idr);
870 mutex_init(&pn->all_ppp_mutex);
871
872 INIT_LIST_HEAD(&pn->all_channels);
873 INIT_LIST_HEAD(&pn->new_channels);
874
875 spin_lock_init(&pn->all_channels_lock);
876
877 return 0;
878}
879
880static __net_exit void ppp_exit_net(struct net *net)
881{
882 struct ppp_net *pn = net_generic(net, ppp_net_id);
883
884 idr_destroy(&pn->units_idr);
885}
886
887static struct pernet_operations ppp_net_ops = {
888 .init = ppp_init_net,
889 .exit = ppp_exit_net,
890 .id = &ppp_net_id,
891 .size = sizeof(struct ppp_net),
892};
893
894#define PPP_MAJOR 108
895
896/* Called at boot time if ppp is compiled into the kernel,
897 or at module load time (from init_module) if compiled as a module. */
898static int __init ppp_init(void)
899{
900 int err;
901
902 pr_info("PPP generic driver version " PPP_VERSION "\n");
903
904 err = register_pernet_device(&ppp_net_ops);
905 if (err) {
906 pr_err("failed to register PPP pernet device (%d)\n", err);
907 goto out;
908 }
909
910 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
911 if (err) {
912 pr_err("failed to register PPP device (%d)\n", err);
913 goto out_net;
914 }
915
916 ppp_class = class_create(THIS_MODULE, "ppp");
917 if (IS_ERR(ppp_class)) {
918 err = PTR_ERR(ppp_class);
919 goto out_chrdev;
920 }
921
922 /* not a big deal if we fail here :-) */
923 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
924
925 return 0;
926
927out_chrdev:
928 unregister_chrdev(PPP_MAJOR, "ppp");
929out_net:
930 unregister_pernet_device(&ppp_net_ops);
931out:
932 return err;
933}
934
935/*
936 * Network interface unit routines.
937 */
938static netdev_tx_t
939ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
940{
941 struct ppp *ppp = netdev_priv(dev);
942 int npi, proto;
943 unsigned char *pp;
944
945 npi = ethertype_to_npindex(ntohs(skb->protocol));
946 if (npi < 0)
947 goto outf;
948
949 /* Drop, accept or reject the packet */
950 switch (ppp->npmode[npi]) {
951 case NPMODE_PASS:
952 break;
953 case NPMODE_QUEUE:
954 /* it would be nice to have a way to tell the network
955 system to queue this one up for later. */
956 goto outf;
957 case NPMODE_DROP:
958 case NPMODE_ERROR:
959 goto outf;
960 }
961
962 /* Put the 2-byte PPP protocol number on the front,
963 making sure there is room for the address and control fields. */
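	/* e.g. an IPv4 packet gets the protocol value 0x0021 (PPP_IP)
	   prepended here, per the standard PPP protocol numbering */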
964 if (skb_cow_head(skb, PPP_HDRLEN))
965 goto outf;
966
967 pp = skb_push(skb, 2);
968 proto = npindex_to_proto[npi];
969 put_unaligned_be16(proto, pp);
970
971 netif_stop_queue(dev);
972 skb_queue_tail(&ppp->file.xq, skb);
973 ppp_xmit_process(ppp);
974 return NETDEV_TX_OK;
975
976 outf:
977 kfree_skb(skb);
978 ++dev->stats.tx_dropped;
979 return NETDEV_TX_OK;
980}
981
982static int
983ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
984{
985 struct ppp *ppp = netdev_priv(dev);
986 int err = -EFAULT;
987 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
988 struct ppp_stats stats;
989 struct ppp_comp_stats cstats;
990 char *vers;
991
992 switch (cmd) {
993 case SIOCGPPPSTATS:
994 ppp_get_stats(ppp, &stats);
995 if (copy_to_user(addr, &stats, sizeof(stats)))
996 break;
997 err = 0;
998 break;
999
1000 case SIOCGPPPCSTATS:
1001 memset(&cstats, 0, sizeof(cstats));
1002 if (ppp->xc_state)
1003 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1004 if (ppp->rc_state)
1005 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1006 if (copy_to_user(addr, &cstats, sizeof(cstats)))
1007 break;
1008 err = 0;
1009 break;
1010
1011 case SIOCGPPPVER:
1012 vers = PPP_VERSION;
1013 if (copy_to_user(addr, vers, strlen(vers) + 1))
1014 break;
1015 err = 0;
1016 break;
1017
1018 default:
1019 err = -EINVAL;
1020 }
1021
1022 return err;
1023}
1024
1025static const struct net_device_ops ppp_netdev_ops = {
1026 .ndo_start_xmit = ppp_start_xmit,
1027 .ndo_do_ioctl = ppp_net_ioctl,
1028};
1029
1030static void ppp_setup(struct net_device *dev)
1031{
1032 dev->netdev_ops = &ppp_netdev_ops;
1033 dev->hard_header_len = PPP_HDRLEN;
1034 dev->mtu = PPP_MTU;
1035 dev->addr_len = 0;
1036 dev->tx_queue_len = 3;
1037 dev->type = ARPHRD_PPP;
1038 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1039 dev->features |= NETIF_F_NETNS_LOCAL;
1040 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1041}
1042
1043/*
1044 * Transmit-side routines.
1045 */
1046
1047/*
1048 * Called to do any work queued up on the transmit side
1049 * that can now be done.
1050 */
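/*
 * Note that only one frame at a time sits in ppp->xmit_pending;
 * ppp_push() clears it once a channel has accepted the frame, and
 * only then is the next packet dequeued from the unit's xmit queue.
 */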
1051static void
1052ppp_xmit_process(struct ppp *ppp)
1053{
1054 struct sk_buff *skb;
1055
1056 ppp_xmit_lock(ppp);
1057 if (!ppp->closing) {
1058 ppp_push(ppp);
1059 while (!ppp->xmit_pending &&
1060 (skb = skb_dequeue(&ppp->file.xq)))
1061 ppp_send_frame(ppp, skb);
1062 /* If there's no work left to do, tell the core net
1063 code that we can accept some more. */
1064 if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1065 netif_wake_queue(ppp->dev);
1066 }
1067 ppp_xmit_unlock(ppp);
1068}
1069
1070static inline struct sk_buff *
1071pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1072{
1073 struct sk_buff *new_skb;
1074 int len;
1075 int new_skb_size = ppp->dev->mtu +
1076 ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1077 int compressor_skb_size = ppp->dev->mtu +
1078 ppp->xcomp->comp_extra + PPP_HDRLEN;
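	/* new_skb_size additionally reserves headroom for the link-level
	   header, while compressor_skb_size is the output limit handed to
	   the compressor, which still counts the PPP header since the
	   A/C bytes are re-included just below */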
1079 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1080 if (!new_skb) {
1081 if (net_ratelimit())
1082 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1083 return NULL;
1084 }
1085 if (ppp->dev->hard_header_len > PPP_HDRLEN)
1086 skb_reserve(new_skb,
1087 ppp->dev->hard_header_len - PPP_HDRLEN);
1088
1089 /* compressor still expects A/C bytes in hdr */
1090 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1091 new_skb->data, skb->len + 2,
1092 compressor_skb_size);
1093 if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1094 kfree_skb(skb);
1095 skb = new_skb;
1096 skb_put(skb, len);
1097 skb_pull(skb, 2); /* pull off A/C bytes */
1098 } else if (len == 0) {
1099 /* didn't compress, or CCP not up yet */
1100 kfree_skb(new_skb);
1101 new_skb = skb;
1102 } else {
1103 /*
1104 * (len < 0)
1105 * MPPE requires that we do not send unencrypted
1106 * frames. The compressor will return -1 if we
1107 * should drop the frame. We cannot simply test
1108 * the compress_proto because MPPE and MPPC share
1109 * the same number.
1110 */
1111 if (net_ratelimit())
1112 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1113 kfree_skb(skb);
1114 kfree_skb(new_skb);
1115 new_skb = NULL;
1116 }
1117 return new_skb;
1118}
1119
1120/*
1121 * Compress and send a frame.
1122 * The caller should have locked the xmit path,
1123 * and xmit_pending should be 0.
1124 */
1125static void
1126ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1127{
1128 int proto = PPP_PROTO(skb);
1129 struct sk_buff *new_skb;
1130 int len;
1131 unsigned char *cp;
1132
1133 if (proto < 0x8000) {
1134#ifdef CONFIG_PPP_FILTER
1135 /* check if we should pass this packet */
1136 /* the filter instructions are constructed assuming
1137 a four-byte PPP header on each packet */
1138 *skb_push(skb, 2) = 1;
1139 if (ppp->pass_filter &&
1140 sk_run_filter(skb, ppp->pass_filter) == 0) {
1141 if (ppp->debug & 1)
1142 netdev_printk(KERN_DEBUG, ppp->dev,
1143 "PPP: outbound frame "
1144 "not passed\n");
1145 kfree_skb(skb);
1146 return;
1147 }
1148 /* if this packet passes the active filter, record the time */
1149 if (!(ppp->active_filter &&
1150 sk_run_filter(skb, ppp->active_filter) == 0))
1151 ppp->last_xmit = jiffies;
1152 skb_pull(skb, 2);
1153#else
1154 /* for data packets, record the time */
1155 ppp->last_xmit = jiffies;
1156#endif /* CONFIG_PPP_FILTER */
1157 }
1158
1159 ++ppp->dev->stats.tx_packets;
1160 ppp->dev->stats.tx_bytes += skb->len - 2;
1161
1162 switch (proto) {
1163 case PPP_IP:
1164 if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1165 break;
1166 /* try to do VJ TCP header compression */
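		/* slhc implements Van Jacobson TCP/IP header compression
		   (RFC 1144): compressed packets go out as PPP_VJC_COMP,
		   uncompressed packets that still carry a connection slot
		   as PPP_VJC_UNCOMP */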
1167 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1168 GFP_ATOMIC);
1169 if (!new_skb) {
1170 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1171 goto drop;
1172 }
1173 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1174 cp = skb->data + 2;
1175 len = slhc_compress(ppp->vj, cp, skb->len - 2,
1176 new_skb->data + 2, &cp,
1177 !(ppp->flags & SC_NO_TCP_CCID));
1178 if (cp == skb->data + 2) {
1179 /* didn't compress */
1180 kfree_skb(new_skb);
1181 } else {
1182 if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1183 proto = PPP_VJC_COMP;
1184 cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1185 } else {
1186 proto = PPP_VJC_UNCOMP;
1187 cp[0] = skb->data[2];
1188 }
1189 kfree_skb(skb);
1190 skb = new_skb;
1191 cp = skb_put(skb, len + 2);
1192 cp[0] = 0;
1193 cp[1] = proto;
1194 }
1195 break;
1196
1197 case PPP_CCP:
1198 /* peek at outbound CCP frames */
1199 ppp_ccp_peek(ppp, skb, 0);
1200 break;
1201 }
1202
1203 /* try to do packet compression */
1204 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1205 proto != PPP_LCP && proto != PPP_CCP) {
1206 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1207 if (net_ratelimit())
1208 netdev_err(ppp->dev,
1209 "ppp: compression required but "
1210 "down - pkt dropped.\n");
1211 goto drop;
1212 }
1213 skb = pad_compress_skb(ppp, skb);
1214 if (!skb)
1215 goto drop;
1216 }
1217
1218 /*
1219 * If we are waiting for traffic (demand dialling),
1220 * queue it up for pppd to receive.
1221 */
1222 if (ppp->flags & SC_LOOP_TRAFFIC) {
1223 if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1224 goto drop;
1225 skb_queue_tail(&ppp->file.rq, skb);
1226 wake_up_interruptible(&ppp->file.rwait);
1227 return;
1228 }
1229
1230 ppp->xmit_pending = skb;
1231 ppp_push(ppp);
1232 return;
1233
1234 drop:
1235 kfree_skb(skb);
1236 ++ppp->dev->stats.tx_errors;
1237}
1238
1239/*
1240 * Try to send the frame in xmit_pending.
1241 * The caller should have the xmit path locked.
1242 */
1243static void
1244ppp_push(struct ppp *ppp)
1245{
1246 struct list_head *list;
1247 struct channel *pch;
1248 struct sk_buff *skb = ppp->xmit_pending;
1249
1250 if (!skb)
1251 return;
1252
1253 list = &ppp->channels;
1254 if (list_empty(list)) {
1255 /* nowhere to send the packet, just drop it */
1256 ppp->xmit_pending = NULL;
1257 kfree_skb(skb);
1258 return;
1259 }
1260
1261 if ((ppp->flags & SC_MULTILINK) == 0) {
1262 /* not doing multilink: send it down the first channel */
1263 list = list->next;
1264 pch = list_entry(list, struct channel, clist);
1265
1266 spin_lock_bh(&pch->downl);
1267 if (pch->chan) {
1268 if (pch->chan->ops->start_xmit(pch->chan, skb))
1269 ppp->xmit_pending = NULL;
1270 } else {
1271 /* channel got unregistered */
1272 kfree_skb(skb);
1273 ppp->xmit_pending = NULL;
1274 }
1275 spin_unlock_bh(&pch->downl);
1276 return;
1277 }
1278
1279#ifdef CONFIG_PPP_MULTILINK
1280 /* Multilink: fragment the packet over as many links
1281 as can take the packet at the moment. */
1282 if (!ppp_mp_explode(ppp, skb))
1283 return;
1284#endif /* CONFIG_PPP_MULTILINK */
1285
1286 ppp->xmit_pending = NULL;
1287 kfree_skb(skb);
1288}
1289
1290#ifdef CONFIG_PPP_MULTILINK
1291static bool mp_protocol_compress __read_mostly = true;
1292module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
1293MODULE_PARM_DESC(mp_protocol_compress,
1294 "compress protocol id in multilink fragments");
1295
1296/*
1297 * Divide a packet to be transmitted into fragments and
1298 * send them out the individual links.
1299 */
1300static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1301{
1302 int len, totlen;
1303 int i, bits, hdrlen, mtu;
1304 int flen;
1305 int navail, nfree, nzero;
1306 int nbigger;
1307 int totspeed;
1308 int totfree;
1309 unsigned char *p, *q;
1310 struct list_head *list;
1311 struct channel *pch;
1312 struct sk_buff *frag;
1313 struct ppp_channel *chan;
1314
1315	totspeed = 0; /* total bitrate of the bundle */
1316	nfree = 0; /* # channels which have no packet already queued */
1317	navail = 0; /* total # of usable channels (not deregistered) */
1318	nzero = 0; /* number of channels with zero speed associated */
1319	totfree = 0; /* total # of channels available and
1320	              * having no queued packets before
1321	              * starting the fragmentation */
1322
1323	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1324 i = 0;
1325 list_for_each_entry(pch, &ppp->channels, clist) {
1326 if (pch->chan) {
1327 pch->avail = 1;
1328 navail++;
1329 pch->speed = pch->chan->speed;
1330 } else {
1331 pch->avail = 0;
1332 }
1333 if (pch->avail) {
1334 if (skb_queue_empty(&pch->file.xq) ||
1335 !pch->had_frag) {
1336 if (pch->speed == 0)
1337 nzero++;
1338 else
1339 totspeed += pch->speed;
1340
1341 pch->avail = 2;
1342 ++nfree;
1343 ++totfree;
1344 }
1345 if (!pch->had_frag && i < ppp->nxchan)
1346 ppp->nxchan = i;
1347 }
1348 ++i;
1349 }
1350 /*
1351 * Don't start sending this packet unless at least half of
1352 * the channels are free. This gives much better TCP
1353 * performance if we have a lot of channels.
1354 */
1355 if (nfree == 0 || nfree < navail / 2)
1356 return 0; /* can't take now, leave it in xmit_pending */
1357
1358 /* Do protocol field compression */
1359 p = skb->data;
1360 len = skb->len;
1361 if (*p == 0 && mp_protocol_compress) {
1362 ++p;
1363 --len;
1364 }
1365
1366 totlen = len;
1367 nbigger = len % nfree;
1368
1369 /* skip to the channel after the one we last used
1370 and start at that one */
1371 list = &ppp->channels;
1372 for (i = 0; i < ppp->nxchan; ++i) {
1373 list = list->next;
1374 if (list == &ppp->channels) {
1375 i = 0;
1376 break;
1377 }
1378 }
1379
1380 /* create a fragment for each channel */
1381 bits = B;
1382 while (len > 0) {
1383 list = list->next;
1384 if (list == &ppp->channels) {
1385 i = 0;
1386 continue;
1387 }
1388 pch = list_entry(list, struct channel, clist);
1389 ++i;
1390 if (!pch->avail)
1391 continue;
1392
1393 /*
1394 * Skip this channel if it has a fragment pending already and
1395 * we haven't given a fragment to all of the free channels.
1396 */
1397 if (pch->avail == 1) {
1398 if (nfree > 0)
1399 continue;
1400 } else {
1401 pch->avail = 1;
1402 }
1403
1404 /* check the channel's mtu and whether it is still attached. */
1405 spin_lock_bh(&pch->downl);
1406 if (pch->chan == NULL) {
1407 /* can't use this channel, it's being deregistered */
1408 if (pch->speed == 0)
1409 nzero--;
1410 else
1411 totspeed -= pch->speed;
1412
1413 spin_unlock_bh(&pch->downl);
1414 pch->avail = 0;
1415 totlen = len;
1416 totfree--;
1417 nfree--;
1418 if (--navail == 0)
1419 break;
1420 continue;
1421 }
1422
1423		/*
1424		 * If the channel speed is not set, divide
1425		 * the packet evenly among the free channels;
1426		 * otherwise divide it according to the speed
1427		 * of the channel we are going to transmit on.
1428		 */
1429 flen = len;
1430 if (nfree > 0) {
1431 if (pch->speed == 0) {
1432 flen = len/nfree;
1433 if (nbigger > 0) {
1434 flen++;
1435 nbigger--;
1436 }
1437 } else {
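				/*
				 * Speed-weighted share: scale the total data
				 * (plus per-fragment header overhead) by this
				 * channel's fraction of the bundle bitrate.
				 * E.g. with two free channels at 64 kbit/s and
				 * 128 kbit/s, the faster one is asked to carry
				 * roughly twice as many bytes per round.
				 */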
1438 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1439 ((totspeed*totfree)/pch->speed)) - hdrlen;
1440 if (nbigger > 0) {
1441 flen += ((totfree - nzero)*pch->speed)/totspeed;
1442 nbigger -= ((totfree - nzero)*pch->speed)/
1443 totspeed;
1444 }
1445 }
1446 nfree--;
1447 }
1448
1449		/*
1450		 * Check if we are on the last channel or
1451		 * we exceeded the length of the data to
1452		 * fragment.
1453		 */
1454 if ((nfree <= 0) || (flen > len))
1455 flen = len;
1456		/*
1457		 * It is not worth transmitting on slow channels:
1458		 * in that case the resulting flen from the
1459		 * above formula will be zero or less.
1460		 * Skip the channel in this case.
1461		 */
1462 if (flen <= 0) {
1463 pch->avail = 2;
1464 spin_unlock_bh(&pch->downl);
1465 continue;
1466 }
1467
1468 mtu = pch->chan->mtu - hdrlen;
1469 if (mtu < 4)
1470 mtu = 4;
1471 if (flen > mtu)
1472 flen = mtu;
1473 if (flen == len)
1474 bits |= E;
1475 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1476 if (!frag)
1477 goto noskb;
1478 q = skb_put(frag, flen + hdrlen);
1479
1480 /* make the MP header */
1481 put_unaligned_be16(PPP_MP, q);
1482 if (ppp->flags & SC_MP_XSHORTSEQ) {
1483 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1484 q[3] = ppp->nxseq;
1485 } else {
1486 q[2] = bits;
1487 q[3] = ppp->nxseq >> 16;
1488 q[4] = ppp->nxseq >> 8;
1489 q[5] = ppp->nxseq;
1490 }
1491
1492 memcpy(q + hdrlen, p, flen);
1493
1494 /* try to send it down the channel */
1495 chan = pch->chan;
1496 if (!skb_queue_empty(&pch->file.xq) ||
1497 !chan->ops->start_xmit(chan, frag))
1498 skb_queue_tail(&pch->file.xq, frag);
1499 pch->had_frag = 1;
1500 p += flen;
1501 len -= flen;
1502 ++ppp->nxseq;
1503 bits = 0;
1504 spin_unlock_bh(&pch->downl);
1505 }
1506 ppp->nxchan = i;
1507
1508 return 1;
1509
1510 noskb:
1511 spin_unlock_bh(&pch->downl);
1512 if (ppp->debug & 1)
1513 netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1514 ++ppp->dev->stats.tx_errors;
1515 ++ppp->nxseq;
1516 return 1; /* abandon the frame */
1517}
1518#endif /* CONFIG_PPP_MULTILINK */
1519
1520/*
1521 * Try to send data out on a channel.
1522 */
1523static void
1524ppp_channel_push(struct channel *pch)
1525{
1526 struct sk_buff *skb;
1527 struct ppp *ppp;
1528
1529 spin_lock_bh(&pch->downl);
1530 if (pch->chan) {
1531 while (!skb_queue_empty(&pch->file.xq)) {
1532 skb = skb_dequeue(&pch->file.xq);
1533 if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
1534 /* put the packet back and try again later */
1535 skb_queue_head(&pch->file.xq, skb);
1536 break;
1537 }
1538 }
1539 } else {
1540 /* channel got deregistered */
1541 skb_queue_purge(&pch->file.xq);
1542 }
1543 spin_unlock_bh(&pch->downl);
1544 /* see if there is anything from the attached unit to be sent */
1545 if (skb_queue_empty(&pch->file.xq)) {
1546 read_lock_bh(&pch->upl);
1547 ppp = pch->ppp;
1548 if (ppp)
1549 ppp_xmit_process(ppp);
1550 read_unlock_bh(&pch->upl);
1551 }
1552}
1553
1554/*
1555 * Receive-side routines.
1556 */
1557
1558struct ppp_mp_skb_parm {
1559 u32 sequence;
1560 u8 BEbits;
1561};
1562#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
1563
1564static inline void
1565ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1566{
1567 ppp_recv_lock(ppp);
1568 if (!ppp->closing)
1569 ppp_receive_frame(ppp, skb, pch);
1570 else
1571 kfree_skb(skb);
1572 ppp_recv_unlock(ppp);
1573}
1574
1575void
1576ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1577{
1578 struct channel *pch = chan->ppp;
1579 int proto;
1580
1581 if (!pch) {
1582 kfree_skb(skb);
1583 return;
1584 }
1585
1586 read_lock_bh(&pch->upl);
1587 if (!pskb_may_pull(skb, 2)) {
1588 kfree_skb(skb);
1589 if (pch->ppp) {
1590 ++pch->ppp->dev->stats.rx_length_errors;
1591 ppp_receive_error(pch->ppp);
1592 }
1593 goto done;
1594 }
1595
1596 proto = PPP_PROTO(skb);
1597 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1598 /* put it on the channel queue */
1599 skb_queue_tail(&pch->file.rq, skb);
1600 /* drop old frames if queue too long */
1601 while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
1602 (skb = skb_dequeue(&pch->file.rq)))
1603 kfree_skb(skb);
1604 wake_up_interruptible(&pch->file.rwait);
1605 } else {
1606 ppp_do_recv(pch->ppp, skb, pch);
1607 }
1608
1609done:
1610 read_unlock_bh(&pch->upl);
1611}
1612
1613/* Put a 0-length skb in the receive queue as an error indication */
1614void
1615ppp_input_error(struct ppp_channel *chan, int code)
1616{
1617 struct channel *pch = chan->ppp;
1618 struct sk_buff *skb;
1619
1620 if (!pch)
1621 return;
1622
1623 read_lock_bh(&pch->upl);
1624 if (pch->ppp) {
1625 skb = alloc_skb(0, GFP_ATOMIC);
1626 if (skb) {
1627 skb->len = 0; /* probably unnecessary */
1628 skb->cb[0] = code;
1629 ppp_do_recv(pch->ppp, skb, pch);
1630 }
1631 }
1632 read_unlock_bh(&pch->upl);
1633}
1634
1635/*
1636 * We come in here to process a received frame.
1637 * The receive side of the ppp unit is locked.
1638 */
1639static void
1640ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1641{
1642 /* note: a 0-length skb is used as an error indication */
1643 if (skb->len > 0) {
1644#ifdef CONFIG_PPP_MULTILINK
1645 /* XXX do channel-level decompression here */
1646 if (PPP_PROTO(skb) == PPP_MP)
1647 ppp_receive_mp_frame(ppp, skb, pch);
1648 else
1649#endif /* CONFIG_PPP_MULTILINK */
1650 ppp_receive_nonmp_frame(ppp, skb);
1651 } else {
1652 kfree_skb(skb);
1653 ppp_receive_error(ppp);
1654 }
1655}
1656
1657static void
1658ppp_receive_error(struct ppp *ppp)
1659{
1660 ++ppp->dev->stats.rx_errors;
1661 if (ppp->vj)
1662 slhc_toss(ppp->vj);
1663}
1664
1665static void
1666ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1667{
1668 struct sk_buff *ns;
1669 int proto, len, npi;
1670
1671 /*
1672 * Decompress the frame, if compressed.
1673 * Note that some decompressors need to see uncompressed frames
1674 * that come in as well as compressed frames.
1675 */
1676 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
1677 (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
1678 skb = ppp_decompress_frame(ppp, skb);
1679
1680 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
1681 goto err;
1682
1683 proto = PPP_PROTO(skb);
1684 switch (proto) {
1685 case PPP_VJC_COMP:
1686 /* decompress VJ compressed packets */
1687 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1688 goto err;
1689
1690 if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
1691 /* copy to a new sk_buff with more tailroom */
1692 ns = dev_alloc_skb(skb->len + 128);
1693 if (!ns) {
1694 netdev_err(ppp->dev, "PPP: no memory "
1695 "(VJ decomp)\n");
1696 goto err;
1697 }
1698 skb_reserve(ns, 2);
1699 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
1700 kfree_skb(skb);
1701 skb = ns;
1702 }
1703 else
1704 skb->ip_summed = CHECKSUM_NONE;
1705
1706 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1707 if (len <= 0) {
1708 netdev_printk(KERN_DEBUG, ppp->dev,
1709 "PPP: VJ decompression error\n");
1710 goto err;
1711 }
1712 len += 2;
1713 if (len > skb->len)
1714 skb_put(skb, len - skb->len);
1715 else if (len < skb->len)
1716 skb_trim(skb, len);
1717 proto = PPP_IP;
1718 break;
1719
1720 case PPP_VJC_UNCOMP:
1721 if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1722 goto err;
1723
1724		/* Until we fix the decompressor, we need to make sure
1725		 * the data portion is linear.
1726		 */
1727 if (!pskb_may_pull(skb, skb->len))
1728 goto err;
1729
1730 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1731 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1732 goto err;
1733 }
1734 proto = PPP_IP;
1735 break;
1736
1737 case PPP_CCP:
1738 ppp_ccp_peek(ppp, skb, 1);
1739 break;
1740 }
1741
1742 ++ppp->dev->stats.rx_packets;
1743 ppp->dev->stats.rx_bytes += skb->len - 2;
1744
1745 npi = proto_to_npindex(proto);
1746 if (npi < 0) {
1747 /* control or unknown frame - pass it to pppd */
1748 skb_queue_tail(&ppp->file.rq, skb);
1749 /* limit queue length by dropping old frames */
1750 while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
1751 (skb = skb_dequeue(&ppp->file.rq)))
1752 kfree_skb(skb);
1753 /* wake up any process polling or blocking on read */
1754 wake_up_interruptible(&ppp->file.rwait);
1755
1756 } else {
1757 /* network protocol frame - give it to the kernel */
1758
1759#ifdef CONFIG_PPP_FILTER
1760 /* check if the packet passes the pass and active filters */
1761 /* the filter instructions are constructed assuming
1762 a four-byte PPP header on each packet */
1763 if (ppp->pass_filter || ppp->active_filter) {
1764 if (skb_cloned(skb) &&
1765 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1766 goto err;
1767
1768 *skb_push(skb, 2) = 0;
1769 if (ppp->pass_filter &&
1770 sk_run_filter(skb, ppp->pass_filter) == 0) {
1771 if (ppp->debug & 1)
1772 netdev_printk(KERN_DEBUG, ppp->dev,
1773 "PPP: inbound frame "
1774 "not passed\n");
1775 kfree_skb(skb);
1776 return;
1777 }
1778 if (!(ppp->active_filter &&
1779 sk_run_filter(skb, ppp->active_filter) == 0))
1780 ppp->last_recv = jiffies;
1781 __skb_pull(skb, 2);
1782 } else
1783#endif /* CONFIG_PPP_FILTER */
1784 ppp->last_recv = jiffies;
1785
1786 if ((ppp->dev->flags & IFF_UP) == 0 ||
1787 ppp->npmode[npi] != NPMODE_PASS) {
1788 kfree_skb(skb);
1789 } else {
1790 /* chop off protocol */
1791 skb_pull_rcsum(skb, 2);
1792 skb->dev = ppp->dev;
1793 skb->protocol = htons(npindex_to_ethertype[npi]);
1794 skb_reset_mac_header(skb);
1795 netif_rx(skb);
1796 }
1797 }
1798 return;
1799
1800 err:
1801 kfree_skb(skb);
1802 ppp_receive_error(ppp);
1803}
1804
1805static struct sk_buff *
1806ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1807{
1808 int proto = PPP_PROTO(skb);
1809 struct sk_buff *ns;
1810 int len;
1811
1812	/* Until we fix all the decompressors, we need to make sure
1813	 * the data portion is linear.
1814	 */
1815 if (!pskb_may_pull(skb, skb->len))
1816 goto err;
1817
1818 if (proto == PPP_COMP) {
1819 int obuff_size;
1820
1821 switch(ppp->rcomp->compress_proto) {
1822 case CI_MPPE:
1823 obuff_size = ppp->mru + PPP_HDRLEN + 1;
1824 break;
1825 default:
1826 obuff_size = ppp->mru + PPP_HDRLEN;
1827 break;
1828 }
1829
1830 ns = dev_alloc_skb(obuff_size);
1831 if (!ns) {
1832 netdev_err(ppp->dev, "ppp_decompress_frame: "
1833 "no memory\n");
1834 goto err;
1835 }
1836 /* the decompressor still expects the A/C bytes in the hdr */
1837 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1838 skb->len + 2, ns->data, obuff_size);
1839 if (len < 0) {
1840 /* Pass the compressed frame to pppd as an
1841 error indication. */
1842 if (len == DECOMP_FATALERROR)
1843 ppp->rstate |= SC_DC_FERROR;
1844 kfree_skb(ns);
1845 goto err;
1846 }
1847
1848 kfree_skb(skb);
1849 skb = ns;
1850 skb_put(skb, len);
1851 skb_pull(skb, 2); /* pull off the A/C bytes */
1852
1853 } else {
1854 /* Uncompressed frame - pass to decompressor so it
1855 can update its dictionary if necessary. */
1856 if (ppp->rcomp->incomp)
1857 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1858 skb->len + 2);
1859 }
1860
1861 return skb;
1862
1863 err:
1864 ppp->rstate |= SC_DC_ERROR;
1865 ppp_receive_error(ppp);
1866 return skb;
1867}
1868
1869#ifdef CONFIG_PPP_MULTILINK
1870/*
1871 * Receive a multilink frame.
1872 * We put it on the reconstruction queue and then pull off
1873 * as many completed frames as we can.
1874 */
1875static void
1876ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1877{
1878 u32 mask, seq;
1879 struct channel *ch;
1880	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1881
1882 if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
1883 goto err; /* no good, throw it away */
1884
1885 /* Decode sequence number and begin/end bits */
1886 if (ppp->flags & SC_MP_SHORTSEQ) {
1887 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
1888 mask = 0xfff;
1889 } else {
1890 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
1891 mask = 0xffffff;
1892 }
1893 PPP_MP_CB(skb)->BEbits = skb->data[2];
1894 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
1895
1896 /*
1897 * Do protocol ID decompression on the first fragment of each packet.
1898 */
1899 if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
1900 *skb_push(skb, 1) = 0;
1901
1902 /*
1903 * Expand sequence number to 32 bits, making it as close
1904 * as possible to ppp->minseq.
1905 */
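	/*
	 * For example, with 12-bit sequence numbers (mask 0xfff), if
	 * minseq is 0x10ffe and we receive seq 0x001, the raw merge
	 * gives 0x10001, which is more than half a window below
	 * minseq; adding mask + 1 yields 0x11001, the closest match.
	 */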
1906 seq |= ppp->minseq & ~mask;
1907 if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
1908 seq += mask + 1;
1909 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1910 seq -= mask + 1; /* should never happen */
1911 PPP_MP_CB(skb)->sequence = seq;
1912 pch->lastseq = seq;
1913
1914 /*
1915 * If this packet comes before the next one we were expecting,
1916 * drop it.
1917 */
1918 if (seq_before(seq, ppp->nextseq)) {
1919 kfree_skb(skb);
1920 ++ppp->dev->stats.rx_dropped;
1921 ppp_receive_error(ppp);
1922 return;
1923 }
1924
1925 /*
1926 * Reevaluate minseq, the minimum over all channels of the
1927 * last sequence number received on each channel. Because of
1928 * the increasing sequence number rule, we know that any fragment
1929 * before `minseq' which hasn't arrived is never going to arrive.
1930 * The list of channels can't change because we have the receive
1931 * side of the ppp unit locked.
1932 */
1933 list_for_each_entry(ch, &ppp->channels, clist) {
1934 if (seq_before(ch->lastseq, seq))
1935 seq = ch->lastseq;
1936 }
1937 if (seq_before(ppp->minseq, seq))
1938 ppp->minseq = seq;
1939
1940 /* Put the fragment on the reconstruction queue */
1941 ppp_mp_insert(ppp, skb);
1942
1943 /* If the queue is getting long, don't wait any longer for packets
1944 before the start of the queue. */
1945 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1946 struct sk_buff *mskb = skb_peek(&ppp->mrq);
1947 if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
1948 ppp->minseq = PPP_MP_CB(mskb)->sequence;
1949 }
1950
1951 /* Pull completed packets off the queue and receive them. */
1952 while ((skb = ppp_mp_reconstruct(ppp))) {
1953 if (pskb_may_pull(skb, 2))
1954 ppp_receive_nonmp_frame(ppp, skb);
1955 else {
1956 ++ppp->dev->stats.rx_length_errors;
1957 kfree_skb(skb);
1958 ppp_receive_error(ppp);
1959 }
1960 }
1961
1962 return;
1963
1964 err:
1965 kfree_skb(skb);
1966 ppp_receive_error(ppp);
1967}
1968
1969/*
1970 * Insert a fragment on the MP reconstruction queue.
1971 * The queue is ordered by increasing sequence number.
1972 */
1973static void
1974ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1975{
1976 struct sk_buff *p;
1977 struct sk_buff_head *list = &ppp->mrq;
1978 u32 seq = PPP_MP_CB(skb)->sequence;
1979
1980 /* N.B. we don't need to lock the list lock because we have the
1981 ppp unit receive-side lock. */
1982 skb_queue_walk(list, p) {
1983 if (seq_before(seq, PPP_MP_CB(p)->sequence))
1984 break;
1985 }
1986 __skb_queue_before(list, p, skb);
1987}
1988
1989/*
1990 * Reconstruct a packet from the MP fragment queue.
1991 * We go through increasing sequence numbers until we find a
1992 * complete packet, or we get to the sequence number for a fragment
1993 * which hasn't arrived but might still do so.
1994 */
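/*
 * Each fragment carries B (begin) and E (end) flags: a complete packet
 * is an unbroken run of sequence numbers that starts with a B fragment
 * and ends with an E fragment, with no fragment lost in between.
 */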
1995static struct sk_buff *
1996ppp_mp_reconstruct(struct ppp *ppp)
1997{
1998 u32 seq = ppp->nextseq;
1999 u32 minseq = ppp->minseq;
2000 struct sk_buff_head *list = &ppp->mrq;
2001 struct sk_buff *p, *tmp;
2002 struct sk_buff *head, *tail;
2003 struct sk_buff *skb = NULL;
2004 int lost = 0, len = 0;
2005
2006 if (ppp->mrru == 0) /* do nothing until mrru is set */
2007 return NULL;
2008 head = list->next;
2009 tail = NULL;
2010 skb_queue_walk_safe(list, p, tmp) {
2011 again:
2012 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2013			/* this can't happen; ignore the skb anyway */
2014 netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2015 "seq %u < %u\n",
2016 PPP_MP_CB(p)->sequence, seq);
2017 __skb_unlink(p, list);
2018 kfree_skb(p);
2019 continue;
2020 }
2021 if (PPP_MP_CB(p)->sequence != seq) {
2022 /* Fragment `seq' is missing. If it is after
2023 minseq, it might arrive later, so stop here. */
2024 if (seq_after(seq, minseq))
2025 break;
2026 /* Fragment `seq' is lost, keep going. */
2027 lost = 1;
2028			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2029				minseq + 1 : PPP_MP_CB(p)->sequence;
2030 goto again;
2031 }
2032
2033 /*
2034 * At this point we know that all the fragments from
2035 * ppp->nextseq to seq are either present or lost.
2036 * Also, there are no complete packets in the queue
2037 * that have no missing fragments and end before this
2038 * fragment.
2039 */
2040
2041 /* B bit set indicates this fragment starts a packet */
2042 if (PPP_MP_CB(p)->BEbits & B) {
2043 head = p;
2044 lost = 0;
2045 len = 0;
2046 }
2047
2048 len += p->len;
2049
2050 /* Got a complete packet yet? */
2051 if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2052 (PPP_MP_CB(head)->BEbits & B)) {
2053 if (len > ppp->mrru + 2) {
2054 ++ppp->dev->stats.rx_length_errors;
2055 netdev_printk(KERN_DEBUG, ppp->dev,
2056 "PPP: reconstructed packet"
2057 " is too long (%d)\n", len);
2058 } else {
2059 tail = p;
2060 break;
2061 }
2062 ppp->nextseq = seq + 1;
2063 }
2064
2065 /*
2066 * If this is the ending fragment of a packet,
2067 * and we haven't found a complete valid packet yet,
2068 * we can discard up to and including this fragment.
2069 */
2070 if (PPP_MP_CB(p)->BEbits & E) {
2071 struct sk_buff *tmp2;
2072
2073 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2074 __skb_unlink(p, list);
2075 kfree_skb(p);
2076 }
2077 head = skb_peek(list);
2078 if (!head)
2079 break;
2080 }
2081 ++seq;
2082 }
2083
2084 /* If we have a complete packet, copy it all into one skb. */
2085 if (tail != NULL) {
2086 /* If we have discarded any fragments,
2087 signal a receive error. */
2088 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2089 if (ppp->debug & 1)
2090 netdev_printk(KERN_DEBUG, ppp->dev,
2091 " missed pkts %u..%u\n",
2092 ppp->nextseq,
2093 PPP_MP_CB(head)->sequence-1);
2094 ++ppp->dev->stats.rx_dropped;
2095 ppp_receive_error(ppp);
2096 }
2097
2098 skb = head;
2099 if (head != tail) {
2100 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2101 p = skb_queue_next(list, head);
2102 __skb_unlink(skb, list);
2103 skb_queue_walk_from_safe(list, p, tmp) {
2104 __skb_unlink(p, list);
2105 *fragpp = p;
2106 p->next = NULL;
2107 fragpp = &p->next;
2108
2109 skb->len += p->len;
2110 skb->data_len += p->len;
2111 skb->truesize += p->len;
2112
2113 if (p == tail)
2114 break;
2115 }
2116 } else {
2117 __skb_unlink(skb, list);
2118 }
2119
2120 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2121 }
2122
2123 return skb;
2124}
2125#endif /* CONFIG_PPP_MULTILINK */
2126
2127/*
2128 * Channel interface.
2129 */
2130
2131/* Create a new, unattached ppp channel. */
2132int ppp_register_channel(struct ppp_channel *chan)
2133{
2134 return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2135}
2136
2137/* Create a new, unattached ppp channel for specified net. */
2138int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2139{
2140 struct channel *pch;
2141 struct ppp_net *pn;
2142
2143 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2144 if (!pch)
2145 return -ENOMEM;
2146
2147 pn = ppp_pernet(net);
2148
2149 pch->ppp = NULL;
2150 pch->chan = chan;
2151 pch->chan_net = net;
2152 chan->ppp = pch;
2153 init_ppp_file(&pch->file, CHANNEL);
2154 pch->file.hdrlen = chan->hdrlen;
2155#ifdef CONFIG_PPP_MULTILINK
2156 pch->lastseq = -1;
2157#endif /* CONFIG_PPP_MULTILINK */
2158 init_rwsem(&pch->chan_sem);
2159 spin_lock_init(&pch->downl);
2160 rwlock_init(&pch->upl);
2161
2162 spin_lock_bh(&pn->all_channels_lock);
2163 pch->file.index = ++pn->last_channel_index;
2164 list_add(&pch->list, &pn->new_channels);
2165 atomic_inc(&channel_count);
2166 spin_unlock_bh(&pn->all_channels_lock);
2167
2168 return 0;
2169}
2170
2171/*
2172 * Return the index of a channel.
2173 */
2174int ppp_channel_index(struct ppp_channel *chan)
2175{
2176 struct channel *pch = chan->ppp;
2177
2178 if (pch)
2179 return pch->file.index;
2180 return -1;
2181}
2182
2183/*
2184 * Return the PPP unit number to which a channel is connected.
2185 */
2186int ppp_unit_number(struct ppp_channel *chan)
2187{
2188 struct channel *pch = chan->ppp;
2189 int unit = -1;
2190
2191 if (pch) {
2192 read_lock_bh(&pch->upl);
2193 if (pch->ppp)
2194 unit = pch->ppp->file.index;
2195 read_unlock_bh(&pch->upl);
2196 }
2197 return unit;
2198}
2199
2200/*
2201 * Return the PPP device interface name of a channel.
2202 */
2203char *ppp_dev_name(struct ppp_channel *chan)
2204{
2205 struct channel *pch = chan->ppp;
2206 char *name = NULL;
2207
2208 if (pch) {
2209 read_lock_bh(&pch->upl);
2210 if (pch->ppp && pch->ppp->dev)
2211 name = pch->ppp->dev->name;
2212 read_unlock_bh(&pch->upl);
2213 }
2214 return name;
2215}
2216
2217
2218/*
2219 * Disconnect a channel from the generic layer.
2220 * This must be called in process context.
2221 */
2222void
2223ppp_unregister_channel(struct ppp_channel *chan)
2224{
2225 struct channel *pch = chan->ppp;
2226 struct ppp_net *pn;
2227
2228 if (!pch)
2229 return; /* should never happen */
2230
2231 chan->ppp = NULL;
2232
2233 /*
2234	 * This ensures that we have returned from any calls into
2235	 * the channel's start_xmit or ioctl routine before we proceed.
2236 */
2237 down_write(&pch->chan_sem);
2238 spin_lock_bh(&pch->downl);
2239 pch->chan = NULL;
2240 spin_unlock_bh(&pch->downl);
2241 up_write(&pch->chan_sem);
2242 ppp_disconnect_channel(pch);
2243
2244 pn = ppp_pernet(pch->chan_net);
2245 spin_lock_bh(&pn->all_channels_lock);
2246 list_del(&pch->list);
2247 spin_unlock_bh(&pn->all_channels_lock);
2248
2249 pch->file.dead = 1;
2250 wake_up_interruptible(&pch->file.rwait);
2251 if (atomic_dec_and_test(&pch->file.refcnt))
2252 ppp_destroy_channel(pch);
2253}
2254
2255/*
2256 * Callback from a channel when it can accept more to transmit.
2257 * This should be called at BH/softirq level, not interrupt level.
2258 */
2259void
2260ppp_output_wakeup(struct ppp_channel *chan)
2261{
2262 struct channel *pch = chan->ppp;
2263
2264 if (!pch)
2265 return;
2266 ppp_channel_push(pch);
2267}
2268
2269/*
2270 * Compression control.
2271 */
2272
2273/* Process the PPPIOCSCOMPRESS ioctl. */
2274static int
2275ppp_set_compress(struct ppp *ppp, unsigned long arg)
2276{
2277 int err;
2278 struct compressor *cp, *ocomp;
2279 struct ppp_option_data data;
2280 void *state, *ostate;
2281 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2282
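	/*
	 * A CCP option is laid out as { type, length, data... }, so
	 * ccp_option[0] selects the compressor and ccp_option[1] must
	 * be at least 2 (the header bytes) and no more than the total
	 * length supplied by userspace.
	 */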
2283 err = -EFAULT;
2284 if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
2285 (data.length <= CCP_MAX_OPTION_LENGTH &&
2286 copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2287 goto out;
2288 err = -EINVAL;
2289 if (data.length > CCP_MAX_OPTION_LENGTH ||
2290 ccp_option[1] < 2 || ccp_option[1] > data.length)
2291 goto out;
2292
2293 cp = try_then_request_module(
2294 find_compressor(ccp_option[0]),
2295 "ppp-compress-%d", ccp_option[0]);
2296 if (!cp)
2297 goto out;
2298
2299 err = -ENOBUFS;
2300 if (data.transmit) {
2301 state = cp->comp_alloc(ccp_option, data.length);
2302 if (state) {
2303 ppp_xmit_lock(ppp);
2304 ppp->xstate &= ~SC_COMP_RUN;
2305 ocomp = ppp->xcomp;
2306 ostate = ppp->xc_state;
2307 ppp->xcomp = cp;
2308 ppp->xc_state = state;
2309 ppp_xmit_unlock(ppp);
2310 if (ostate) {
2311 ocomp->comp_free(ostate);
2312 module_put(ocomp->owner);
2313 }
2314 err = 0;
2315 } else
2316 module_put(cp->owner);
2317
2318 } else {
2319 state = cp->decomp_alloc(ccp_option, data.length);
2320 if (state) {
2321 ppp_recv_lock(ppp);
2322 ppp->rstate &= ~SC_DECOMP_RUN;
2323 ocomp = ppp->rcomp;
2324 ostate = ppp->rc_state;
2325 ppp->rcomp = cp;
2326 ppp->rc_state = state;
2327 ppp_recv_unlock(ppp);
2328 if (ostate) {
2329 ocomp->decomp_free(ostate);
2330 module_put(ocomp->owner);
2331 }
2332 err = 0;
2333 } else
2334 module_put(cp->owner);
2335 }
2336
2337 out:
2338 return err;
2339}
2340
2341/*
2342 * Look at a CCP packet and update our state accordingly.
2343 * We assume the caller has the xmit or recv path locked.
2344 */
2345static void
2346ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2347{
2348 unsigned char *dp;
2349 int len;
2350
2351 if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2352 return; /* no header */
2353 dp = skb->data + 2;
2354
2355 switch (CCP_CODE(dp)) {
2356 case CCP_CONFREQ:
2357
2358 /* A ConfReq starts negotiation of compression
2359 * in one direction of transmission,
2360 * and hence brings it down...but which way?
2361 *
2362 * Remember:
2363 * A ConfReq indicates what the sender would like to receive
2364 */
2365		if (inbound)
2366			/* The peer is proposing what we should send */
2367			ppp->xstate &= ~SC_COMP_RUN;
2368		else
2369			/* We are proposing what the peer should send */
2370 ppp->rstate &= ~SC_DECOMP_RUN;
2371
2372 break;
2373
2374 case CCP_TERMREQ:
2375 case CCP_TERMACK:
2376 /*
2377 * CCP is going down, both directions of transmission
2378 */
2379 ppp->rstate &= ~SC_DECOMP_RUN;
2380 ppp->xstate &= ~SC_COMP_RUN;
2381 break;
2382
2383 case CCP_CONFACK:
2384 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2385 break;
2386 len = CCP_LENGTH(dp);
2387 if (!pskb_may_pull(skb, len + 2))
2388 return; /* too short */
2389 dp += CCP_HDRLEN;
2390 len -= CCP_HDRLEN;
2391 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2392 break;
2393 if (inbound) {
2394 /* we will start receiving compressed packets */
2395 if (!ppp->rc_state)
2396 break;
2397 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2398 ppp->file.index, 0, ppp->mru, ppp->debug)) {
2399 ppp->rstate |= SC_DECOMP_RUN;
2400 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2401 }
2402 } else {
2403 /* we will soon start sending compressed packets */
2404 if (!ppp->xc_state)
2405 break;
2406 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2407 ppp->file.index, 0, ppp->debug))
2408 ppp->xstate |= SC_COMP_RUN;
2409 }
2410 break;
2411
2412 case CCP_RESETACK:
2413 /* reset the [de]compressor */
2414 if ((ppp->flags & SC_CCP_UP) == 0)
2415 break;
2416 if (inbound) {
2417 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2418 ppp->rcomp->decomp_reset(ppp->rc_state);
2419 ppp->rstate &= ~SC_DC_ERROR;
2420 }
2421 } else {
2422 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2423 ppp->xcomp->comp_reset(ppp->xc_state);
2424 }
2425 break;
2426 }
2427}
2428
2429/* Free up compression resources. */
2430static void
2431ppp_ccp_closed(struct ppp *ppp)
2432{
2433 void *xstate, *rstate;
2434 struct compressor *xcomp, *rcomp;
2435
2436 ppp_lock(ppp);
2437 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2438 ppp->xstate = 0;
2439 xcomp = ppp->xcomp;
2440 xstate = ppp->xc_state;
2441 ppp->xc_state = NULL;
2442 ppp->rstate = 0;
2443 rcomp = ppp->rcomp;
2444 rstate = ppp->rc_state;
2445 ppp->rc_state = NULL;
2446 ppp_unlock(ppp);
2447
2448 if (xstate) {
2449 xcomp->comp_free(xstate);
2450 module_put(xcomp->owner);
2451 }
2452 if (rstate) {
2453 rcomp->decomp_free(rstate);
2454 module_put(rcomp->owner);
2455 }
2456}
2457
2458/* List of compressors. */
2459static LIST_HEAD(compressor_list);
2460static DEFINE_SPINLOCK(compressor_list_lock);
2461
2462struct compressor_entry {
2463 struct list_head list;
2464 struct compressor *comp;
2465};
2466
2467static struct compressor_entry *
2468find_comp_entry(int proto)
2469{
2470 struct compressor_entry *ce;
2471
2472 list_for_each_entry(ce, &compressor_list, list) {
2473 if (ce->comp->compress_proto == proto)
2474 return ce;
2475 }
2476 return NULL;
2477}
2478
2479/* Register a compressor */
2480int
2481ppp_register_compressor(struct compressor *cp)
2482{
2483 struct compressor_entry *ce;
2484 int ret;
2485 spin_lock(&compressor_list_lock);
2486 ret = -EEXIST;
2487 if (find_comp_entry(cp->compress_proto))
2488 goto out;
2489 ret = -ENOMEM;
2490 ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2491 if (!ce)
2492 goto out;
2493 ret = 0;
2494 ce->comp = cp;
2495 list_add(&ce->list, &compressor_list);
2496 out:
2497 spin_unlock(&compressor_list_lock);
2498 return ret;
2499}
2500
2501/* Unregister a compressor */
2502void
2503ppp_unregister_compressor(struct compressor *cp)
2504{
2505 struct compressor_entry *ce;
2506
2507 spin_lock(&compressor_list_lock);
2508 ce = find_comp_entry(cp->compress_proto);
2509 if (ce && ce->comp == cp) {
2510 list_del(&ce->list);
2511 kfree(ce);
2512 }
2513 spin_unlock(&compressor_list_lock);
2514}
2515
2516/* Find a compressor. */
2517static struct compressor *
2518find_compressor(int type)
2519{
2520 struct compressor_entry *ce;
2521 struct compressor *cp = NULL;
2522
2523 spin_lock(&compressor_list_lock);
2524 ce = find_comp_entry(type);
2525 if (ce) {
2526 cp = ce->comp;
2527 if (!try_module_get(cp->owner))
2528 cp = NULL;
2529 }
2530 spin_unlock(&compressor_list_lock);
2531 return cp;
2532}
2533
2534/*
2535 * Miscellaneous stuff.
2536 */
2537
2538static void
2539ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2540{
2541 struct slcompress *vj = ppp->vj;
2542
2543 memset(st, 0, sizeof(*st));
2544 st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
2545 st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2546 st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
2547 st->p.ppp_opackets = ppp->dev->stats.tx_packets;
2548 st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2549 st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
2550 if (!vj)
2551 return;
2552 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2553 st->vj.vjs_compressed = vj->sls_o_compressed;
2554 st->vj.vjs_searches = vj->sls_o_searches;
2555 st->vj.vjs_misses = vj->sls_o_misses;
2556 st->vj.vjs_errorin = vj->sls_i_error;
2557 st->vj.vjs_tossed = vj->sls_i_tossed;
2558 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2559 st->vj.vjs_compressedin = vj->sls_i_compressed;
2560}
2561
2562/*
2563 * Stuff for handling the lists of ppp units and channels
2564 * and for initialization.
2565 */
2566
2567/*
2568 * Create a new ppp interface unit. Fails if it can't allocate memory
2569 * or if there is already a unit with the requested number.
2570 * unit == -1 means allocate a new number.
2571 */
2572static struct ppp *
2573ppp_create_interface(struct net *net, int unit, int *retp)
2574{
2575 struct ppp *ppp;
2576 struct ppp_net *pn;
2577 struct net_device *dev = NULL;
2578 int ret = -ENOMEM;
2579 int i;
2580
2581 dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
2582 if (!dev)
2583 goto out1;
2584
2585 pn = ppp_pernet(net);
2586
2587 ppp = netdev_priv(dev);
2588 ppp->dev = dev;
2589 ppp->mru = PPP_MRU;
2590 init_ppp_file(&ppp->file, INTERFACE);
2591 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
2592 for (i = 0; i < NUM_NP; ++i)
2593 ppp->npmode[i] = NPMODE_PASS;
2594 INIT_LIST_HEAD(&ppp->channels);
2595 spin_lock_init(&ppp->rlock);
2596 spin_lock_init(&ppp->wlock);
2597#ifdef CONFIG_PPP_MULTILINK
2598 ppp->minseq = -1;
2599 skb_queue_head_init(&ppp->mrq);
2600#endif /* CONFIG_PPP_MULTILINK */
2601
2602 /*
2603 * drum roll: don't forget to set
2604	 * the net namespace this device belongs to
2605 */
2606 dev_net_set(dev, net);
2607
2608 mutex_lock(&pn->all_ppp_mutex);
2609
2610 if (unit < 0) {
2611 unit = unit_get(&pn->units_idr, ppp);
2612 if (unit < 0) {
2613 ret = unit;
2614 goto out2;
2615 }
2616 } else {
2617 ret = -EEXIST;
2618 if (unit_find(&pn->units_idr, unit))
2619 goto out2; /* unit already exists */
2620		/*
2621		 * If the caller needs a specific unit number,
2622		 * try to satisfy the request; otherwise the
2623		 * caller should ask us for a new unit number.
2624		 *
2625		 * NOTE: returning EEXIST here is not strictly
2626		 * fair, but at least pppd will then ask us to
2627		 * allocate a new unit, so the user is happy :)
2628		 */
2629 unit = unit_set(&pn->units_idr, ppp, unit);
2630 if (unit < 0)
2631 goto out2;
2632 }
2633
2634 /* Initialize the new ppp unit */
2635 ppp->file.index = unit;
2636 sprintf(dev->name, "ppp%d", unit);
2637
2638 ret = register_netdev(dev);
2639 if (ret != 0) {
2640 unit_put(&pn->units_idr, unit);
2641 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2642 dev->name, ret);
2643 goto out2;
2644 }
2645
2646 ppp->ppp_net = net;
2647
2648 atomic_inc(&ppp_unit_count);
2649 mutex_unlock(&pn->all_ppp_mutex);
2650
2651 *retp = 0;
2652 return ppp;
2653
2654out2:
2655 mutex_unlock(&pn->all_ppp_mutex);
2656 free_netdev(dev);
2657out1:
2658 *retp = ret;
2659 return NULL;
2660}
2661
2662/*
2663 * Initialize a ppp_file structure.
2664 */
2665static void
2666init_ppp_file(struct ppp_file *pf, int kind)
2667{
2668 pf->kind = kind;
2669 skb_queue_head_init(&pf->xq);
2670 skb_queue_head_init(&pf->rq);
2671 atomic_set(&pf->refcnt, 1);
2672 init_waitqueue_head(&pf->rwait);
2673}
2674
2675/*
2676 * Take down a ppp interface unit - called when the owning file
2677 * (the one that created the unit) is closed or detached.
2678 */
2679static void ppp_shutdown_interface(struct ppp *ppp)
2680{
2681 struct ppp_net *pn;
2682
2683 pn = ppp_pernet(ppp->ppp_net);
2684 mutex_lock(&pn->all_ppp_mutex);
2685
2686 /* This will call dev_close() for us. */
2687 ppp_lock(ppp);
2688 if (!ppp->closing) {
2689 ppp->closing = 1;
2690 ppp_unlock(ppp);
2691 unregister_netdev(ppp->dev);
2692 unit_put(&pn->units_idr, ppp->file.index);
2693 } else
2694 ppp_unlock(ppp);
2695
2696 ppp->file.dead = 1;
2697 ppp->owner = NULL;
2698 wake_up_interruptible(&ppp->file.rwait);
2699
2700 mutex_unlock(&pn->all_ppp_mutex);
2701}
2702
2703/*
2704 * Free the memory used by a ppp unit. This is only called once
2705 * there are no channels connected to the unit and no file structs
2706 * that reference the unit.
2707 */
2708static void ppp_destroy_interface(struct ppp *ppp)
2709{
2710 atomic_dec(&ppp_unit_count);
2711
2712 if (!ppp->file.dead || ppp->n_channels) {
2713 /* "can't happen" */
2714 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2715 "but dead=%d n_channels=%d !\n",
2716 ppp, ppp->file.dead, ppp->n_channels);
2717 return;
2718 }
2719
2720 ppp_ccp_closed(ppp);
2721 if (ppp->vj) {
2722 slhc_free(ppp->vj);
2723 ppp->vj = NULL;
2724 }
2725 skb_queue_purge(&ppp->file.xq);
2726 skb_queue_purge(&ppp->file.rq);
2727#ifdef CONFIG_PPP_MULTILINK
2728 skb_queue_purge(&ppp->mrq);
2729#endif /* CONFIG_PPP_MULTILINK */
2730#ifdef CONFIG_PPP_FILTER
2731 kfree(ppp->pass_filter);
2732 ppp->pass_filter = NULL;
2733 kfree(ppp->active_filter);
2734 ppp->active_filter = NULL;
2735#endif /* CONFIG_PPP_FILTER */
2736
2737 kfree_skb(ppp->xmit_pending);
2738
2739 free_netdev(ppp->dev);
2740}
2741
2742/*
2743 * Locate an existing ppp unit.
2744 * The caller should have locked the all_ppp_mutex.
2745 */
2746static struct ppp *
2747ppp_find_unit(struct ppp_net *pn, int unit)
2748{
2749 return unit_find(&pn->units_idr, unit);
2750}
2751
2752/*
2753 * Locate an existing ppp channel.
2754 * The caller should have locked the all_channels_lock.
2755 * First we look in the new_channels list, then in the
2756 * all_channels list. If found in the new_channels list,
2757 * we move it to the all_channels list. This is for speed
2758 * when we have a lot of channels in use.
2759 */
2760static struct channel *
2761ppp_find_channel(struct ppp_net *pn, int unit)
2762{
2763 struct channel *pch;
2764
2765 list_for_each_entry(pch, &pn->new_channels, list) {
2766 if (pch->file.index == unit) {
2767 list_move(&pch->list, &pn->all_channels);
2768 return pch;
2769 }
2770 }
2771
2772 list_for_each_entry(pch, &pn->all_channels, list) {
2773 if (pch->file.index == unit)
2774 return pch;
2775 }
2776
2777 return NULL;
2778}
2779
2780/*
2781 * Connect a PPP channel to a PPP interface unit.
2782 */
2783static int
2784ppp_connect_channel(struct channel *pch, int unit)
2785{
2786 struct ppp *ppp;
2787 struct ppp_net *pn;
2788 int ret = -ENXIO;
2789 int hdrlen;
2790
2791 pn = ppp_pernet(pch->chan_net);
2792
2793 mutex_lock(&pn->all_ppp_mutex);
2794 ppp = ppp_find_unit(pn, unit);
2795 if (!ppp)
2796 goto out;
2797 write_lock_bh(&pch->upl);
2798 ret = -EINVAL;
2799 if (pch->ppp)
2800 goto outl;
2801
2802 ppp_lock(ppp);
2803 if (pch->file.hdrlen > ppp->file.hdrlen)
2804 ppp->file.hdrlen = pch->file.hdrlen;
2805 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
2806 if (hdrlen > ppp->dev->hard_header_len)
2807 ppp->dev->hard_header_len = hdrlen;
2808 list_add_tail(&pch->clist, &ppp->channels);
2809 ++ppp->n_channels;
2810 pch->ppp = ppp;
2811 atomic_inc(&ppp->file.refcnt);
2812 ppp_unlock(ppp);
2813 ret = 0;
2814
2815 outl:
2816 write_unlock_bh(&pch->upl);
2817 out:
2818 mutex_unlock(&pn->all_ppp_mutex);
2819 return ret;
2820}
2821
2822/*
2823 * Disconnect a channel from its ppp unit.
2824 */
2825static int
2826ppp_disconnect_channel(struct channel *pch)
2827{
2828 struct ppp *ppp;
2829 int err = -EINVAL;
2830
2831 write_lock_bh(&pch->upl);
2832 ppp = pch->ppp;
2833 pch->ppp = NULL;
2834 write_unlock_bh(&pch->upl);
2835 if (ppp) {
2836 /* remove it from the ppp unit's list */
2837 ppp_lock(ppp);
2838 list_del(&pch->clist);
2839 if (--ppp->n_channels == 0)
2840 wake_up_interruptible(&ppp->file.rwait);
2841 ppp_unlock(ppp);
2842 if (atomic_dec_and_test(&ppp->file.refcnt))
2843 ppp_destroy_interface(ppp);
2844 err = 0;
2845 }
2846 return err;
2847}
2848
2849/*
2850 * Free up the resources used by a ppp channel.
2851 */
2852static void ppp_destroy_channel(struct channel *pch)
2853{
2854 atomic_dec(&channel_count);
2855
2856 if (!pch->file.dead) {
2857 /* "can't happen" */
2858 pr_err("ppp: destroying undead channel %p !\n", pch);
2859 return;
2860 }
2861 skb_queue_purge(&pch->file.xq);
2862 skb_queue_purge(&pch->file.rq);
2863 kfree(pch);
2864}
2865
2866static void __exit ppp_cleanup(void)
2867{
2868 /* should never happen */
2869 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2870 pr_err("PPP: removing module but units remain!\n");
2871 unregister_chrdev(PPP_MAJOR, "ppp");
2872 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2873 class_destroy(ppp_class);
2874 unregister_pernet_device(&ppp_net_ops);
2875}
2876
2877/*
2878 * Units handling. Caller must protect concurrent access
2879 * by holding all_ppp_mutex
2880 */
2881
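/*
 * idr_pre_get() preallocates memory for the idr; idr_get_new_above()
 * returns -EAGAIN if that reserve proves insufficient, in which case
 * we simply preallocate and retry.
 */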
2882static int __unit_alloc(struct idr *p, void *ptr, int n)
2883{
2884 int unit, err;
2885
2886again:
2887 if (!idr_pre_get(p, GFP_KERNEL)) {
2888 pr_err("PPP: No free memory for idr\n");
2889 return -ENOMEM;
2890 }
2891
2892 err = idr_get_new_above(p, ptr, n, &unit);
2893 if (err < 0) {
2894 if (err == -EAGAIN)
2895 goto again;
2896 return err;
2897 }
2898
2899 return unit;
2900}
2901
2902/* associate pointer with specified number */
2903static int unit_set(struct idr *p, void *ptr, int n)
2904{
2905 int unit;
2906
2907 unit = __unit_alloc(p, ptr, n);
2908 if (unit < 0)
2909 return unit;
2910 else if (unit != n) {
2911 idr_remove(p, unit);
2912 return -EINVAL;
2913 }
2914
2915 return unit;
2916}
2917
2918/* get new free unit number and associate pointer with it */
2919static int unit_get(struct idr *p, void *ptr)
2920{
2921 return __unit_alloc(p, ptr, 0);
2922}
2923
2924/* put unit number back to a pool */
2925static void unit_put(struct idr *p, int n)
2926{
2927 idr_remove(p, n);
2928}
2929
2930/* get pointer associated with the number */
2931static void *unit_find(struct idr *p, int n)
2932{
2933 return idr_find(p, n);
2934}
2935
2936/* Module/initialization stuff */
2937
2938module_init(ppp_init);
2939module_exit(ppp_cleanup);
2940
2941EXPORT_SYMBOL(ppp_register_net_channel);
2942EXPORT_SYMBOL(ppp_register_channel);
2943EXPORT_SYMBOL(ppp_unregister_channel);
2944EXPORT_SYMBOL(ppp_channel_index);
2945EXPORT_SYMBOL(ppp_unit_number);
2946EXPORT_SYMBOL(ppp_dev_name);
2947EXPORT_SYMBOL(ppp_input);
2948EXPORT_SYMBOL(ppp_input_error);
2949EXPORT_SYMBOL(ppp_output_wakeup);
2950EXPORT_SYMBOL(ppp_register_compressor);
2951EXPORT_SYMBOL(ppp_unregister_compressor);
2952MODULE_LICENSE("GPL");
2953MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
2954MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
new file mode 100644
index 000000000000..9a1849a83e2a
--- /dev/null
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -0,0 +1,740 @@
1/*
2 * ppp_mppe.c - interface MPPE to the PPP code.
3 * This version is for use with Linux kernel 2.6.14+
4 *
5 * By Frank Cusack <fcusack@fcusack.com>.
6 * Copyright (c) 2002,2003,2004 Google, Inc.
7 * All rights reserved.
8 *
9 * License:
10 * Permission to use, copy, modify, and distribute this software and its
11 * documentation is hereby granted, provided that the above copyright
12 * notice appears in all copies. This software is provided without any
13 * warranty, express or implied.
14 *
15 * ALTERNATIVELY, provided that this notice is retained in full, this product
16 * may be distributed under the terms of the GNU General Public License (GPL),
17 * in which case the provisions of the GPL apply INSTEAD OF those given above.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 *
33 *
34 * Changelog:
35 * 08/12/05 - Matt Domsch <Matt_Domsch@dell.com>
36 * Only need extra skb padding on transmit, not receive.
37 * 06/18/04 - Matt Domsch <Matt_Domsch@dell.com>, Oleg Makarenko <mole@quadra.ru>
38 * Use Linux kernel 2.6 arc4 and sha1 routines rather than
39 * providing our own.
40 * 2/15/04 - TS: added #include <version.h> and testing for Kernel
41 * version before using
42 * MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are
43 * deprecated in 2.6
44 */
45
46#include <linux/err.h>
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/types.h>
51#include <linux/slab.h>
52#include <linux/string.h>
53#include <linux/crypto.h>
54#include <linux/mm.h>
55#include <linux/ppp_defs.h>
56#include <linux/ppp-comp.h>
57#include <linux/scatterlist.h>
58#include <asm/unaligned.h>
59
60#include "ppp_mppe.h"
61
62MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
63MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
64MODULE_LICENSE("Dual BSD/GPL");
65MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
66MODULE_VERSION("1.0.2");
67
68static unsigned int
69setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
70{
71 sg_set_buf(sg, address, length);
72 return length;
73}
74
75#define SHA1_PAD_SIZE 40
76
77/*
78 * The kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
79 * static data area. That means sha_pad needs to be kmalloc'd.
80 */
81
82struct sha_pad {
83 unsigned char sha_pad1[SHA1_PAD_SIZE];
84 unsigned char sha_pad2[SHA1_PAD_SIZE];
85};
86static struct sha_pad *sha_pad;
87
88static inline void sha_pad_init(struct sha_pad *shapad)
89{
90 memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
91 memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
92}
93
94/*
95 * State for an MPPE (de)compressor.
96 */
97struct ppp_mppe_state {
98 struct crypto_blkcipher *arc4;
99 struct crypto_hash *sha1;
100 unsigned char *sha1_digest;
101 unsigned char master_key[MPPE_MAX_KEY_LEN];
102 unsigned char session_key[MPPE_MAX_KEY_LEN];
103 unsigned keylen; /* key length in bytes */
104 /* NB: 128-bit == 16, 40-bit == 8! */
105 /* If we want to support 56-bit, */
106 /* the unit has to change to bits */
107 unsigned char bits; /* MPPE control bits */
108 unsigned ccount; /* 12-bit coherency count (seqno) */
109 unsigned stateful; /* stateful mode flag */
110 int discard; /* stateful mode packet loss flag */
111 int sanity_errors; /* take down LCP if too many */
112 int unit;
113 int debug;
114 struct compstat stats;
115};
116
117/* struct ppp_mppe_state.bits definitions */
118#define MPPE_BIT_A	0x80	/* Encryption table was (re)initialized */
119#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */
120#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */
121#define MPPE_BIT_D 0x10 /* This is an encrypted frame */
122
123#define MPPE_BIT_FLUSHED MPPE_BIT_A
124#define MPPE_BIT_ENCRYPTED MPPE_BIT_D
125
126#define MPPE_BITS(p) ((p)[4] & 0xf0)
127#define MPPE_CCOUNT(p) ((((p)[4] & 0x0f) << 8) + (p)[5])
128#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */
129
130#define MPPE_OVHD 2 /* MPPE overhead/packet */
131#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */
132
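
As an illustration (not part of the patch), the two macros above pull the four control bits and the 12-bit coherency count out of bytes 4 and 5 of the frame, i.e. right after the address, control, and two protocol bytes. A minimal stand-alone sketch of the same parsing:

	#include <stdio.h>

	int main(void)
	{
		/* addr, ctrl, proto (PPP_COMP), then the two MPPE overhead bytes */
		unsigned char pkt[] = { 0xff, 0x03, 0x00, 0xfd, 0x90, 0x01 };

		unsigned bits   = pkt[4] & 0xf0;                   /* MPPE_BITS(pkt)   */
		unsigned ccount = ((pkt[4] & 0x0f) << 8) + pkt[5]; /* MPPE_CCOUNT(pkt) */

		/* prints: bits=0x90 (FLUSHED|ENCRYPTED) ccount=1 */
		printf("bits=0x%02x ccount=%u\n", bits, ccount);
		return 0;
	}
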
133/*
134 * Key Derivation, from RFC 3078, RFC 3079.
135 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
136 */
137static void get_new_key_from_sha(struct ppp_mppe_state * state)
138{
139 struct hash_desc desc;
140 struct scatterlist sg[4];
141 unsigned int nbytes;
142
143 sg_init_table(sg, 4);
144
145 nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
146 nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
147 sizeof(sha_pad->sha_pad1));
148 nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
149 nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
150 sizeof(sha_pad->sha_pad2));
151
152 desc.tfm = state->sha1;
153 desc.flags = 0;
154
155 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
156}
157
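
Outside the kernel crypto API, the same Get_Key() construction from RFC 3079 can be sketched with any one-shot SHA-1 helper. In the sketch below, sha1() is a hypothetical stand-in, not a real kernel or libc interface:

	#include <string.h>

	/* Hypothetical one-shot helper: 20-byte digest of buf[0..len). */
	void sha1(const unsigned char *buf, size_t len, unsigned char *digest);

	/* Sketch of Get_Key(): SHA1(MasterKey | 40*0x00 | SessionKey | 40*0xF2) */
	void get_key_sketch(const unsigned char *master_key,
			    const unsigned char *session_key,
			    unsigned keylen, unsigned char *digest)
	{
		unsigned char buf[16 + 40 + 16 + 40];
		unsigned char *p = buf;

		memcpy(p, master_key, keylen);  p += keylen;
		memset(p, 0x00, 40);            p += 40;   /* sha_pad1 */
		memcpy(p, session_key, keylen); p += keylen;
		memset(p, 0xF2, 40);            p += 40;   /* sha_pad2 */

		sha1(buf, p - buf, digest);   /* new key = first keylen bytes */
	}
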
158/*
159 * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
160 * Well, not what's written there, but rather what they meant.
161 */
162static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
163{
164 struct scatterlist sg_in[1], sg_out[1];
165 struct blkcipher_desc desc = { .tfm = state->arc4 };
166
167 get_new_key_from_sha(state);
168 if (!initial_key) {
169 crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
170 state->keylen);
171 sg_init_table(sg_in, 1);
172 sg_init_table(sg_out, 1);
173 setup_sg(sg_in, state->sha1_digest, state->keylen);
174 setup_sg(sg_out, state->session_key, state->keylen);
175 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
176 state->keylen) != 0) {
177 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
178 }
179 } else {
180 memcpy(state->session_key, state->sha1_digest, state->keylen);
181 }
182 if (state->keylen == 8) {
183 /* See RFC 3078 */
184 state->session_key[0] = 0xd1;
185 state->session_key[1] = 0x26;
186 state->session_key[2] = 0x9e;
187 }
188 crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
189}
190
191/*
192 * Allocate space for a (de)compressor.
193 */
194static void *mppe_alloc(unsigned char *options, int optlen)
195{
196 struct ppp_mppe_state *state;
197 unsigned int digestsize;
198
199 if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
200 options[0] != CI_MPPE || options[1] != CILEN_MPPE)
201 goto out;
202
203 state = kzalloc(sizeof(*state), GFP_KERNEL);
204 if (state == NULL)
205 goto out;
206
207
208 state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
209 if (IS_ERR(state->arc4)) {
210 state->arc4 = NULL;
211 goto out_free;
212 }
213
214 state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
215 if (IS_ERR(state->sha1)) {
216 state->sha1 = NULL;
217 goto out_free;
218 }
219
220 digestsize = crypto_hash_digestsize(state->sha1);
221 if (digestsize < MPPE_MAX_KEY_LEN)
222 goto out_free;
223
224 state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
225 if (!state->sha1_digest)
226 goto out_free;
227
228 /* Save keys. */
229 memcpy(state->master_key, &options[CILEN_MPPE],
230 sizeof(state->master_key));
231 memcpy(state->session_key, state->master_key,
232 sizeof(state->master_key));
233
234 /*
235 * We defer initial key generation until mppe_init(), as mppe_alloc()
236 * is called frequently during negotiation.
237 */
238
239 return (void *)state;
240
241 out_free:
242 if (state->sha1_digest)
243 kfree(state->sha1_digest);
244 if (state->sha1)
245 crypto_free_hash(state->sha1);
246 if (state->arc4)
247 crypto_free_blkcipher(state->arc4);
248 kfree(state);
249 out:
250 return NULL;
251}
252
253/*
254 * Deallocate space for a (de)compressor.
255 */
256static void mppe_free(void *arg)
257{
258 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
259 if (state) {
260 if (state->sha1_digest)
261 kfree(state->sha1_digest);
262 if (state->sha1)
263 crypto_free_hash(state->sha1);
264 if (state->arc4)
265 crypto_free_blkcipher(state->arc4);
266 kfree(state);
267 }
268}
269
270/*
271 * Initialize (de)compressor state.
272 */
273static int
274mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
275 const char *debugstr)
276{
277 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
278 unsigned char mppe_opts;
279
280 if (optlen != CILEN_MPPE ||
281 options[0] != CI_MPPE || options[1] != CILEN_MPPE)
282 return 0;
283
284 MPPE_CI_TO_OPTS(&options[2], mppe_opts);
285 if (mppe_opts & MPPE_OPT_128)
286 state->keylen = 16;
287 else if (mppe_opts & MPPE_OPT_40)
288 state->keylen = 8;
289 else {
290 printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
291 unit);
292 return 0;
293 }
294 if (mppe_opts & MPPE_OPT_STATEFUL)
295 state->stateful = 1;
296
297 /* Generate the initial session key. */
298 mppe_rekey(state, 1);
299
300 if (debug) {
301 int i;
302 char mkey[sizeof(state->master_key) * 2 + 1];
303 char skey[sizeof(state->session_key) * 2 + 1];
304
305 printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
306 debugstr, unit, (state->keylen == 16) ? 128 : 40,
307 (state->stateful) ? "stateful" : "stateless");
308
309 for (i = 0; i < sizeof(state->master_key); i++)
310 sprintf(mkey + i * 2, "%02x", state->master_key[i]);
311 for (i = 0; i < sizeof(state->session_key); i++)
312 sprintf(skey + i * 2, "%02x", state->session_key[i]);
313 printk(KERN_DEBUG
314 "%s[%d]: keys: master: %s initial session: %s\n",
315 debugstr, unit, mkey, skey);
316 }
317
318 /*
319 * Initialize the coherency count. The initial value is not specified
320 * in RFC 3078, but we can make a reasonable assumption that it will
321 * start at 0. Setting it to the max here makes the comp/decomp code
322 * do the right thing (determined through experiment).
323 */
324 state->ccount = MPPE_CCOUNT_SPACE - 1;
325
326 /*
327 * Note that even though we have initialized the key table, we don't
328 * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1.
329 */
330 state->bits = MPPE_BIT_ENCRYPTED;
331
332 state->unit = unit;
333 state->debug = debug;
334
335 return 1;
336}
337
338static int
339mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
340 int hdrlen, int debug)
341{
342 /* ARGSUSED */
343 return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
344}
345
346/*
347 * We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
348 * tell the compressor to rekey. Note that we MUST NOT rekey for
349 * every CCP Reset-Request; we only rekey on the next xmit packet.
350 * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
351 * So, rekeying for every CCP Reset-Request is broken as the peer will not
352 * know how many times we've rekeyed. (If we rekey and THEN get another
353 * CCP Reset-Request, we must rekey again.)
354 */
355static void mppe_comp_reset(void *arg)
356{
357 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
358
359 state->bits |= MPPE_BIT_FLUSHED;
360}
361
362/*
363 * Compress (encrypt) a packet.
364 * It's strange to call this a compressor, since the output is always
365 * MPPE_OVHD + 2 bytes larger than the input.
366 */
367static int
368mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
369 int isize, int osize)
370{
371 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
372 struct blkcipher_desc desc = { .tfm = state->arc4 };
373 int proto;
374 struct scatterlist sg_in[1], sg_out[1];
375
376 /*
377 * Check that the protocol is in the range we handle.
378 */
379 proto = PPP_PROTOCOL(ibuf);
380 if (proto < 0x0021 || proto > 0x00fa)
381 return 0;
382
383 /* Make sure we have enough room to generate an encrypted packet. */
384 if (osize < isize + MPPE_OVHD + 2) {
385 /* Drop the packet if we should encrypt it, but can't. */
386 printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
387 "(have: %d need: %d)\n", state->unit,
388		       osize, isize + MPPE_OVHD + 2);
389 return -1;
390 }
391
392 osize = isize + MPPE_OVHD + 2;
393
394 /*
395 * Copy over the PPP header and set control bits.
396 */
397 obuf[0] = PPP_ADDRESS(ibuf);
398 obuf[1] = PPP_CONTROL(ibuf);
399 put_unaligned_be16(PPP_COMP, obuf + 2);
400 obuf += PPP_HDRLEN;
401
402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
403 if (state->debug >= 7)
404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
405 state->ccount);
406 put_unaligned_be16(state->ccount, obuf);
407
408 if (!state->stateful || /* stateless mode */
409 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
410 (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */
411 /* We must rekey */
412 if (state->debug && state->stateful)
413 printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
414 state->unit);
415 mppe_rekey(state, 0);
416 state->bits |= MPPE_BIT_FLUSHED;
417 }
418 obuf[0] |= state->bits;
419 state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */
420
421 obuf += MPPE_OVHD;
422 ibuf += 2; /* skip to proto field */
423 isize -= 2;
424
425 /* Encrypt packet */
426 sg_init_table(sg_in, 1);
427 sg_init_table(sg_out, 1);
428 setup_sg(sg_in, ibuf, isize);
429 setup_sg(sg_out, obuf, osize);
430 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
431		printk(KERN_DEBUG "crypto_blkcipher_encrypt failed\n");
432 return -1;
433 }
434
435 state->stats.unc_bytes += isize;
436 state->stats.unc_packets++;
437 state->stats.comp_bytes += osize;
438 state->stats.comp_packets++;
439
440 return osize;
441}
442
443/*
444 * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
445 * to look bad ... and the longer the link is up the worse it will get.
446 */
447static void mppe_comp_stats(void *arg, struct compstat *stats)
448{
449 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
450
451 *stats = state->stats;
452}
453
454static int
455mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
456 int hdrlen, int mru, int debug)
457{
458 /* ARGSUSED */
459 return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
460}
461
462/*
463 * We received a CCP Reset-Ack. Just ignore it.
464 */
465static void mppe_decomp_reset(void *arg)
466{
467 /* ARGSUSED */
468 return;
469}
470
471/*
472 * Decompress (decrypt) an MPPE packet.
473 */
474static int
475mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
476 int osize)
477{
478 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
479 struct blkcipher_desc desc = { .tfm = state->arc4 };
480 unsigned ccount;
481 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
482 int sanity = 0;
483 struct scatterlist sg_in[1], sg_out[1];
484
485 if (isize <= PPP_HDRLEN + MPPE_OVHD) {
486 if (state->debug)
487 printk(KERN_DEBUG
488 "mppe_decompress[%d]: short pkt (%d)\n",
489 state->unit, isize);
490 return DECOMP_ERROR;
491 }
492
493 /*
494 * Make sure we have enough room to decrypt the packet.
495 * Note that for our test we only subtract 1 byte whereas in
496 * mppe_compress() we added 2 bytes (+MPPE_OVHD);
497 * this is to account for possible PFC.
498 */
499 if (osize < isize - MPPE_OVHD - 1) {
500 printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
501 "(have: %d need: %d)\n", state->unit,
502 osize, isize - MPPE_OVHD - 1);
503 return DECOMP_ERROR;
504 }
505 osize = isize - MPPE_OVHD - 2; /* assume no PFC */
506
507 ccount = MPPE_CCOUNT(ibuf);
508 if (state->debug >= 7)
509 printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
510 state->unit, ccount);
511
512 /* sanity checks -- terminate with extreme prejudice */
513 if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
514 printk(KERN_DEBUG
515 "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
516 state->unit);
517 state->sanity_errors += 100;
518 sanity = 1;
519 }
520 if (!state->stateful && !flushed) {
521 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
522 "stateless mode!\n", state->unit);
523 state->sanity_errors += 100;
524 sanity = 1;
525 }
526 if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
527 printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
528 "flag packet!\n", state->unit);
529 state->sanity_errors += 100;
530 sanity = 1;
531 }
532
533 if (sanity) {
534 if (state->sanity_errors < SANITY_MAX)
535 return DECOMP_ERROR;
536 else
537 /*
538 * Take LCP down if the peer is sending too many bogons.
539 * We don't want to do this for a single or just a few
540 * instances since it could just be due to packet corruption.
541 */
542 return DECOMP_FATALERROR;
543 }
544
545 /*
546 * Check the coherency count.
547 */
548
549 if (!state->stateful) {
550 /* RFC 3078, sec 8.1. Rekey for every packet. */
551 while (state->ccount != ccount) {
552 mppe_rekey(state, 0);
553 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
554 }
555 } else {
556 /* RFC 3078, sec 8.2. */
557 if (!state->discard) {
558 /* normal state */
559 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
560 if (ccount != state->ccount) {
561 /*
562 * (ccount > state->ccount)
563 * Packet loss detected, enter the discard state.
564 * Signal the peer to rekey (by sending a CCP Reset-Request).
565 */
566 state->discard = 1;
567 return DECOMP_ERROR;
568 }
569 } else {
570 /* discard state */
571 if (!flushed) {
572 /* ccp.c will be silent (no additional CCP Reset-Requests). */
573 return DECOMP_ERROR;
574 } else {
575 /* Rekey for every missed "flag" packet. */
576 while ((ccount & ~0xff) !=
577 (state->ccount & ~0xff)) {
578 mppe_rekey(state, 0);
579 state->ccount =
580 (state->ccount +
581 256) % MPPE_CCOUNT_SPACE;
582 }
583
584 /* reset */
585 state->discard = 0;
586 state->ccount = ccount;
587 /*
588 * Another problem with RFC 3078 here. It implies that the
589 * peer need not send a Reset-Ack packet. But RFC 1962
590 * requires it. Hopefully, M$ does send a Reset-Ack; even
591 * though it isn't required for MPPE synchronization, it is
592 * required to reset CCP state.
593 */
594 }
595 }
596 if (flushed)
597 mppe_rekey(state, 0);
598 }
599
600 /*
601 * Fill in the first part of the PPP header. The protocol field
602 * comes from the decrypted data.
603 */
604 obuf[0] = PPP_ADDRESS(ibuf); /* +1 */
605 obuf[1] = PPP_CONTROL(ibuf); /* +1 */
606 obuf += 2;
607 ibuf += PPP_HDRLEN + MPPE_OVHD;
608 isize -= PPP_HDRLEN + MPPE_OVHD; /* -6 */
609 /* net osize: isize-4 */
610
611 /*
612 * Decrypt the first byte in order to check if it is
613 * a compressed or uncompressed protocol field.
614 */
615 sg_init_table(sg_in, 1);
616 sg_init_table(sg_out, 1);
617 setup_sg(sg_in, ibuf, 1);
618 setup_sg(sg_out, obuf, 1);
619 if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
620		printk(KERN_DEBUG "crypto_blkcipher_decrypt failed\n");
621 return DECOMP_ERROR;
622 }
623
624 /*
625 * Do PFC decompression.
626 * This would be nicer if we were given the actual sk_buff
627 * instead of a char *.
628 */
629 if ((obuf[0] & 0x01) != 0) {
630 obuf[1] = obuf[0];
631 obuf[0] = 0;
632 obuf++;
633 osize++;
634 }
635
636 /* And finally, decrypt the rest of the packet. */
637 setup_sg(sg_in, ibuf + 1, isize - 1);
638 setup_sg(sg_out, obuf + 1, osize - 1);
639 if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
640		printk(KERN_DEBUG "crypto_blkcipher_decrypt failed\n");
641 return DECOMP_ERROR;
642 }
643
644 state->stats.unc_bytes += osize;
645 state->stats.unc_packets++;
646 state->stats.comp_bytes += isize;
647 state->stats.comp_packets++;
648
649 /* good packet credit */
650 state->sanity_errors >>= 1;
651
652 return osize;
653}
654
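
The stateless catch-up loop above is worth a worked example: if the receiver last saw ccount 4 and packet 7 arrives, it must rekey once per missed count so its session key matches the sender's. A toy model of just the counter arithmetic (a sketch, with the rekey replaced by a counter):

	#include <stdio.h>

	#define CCOUNT_SPACE 0x1000   /* the 12-bit coherency count wraps here */

	int main(void)
	{
		unsigned local = 4, received = 7, rekeys = 0;

		while (local != received) {
			rekeys++;   /* stands in for mppe_rekey(state, 0) */
			local = (local + 1) % CCOUNT_SPACE;
		}
		printf("rekeyed %u times\n", rekeys);   /* prints: rekeyed 3 times */
		return 0;
	}
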
655/*
656 * Incompressible data has arrived (this should never happen!).
657 * We should probably drop the link if the protocol is in the range
658 * of what should be encrypted. At the least, we should drop this
659 * packet. (How to do this?)
660 */
661static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
662{
663 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
664
665 if (state->debug &&
666 (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
667 printk(KERN_DEBUG
668 "mppe_incomp[%d]: incompressible (unencrypted) data! "
669 "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));
670
671 state->stats.inc_bytes += icnt;
672 state->stats.inc_packets++;
673 state->stats.unc_bytes += icnt;
674 state->stats.unc_packets++;
675}
676
677/*************************************************************
678 * Module interface table
679 *************************************************************/
680
681/*
682 * Procedures exported to if_ppp.c.
683 */
684static struct compressor ppp_mppe = {
685 .compress_proto = CI_MPPE,
686 .comp_alloc = mppe_alloc,
687 .comp_free = mppe_free,
688 .comp_init = mppe_comp_init,
689 .comp_reset = mppe_comp_reset,
690 .compress = mppe_compress,
691 .comp_stat = mppe_comp_stats,
692 .decomp_alloc = mppe_alloc,
693 .decomp_free = mppe_free,
694 .decomp_init = mppe_decomp_init,
695 .decomp_reset = mppe_decomp_reset,
696 .decompress = mppe_decompress,
697 .incomp = mppe_incomp,
698 .decomp_stat = mppe_comp_stats,
699 .owner = THIS_MODULE,
700 .comp_extra = MPPE_PAD,
701};
702
703/*
704 * ppp_mppe_init()
705 *
706 * Before allowing the module to load, check that the arc4 and sha1
707 * crypto algorithms are available. The actual tfms are allocated
708 * later, but this way the module fails to insmod if they aren't available.
709 */
710
711static int __init ppp_mppe_init(void)
712{
713 int answer;
714 if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
715 crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
716 return -ENODEV;
717
718 sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
719 if (!sha_pad)
720 return -ENOMEM;
721 sha_pad_init(sha_pad);
722
723 answer = ppp_register_compressor(&ppp_mppe);
724
725 if (answer == 0)
726 printk(KERN_INFO "PPP MPPE Compression module registered\n");
727 else
728 kfree(sha_pad);
729
730 return answer;
731}
732
733static void __exit ppp_mppe_cleanup(void)
734{
735 ppp_unregister_compressor(&ppp_mppe);
736 kfree(sha_pad);
737}
738
739module_init(ppp_mppe_init);
740module_exit(ppp_mppe_cleanup);
diff --git a/drivers/net/ppp/ppp_mppe.h b/drivers/net/ppp/ppp_mppe.h
new file mode 100644
index 000000000000..7a14e058c668
--- /dev/null
+++ b/drivers/net/ppp/ppp_mppe.h
@@ -0,0 +1,86 @@
1#define MPPE_PAD 4 /* MPPE growth per frame */
2#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */
3
4/* option bits for ccp_options.mppe */
5#define MPPE_OPT_40 0x01 /* 40 bit */
6#define MPPE_OPT_128 0x02 /* 128 bit */
7#define MPPE_OPT_STATEFUL 0x04 /* stateful mode */
8/* unsupported opts */
9#define MPPE_OPT_56 0x08 /* 56 bit */
10#define MPPE_OPT_MPPC 0x10 /* MPPC compression */
11#define MPPE_OPT_D 0x20 /* Unknown */
12#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D)
13#define MPPE_OPT_UNKNOWN 0x40 /* Bits !defined in RFC 3078 were set */
14
15/*
16 * This is not nice ... the alternative is a bitfield struct though.
17 * And unfortunately, we cannot share the same bits for the option
18 * names above since C and H are the same bit. We could do a u_int32
19 * but then we have to do a htonl() all the time and/or we still need
20 * to know which octet is which.
21 */
22#define MPPE_C_BIT 0x01 /* MPPC */
23#define MPPE_D_BIT 0x10 /* Obsolete, usage unknown */
24#define MPPE_L_BIT 0x20 /* 40-bit */
25#define MPPE_S_BIT 0x40 /* 128-bit */
26#define MPPE_M_BIT 0x80 /* 56-bit, not supported */
27#define MPPE_H_BIT 0x01 /* Stateless (in a different byte) */
28
29/* Does not include H bit; used for least significant octet only. */
30#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT)
31
32/* Build a CI from mppe opts (see RFC 3078) */
33#define MPPE_OPTS_TO_CI(opts, ci) \
34 do { \
35 u_char *ptr = ci; /* u_char[4] */ \
36 \
37 /* H bit */ \
38 if (opts & MPPE_OPT_STATEFUL) \
39 *ptr++ = 0x0; \
40 else \
41 *ptr++ = MPPE_H_BIT; \
42 *ptr++ = 0; \
43 *ptr++ = 0; \
44 \
45 /* S,L bits */ \
46 *ptr = 0; \
47 if (opts & MPPE_OPT_128) \
48 *ptr |= MPPE_S_BIT; \
49 if (opts & MPPE_OPT_40) \
50 *ptr |= MPPE_L_BIT; \
51 /* M,D,C bits not supported */ \
52 } while (/* CONSTCOND */ 0)
53
54/* The reverse of the above */
55#define MPPE_CI_TO_OPTS(ci, opts) \
56 do { \
57 u_char *ptr = ci; /* u_char[4] */ \
58 \
59 opts = 0; \
60 \
61 /* H bit */ \
62 if (!(ptr[0] & MPPE_H_BIT)) \
63 opts |= MPPE_OPT_STATEFUL; \
64 \
65 /* S,L bits */ \
66 if (ptr[3] & MPPE_S_BIT) \
67 opts |= MPPE_OPT_128; \
68 if (ptr[3] & MPPE_L_BIT) \
69 opts |= MPPE_OPT_40; \
70 \
71 /* M,D,C bits */ \
72 if (ptr[3] & MPPE_M_BIT) \
73 opts |= MPPE_OPT_56; \
74 if (ptr[3] & MPPE_D_BIT) \
75 opts |= MPPE_OPT_D; \
76 if (ptr[3] & MPPE_C_BIT) \
77 opts |= MPPE_OPT_MPPC; \
78 \
79 /* Other bits */ \
80 if (ptr[0] & ~MPPE_H_BIT) \
81 opts |= MPPE_OPT_UNKNOWN; \
82 if (ptr[1] || ptr[2]) \
83 opts |= MPPE_OPT_UNKNOWN; \
84 if (ptr[3] & ~MPPE_ALL_BITS) \
85 opts |= MPPE_OPT_UNKNOWN; \
86 } while (/* CONSTCOND */ 0)
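
To see the two macros cooperate, here is a hypothetical round trip (assuming this header is on the include path and u_char is typedef'd): stateless 128-bit options encode to { MPPE_H_BIT, 0, 0, MPPE_S_BIT } and decode back unchanged:

	#include <stdio.h>
	typedef unsigned char u_char;
	#include "ppp_mppe.h"

	int main(void)
	{
		u_char ci[4];
		int opts = MPPE_OPT_128, back;   /* stateless, 128-bit */

		MPPE_OPTS_TO_CI(opts, ci);   /* ci = { 0x01, 0x00, 0x00, 0x40 } */
		MPPE_CI_TO_OPTS(ci, back);
		printf("%s\n", back == opts ? "round trip ok" : "mismatch");
		return 0;
	}
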
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
new file mode 100644
index 000000000000..736a39ee05bb
--- /dev/null
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -0,0 +1,790 @@
1/*
2 * PPP synchronous tty channel driver for Linux.
3 *
4 * This is a ppp channel driver that can be used with tty device drivers
5 * that are frame oriented, such as synchronous HDLC devices.
6 *
7 * Complete PPP frames without encoding/decoding are exchanged between
8 * the channel driver and the device driver.
9 *
10 * The async map IOCTL codes are implemented to keep the user mode
11 * applications happy if they call them. Synchronous PPP does not use
12 * the async maps.
13 *
14 * Copyright 1999 Paul Mackerras.
15 *
16 * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * This driver provides the encapsulation and framing for sending
24 * and receiving PPP frames over sync serial lines. It relies on
25 * the generic PPP layer to give it frames to send and to process
26 * received frames. It implements the PPP line discipline.
27 *
28 * Part of the code in this driver was inspired by the old async-only
29 * PPP driver, written by Michael Callahan and Al Longyear, and
30 * subsequently hacked by Paul Mackerras.
31 *
32 * ==FILEVERSION 20040616==
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/skbuff.h>
38#include <linux/tty.h>
39#include <linux/netdevice.h>
40#include <linux/poll.h>
41#include <linux/ppp_defs.h>
42#include <linux/if_ppp.h>
43#include <linux/ppp_channel.h>
44#include <linux/spinlock.h>
45#include <linux/completion.h>
46#include <linux/init.h>
47#include <linux/interrupt.h>
48#include <linux/slab.h>
49#include <asm/unaligned.h>
50#include <asm/uaccess.h>
51
52#define PPP_VERSION "2.4.2"
53
54/* Structure for storing local state. */
55struct syncppp {
56 struct tty_struct *tty;
57 unsigned int flags;
58 unsigned int rbits;
59 int mru;
60 spinlock_t xmit_lock;
61 spinlock_t recv_lock;
62 unsigned long xmit_flags;
63 u32 xaccm[8];
64 u32 raccm;
65 unsigned int bytes_sent;
66 unsigned int bytes_rcvd;
67
68 struct sk_buff *tpkt;
69 unsigned long last_xmit;
70
71 struct sk_buff_head rqueue;
72
73 struct tasklet_struct tsk;
74
75 atomic_t refcnt;
76 struct completion dead_cmp;
77 struct ppp_channel chan; /* interface to generic ppp layer */
78};
79
80/* Bit numbers in xmit_flags */
81#define XMIT_WAKEUP 0
82#define XMIT_FULL 1
83
84/* Bits in rbits */
85#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
86
87#define PPPSYNC_MAX_RQLEN 32 /* arbitrary */
88
89/*
90 * Prototypes.
91 */
92static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
93static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
94static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
95 unsigned long arg);
96static void ppp_sync_process(unsigned long arg);
97static int ppp_sync_push(struct syncppp *ap);
98static void ppp_sync_flush_output(struct syncppp *ap);
99static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
100 char *flags, int count);
101
102static const struct ppp_channel_ops sync_ops = {
103 .start_xmit = ppp_sync_send,
104 .ioctl = ppp_sync_ioctl,
105};
106
107/*
108 * Utility procedures to print a buffer in hex/ascii
109 */
110static void
111ppp_print_hex (register __u8 * out, const __u8 * in, int count)
112{
113 register __u8 next_ch;
114 static const char hex[] = "0123456789ABCDEF";
115
116 while (count-- > 0) {
117 next_ch = *in++;
118 *out++ = hex[(next_ch >> 4) & 0x0F];
119 *out++ = hex[next_ch & 0x0F];
120 ++out;
121 }
122}
123
124static void
125ppp_print_char (register __u8 * out, const __u8 * in, int count)
126{
127 register __u8 next_ch;
128
129 while (count-- > 0) {
130 next_ch = *in++;
131
132 if (next_ch < 0x20 || next_ch > 0x7e)
133 *out++ = '.';
134 else {
135 *out++ = next_ch;
136 if (next_ch == '%') /* printk/syslogd has a bug !! */
137 *out++ = '%';
138 }
139 }
140 *out = '\0';
141}
142
143static void
144ppp_print_buffer (const char *name, const __u8 *buf, int count)
145{
146 __u8 line[44];
147
148 if (name != NULL)
149 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
150
151 while (count > 8) {
152 memset (line, 32, 44);
153 ppp_print_hex (line, buf, 8);
154 ppp_print_char (&line[8 * 3], buf, 8);
155 printk(KERN_DEBUG "%s\n", line);
156 count -= 8;
157 buf += 8;
158 }
159
160 if (count > 0) {
161 memset (line, 32, 44);
162 ppp_print_hex (line, buf, count);
163 ppp_print_char (&line[8 * 3], buf, count);
164 printk(KERN_DEBUG "%s\n", line);
165 }
166}
167
168
169/*
170 * Routines implementing the synchronous PPP line discipline.
171 */
172
173/*
174 * We have a potential race on dereferencing tty->disc_data,
175 * because the tty layer provides no locking at all - thus one
176 * cpu could be running ppp_synctty_receive while another
177 * calls ppp_synctty_close, which zeroes tty->disc_data and
178 * frees the memory that ppp_synctty_receive is using. The best
179 * way to fix this is to use a rwlock in the tty struct, but for now
180 * we use a single global rwlock for all ttys in ppp line discipline.
181 *
182 * FIXME: Fixed in tty_io nowadays.
183 */
184static DEFINE_RWLOCK(disc_data_lock);
185
186static struct syncppp *sp_get(struct tty_struct *tty)
187{
188 struct syncppp *ap;
189
190 read_lock(&disc_data_lock);
191 ap = tty->disc_data;
192 if (ap != NULL)
193 atomic_inc(&ap->refcnt);
194 read_unlock(&disc_data_lock);
195 return ap;
196}
197
198static void sp_put(struct syncppp *ap)
199{
200 if (atomic_dec_and_test(&ap->refcnt))
201 complete(&ap->dead_cmp);
202}
203
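
Every ldisc entry point below follows the pattern the comment above describes: take a counted reference before touching disc_data, drop it when done, so that ppp_sync_close() can wait out any concurrent users. A schematic sketch of the idiom (hypothetical callback, same shape as the real ones):

	static void example_ldisc_callback(struct tty_struct *tty)
	{
		struct syncppp *ap = sp_get(tty);   /* NULL once close has run */

		if (!ap)
			return;
		/* ... ap is safe to use here; ppp_sync_close() blocks on
		 * dead_cmp until the matching sp_put() drops the last ref ... */
		sp_put(ap);
	}
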
204/*
205 * Called when a tty is put into sync-PPP line discipline.
206 */
207static int
208ppp_sync_open(struct tty_struct *tty)
209{
210 struct syncppp *ap;
211 int err;
212 int speed;
213
214 if (tty->ops->write == NULL)
215 return -EOPNOTSUPP;
216
217 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
218 err = -ENOMEM;
219 if (!ap)
220 goto out;
221
222 /* initialize the syncppp structure */
223 ap->tty = tty;
224 ap->mru = PPP_MRU;
225 spin_lock_init(&ap->xmit_lock);
226 spin_lock_init(&ap->recv_lock);
227 ap->xaccm[0] = ~0U;
228 ap->xaccm[3] = 0x60000000U;
229 ap->raccm = ~0U;
230
231 skb_queue_head_init(&ap->rqueue);
232 tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
233
234 atomic_set(&ap->refcnt, 1);
235 init_completion(&ap->dead_cmp);
236
237 ap->chan.private = ap;
238 ap->chan.ops = &sync_ops;
239 ap->chan.mtu = PPP_MRU;
240 ap->chan.hdrlen = 2; /* for A/C bytes */
241 speed = tty_get_baud_rate(tty);
242 ap->chan.speed = speed;
243 err = ppp_register_channel(&ap->chan);
244 if (err)
245 goto out_free;
246
247 tty->disc_data = ap;
248 tty->receive_room = 65536;
249 return 0;
250
251 out_free:
252 kfree(ap);
253 out:
254 return err;
255}
256
257/*
258 * Called when the tty is put into another line discipline
259 * or it hangs up. We have to wait for any cpu currently
260 * executing in any of the other ppp_synctty_* routines to
261 * finish before we can call ppp_unregister_channel and free
262 * the syncppp struct. This routine must be called from
263 * process context, not interrupt or softirq context.
264 */
265static void
266ppp_sync_close(struct tty_struct *tty)
267{
268 struct syncppp *ap;
269
270 write_lock_irq(&disc_data_lock);
271 ap = tty->disc_data;
272 tty->disc_data = NULL;
273 write_unlock_irq(&disc_data_lock);
274 if (!ap)
275 return;
276
277 /*
278 * We have now ensured that nobody can start using ap from now
279 * on, but we have to wait for all existing users to finish.
280 * Note that ppp_unregister_channel ensures that no calls to
281 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
282 * by the time it returns.
283 */
284 if (!atomic_dec_and_test(&ap->refcnt))
285 wait_for_completion(&ap->dead_cmp);
286 tasklet_kill(&ap->tsk);
287
288 ppp_unregister_channel(&ap->chan);
289 skb_queue_purge(&ap->rqueue);
290 kfree_skb(ap->tpkt);
291 kfree(ap);
292}
293
294/*
295 * Called on tty hangup in process context.
296 *
297 * Wait for I/O to driver to complete and unregister PPP channel.
298 * This is already done by the close routine, so just call that.
299 */
300static int ppp_sync_hangup(struct tty_struct *tty)
301{
302 ppp_sync_close(tty);
303 return 0;
304}
305
306/*
307 * Read does nothing - no data is ever available this way.
308 * Pppd reads and writes packets via /dev/ppp instead.
309 */
310static ssize_t
311ppp_sync_read(struct tty_struct *tty, struct file *file,
312 unsigned char __user *buf, size_t count)
313{
314 return -EAGAIN;
315}
316
317/*
318 * Write on the tty does nothing, the packets all come in
319 * from the ppp generic stuff.
320 */
321static ssize_t
322ppp_sync_write(struct tty_struct *tty, struct file *file,
323 const unsigned char *buf, size_t count)
324{
325 return -EAGAIN;
326}
327
328static int
329ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
330 unsigned int cmd, unsigned long arg)
331{
332 struct syncppp *ap = sp_get(tty);
333 int __user *p = (int __user *)arg;
334 int err, val;
335
336 if (!ap)
337 return -ENXIO;
338 err = -EFAULT;
339 switch (cmd) {
340 case PPPIOCGCHAN:
341 err = -EFAULT;
342 if (put_user(ppp_channel_index(&ap->chan), p))
343 break;
344 err = 0;
345 break;
346
347 case PPPIOCGUNIT:
348 err = -EFAULT;
349 if (put_user(ppp_unit_number(&ap->chan), p))
350 break;
351 err = 0;
352 break;
353
354 case TCFLSH:
355 /* flush our buffers and the serial port's buffer */
356 if (arg == TCIOFLUSH || arg == TCOFLUSH)
357 ppp_sync_flush_output(ap);
358 err = tty_perform_flush(tty, arg);
359 break;
360
361 case FIONREAD:
362 val = 0;
363 if (put_user(val, p))
364 break;
365 err = 0;
366 break;
367
368 default:
369 err = tty_mode_ioctl(tty, file, cmd, arg);
370 break;
371 }
372
373 sp_put(ap);
374 return err;
375}
376
377/* No kernel lock - fine */
378static unsigned int
379ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
380{
381 return 0;
382}
383
384/* May sleep, don't call from interrupt level or with interrupts disabled */
385static void
386ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
387 char *cflags, int count)
388{
389 struct syncppp *ap = sp_get(tty);
390 unsigned long flags;
391
392 if (!ap)
393 return;
394 spin_lock_irqsave(&ap->recv_lock, flags);
395 ppp_sync_input(ap, buf, cflags, count);
396 spin_unlock_irqrestore(&ap->recv_lock, flags);
397 if (!skb_queue_empty(&ap->rqueue))
398 tasklet_schedule(&ap->tsk);
399 sp_put(ap);
400 tty_unthrottle(tty);
401}
402
403static void
404ppp_sync_wakeup(struct tty_struct *tty)
405{
406 struct syncppp *ap = sp_get(tty);
407
408 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
409 if (!ap)
410 return;
411 set_bit(XMIT_WAKEUP, &ap->xmit_flags);
412 tasklet_schedule(&ap->tsk);
413 sp_put(ap);
414}
415
416
417static struct tty_ldisc_ops ppp_sync_ldisc = {
418 .owner = THIS_MODULE,
419 .magic = TTY_LDISC_MAGIC,
420 .name = "pppsync",
421 .open = ppp_sync_open,
422 .close = ppp_sync_close,
423 .hangup = ppp_sync_hangup,
424 .read = ppp_sync_read,
425 .write = ppp_sync_write,
426 .ioctl = ppp_synctty_ioctl,
427 .poll = ppp_sync_poll,
428 .receive_buf = ppp_sync_receive,
429 .write_wakeup = ppp_sync_wakeup,
430};
431
432static int __init
433ppp_sync_init(void)
434{
435 int err;
436
437 err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
438 if (err != 0)
439 printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
440 err);
441 return err;
442}
443
444/*
445 * The following routines provide the PPP channel interface.
446 */
447static int
448ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
449{
450 struct syncppp *ap = chan->private;
451 int err, val;
452 u32 accm[8];
453 void __user *argp = (void __user *)arg;
454 u32 __user *p = argp;
455
456 err = -EFAULT;
457 switch (cmd) {
458 case PPPIOCGFLAGS:
459 val = ap->flags | ap->rbits;
460 if (put_user(val, (int __user *) argp))
461 break;
462 err = 0;
463 break;
464 case PPPIOCSFLAGS:
465 if (get_user(val, (int __user *) argp))
466 break;
467 ap->flags = val & ~SC_RCV_BITS;
468 spin_lock_irq(&ap->recv_lock);
469 ap->rbits = val & SC_RCV_BITS;
470 spin_unlock_irq(&ap->recv_lock);
471 err = 0;
472 break;
473
474 case PPPIOCGASYNCMAP:
475 if (put_user(ap->xaccm[0], p))
476 break;
477 err = 0;
478 break;
479 case PPPIOCSASYNCMAP:
480 if (get_user(ap->xaccm[0], p))
481 break;
482 err = 0;
483 break;
484
485 case PPPIOCGRASYNCMAP:
486 if (put_user(ap->raccm, p))
487 break;
488 err = 0;
489 break;
490 case PPPIOCSRASYNCMAP:
491 if (get_user(ap->raccm, p))
492 break;
493 err = 0;
494 break;
495
496 case PPPIOCGXASYNCMAP:
497 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
498 break;
499 err = 0;
500 break;
501 case PPPIOCSXASYNCMAP:
502 if (copy_from_user(accm, argp, sizeof(accm)))
503 break;
504 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
505 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
506 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
507 err = 0;
508 break;
509
510 case PPPIOCGMRU:
511 if (put_user(ap->mru, (int __user *) argp))
512 break;
513 err = 0;
514 break;
515 case PPPIOCSMRU:
516 if (get_user(val, (int __user *) argp))
517 break;
518 if (val < PPP_MRU)
519 val = PPP_MRU;
520 ap->mru = val;
521 err = 0;
522 break;
523
524 default:
525 err = -ENOTTY;
526 }
527 return err;
528}
529
530/*
531 * This is called at softirq level to deliver received packets
532 * to the ppp_generic code, and to tell the ppp_generic code
533 * if we can accept more output now.
534 */
535static void ppp_sync_process(unsigned long arg)
536{
537 struct syncppp *ap = (struct syncppp *) arg;
538 struct sk_buff *skb;
539
540 /* process received packets */
541 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
542 if (skb->len == 0) {
543 /* zero length buffers indicate error */
544 ppp_input_error(&ap->chan, 0);
545 kfree_skb(skb);
546 }
547 else
548 ppp_input(&ap->chan, skb);
549 }
550
551 /* try to push more stuff out */
552 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
553 ppp_output_wakeup(&ap->chan);
554}
555
556/*
557 * Procedures for encapsulation and framing.
558 */
559
560static struct sk_buff*
561ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
562{
563 int proto;
564 unsigned char *data;
565 int islcp;
566
567 data = skb->data;
568 proto = get_unaligned_be16(data);
569
570 /* LCP packets with codes between 1 (configure-request)
571 * and 7 (code-reject) must be sent as though no options
572 * have been negotiated.
573 */
574 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
575
576 /* compress protocol field if option enabled */
577 if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
578 skb_pull(skb,1);
579
580 /* prepend address/control fields if necessary */
581 if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
582 if (skb_headroom(skb) < 2) {
583 struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
584 if (npkt == NULL) {
585 kfree_skb(skb);
586 return NULL;
587 }
588 skb_reserve(npkt,2);
589 skb_copy_from_linear_data(skb,
590 skb_put(npkt, skb->len), skb->len);
591 kfree_skb(skb);
592 skb = npkt;
593 }
594 skb_push(skb,2);
595 skb->data[0] = PPP_ALLSTATIONS;
596 skb->data[1] = PPP_UI;
597 }
598
599 ap->last_xmit = jiffies;
600
601 if (skb && ap->flags & SC_LOG_OUTPKT)
602 ppp_print_buffer ("send buffer", skb->data, skb->len);
603
604 return skb;
605}
606
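
The framing rules in ppp_sync_txmunge() are easy to misread, so a toy model of just the two decisions may help (a sketch that ignores the data[0] == 0 precondition for protocol-field compression):

	#include <stdio.h>

	#define PPP_LCP 0xc021

	int main(void)
	{
		int proto = PPP_LCP, code = 1;        /* LCP Configure-Request */
		int comp_prot = 1, comp_ac = 1;       /* SC_COMP_PROT, SC_COMP_AC */
		int islcp = proto == PPP_LCP && code >= 1 && code <= 7;

		/* LCP codes 1..7 are always sent as if nothing were negotiated */
		printf("compress proto field: %s\n",
		       (comp_prot && !islcp) ? "yes" : "no");   /* no  */
		printf("prepend ff 03 (A/C):  %s\n",
		       (!comp_ac || islcp) ? "yes" : "no");     /* yes */
		return 0;
	}
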
607/*
608 * Transmit-side routines.
609 */
610
611/*
612 * Send a packet to the peer over a sync tty line.
613 * Returns 1 iff the packet was accepted.
614 * If the packet was not accepted, we will call ppp_output_wakeup
615 * at some later time.
616 */
617static int
618ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
619{
620 struct syncppp *ap = chan->private;
621
622 ppp_sync_push(ap);
623
624 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
625 return 0; /* already full */
626 skb = ppp_sync_txmunge(ap, skb);
627 if (skb != NULL)
628 ap->tpkt = skb;
629 else
630 clear_bit(XMIT_FULL, &ap->xmit_flags);
631
632 ppp_sync_push(ap);
633 return 1;
634}
635
636/*
637 * Push as much data as possible out to the tty.
638 */
639static int
640ppp_sync_push(struct syncppp *ap)
641{
642 int sent, done = 0;
643 struct tty_struct *tty = ap->tty;
644 int tty_stuffed = 0;
645
646 if (!spin_trylock_bh(&ap->xmit_lock))
647 return 0;
648 for (;;) {
649 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
650 tty_stuffed = 0;
651 if (!tty_stuffed && ap->tpkt) {
652 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
653 sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
654 if (sent < 0)
655 goto flush; /* error, e.g. loss of CD */
656 if (sent < ap->tpkt->len) {
657 tty_stuffed = 1;
658 } else {
659 kfree_skb(ap->tpkt);
660 ap->tpkt = NULL;
661 clear_bit(XMIT_FULL, &ap->xmit_flags);
662 done = 1;
663 }
664 continue;
665 }
666 /* haven't made any progress */
667 spin_unlock_bh(&ap->xmit_lock);
668 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
669 (!tty_stuffed && ap->tpkt)))
670 break;
671 if (!spin_trylock_bh(&ap->xmit_lock))
672 break;
673 }
674 return done;
675
676flush:
677 if (ap->tpkt) {
678 kfree_skb(ap->tpkt);
679 ap->tpkt = NULL;
680 clear_bit(XMIT_FULL, &ap->xmit_flags);
681 done = 1;
682 }
683 spin_unlock_bh(&ap->xmit_lock);
684 return done;
685}
686
687/*
688 * Flush output from our internal buffers.
689 * Called for the TCFLSH ioctl.
690 */
691static void
692ppp_sync_flush_output(struct syncppp *ap)
693{
694 int done = 0;
695
696 spin_lock_bh(&ap->xmit_lock);
697 if (ap->tpkt != NULL) {
698 kfree_skb(ap->tpkt);
699 ap->tpkt = NULL;
700 clear_bit(XMIT_FULL, &ap->xmit_flags);
701 done = 1;
702 }
703 spin_unlock_bh(&ap->xmit_lock);
704 if (done)
705 ppp_output_wakeup(&ap->chan);
706}
707
708/*
709 * Receive-side routines.
710 */
711
712/* Called when the tty driver has data for us.
713 *
714 * Data is frame oriented: each call to ppp_sync_input is considered
715 * a whole frame. If the 1st flag byte is non-zero then the whole
716 * frame is considered to be in error and is tossed.
717 */
718static void
719ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
720 char *flags, int count)
721{
722 struct sk_buff *skb;
723 unsigned char *p;
724
725 if (count == 0)
726 return;
727
728 if (ap->flags & SC_LOG_INPKT)
729 ppp_print_buffer ("receive buffer", buf, count);
730
731 /* stuff the chars in the skb */
732 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
733 if (!skb) {
734 printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
735 goto err;
736 }
737 /* Try to get the payload 4-byte aligned */
738 if (buf[0] != PPP_ALLSTATIONS)
739 skb_reserve(skb, 2 + (buf[0] & 1));
740
741 if (flags && *flags) {
742 /* error flag set, ignore frame */
743 goto err;
744 } else if (count > skb_tailroom(skb)) {
745 /* packet overflowed MRU */
746 goto err;
747 }
748
749 p = skb_put(skb, count);
750 memcpy(p, buf, count);
751
752 /* strip address/control field if present */
753 p = skb->data;
754 if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
755 /* chop off address/control */
756 if (skb->len < 3)
757 goto err;
758 p = skb_pull(skb, 2);
759 }
760
761 /* decompress protocol field if compressed */
762 if (p[0] & 1) {
763 /* protocol is compressed */
764 skb_push(skb, 1)[0] = 0;
765 } else if (skb->len < 2)
766 goto err;
767
768 /* queue the frame to be processed */
769 skb_queue_tail(&ap->rqueue, skb);
770 return;
771
772err:
773 /* queue zero length packet as error indication */
774 if (skb || (skb = dev_alloc_skb(0))) {
775 skb_trim(skb, 0);
776 skb_queue_tail(&ap->rqueue, skb);
777 }
778}
779
780static void __exit
781ppp_sync_cleanup(void)
782{
783 if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
784 printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
785}
786
787module_init(ppp_sync_init);
788module_exit(ppp_sync_cleanup);
789MODULE_LICENSE("GPL");
790MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
new file mode 100644
index 000000000000..bc9a4bb31980
--- /dev/null
+++ b/drivers/net/ppp/pppoe.c
@@ -0,0 +1,1208 @@
1/** -*- linux-c -*- ***********************************************************
2 * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoE --- PPP over Ethernet (RFC 2516)
6 *
7 *
8 * Version: 0.7.0
9 *
10 * 070228 : Fix to allow multiple sessions with same remote MAC and same
11 * session id by including the local device ifindex in the
12 * tuple identifying a session. This also ensures packets can't
13 * be injected into a session from interfaces other than the one
14 * specified by userspace. Florian Zumbiehl <florz@florz.de>
15 * (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
16 * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
17 * 030700 : Fixed connect logic to allow for disconnect.
18 * 270700 : Fixed potential SMP problems; we must protect against
19 * simultaneous invocation of ppp_input
20 * and ppp_unregister_channel.
21 * 040800 : Respect reference count mechanisms on net-devices.
22 * 200800 : fix kfree(skb) in pppoe_rcv (acme)
23 * Module reference count is decremented in the right spot now,
24 * guards against sock_put not actually freeing the sk
25 * in pppoe_release.
26 * 051000 : Initialization cleanup.
27 * 111100 : Fix recvmsg.
28 * 050101 :	Fix PADT processing.
29 * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey)
30 * 170701 : Do not lock_sock with rwlock held. (DaveM)
31 * Ignore discovery frames if user has socket
32 * locked. (DaveM)
33 * Ignore return value of dev_queue_xmit in __pppoe_xmit
34 * or else we may kfree an SKB twice. (DaveM)
35 * 190701 : When doing copies of skb's in __pppoe_xmit, always delete
36 * the original skb that was passed in on success, never on
37 * failure. Delete the copy of the skb on failure to avoid
38 * a memory leak.
39 * 081001 : Misc. cleanup (licence string, non-blocking, prevent
40 * reference of device on close).
41 * 121301 : New ppp channels interface; cannot unregister a channel
42 * from interrupts. Thus, we mark the socket as a ZOMBIE
43 * and do the unregistration later.
44 * 081002 : seq_file support for proc stuff -acme
45 * 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6
46 * as version 0.7. Spacing cleanup.
47 * Author: Michal Ostrowski <mostrows@speakeasy.net>
48 * Contributors:
49 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
50 * David S. Miller (davem@redhat.com)
51 *
52 * License:
53 * This program is free software; you can redistribute it and/or
54 * modify it under the terms of the GNU General Public License
55 * as published by the Free Software Foundation; either version
56 * 2 of the License, or (at your option) any later version.
57 *
58 */
59
60#include <linux/string.h>
61#include <linux/module.h>
62#include <linux/kernel.h>
63#include <linux/slab.h>
64#include <linux/errno.h>
65#include <linux/netdevice.h>
66#include <linux/net.h>
67#include <linux/inetdevice.h>
68#include <linux/etherdevice.h>
69#include <linux/skbuff.h>
70#include <linux/init.h>
71#include <linux/if_ether.h>
72#include <linux/if_pppox.h>
73#include <linux/ppp_channel.h>
74#include <linux/ppp_defs.h>
75#include <linux/if_ppp.h>
76#include <linux/notifier.h>
77#include <linux/file.h>
78#include <linux/proc_fs.h>
79#include <linux/seq_file.h>
80
81#include <linux/nsproxy.h>
82#include <net/net_namespace.h>
83#include <net/netns/generic.h>
84#include <net/sock.h>
85
86#include <asm/uaccess.h>
87
88#define PPPOE_HASH_BITS 4
89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
91
92static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
93
94static const struct proto_ops pppoe_ops;
95static const struct ppp_channel_ops pppoe_chan_ops;
96
97/* per-net private data for this module */
98static int pppoe_net_id __read_mostly;
99struct pppoe_net {
100 /*
101	 * We could use a _single_ hash table for all
102	 * nets by injecting the net id into the hash,
103	 * but that would lengthen the hash chains and
104	 * add a few extra comparisons; keeping the
105	 * tables per-net also means less lock
106	 * contention under SMP.
107 */
108 struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
109 rwlock_t hash_lock;
110};
111
112/*
113 * PPPoE could be in the following stages:
114 * 1) Discovery stage (to obtain remote MAC and Session ID)
115 * 2) Session stage (MAC and SID are known)
116 *
117 * Ethernet frames have a special tag for this but
118 * we use a simpler approach based on the session id.
119 */
120static inline bool stage_session(__be16 sid)
121{
122 return sid != 0;
123}
124
125static inline struct pppoe_net *pppoe_pernet(struct net *net)
126{
127 BUG_ON(!net);
128
129 return net_generic(net, pppoe_net_id);
130}
131
132static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
133{
134 return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
135}
136
137static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
138{
139 return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
140}
141
142#if 8 % PPPOE_HASH_BITS
143#error 8 must be a multiple of PPPOE_HASH_BITS
144#endif
145
146static int hash_item(__be16 sid, unsigned char *addr)
147{
148 unsigned char hash = 0;
149 unsigned int i;
150
151 for (i = 0; i < ETH_ALEN; i++)
152 hash ^= addr[i];
153 for (i = 0; i < sizeof(sid_t) * 8; i += 8)
154 hash ^= (__force __u32)sid >> i;
155 for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
156 hash ^= hash >> i;
157
158 return hash & PPPOE_HASH_MASK;
159}
160
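
hash_item() XOR-folds the six MAC octets and both session-id octets into a single byte, then folds that byte down to PPPOE_HASH_BITS (4) bits. A stand-alone version of the same fold (a sketch in host byte order, unlike the __be16 sid in the driver):

	#include <stdio.h>

	#define HASH_BITS 4
	#define HASH_MASK ((1 << HASH_BITS) - 1)

	int main(void)
	{
		unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		unsigned short sid = 0xbeef;
		unsigned char hash = 0;
		int i;

		for (i = 0; i < 6; i++)
			hash ^= addr[i];           /* fold the MAC */
		hash ^= sid & 0xff;                /* fold both sid octets */
		hash ^= sid >> 8;
		for (i = 8; (i >>= 1) >= HASH_BITS;)
			hash ^= hash >> i;         /* fold 8 bits down to 4 */

		printf("bucket %u\n", hash & HASH_MASK);   /* prints: bucket 4 */
		return 0;
	}
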
161/**********************************************************************
162 *
163 * Set/get/delete/rehash items (internal versions)
164 *
165 **********************************************************************/
166static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
167 unsigned char *addr, int ifindex)
168{
169 int hash = hash_item(sid, addr);
170 struct pppox_sock *ret;
171
172 ret = pn->hash_table[hash];
173 while (ret) {
174 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
175 ret->pppoe_ifindex == ifindex)
176 return ret;
177
178 ret = ret->next;
179 }
180
181 return NULL;
182}
183
184static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
185{
186 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
187 struct pppox_sock *ret;
188
189 ret = pn->hash_table[hash];
190 while (ret) {
191 if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
192 ret->pppoe_ifindex == po->pppoe_ifindex)
193 return -EALREADY;
194
195 ret = ret->next;
196 }
197
198 po->next = pn->hash_table[hash];
199 pn->hash_table[hash] = po;
200
201 return 0;
202}
203
204static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
205 char *addr, int ifindex)
206{
207 int hash = hash_item(sid, addr);
208 struct pppox_sock *ret, **src;
209
210 ret = pn->hash_table[hash];
211 src = &pn->hash_table[hash];
212
213 while (ret) {
214 if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
215 ret->pppoe_ifindex == ifindex) {
216 *src = ret->next;
217 break;
218 }
219
220 src = &ret->next;
221 ret = ret->next;
222 }
223
224 return ret;
225}
226
227/**********************************************************************
228 *
229 * Set/get/delete/rehash items
230 *
231 **********************************************************************/
232static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
233 unsigned char *addr, int ifindex)
234{
235 struct pppox_sock *po;
236
237 read_lock_bh(&pn->hash_lock);
238 po = __get_item(pn, sid, addr, ifindex);
239 if (po)
240 sock_hold(sk_pppox(po));
241 read_unlock_bh(&pn->hash_lock);
242
243 return po;
244}
245
246static inline struct pppox_sock *get_item_by_addr(struct net *net,
247 struct sockaddr_pppox *sp)
248{
249 struct net_device *dev;
250 struct pppoe_net *pn;
251 struct pppox_sock *pppox_sock = NULL;
252
253 int ifindex;
254
255 rcu_read_lock();
256 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
257 if (dev) {
258 ifindex = dev->ifindex;
259 pn = pppoe_pernet(net);
260 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
261 sp->sa_addr.pppoe.remote, ifindex);
262 }
263 rcu_read_unlock();
264 return pppox_sock;
265}
266
267static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid,
268 char *addr, int ifindex)
269{
270 struct pppox_sock *ret;
271
272 write_lock_bh(&pn->hash_lock);
273 ret = __delete_item(pn, sid, addr, ifindex);
274 write_unlock_bh(&pn->hash_lock);
275
276 return ret;
277}
278
279/***************************************************************************
280 *
281 * Handler for device events.
282 * Certain device events require that sockets be unconnected.
283 *
284 **************************************************************************/
285
286static void pppoe_flush_dev(struct net_device *dev)
287{
288 struct pppoe_net *pn;
289 int i;
290
291 pn = pppoe_pernet(dev_net(dev));
292 write_lock_bh(&pn->hash_lock);
293 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
294 struct pppox_sock *po = pn->hash_table[i];
295 struct sock *sk;
296
297 while (po) {
298 while (po && po->pppoe_dev != dev) {
299 po = po->next;
300 }
301
302 if (!po)
303 break;
304
305 sk = sk_pppox(po);
306
307 /* We always grab the socket lock, followed by the
308 * hash_lock, in that order. Since we should hold the
309 * sock lock while doing any unbinding, we need to
310 * release the lock we're holding. Hold a reference to
311 * the sock so it doesn't disappear as we're jumping
312 * between locks.
313 */
314
315 sock_hold(sk);
316 write_unlock_bh(&pn->hash_lock);
317 lock_sock(sk);
318
319 if (po->pppoe_dev == dev &&
320 sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
321 pppox_unbind_sock(sk);
322 sk->sk_state = PPPOX_ZOMBIE;
323 sk->sk_state_change(sk);
324 po->pppoe_dev = NULL;
325 dev_put(dev);
326 }
327
328 release_sock(sk);
329 sock_put(sk);
330
331 /* Restart the process from the start of the current
332 * hash chain. We dropped locks so the world may have
333		 * changed from underneath us.
334 */
335
336 BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
337 write_lock_bh(&pn->hash_lock);
338 po = pn->hash_table[i];
339 }
340 }
341 write_unlock_bh(&pn->hash_lock);
342}
343
344static int pppoe_device_event(struct notifier_block *this,
345 unsigned long event, void *ptr)
346{
347 struct net_device *dev = (struct net_device *)ptr;
348
349 /* Only look at sockets that are using this specific device. */
350 switch (event) {
351 case NETDEV_CHANGEADDR:
352 case NETDEV_CHANGEMTU:
353 /* A change in mtu or address is a bad thing, requiring
354 * LCP re-negotiation.
355 */
356
357 case NETDEV_GOING_DOWN:
358 case NETDEV_DOWN:
359 /* Find every socket on this device and kill it. */
360 pppoe_flush_dev(dev);
361 break;
362
363 default:
364 break;
365 }
366
367 return NOTIFY_DONE;
368}
369
370static struct notifier_block pppoe_notifier = {
371 .notifier_call = pppoe_device_event,
372};
373
374/************************************************************************
375 *
376 * Do the real work of receiving a PPPoE Session frame.
377 *
378 ***********************************************************************/
379static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
380{
381 struct pppox_sock *po = pppox_sk(sk);
382 struct pppox_sock *relay_po;
383
384 /* Backlog receive. Semantics of backlog rcv preclude any code from
385 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
386 * can't change.
387 */
388
389 if (sk->sk_state & PPPOX_BOUND) {
390 ppp_input(&po->chan, skb);
391 } else if (sk->sk_state & PPPOX_RELAY) {
392 relay_po = get_item_by_addr(sock_net(sk),
393 &po->pppoe_relay);
394 if (relay_po == NULL)
395 goto abort_kfree;
396
397 if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
398 goto abort_put;
399
400 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
401 goto abort_put;
402 } else {
403 if (sock_queue_rcv_skb(sk, skb))
404 goto abort_kfree;
405 }
406
407 return NET_RX_SUCCESS;
408
409abort_put:
410 sock_put(sk_pppox(relay_po));
411
412abort_kfree:
413 kfree_skb(skb);
414 return NET_RX_DROP;
415}
416
417/************************************************************************
418 *
419 * Receive wrapper called in BH context.
420 *
421 ***********************************************************************/
422static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
423 struct packet_type *pt, struct net_device *orig_dev)
424{
425 struct pppoe_hdr *ph;
426 struct pppox_sock *po;
427 struct pppoe_net *pn;
428 int len;
429
430 skb = skb_share_check(skb, GFP_ATOMIC);
431 if (!skb)
432 goto out;
433
434 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
435 goto drop;
436
437 ph = pppoe_hdr(skb);
438 len = ntohs(ph->length);
439
440 skb_pull_rcsum(skb, sizeof(*ph));
441 if (skb->len < len)
442 goto drop;
443
444 if (pskb_trim_rcsum(skb, len))
445 goto drop;
446
447 pn = pppoe_pernet(dev_net(dev));
448
449 /* Note that get_item does a sock_hold(), so sk_pppox(po)
450 * is known to be safe.
451 */
452 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
453 if (!po)
454 goto drop;
455
456 return sk_receive_skb(sk_pppox(po), skb, 0);
457
458drop:
459 kfree_skb(skb);
460out:
461 return NET_RX_DROP;
462}
463
464/************************************************************************
465 *
466 * Receive a PPPoE Discovery frame.
467 * This is solely for detection of PADT frames
468 *
469 ***********************************************************************/
470static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
471 struct packet_type *pt, struct net_device *orig_dev)
472
473{
474 struct pppoe_hdr *ph;
475 struct pppox_sock *po;
476 struct pppoe_net *pn;
477
478 skb = skb_share_check(skb, GFP_ATOMIC);
479 if (!skb)
480 goto out;
481
482 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
483 goto abort;
484
485 ph = pppoe_hdr(skb);
486 if (ph->code != PADT_CODE)
487 goto abort;
488
489 pn = pppoe_pernet(dev_net(dev));
490 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
491 if (po) {
492 struct sock *sk = sk_pppox(po);
493
494 bh_lock_sock(sk);
495
496 /* If the user has locked the socket, just ignore
497 * the packet. With the way two rcv protocols hook into
498 * one socket family type, we cannot (easily) distinguish
499 * what kind of SKB it is during backlog rcv.
500 */
501 if (sock_owned_by_user(sk) == 0) {
502			/* We're no longer connected at the PPPOE layer,
503 * and must wait for ppp channel to disconnect us.
504 */
505 sk->sk_state = PPPOX_ZOMBIE;
506 }
507
508 bh_unlock_sock(sk);
509 sock_put(sk);
510 }
511
512abort:
513 kfree_skb(skb);
514out:
515 return NET_RX_SUCCESS; /* Lies... :-) */
516}
517
518static struct packet_type pppoes_ptype __read_mostly = {
519 .type = cpu_to_be16(ETH_P_PPP_SES),
520 .func = pppoe_rcv,
521};
522
523static struct packet_type pppoed_ptype __read_mostly = {
524 .type = cpu_to_be16(ETH_P_PPP_DISC),
525 .func = pppoe_disc_rcv,
526};
527
528static struct proto pppoe_sk_proto __read_mostly = {
529 .name = "PPPOE",
530 .owner = THIS_MODULE,
531 .obj_size = sizeof(struct pppox_sock),
532};
533
534/***********************************************************************
535 *
536 * Initialize a new struct sock.
537 *
538 **********************************************************************/
539static int pppoe_create(struct net *net, struct socket *sock)
540{
541 struct sock *sk;
542
543 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
544 if (!sk)
545 return -ENOMEM;
546
547 sock_init_data(sock, sk);
548
549 sock->state = SS_UNCONNECTED;
550 sock->ops = &pppoe_ops;
551
552 sk->sk_backlog_rcv = pppoe_rcv_core;
553 sk->sk_state = PPPOX_NONE;
554 sk->sk_type = SOCK_STREAM;
555 sk->sk_family = PF_PPPOX;
556 sk->sk_protocol = PX_PROTO_OE;
557
558 return 0;
559}
560
561static int pppoe_release(struct socket *sock)
562{
563 struct sock *sk = sock->sk;
564 struct pppox_sock *po;
565 struct pppoe_net *pn;
566 struct net *net = NULL;
567
568 if (!sk)
569 return 0;
570
571 lock_sock(sk);
572 if (sock_flag(sk, SOCK_DEAD)) {
573 release_sock(sk);
574 return -EBADF;
575 }
576
577 po = pppox_sk(sk);
578
579 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
580 dev_put(po->pppoe_dev);
581 po->pppoe_dev = NULL;
582 }
583
584 pppox_unbind_sock(sk);
585
586 /* Signal the death of the socket. */
587 sk->sk_state = PPPOX_DEAD;
588
589 net = sock_net(sk);
590 pn = pppoe_pernet(net);
591
592 /*
593 * protect "po" from concurrent updates
594 * on pppoe_flush_dev
595 */
596 delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
597 po->pppoe_ifindex);
598
599 sock_orphan(sk);
600 sock->sk = NULL;
601
602 skb_queue_purge(&sk->sk_receive_queue);
603 release_sock(sk);
604 sock_put(sk);
605
606 return 0;
607}
608
609static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
610 int sockaddr_len, int flags)
611{
612 struct sock *sk = sock->sk;
613 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
614 struct pppox_sock *po = pppox_sk(sk);
615 struct net_device *dev = NULL;
616 struct pppoe_net *pn;
617 struct net *net = NULL;
618 int error;
619
620 lock_sock(sk);
621
622 error = -EINVAL;
623 if (sp->sa_protocol != PX_PROTO_OE)
624 goto end;
625
626 /* Check for already bound sockets */
627 error = -EBUSY;
628 if ((sk->sk_state & PPPOX_CONNECTED) &&
629 stage_session(sp->sa_addr.pppoe.sid))
630 goto end;
631
632 /* Check for already disconnected sockets, on attempts to disconnect */
633 error = -EALREADY;
634 if ((sk->sk_state & PPPOX_DEAD) &&
635 !stage_session(sp->sa_addr.pppoe.sid))
636 goto end;
637
638 error = 0;
639
640 /* Delete the old binding */
641 if (stage_session(po->pppoe_pa.sid)) {
642 pppox_unbind_sock(sk);
643 pn = pppoe_pernet(sock_net(sk));
644 delete_item(pn, po->pppoe_pa.sid,
645 po->pppoe_pa.remote, po->pppoe_ifindex);
646 if (po->pppoe_dev) {
647 dev_put(po->pppoe_dev);
648 po->pppoe_dev = NULL;
649 }
650
651 memset(sk_pppox(po) + 1, 0,
652 sizeof(struct pppox_sock) - sizeof(struct sock));
653 sk->sk_state = PPPOX_NONE;
654 }
655
656 /* Re-bind in session stage only */
657 if (stage_session(sp->sa_addr.pppoe.sid)) {
658 error = -ENODEV;
659 net = sock_net(sk);
660 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
661 if (!dev)
662 goto err_put;
663
664 po->pppoe_dev = dev;
665 po->pppoe_ifindex = dev->ifindex;
666 pn = pppoe_pernet(net);
667 if (!(dev->flags & IFF_UP))
668 goto err_put;
670
671 memcpy(&po->pppoe_pa,
672 &sp->sa_addr.pppoe,
673 sizeof(struct pppoe_addr));
674
675 write_lock_bh(&pn->hash_lock);
676 error = __set_item(pn, po);
677 write_unlock_bh(&pn->hash_lock);
678 if (error < 0)
679 goto err_put;
680
681 po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
682 dev->hard_header_len);
683
684 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
685 po->chan.private = sk;
686 po->chan.ops = &pppoe_chan_ops;
687
688 error = ppp_register_net_channel(dev_net(dev), &po->chan);
689 if (error) {
690 delete_item(pn, po->pppoe_pa.sid,
691 po->pppoe_pa.remote, po->pppoe_ifindex);
692 goto err_put;
693 }
694
695 sk->sk_state = PPPOX_CONNECTED;
696 }
697
698 po->num = sp->sa_addr.pppoe.sid;
699
700end:
701 release_sock(sk);
702 return error;
703err_put:
704 if (po->pppoe_dev) {
705 dev_put(po->pppoe_dev);
706 po->pppoe_dev = NULL;
707 }
708 goto end;
709}
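/* A minimal userspace sketch of the connect path above (illustrative
 * only: error handling is omitted, and session_id/ac_mac stand in for
 * values learned during the discovery phase):
 *
 *	struct sockaddr_pppox sp = {
 *		.sa_family   = AF_PPPOX,
 *		.sa_protocol = PX_PROTO_OE,
 *	};
 *	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE);
 *
 *	sp.sa_addr.pppoe.sid = session_id;	// __be16, from the PADS
 *	memcpy(sp.sa_addr.pppoe.remote, ac_mac, ETH_ALEN);
 *	strncpy(sp.sa_addr.pppoe.dev, "eth0", IFNAMSIZ);
 *	connect(fd, (struct sockaddr *)&sp, sizeof(sp));
 */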
710
711static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
712 int *usockaddr_len, int peer)
713{
714 int len = sizeof(struct sockaddr_pppox);
715 struct sockaddr_pppox sp;
716 memset(&sp, 0, sizeof(sp));	/* don't copy stack padding to userspace */
717 sp.sa_family = AF_PPPOX;
718 sp.sa_protocol = PX_PROTO_OE;
719 memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa,
720 sizeof(struct pppoe_addr));
721
722 memcpy(uaddr, &sp, len);
723
724 *usockaddr_len = len;
725
726 return 0;
727}
728
729static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
730 unsigned long arg)
731{
732 struct sock *sk = sock->sk;
733 struct pppox_sock *po = pppox_sk(sk);
734 int val;
735 int err;
736
737 switch (cmd) {
738 case PPPIOCGMRU:
739 err = -ENXIO;
740 if (!(sk->sk_state & PPPOX_CONNECTED))
741 break;
742
743 err = -EFAULT;
744 if (put_user(po->pppoe_dev->mtu -
745 sizeof(struct pppoe_hdr) -
746 PPP_HDRLEN,
747 (int __user *)arg))
748 break;
749 err = 0;
750 break;
751
752 case PPPIOCSMRU:
753 err = -ENXIO;
754 if (!(sk->sk_state & PPPOX_CONNECTED))
755 break;
756
757 err = -EFAULT;
758 if (get_user(val, (int __user *)arg))
759 break;
760
761 if (val < (po->pppoe_dev->mtu
762 - sizeof(struct pppoe_hdr)
763 - PPP_HDRLEN))
764 err = 0;
765 else
766 err = -EINVAL;
767 break;
768
769 case PPPIOCSFLAGS:
770 err = -EFAULT;
771 if (get_user(val, (int __user *)arg))
772 break;
773 err = 0;
774 break;
775
776 case PPPOEIOCSFWD:
777 {
778 struct pppox_sock *relay_po;
779
780 err = -EBUSY;
781 if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD))
782 break;
783
784 err = -ENOTCONN;
785 if (!(sk->sk_state & PPPOX_CONNECTED))
786 break;
787
788 /* The PPPoE address supplied by the user specifies the
789 outbound session to which frames are forwarded. */
790 err = -EFAULT;
791 if (copy_from_user(&po->pppoe_relay,
792 (void __user *)arg,
793 sizeof(struct sockaddr_pppox)))
794 break;
795
796 err = -EINVAL;
797 if (po->pppoe_relay.sa_family != AF_PPPOX ||
798 po->pppoe_relay.sa_protocol != PX_PROTO_OE)
799 break;
800
801 /* Check that the socket referenced by the address
802 actually exists. */
803 relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
804 if (!relay_po)
805 break;
806
807 sock_put(sk_pppox(relay_po));
808 sk->sk_state |= PPPOX_RELAY;
809 err = 0;
810 break;
811 }
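/* The relay socket is looked up here only to verify that it exists;
 * its reference is dropped immediately, and the data path
 * (pppoe_rcv_core()) re-resolves pppoe_relay for every forwarded
 * frame.
 */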
812
813 case PPPOEIOCDFWD:
814 err = -EALREADY;
815 if (!(sk->sk_state & PPPOX_RELAY))
816 break;
817
818 sk->sk_state &= ~PPPOX_RELAY;
819 err = 0;
820 break;
821
822 default:
823 err = -ENOTTY;
824 }
825
826 return err;
827}
828
829static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
830 struct msghdr *m, size_t total_len)
831{
832 struct sk_buff *skb;
833 struct sock *sk = sock->sk;
834 struct pppox_sock *po = pppox_sk(sk);
835 int error;
836 struct pppoe_hdr hdr;
837 struct pppoe_hdr *ph;
838 struct net_device *dev;
839 char *start;
840
841 lock_sock(sk);
842 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
843 error = -ENOTCONN;
844 goto end;
845 }
846
847 hdr.ver = 1;
848 hdr.type = 1;
849 hdr.code = 0;
850 hdr.sid = po->num;
851
852 dev = po->pppoe_dev;
853
854 error = -EMSGSIZE;
855 if (total_len > (dev->mtu + dev->hard_header_len))
856 goto end;
857
859 skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
860 0, GFP_KERNEL);
861 if (!skb) {
862 error = -ENOMEM;
863 goto end;
864 }
865
866 /* Reserve space for headers. */
867 skb_reserve(skb, dev->hard_header_len);
868 skb_reset_network_header(skb);
869
870 skb->dev = dev;
871
872 skb->priority = sk->sk_priority;
873 skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
874
875 ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
876 start = (char *)&ph->tag[0];
877
878 error = memcpy_fromiovec(start, m->msg_iov, total_len);
879 if (error < 0) {
880 kfree_skb(skb);
881 goto end;
882 }
883
884 error = total_len;
885 dev_hard_header(skb, dev, ETH_P_PPP_SES,
886 po->pppoe_pa.remote, NULL, total_len);
887
888 memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
889
890 ph->length = htons(total_len);
891
892 dev_queue_xmit(skb);
893
894end:
895 release_sock(sk);
896 return error;
897}
898
899/************************************************************************
900 *
901 * xmit function for internal use.
902 *
903 ***********************************************************************/
904static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
905{
906 struct pppox_sock *po = pppox_sk(sk);
907 struct net_device *dev = po->pppoe_dev;
908 struct pppoe_hdr *ph;
909 int data_len = skb->len;
910
911 /* The higher-level PPP code (ppp_unregister_channel()) ensures that
912 * PPP xmit operations conclude before an unregistration call, so
913 * sk->sk_state cannot change here and no lock_sock() is needed.
914 * Taking lock_sock() would in fact be wrong: it would reverse the
915 * lock ordering used when calling ppp_unregister_channel() and
916 * could therefore deadlock.
917 */
918
919 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
920 goto abort;
921
922 if (!dev)
923 goto abort;
924
925 /* Copy the data if there is no space for the header or if it's
926 * read-only.
927 */
928 if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
929 goto abort;
930
931 __skb_push(skb, sizeof(*ph));
932 skb_reset_network_header(skb);
933
934 ph = pppoe_hdr(skb);
935 ph->ver = 1;
936 ph->type = 1;
937 ph->code = 0;
938 ph->sid = po->num;
939 ph->length = htons(data_len);
940
941 skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
942 skb->dev = dev;
943
944 dev_hard_header(skb, dev, ETH_P_PPP_SES,
945 po->pppoe_pa.remote, NULL, data_len);
946
947 dev_queue_xmit(skb);
948 return 1;
949
950abort:
951 kfree_skb(skb);
952 return 1;
953}
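/* Both exits return 1: for a ppp_channel, a non-zero return from
 * start_xmit() tells the generic PPP layer that the skb was consumed
 * (transmitted or freed), whereas 0 asks it to hold the frame and
 * retry after ppp_output_wakeup().
 */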
954
955/************************************************************************
956 *
957 * xmit function called by generic PPP driver
958 * sends PPP frame over PPPoE socket
959 *
960 ***********************************************************************/
961static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
962{
963 struct sock *sk = (struct sock *)chan->private;
964 return __pppoe_xmit(sk, skb);
965}
966
967static const struct ppp_channel_ops pppoe_chan_ops = {
968 .start_xmit = pppoe_xmit,
969};
970
971static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
972 struct msghdr *m, size_t total_len, int flags)
973{
974 struct sock *sk = sock->sk;
975 struct sk_buff *skb;
976 int error = 0;
977
978 if (sk->sk_state & PPPOX_BOUND) {
979 error = -EIO;
980 goto end;
981 }
982
983 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
984 flags & MSG_DONTWAIT, &error);
985 if (error < 0)
986 goto end;
987
988 m->msg_namelen = 0;
989
990 if (skb) {
991 total_len = min_t(size_t, total_len, skb->len);
992 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
993 if (error == 0)
994 error = total_len;
995 }
996
997 kfree_skb(skb);
998end:
999 return error;
1000}
1001
1002#ifdef CONFIG_PROC_FS
1003static int pppoe_seq_show(struct seq_file *seq, void *v)
1004{
1005 struct pppox_sock *po;
1006 char *dev_name;
1007
1008 if (v == SEQ_START_TOKEN) {
1009 seq_puts(seq, "Id Address Device\n");
1010 goto out;
1011 }
1012
1013 po = v;
1014 dev_name = po->pppoe_pa.dev;
1015
1016 seq_printf(seq, "%08X %pM %8s\n",
1017 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
1018out:
1019 return 0;
1020}
1021
1022static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
1023{
1024 struct pppox_sock *po;
1025 int i;
1026
1027 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
1028 po = pn->hash_table[i];
1029 while (po) {
1030 if (!pos--)
1031 goto out;
1032 po = po->next;
1033 }
1034 }
1035
1036out:
1037 return po;
1038}
1039
1040static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
1041 __acquires(pn->hash_lock)
1042{
1043 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
1044 loff_t l = *pos;
1045
1046 read_lock_bh(&pn->hash_lock);
1047 return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
1048}
1049
1050static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1051{
1052 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
1053 struct pppox_sock *po;
1054
1055 ++*pos;
1056 if (v == SEQ_START_TOKEN) {
1057 po = pppoe_get_idx(pn, 0);
1058 goto out;
1059 }
1060 po = v;
1061 if (po->next)
1062 po = po->next;
1063 else {
1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
1065
1066 po = NULL;
1067 while (++hash < PPPOE_HASH_SIZE) {
1068 po = pn->hash_table[hash];
1069 if (po)
1070 break;
1071 }
1072 }
1073
1074out:
1075 return po;
1076}
1077
1078static void pppoe_seq_stop(struct seq_file *seq, void *v)
1079 __releases(pn->hash_lock)
1080{
1081 struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
1082 read_unlock_bh(&pn->hash_lock);
1083}
1084
1085static const struct seq_operations pppoe_seq_ops = {
1086 .start = pppoe_seq_start,
1087 .next = pppoe_seq_next,
1088 .stop = pppoe_seq_stop,
1089 .show = pppoe_seq_show,
1090};
1091
1092static int pppoe_seq_open(struct inode *inode, struct file *file)
1093{
1094 return seq_open_net(inode, file, &pppoe_seq_ops,
1095 sizeof(struct seq_net_private));
1096}
1097
1098static const struct file_operations pppoe_seq_fops = {
1099 .owner = THIS_MODULE,
1100 .open = pppoe_seq_open,
1101 .read = seq_read,
1102 .llseek = seq_lseek,
1103 .release = seq_release_net,
1104};
1105
1106#endif /* CONFIG_PROC_FS */
1107
1108static const struct proto_ops pppoe_ops = {
1109 .family = AF_PPPOX,
1110 .owner = THIS_MODULE,
1111 .release = pppoe_release,
1112 .bind = sock_no_bind,
1113 .connect = pppoe_connect,
1114 .socketpair = sock_no_socketpair,
1115 .accept = sock_no_accept,
1116 .getname = pppoe_getname,
1117 .poll = datagram_poll,
1118 .listen = sock_no_listen,
1119 .shutdown = sock_no_shutdown,
1120 .setsockopt = sock_no_setsockopt,
1121 .getsockopt = sock_no_getsockopt,
1122 .sendmsg = pppoe_sendmsg,
1123 .recvmsg = pppoe_recvmsg,
1124 .mmap = sock_no_mmap,
1125 .ioctl = pppox_ioctl,
1126};
1127
1128static const struct pppox_proto pppoe_proto = {
1129 .create = pppoe_create,
1130 .ioctl = pppoe_ioctl,
1131 .owner = THIS_MODULE,
1132};
1133
1134static __net_init int pppoe_init_net(struct net *net)
1135{
1136 struct pppoe_net *pn = pppoe_pernet(net);
1137 struct proc_dir_entry *pde;
1138
1139 rwlock_init(&pn->hash_lock);
1140
1141 pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1142#ifdef CONFIG_PROC_FS
1143 if (!pde)
1144 return -ENOMEM;
1145#endif
1146
1147 return 0;
1148}
1149
1150static __net_exit void pppoe_exit_net(struct net *net)
1151{
1152 proc_net_remove(net, "pppoe");
1153}
1154
1155static struct pernet_operations pppoe_net_ops = {
1156 .init = pppoe_init_net,
1157 .exit = pppoe_exit_net,
1158 .id = &pppoe_net_id,
1159 .size = sizeof(struct pppoe_net),
1160};
1161
1162static int __init pppoe_init(void)
1163{
1164 int err;
1165
1166 err = register_pernet_device(&pppoe_net_ops);
1167 if (err)
1168 goto out;
1169
1170 err = proto_register(&pppoe_sk_proto, 0);
1171 if (err)
1172 goto out_unregister_net_ops;
1173
1174 err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
1175 if (err)
1176 goto out_unregister_pppoe_proto;
1177
1178 dev_add_pack(&pppoes_ptype);
1179 dev_add_pack(&pppoed_ptype);
1180 register_netdevice_notifier(&pppoe_notifier);
1181
1182 return 0;
1183
1184out_unregister_pppoe_proto:
1185 proto_unregister(&pppoe_sk_proto);
1186out_unregister_net_ops:
1187 unregister_pernet_device(&pppoe_net_ops);
1188out:
1189 return err;
1190}
1191
1192static void __exit pppoe_exit(void)
1193{
1194 unregister_netdevice_notifier(&pppoe_notifier);
1195 dev_remove_pack(&pppoed_ptype);
1196 dev_remove_pack(&pppoes_ptype);
1197 unregister_pppox_proto(PX_PROTO_OE);
1198 proto_unregister(&pppoe_sk_proto);
1199 unregister_pernet_device(&pppoe_net_ops);
1200}
1201
1202module_init(pppoe_init);
1203module_exit(pppoe_exit);
1204
1205MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
1206MODULE_DESCRIPTION("PPP over Ethernet driver");
1207MODULE_LICENSE("GPL");
1208MODULE_ALIAS_NETPROTO(PF_PPPOX);
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
new file mode 100644
index 000000000000..8c0d170dabcd
--- /dev/null
+++ b/drivers/net/ppp/pppox.c
@@ -0,0 +1,149 @@
1/** -*- linux-c -*- ***********************************************************
2 * Linux PPP over X/Ethernet (PPPoX/PPPoE) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoE --- PPP over Ethernet (RFC 2516)
6 *
7 *
8 * Version: 0.5.2
9 *
10 * Author: Michal Ostrowski <mostrows@speakeasy.net>
11 *
12 * 051000 : Initialization cleanup
13 *
14 * License:
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/string.h>
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/netdevice.h>
27#include <linux/net.h>
28#include <linux/init.h>
29#include <linux/if_pppox.h>
30#include <linux/ppp_defs.h>
31#include <linux/if_ppp.h>
32#include <linux/ppp_channel.h>
33#include <linux/kmod.h>
34
35#include <net/sock.h>
36
37#include <asm/uaccess.h>
38
39static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
40
41int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
42{
43 if (proto_num < 0 || proto_num > PX_MAX_PROTO)
44 return -EINVAL;
45 if (pppox_protos[proto_num])
46 return -EALREADY;
47 pppox_protos[proto_num] = pp;
48 return 0;
49}
50
51void unregister_pppox_proto(int proto_num)
52{
53 if (proto_num >= 0 && proto_num <= PX_MAX_PROTO)
54 pppox_protos[proto_num] = NULL;
55}
56
57void pppox_unbind_sock(struct sock *sk)
58{
59 /* Clear connection to ppp device, if attached. */
60
61 if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) {
62 ppp_unregister_channel(&pppox_sk(sk)->chan);
63 sk->sk_state = PPPOX_DEAD;
64 }
65}
66
67EXPORT_SYMBOL(register_pppox_proto);
68EXPORT_SYMBOL(unregister_pppox_proto);
69EXPORT_SYMBOL(pppox_unbind_sock);
70
71int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
72{
73 struct sock *sk = sock->sk;
74 struct pppox_sock *po = pppox_sk(sk);
75 int rc;
76
77 lock_sock(sk);
78
79 switch (cmd) {
80 case PPPIOCGCHAN: {
81 int index;
82 rc = -ENOTCONN;
83 if (!(sk->sk_state & PPPOX_CONNECTED))
84 break;
85
86 rc = -EINVAL;
87 index = ppp_channel_index(&po->chan);
88 if (put_user(index, (int __user *) arg))
89 break;
90
91 rc = 0;
92 sk->sk_state |= PPPOX_BOUND;
93 break;
94 }
95 default:
96 rc = pppox_protos[sk->sk_protocol]->ioctl ?
97 pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY;
98 }
99
100 release_sock(sk);
101 return rc;
102}
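/* Typical userspace flow for PPPIOCGCHAN (an illustrative sketch,
 * error handling omitted): once the PPPoX socket is connected, fetch
 * its channel index and attach a /dev/ppp instance to that channel:
 *
 *	int chindex;
 *	int ppp_fd;
 *
 *	ioctl(pppox_fd, PPPIOCGCHAN, &chindex);
 *	ppp_fd = open("/dev/ppp", O_RDWR);
 *	ioctl(ppp_fd, PPPIOCATTCHAN, &chindex);
 */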
103
104EXPORT_SYMBOL(pppox_ioctl);
105
106static int pppox_create(struct net *net, struct socket *sock, int protocol,
107 int kern)
108{
109 int rc = -EPROTOTYPE;
110
111 if (protocol < 0 || protocol > PX_MAX_PROTO)
112 goto out;
113
114 rc = -EPROTONOSUPPORT;
115 if (!pppox_protos[protocol])
116 request_module("pppox-proto-%d", protocol);
117 if (!pppox_protos[protocol] ||
118 !try_module_get(pppox_protos[protocol]->owner))
119 goto out;
120
121 rc = pppox_protos[protocol]->create(net, sock);
122
123 module_put(pppox_protos[protocol]->owner);
124out:
125 return rc;
126}
127
128static const struct net_proto_family pppox_proto_family = {
129 .family = PF_PPPOX,
130 .create = pppox_create,
131 .owner = THIS_MODULE,
132};
133
134static int __init pppox_init(void)
135{
136 return sock_register(&pppox_proto_family);
137}
138
139static void __exit pppox_exit(void)
140{
141 sock_unregister(PF_PPPOX);
142}
143
144module_init(pppox_init);
145module_exit(pppox_exit);
146
147MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>");
148MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)");
149MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
new file mode 100644
index 000000000000..eae542a7e987
--- /dev/null
+++ b/drivers/net/ppp/pptp.c
@@ -0,0 +1,717 @@
1/*
2 * Point-to-Point Tunneling Protocol for Linux
3 *
4 * Authors: Dmitry Kozlov <xeb@mail.ru>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/string.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/errno.h>
18#include <linux/netdevice.h>
19#include <linux/net.h>
20#include <linux/skbuff.h>
21#include <linux/vmalloc.h>
22#include <linux/init.h>
23#include <linux/ppp_channel.h>
24#include <linux/ppp_defs.h>
25#include <linux/if_pppox.h>
26#include <linux/if_ppp.h>
27#include <linux/notifier.h>
28#include <linux/file.h>
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/netfilter.h>
32#include <linux/netfilter_ipv4.h>
33#include <linux/rcupdate.h>
34#include <linux/spinlock.h>
35
36#include <net/sock.h>
37#include <net/protocol.h>
38#include <net/ip.h>
39#include <net/icmp.h>
40#include <net/route.h>
41#include <net/gre.h>
42
43#include <linux/uaccess.h>
44
45#define PPTP_DRIVER_VERSION "0.8.5"
46
47#define MAX_CALLID 65535
48
49static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
50static struct pppox_sock **callid_sock;
51
52static DEFINE_SPINLOCK(chan_lock);
53
54static struct proto pptp_sk_proto __read_mostly;
55static const struct ppp_channel_ops pptp_chan_ops;
56static const struct proto_ops pptp_ops;
57
58#define PPP_LCP_ECHOREQ 0x09
59#define PPP_LCP_ECHOREP 0x0A
60#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
61
62#define MISSING_WINDOW 20
63#define WRAPPED(curseq, lastseq)\
64 ((((curseq) & 0xffffff00) == 0) &&\
65 (((lastseq) & 0xffffff00) == 0xffffff00))
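/* WRAPPED() is a heuristic for 32-bit sequence wrap-around: it holds
 * when curseq lies in the first 256 values of the space while lastseq
 * lies in the last 256.
 */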
66
67#define PPTP_GRE_PROTO 0x880B
68#define PPTP_GRE_VER 0x1
69
70#define PPTP_GRE_FLAG_C 0x80
71#define PPTP_GRE_FLAG_R 0x40
72#define PPTP_GRE_FLAG_K 0x20
73#define PPTP_GRE_FLAG_S 0x10
74#define PPTP_GRE_FLAG_A 0x80
75
76#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
77#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
78#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
79#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
80#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
81
82#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
83struct pptp_gre_header {
84 u8 flags;
85 u8 ver;
86 u16 protocol;
87 u16 payload_len;
88 u16 call_id;
89 u32 seq;
90 u32 ack;
91} __packed;
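/* This mirrors the "enhanced GRE" header of RFC 2637. On the wire the
 * trailing words are optional: seq is present only when the S flag is
 * set and ack only when the A flag is set, which is why pptp_xmit()
 * below shrinks header_len when no ack is piggybacked.
 */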
92
93static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
94{
95 struct pppox_sock *sock;
96 struct pptp_opt *opt;
97
98 rcu_read_lock();
99 sock = rcu_dereference(callid_sock[call_id]);
100 if (sock) {
101 opt = &sock->proto.pptp;
102 if (opt->dst_addr.sin_addr.s_addr != s_addr)
103 sock = NULL;
104 else
105 sock_hold(sk_pppox(sock));
106 }
107 rcu_read_unlock();
108
109 return sock;
110}
111
112static int lookup_chan_dst(u16 call_id, __be32 d_addr)
113{
114 struct pppox_sock *sock;
115 struct pptp_opt *opt;
116 int i;
117
118 rcu_read_lock();
119 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
120 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
121 sock = rcu_dereference(callid_sock[i]);
122 if (!sock)
123 continue;
124 opt = &sock->proto.pptp;
125 if (opt->dst_addr.call_id == call_id &&
126 opt->dst_addr.sin_addr.s_addr == d_addr)
127 break;
128 }
129 rcu_read_unlock();
130
131 return i < MAX_CALLID;
132}
133
134static int add_chan(struct pppox_sock *sock)
135{
136 static int call_id;
137
138 spin_lock(&chan_lock);
139 if (!sock->proto.pptp.src_addr.call_id) {
140 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
141 if (call_id == MAX_CALLID) {
142 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
143 if (call_id == MAX_CALLID)
144 goto out_err;
145 }
146 sock->proto.pptp.src_addr.call_id = call_id;
147 } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
148 goto out_err;
149
150 set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
151 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
152 spin_unlock(&chan_lock);
153
154 return 0;
155
156out_err:
157 spin_unlock(&chan_lock);
158 return -1;
159}
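/* Call id allocation rotates through the bitmap starting just past
 * the last id handed out, wrapping around once. Searches start at
 * bit 1, so call id 0 is never auto-assigned; a zero src_addr.call_id
 * therefore means "let the kernel pick".
 */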
160
161static void del_chan(struct pppox_sock *sock)
162{
163 spin_lock(&chan_lock);
164 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
165 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
166 spin_unlock(&chan_lock);
167 synchronize_rcu();
168}
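/* The synchronize_rcu() above waits out any lookup_chan() reader
 * still inside its RCU read-side critical section, so once del_chan()
 * returns no new reference to this socket can be obtained via
 * callid_sock[].
 */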
169
170static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
171{
172 struct sock *sk = (struct sock *) chan->private;
173 struct pppox_sock *po = pppox_sk(sk);
174 struct pptp_opt *opt = &po->proto.pptp;
175 struct pptp_gre_header *hdr;
176 unsigned int header_len = sizeof(*hdr);
177 struct flowi4 fl4;
178 int islcp;
179 int len;
180 unsigned char *data;
181 __u32 seq_recv;
182
183
184 struct rtable *rt;
185 struct net_device *tdev;
186 struct iphdr *iph;
187 int max_headroom;
188
189 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
190 goto tx_error;
191
192 rt = ip_route_output_ports(&init_net, &fl4, NULL,
193 opt->dst_addr.sin_addr.s_addr,
194 opt->src_addr.sin_addr.s_addr,
195 0, 0, IPPROTO_GRE,
196 RT_TOS(0), 0);
197 if (IS_ERR(rt))
198 goto tx_error;
199
200 tdev = rt->dst.dev;
201
202 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
203
204 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
205 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
206 if (!new_skb) {
207 ip_rt_put(rt);
208 goto tx_error;
209 }
210 if (skb->sk)
211 skb_set_owner_w(new_skb, skb->sk);
212 kfree_skb(skb);
213 skb = new_skb;
214 }
215
216 data = skb->data;
217 islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
218
219 /* compress protocol field */
220 if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
221 skb_pull(skb, 1);
222
223 /* Put in the address/control bytes if necessary */
224 if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
225 data = skb_push(skb, 2);
226 data[0] = PPP_ALLSTATIONS;
227 data[1] = PPP_UI;
228 }
229
230 len = skb->len;
231
232 seq_recv = opt->seq_recv;
233
234 if (opt->ack_sent == seq_recv)
235 header_len -= sizeof(hdr->ack);
236
237 /* Push down and install GRE header */
238 skb_push(skb, header_len);
239 hdr = (struct pptp_gre_header *)(skb->data);
240
241 hdr->flags = PPTP_GRE_FLAG_K;
242 hdr->ver = PPTP_GRE_VER;
243 hdr->protocol = htons(PPTP_GRE_PROTO);
244 hdr->call_id = htons(opt->dst_addr.call_id);
245
246 hdr->flags |= PPTP_GRE_FLAG_S;
247 hdr->seq = htonl(++opt->seq_sent);
248 if (opt->ack_sent != seq_recv) {
249 /* send ack with this message */
250 hdr->ver |= PPTP_GRE_FLAG_A;
251 hdr->ack = htonl(seq_recv);
252 opt->ack_sent = seq_recv;
253 }
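/* Per RFC 2637 the A (ack present) bit lives in the second octet of
 * the GRE header, the field this driver names "ver", which is why
 * PPTP_GRE_FLAG_A is OR'ed into hdr->ver above rather than into
 * hdr->flags.
 */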
254 hdr->payload_len = htons(len);
255
256 /* Push down and install the IP header. */
257
258 skb_reset_transport_header(skb);
259 skb_push(skb, sizeof(*iph));
260 skb_reset_network_header(skb);
261 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
262 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
263
264 iph = ip_hdr(skb);
265 iph->version = 4;
266 iph->ihl = sizeof(struct iphdr) >> 2;
267 if (ip_dont_fragment(sk, &rt->dst))
268 iph->frag_off = htons(IP_DF);
269 else
270 iph->frag_off = 0;
271 iph->protocol = IPPROTO_GRE;
272 iph->tos = 0;
273 iph->daddr = fl4.daddr;
274 iph->saddr = fl4.saddr;
275 iph->ttl = ip4_dst_hoplimit(&rt->dst);
276 iph->tot_len = htons(skb->len);
277
278 skb_dst_drop(skb);
279 skb_dst_set(skb, &rt->dst);
280
281 nf_reset(skb);
282
283 skb->ip_summed = CHECKSUM_NONE;
284 ip_select_ident(iph, &rt->dst, NULL);
285 ip_send_check(iph);
286
287 ip_local_out(skb);
288 return 1;
289tx_error:
290 kfree_skb(skb);	/* error paths must free the skb themselves */
291 return 1;
292}
292
293static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
294{
295 struct pppox_sock *po = pppox_sk(sk);
296 struct pptp_opt *opt = &po->proto.pptp;
297 int headersize, payload_len, seq;
298 __u8 *payload;
299 struct pptp_gre_header *header;
300
301 if (!(sk->sk_state & PPPOX_CONNECTED)) {
302 if (sock_queue_rcv_skb(sk, skb))
303 goto drop;
304 return NET_RX_SUCCESS;
305 }
306
307 header = (struct pptp_gre_header *)(skb->data);
308
309 /* test if acknowledgement present */
310 if (PPTP_GRE_IS_A(header->ver)) {
311 __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
312 header->ack : header->seq; /* ack in different place if S = 0 */
313
314 ack = ntohl(ack);
315
316 if (ack > opt->ack_recv)
317 opt->ack_recv = ack;
318 /* also handle sequence number wrap-around */
319 if (WRAPPED(ack, opt->ack_recv))
320 opt->ack_recv = ack;
321 }
322
323 /* test if payload present */
324 if (!PPTP_GRE_IS_S(header->flags))
325 goto drop;
326
327 headersize = sizeof(*header);
328 payload_len = ntohs(header->payload_len);
329 seq = ntohl(header->seq);
330
331 /* no ack present? */
332 if (!PPTP_GRE_IS_A(header->ver))
333 headersize -= sizeof(header->ack);
334 /* check for incomplete packet (length smaller than expected) */
335 if (skb->len - headersize < payload_len)
336 goto drop;
337
338 payload = skb->data + headersize;
339 /* check for expected sequence number */
340 if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
341 if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
342 (PPP_PROTOCOL(payload) == PPP_LCP) &&
343 ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
344 goto allow_packet;
345 } else {
346 opt->seq_recv = seq;
347allow_packet:
348 skb_pull(skb, headersize);
349
350 if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
351 /* chop off address/control */
352 if (skb->len < 3)
353 goto drop;
354 skb_pull(skb, 2);
355 }
356
357 if ((*skb->data) & 1) {
358 /* protocol is compressed */
359 skb_push(skb, 1)[0] = 0;
360 }
361
362 skb->ip_summed = CHECKSUM_NONE;
363 skb_set_network_header(skb, skb->head - skb->data);
364 ppp_input(&po->chan, skb);
365
366 return NET_RX_SUCCESS;
367 }
368drop:
369 kfree_skb(skb);
370 return NET_RX_DROP;
371}
372
373static int pptp_rcv(struct sk_buff *skb)
374{
375 struct pppox_sock *po;
376 struct pptp_gre_header *header;
377 struct iphdr *iph;
378
379 if (skb->pkt_type != PACKET_HOST)
380 goto drop;
381
382 if (!pskb_may_pull(skb, 12))
383 goto drop;
384
385 iph = ip_hdr(skb);
386
387 header = (struct pptp_gre_header *)skb->data;
388
389 if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
390 PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
391 PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
392 !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
393 (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
394 /* if invalid, discard this packet */
395 goto drop;
396
397 po = lookup_chan(htons(header->call_id), iph->saddr);
398 if (po) {
399 skb_dst_drop(skb);
400 nf_reset(skb);
401 return sk_receive_skb(sk_pppox(po), skb, 0);
402 }
403drop:
404 kfree_skb(skb);
405 return NET_RX_DROP;
406}
407
408static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
409 int sockaddr_len)
410{
411 struct sock *sk = sock->sk;
412 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
413 struct pppox_sock *po = pppox_sk(sk);
414 struct pptp_opt *opt = &po->proto.pptp;
415 int error = 0;
416
417 lock_sock(sk);
418
419 opt->src_addr = sp->sa_addr.pptp;
420 if (add_chan(po))
421 error = -EBUSY;
424
425 release_sock(sk);
426 return error;
427}
428
429static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
430 int sockaddr_len, int flags)
431{
432 struct sock *sk = sock->sk;
433 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
434 struct pppox_sock *po = pppox_sk(sk);
435 struct pptp_opt *opt = &po->proto.pptp;
436 struct rtable *rt;
437 struct flowi4 fl4;
438 int error = 0;
439
440 if (sp->sa_protocol != PX_PROTO_PPTP)
441 return -EINVAL;
442
443 if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
444 return -EALREADY;
445
446 lock_sock(sk);
447 /* Check for already bound sockets */
448 if (sk->sk_state & PPPOX_CONNECTED) {
449 error = -EBUSY;
450 goto end;
451 }
452
453 /* Check for already disconnected sockets, on attempts to disconnect */
454 if (sk->sk_state & PPPOX_DEAD) {
455 error = -EALREADY;
456 goto end;
457 }
458
459 if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
460 error = -EINVAL;
461 goto end;
462 }
463
464 po->chan.private = sk;
465 po->chan.ops = &pptp_chan_ops;
466
467 rt = ip_route_output_ports(&init_net, &fl4, sk,
468 opt->dst_addr.sin_addr.s_addr,
469 opt->src_addr.sin_addr.s_addr,
470 0, 0,
471 IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
472 if (IS_ERR(rt)) {
473 error = -EHOSTUNREACH;
474 goto end;
475 }
476 sk_setup_caps(sk, &rt->dst);
477
478 po->chan.mtu = dst_mtu(&rt->dst);
479 if (!po->chan.mtu)
480 po->chan.mtu = PPP_MTU;
481 ip_rt_put(rt);
482 po->chan.mtu -= PPTP_HEADER_OVERHEAD;
483
484 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
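/* The extra 2 bytes in hdrlen (and in PPTP_HEADER_OVERHEAD) reserve
 * room for the PPP address/control pair (PPP_ALLSTATIONS/PPP_UI) that
 * pptp_xmit() may prepend to each frame.
 */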
485 error = ppp_register_channel(&po->chan);
486 if (error) {
487 pr_err("PPTP: failed to register PPP channel (%d)\n", error);
488 goto end;
489 }
490
491 opt->dst_addr = sp->sa_addr.pptp;
492 sk->sk_state = PPPOX_CONNECTED;
493
494 end:
495 release_sock(sk);
496 return error;
497}
498
499static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
500 int *usockaddr_len, int peer)
501{
502 int len = sizeof(struct sockaddr_pppox);
503 struct sockaddr_pppox sp;
504 memset(&sp, 0, sizeof(sp));	/* don't copy stack padding to userspace */
505 sp.sa_family = AF_PPPOX;
506 sp.sa_protocol = PX_PROTO_PPTP;
507 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
508
509 memcpy(uaddr, &sp, len);
510
511 *usockaddr_len = len;
512
513 return 0;
514}
515
516static int pptp_release(struct socket *sock)
517{
518 struct sock *sk = sock->sk;
519 struct pppox_sock *po;
520 struct pptp_opt *opt;
521 int error = 0;
522
523 if (!sk)
524 return 0;
525
526 lock_sock(sk);
527
528 if (sock_flag(sk, SOCK_DEAD)) {
529 release_sock(sk);
530 return -EBADF;
531 }
532
533 po = pppox_sk(sk);
534 opt = &po->proto.pptp;
535 del_chan(po);
536
537 pppox_unbind_sock(sk);
538 sk->sk_state = PPPOX_DEAD;
539
540 sock_orphan(sk);
541 sock->sk = NULL;
542
543 release_sock(sk);
544 sock_put(sk);
545
546 return error;
547}
548
549static void pptp_sock_destruct(struct sock *sk)
550{
551 if (!(sk->sk_state & PPPOX_DEAD)) {
552 del_chan(pppox_sk(sk));
553 pppox_unbind_sock(sk);
554 }
555 skb_queue_purge(&sk->sk_receive_queue);
556}
557
558static int pptp_create(struct net *net, struct socket *sock)
559{
560 int error = -ENOMEM;
561 struct sock *sk;
562 struct pppox_sock *po;
563 struct pptp_opt *opt;
564
565 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
566 if (!sk)
567 goto out;
568
569 sock_init_data(sock, sk);
570
571 sock->state = SS_UNCONNECTED;
572 sock->ops = &pptp_ops;
573
574 sk->sk_backlog_rcv = pptp_rcv_core;
575 sk->sk_state = PPPOX_NONE;
576 sk->sk_type = SOCK_STREAM;
577 sk->sk_family = PF_PPPOX;
578 sk->sk_protocol = PX_PROTO_PPTP;
579 sk->sk_destruct = pptp_sock_destruct;
580
581 po = pppox_sk(sk);
582 opt = &po->proto.pptp;
583
584 opt->seq_sent = 0; opt->seq_recv = 0;
585 opt->ack_recv = 0; opt->ack_sent = 0;
586
587 error = 0;
588out:
589 return error;
590}
591
592static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
593 unsigned long arg)
594{
595 struct sock *sk = (struct sock *) chan->private;
596 struct pppox_sock *po = pppox_sk(sk);
597 struct pptp_opt *opt = &po->proto.pptp;
598 void __user *argp = (void __user *)arg;
599 int __user *p = argp;
600 int err, val;
601
602 err = -EFAULT;
603 switch (cmd) {
604 case PPPIOCGFLAGS:
605 val = opt->ppp_flags;
606 if (put_user(val, p))
607 break;
608 err = 0;
609 break;
610 case PPPIOCSFLAGS:
611 if (get_user(val, p))
612 break;
613 opt->ppp_flags = val & ~SC_RCV_BITS;
614 err = 0;
615 break;
616 default:
617 err = -ENOTTY;
618 }
619
620 return err;
621}
622
623static const struct ppp_channel_ops pptp_chan_ops = {
624 .start_xmit = pptp_xmit,
625 .ioctl = pptp_ppp_ioctl,
626};
627
628static struct proto pptp_sk_proto __read_mostly = {
629 .name = "PPTP",
630 .owner = THIS_MODULE,
631 .obj_size = sizeof(struct pppox_sock),
632};
633
634static const struct proto_ops pptp_ops = {
635 .family = AF_PPPOX,
636 .owner = THIS_MODULE,
637 .release = pptp_release,
638 .bind = pptp_bind,
639 .connect = pptp_connect,
640 .socketpair = sock_no_socketpair,
641 .accept = sock_no_accept,
642 .getname = pptp_getname,
643 .poll = sock_no_poll,
644 .listen = sock_no_listen,
645 .shutdown = sock_no_shutdown,
646 .setsockopt = sock_no_setsockopt,
647 .getsockopt = sock_no_getsockopt,
648 .sendmsg = sock_no_sendmsg,
649 .recvmsg = sock_no_recvmsg,
650 .mmap = sock_no_mmap,
651 .ioctl = pppox_ioctl,
652};
653
654static const struct pppox_proto pppox_pptp_proto = {
655 .create = pptp_create,
656 .owner = THIS_MODULE,
657};
658
659static const struct gre_protocol gre_pptp_protocol = {
660 .handler = pptp_rcv,
661};
662
663static int __init pptp_init_module(void)
664{
665 int err = 0;
666 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
667
668 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
669 if (!callid_sock) {
670 pr_err("PPTP: can't allocate memory\n");
671 return -ENOMEM;
672 }
673
674 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
675 if (err) {
676 pr_err("PPTP: can't add gre protocol\n");
677 goto out_mem_free;
678 }
679
680 err = proto_register(&pptp_sk_proto, 0);
681 if (err) {
682 pr_err("PPTP: can't register sk_proto\n");
683 goto out_gre_del_protocol;
684 }
685
686 err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
687 if (err) {
688 pr_err("PPTP: can't register pppox_proto\n");
689 goto out_unregister_sk_proto;
690 }
691
692 return 0;
693
694out_unregister_sk_proto:
695 proto_unregister(&pptp_sk_proto);
696out_gre_del_protocol:
697 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
698out_mem_free:
699 vfree(callid_sock);
700
701 return err;
702}
703
704static void __exit pptp_exit_module(void)
705{
706 unregister_pppox_proto(PX_PROTO_PPTP);
707 proto_unregister(&pptp_sk_proto);
708 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
709 vfree(callid_sock);
710}
711
712module_init(pptp_init_module);
713module_exit(pptp_exit_module);
714
715MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
716MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
717MODULE_LICENSE("GPL");