path: root/drivers/net/wan/syncppp.c
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/wan/syncppp.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/wan/syncppp.c')
-rw-r--r--  drivers/net/wan/syncppp.c  1488
1 file changed, 1488 insertions, 0 deletions
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
new file mode 100644
index 000000000000..84b65c60c799
--- /dev/null
+++ b/drivers/net/wan/syncppp.c
@@ -0,0 +1,1488 @@
1/*
2 * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
3 * as well as a CISCO HDLC implementation. See the copyright
4 * message below for the original source.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the license, or (at your option) any later version.
10 *
11 * Note, however, that this code is also used in a different form by FreeBSD.
12 * Therefore, when making any non-OS-specific change, please consider
13 * contributing it back to the original author under the terms
14 * below as well.
15 * -- Alan
16 *
17 * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
18 */
19
20/*
21 * Synchronous PPP/Cisco link level subroutines.
22 * Keepalive protocol implemented in both Cisco and PPP modes.
23 *
24 * Copyright (C) 1994 Cronyx Ltd.
25 * Author: Serge Vakulenko, <vak@zebub.msk.su>
26 *
27 * This software is distributed with NO WARRANTIES, not even the implied
28 * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Authors grant any other persons or organisations permission to use
31 * or modify this software as long as this message is kept with the software,
32 * all derivative works or modified versions.
33 *
34 * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
35 *
36 * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
37 */
38#undef DEBUG
39
40#include <linux/config.h>
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/errno.h>
44#include <linux/init.h>
45#include <linux/if_arp.h>
46#include <linux/skbuff.h>
47#include <linux/route.h>
48#include <linux/netdevice.h>
49#include <linux/inetdevice.h>
50#include <linux/random.h>
51#include <linux/pkt_sched.h>
52#include <linux/spinlock.h>
53#include <linux/rcupdate.h>
54
55#include <net/syncppp.h>
56
57#include <asm/byteorder.h>
58#include <asm/uaccess.h>
59
60#define MAXALIVECNT 6 /* max. alive packets */
61
62#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
63#define PPP_UI 0x03 /* Unnumbered Information */
64#define PPP_IP 0x0021 /* Internet Protocol */
65#define PPP_ISO 0x0023 /* ISO OSI Protocol */
66#define PPP_XNS 0x0025 /* Xerox NS Protocol */
67#define PPP_IPX 0x002b /* Novell IPX Protocol */
68#define PPP_LCP 0xc021 /* Link Control Protocol */
69#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
70
71#define LCP_CONF_REQ 1 /* PPP LCP configure request */
72#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
73#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
74#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
75#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
76#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
77#define LCP_CODE_REJ 7 /* PPP LCP code reject */
78#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
79#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
80#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
81#define LCP_DISC_REQ 11 /* PPP LCP discard request */
82
83#define LCP_OPT_MRU 1 /* maximum receive unit */
84#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
85#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
86#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
87#define LCP_OPT_MAGIC 5 /* magic number */
88#define LCP_OPT_RESERVED 6 /* reserved */
89#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
90#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
91
92#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
93#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
94#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
95#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
96#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
97#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
98#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
99
100#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
101#define CISCO_UNICAST 0x0f /* Cisco unicast address */
102#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
103#define CISCO_ADDR_REQ 0 /* Cisco address request */
104#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
105#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
106
107struct ppp_header {
108 u8 address;
109 u8 control;
110 u16 protocol;
111};
112#define PPP_HEADER_LEN sizeof (struct ppp_header)
113
114struct lcp_header {
115 u8 type;
116 u8 ident;
117 u16 len;
118};
119#define LCP_HEADER_LEN sizeof (struct lcp_header)
120
121struct cisco_packet {
122 u32 type;
123 u32 par1;
124 u32 par2;
125 u16 rel;
126 u16 time0;
127 u16 time1;
128};
129#define CISCO_PACKET_LEN 18
130#define CISCO_BIG_PACKET_LEN 20
131
132static struct sppp *spppq;
133static struct timer_list sppp_keepalive_timer;
134static DEFINE_SPINLOCK(spppq_lock);
135
136/* global xmit queue for sending packets while spinlock is held */
137static struct sk_buff_head tx_queue;
138
139static void sppp_keepalive (unsigned long dummy);
140static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
141 u8 ident, u16 len, void *data);
142static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2);
143static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
144static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
145static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
146static void sppp_lcp_open (struct sppp *sp);
147static void sppp_ipcp_open (struct sppp *sp);
148static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
149 int len, u32 *magic);
150static void sppp_cp_timeout (unsigned long arg);
151static char *sppp_lcp_type_name (u8 type);
152static char *sppp_ipcp_type_name (u8 type);
153static void sppp_print_bytes (u8 *p, u16 len);
154
155static int debug;
156
157/* Flush global outgoing packet queue to dev_queue_xmit().
158 *
159 * dev_queue_xmit() must be called with interrupts enabled
160 * which means it can't be called with spinlocks held.
161 * If a packet needs to be sent while a spinlock is held,
162 * then put the packet into tx_queue, and call sppp_flush_xmit()
163 * after spinlock is released.
164 */
165static void sppp_flush_xmit(void)
166{
167 struct sk_buff *skb;
168 while ((skb = skb_dequeue(&tx_queue)) != NULL)
169 dev_queue_xmit(skb);
170}
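/*
 * A minimal usage sketch (hypothetical helper, for illustration only):
 * code that needs to transmit while holding sp->lock queues the frame on
 * tx_queue and calls sppp_flush_xmit() only after the lock is dropped,
 * so that dev_queue_xmit() always runs with interrupts enabled.
 */
static void sppp_queue_ctrl_frame_example(struct sppp *sp, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);
	skb->dev = sp->pp_if;			/* send via the attached device */
	skb_queue_tail(&tx_queue, skb);		/* defer the actual transmit */
	spin_unlock_irqrestore(&sp->lock, flags);

	sppp_flush_xmit();			/* lock released: safe to transmit */
}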
171
172/*
173 * Interface down stub
174 */
175
176static void if_down(struct net_device *dev)
177{
178 struct sppp *sp = (struct sppp *)sppp_of(dev);
179
180 sp->pp_link_state=SPPP_LINK_DOWN;
181}
182
183/*
184 * Timeout routine activations.
185 */
186
187static void sppp_set_timeout(struct sppp *p,int s)
188{
189 if (! (p->pp_flags & PP_TIMO))
190 {
191 init_timer(&p->pp_timer);
192 p->pp_timer.function=sppp_cp_timeout;
193 p->pp_timer.expires=jiffies+s*HZ;
194 p->pp_timer.data=(unsigned long)p;
195 p->pp_flags |= PP_TIMO;
196 add_timer(&p->pp_timer);
197 }
198}
199
200static void sppp_clear_timeout(struct sppp *p)
201{
202 if (p->pp_flags & PP_TIMO)
203 {
204 del_timer(&p->pp_timer);
205 p->pp_flags &= ~PP_TIMO;
206 }
207}
208
209/**
210 * sppp_input - receive and process a WAN PPP frame
211 * @skb: The buffer to process
212 * @dev: The device it arrived on
213 *
214 * This can be called directly by cards that do not have
215 * timing constraints but is normally called from the network layer
216 * after interrupt servicing to process frames queued via netif_rx().
217 *
218 * We process the options in the card. If the frame is destined for
219 * the protocol stacks then it requeues the frame for the upper level
220 * protocol. If it is a control frame it is processed and discarded
221 * here.
222 */
223
224void sppp_input (struct net_device *dev, struct sk_buff *skb)
225{
226 struct ppp_header *h;
227 struct sppp *sp = (struct sppp *)sppp_of(dev);
228 unsigned long flags;
229
230 skb->dev=dev;
231 skb->mac.raw=skb->data;
232
233 if (dev->flags & IFF_RUNNING)
234 {
235 /* Count received bytes, add FCS and one flag */
236 sp->ibytes+= skb->len + 3;
237 sp->ipkts++;
238 }
239
240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
241 /* Too small packet, drop it. */
242 if (sp->pp_flags & PP_DEBUG)
243 printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
244 dev->name, skb->len);
245 kfree_skb(skb);
246 return;
247 }
248
249 /* Get PPP header. */
250 h = (struct ppp_header *)skb->data;
251 skb_pull(skb,sizeof(struct ppp_header));
252
253 spin_lock_irqsave(&sp->lock, flags);
254
255 switch (h->address) {
256 default: /* Invalid PPP packet. */
257 goto invalid;
258 case PPP_ALLSTATIONS:
259 if (h->control != PPP_UI)
260 goto invalid;
261 if (sp->pp_flags & PP_CISCO) {
262 if (sp->pp_flags & PP_DEBUG)
263 printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
264 dev->name,
265 h->address, h->control, ntohs (h->protocol));
266 goto drop;
267 }
268 switch (ntohs (h->protocol)) {
269 default:
270 if (sp->lcp.state == LCP_STATE_OPENED)
271 sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
272 ++sp->pp_seq, skb->len + 2,
273 &h->protocol);
274 if (sp->pp_flags & PP_DEBUG)
275 printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
276 dev->name,
277 h->address, h->control, ntohs (h->protocol));
278 goto drop;
279 case PPP_LCP:
280 sppp_lcp_input (sp, skb);
281 goto drop;
282 case PPP_IPCP:
283 if (sp->lcp.state == LCP_STATE_OPENED)
284 sppp_ipcp_input (sp, skb);
285 else
286 printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
287 goto drop;
288 case PPP_IP:
289 if (sp->ipcp.state == IPCP_STATE_OPENED) {
290 if(sp->pp_flags&PP_DEBUG)
291 printk(KERN_DEBUG "Yow an IP frame.\n");
292 skb->protocol=htons(ETH_P_IP);
293 netif_rx(skb);
294 dev->last_rx = jiffies;
295 goto done;
296 }
297 break;
298#ifdef IPX
299 case PPP_IPX:
300 /* IPX IPXCP not implemented yet */
301 if (sp->lcp.state == LCP_STATE_OPENED) {
302 skb->protocol=htons(ETH_P_IPX);
303 netif_rx(skb);
304 dev->last_rx = jiffies;
305 goto done;
306 }
307 break;
308#endif
309 }
310 break;
311 case CISCO_MULTICAST:
312 case CISCO_UNICAST:
313 /* Don't check the control field here (RFC 1547). */
314 if (! (sp->pp_flags & PP_CISCO)) {
315 if (sp->pp_flags & PP_DEBUG)
316 printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
317 dev->name,
318 h->address, h->control, ntohs (h->protocol));
319 goto drop;
320 }
321 switch (ntohs (h->protocol)) {
322 default:
323 goto invalid;
324 case CISCO_KEEPALIVE:
325 sppp_cisco_input (sp, skb);
326 goto drop;
327#ifdef CONFIG_INET
328 case ETH_P_IP:
329 skb->protocol=htons(ETH_P_IP);
330 netif_rx(skb);
331 dev->last_rx = jiffies;
332 goto done;
333#endif
334#ifdef CONFIG_IPX
335 case ETH_P_IPX:
336 skb->protocol=htons(ETH_P_IPX);
337 netif_rx(skb);
338 dev->last_rx = jiffies;
339 goto done;
340#endif
341 }
342 break;
343 }
344 goto drop;
345
346invalid:
347 if (sp->pp_flags & PP_DEBUG)
348 printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
349 dev->name, h->address, h->control, ntohs (h->protocol));
350drop:
351 kfree_skb(skb);
352done:
353 spin_unlock_irqrestore(&sp->lock, flags);
354 sppp_flush_xmit();
355 return;
356}
357
358EXPORT_SYMBOL(sppp_input);
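/*
 * A minimal receive-path sketch, assuming a hypothetical card driver
 * (example_card_rx is not part of the real API): the driver copies a
 * received HDLC frame into an skb and hands it to sppp_input(), which
 * consumes the buffer whether it is a control frame or a data frame.
 */
static void example_card_rx(struct net_device *dev, const void *buf,
			    unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (skb == NULL)
		return;				/* drop on allocation failure */
	memcpy(skb_put(skb, len), buf, len);
	sppp_input(dev, skb);			/* data frames go up via netif_rx() */
}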
359
360/*
361 * Handle transmit packets.
362 */
363
364static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type,
365 void *daddr, void *saddr, unsigned int len)
366{
367 struct sppp *sp = (struct sppp *)sppp_of(dev);
368 struct ppp_header *h;
369 skb_push(skb,sizeof(struct ppp_header));
370 h=(struct ppp_header *)skb->data;
371 if(sp->pp_flags&PP_CISCO)
372 {
373 h->address = CISCO_UNICAST;
374 h->control = 0;
375 }
376 else
377 {
378 h->address = PPP_ALLSTATIONS;
379 h->control = PPP_UI;
380 }
381 if(sp->pp_flags & PP_CISCO)
382 {
383 h->protocol = htons(type);
384 }
385 else switch(type)
386 {
387 case ETH_P_IP:
388 h->protocol = htons(PPP_IP);
389 break;
390 case ETH_P_IPX:
391 h->protocol = htons(PPP_IPX);
392 break;
393 }
394 return sizeof(struct ppp_header);
395}
396
397static int sppp_rebuild_header(struct sk_buff *skb)
398{
399 return 0;
400}
401
402/*
403 * Send keepalive packets, every 10 seconds.
404 */
405
406static void sppp_keepalive (unsigned long dummy)
407{
408 struct sppp *sp;
409 unsigned long flags;
410
411 spin_lock_irqsave(&spppq_lock, flags);
412
413 for (sp=spppq; sp; sp=sp->pp_next)
414 {
415 struct net_device *dev = sp->pp_if;
416
417 /* Keepalive mode disabled or channel down? */
418 if (! (sp->pp_flags & PP_KEEPALIVE) ||
419 ! (dev->flags & IFF_UP))
420 continue;
421
422 spin_lock(&sp->lock);
423
424 /* No keepalive in PPP mode if LCP not opened yet. */
425 if (! (sp->pp_flags & PP_CISCO) &&
426 sp->lcp.state != LCP_STATE_OPENED) {
427 spin_unlock(&sp->lock);
428 continue;
429 }
430
431 if (sp->pp_alivecnt == MAXALIVECNT) {
432 /* No keepalive packets received. Stop the interface. */
433 printk (KERN_WARNING "%s: protocol down\n", dev->name);
434 if_down (dev);
435 if (! (sp->pp_flags & PP_CISCO)) {
436 /* Shut down the PPP link. */
437 sp->lcp.magic = jiffies;
438 sp->lcp.state = LCP_STATE_CLOSED;
439 sp->ipcp.state = IPCP_STATE_CLOSED;
440 sppp_clear_timeout (sp);
441 /* Initiate negotiation. */
442 sppp_lcp_open (sp);
443 }
444 }
445 if (sp->pp_alivecnt <= MAXALIVECNT)
446 ++sp->pp_alivecnt;
447 if (sp->pp_flags & PP_CISCO)
448 sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
449 sp->pp_rseq);
450 else if (sp->lcp.state == LCP_STATE_OPENED) {
451 u32 nmagic = htonl (sp->lcp.magic);
452 sp->lcp.echoid = ++sp->pp_seq;
453 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
454 sp->lcp.echoid, 4, &nmagic);
455 }
456
457 spin_unlock(&sp->lock);
458 }
459 spin_unlock_irqrestore(&spppq_lock, flags);
460 sppp_flush_xmit();
461 sppp_keepalive_timer.expires=jiffies+10*HZ;
462 add_timer(&sppp_keepalive_timer);
463}
464
465/*
466 * Handle incoming PPP Link Control Protocol packets.
467 */
468
469static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
470{
471 struct lcp_header *h;
472 struct net_device *dev = sp->pp_if;
473 int len = skb->len;
474 u8 *p, opt[6];
475 u32 rmagic;
476
477 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
478 if (sp->pp_flags & PP_DEBUG)
479 printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
480 dev->name, len);
481 return;
482 }
483 h = (struct lcp_header *)skb->data;
484 skb_pull(skb,sizeof(struct lcp_header));
485
486 if (sp->pp_flags & PP_DEBUG)
487 {
488 char state = '?';
489 switch (sp->lcp.state) {
490 case LCP_STATE_CLOSED: state = 'C'; break;
491 case LCP_STATE_ACK_RCVD: state = 'R'; break;
492 case LCP_STATE_ACK_SENT: state = 'S'; break;
493 case LCP_STATE_OPENED: state = 'O'; break;
494 }
495 printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
496 dev->name, state, len,
497 sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
498 if (len > 4)
499 sppp_print_bytes ((u8*) (h+1), len-4);
500 printk (">\n");
501 }
502 if (len > ntohs (h->len))
503 len = ntohs (h->len);
504 switch (h->type) {
505 default:
506 /* Unknown packet type -- send Code-Reject packet. */
507 sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
508 skb->len, h);
509 break;
510 case LCP_CONF_REQ:
511 if (len < 4) {
512 if (sp->pp_flags & PP_DEBUG)
513 printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
514 dev->name, len);
515 break;
516 }
517 if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
518 goto badreq;
519 if (rmagic == sp->lcp.magic) {
520 /* Local and remote magics equal -- loopback? */
521 if (sp->pp_loopcnt >= MAXALIVECNT*5) {
522 printk (KERN_WARNING "%s: loopback\n",
523 dev->name);
524 sp->pp_loopcnt = 0;
525 if (dev->flags & IFF_UP) {
526 if_down (dev);
527 }
528 } else if (sp->pp_flags & PP_DEBUG)
529 printk (KERN_DEBUG "%s: conf req: magic glitch\n",
530 dev->name);
531 ++sp->pp_loopcnt;
532
533 /* MUST send Conf-Nack packet. */
534 rmagic = ~sp->lcp.magic;
535 opt[0] = LCP_OPT_MAGIC;
536 opt[1] = sizeof (opt);
537 opt[2] = rmagic >> 24;
538 opt[3] = rmagic >> 16;
539 opt[4] = rmagic >> 8;
540 opt[5] = rmagic;
541 sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
542 h->ident, sizeof (opt), &opt);
543badreq:
544 switch (sp->lcp.state) {
545 case LCP_STATE_OPENED:
546 /* Initiate renegotiation. */
547 sppp_lcp_open (sp);
548 /* fall through... */
549 case LCP_STATE_ACK_SENT:
550 /* Go to closed state. */
551 sp->lcp.state = LCP_STATE_CLOSED;
552 sp->ipcp.state = IPCP_STATE_CLOSED;
553 }
554 break;
555 }
556 /* Send Configure-Ack packet. */
557 sp->pp_loopcnt = 0;
558 if (sp->lcp.state != LCP_STATE_OPENED) {
559 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
560 h->ident, len-4, h+1);
561 }
562 /* Change the state. */
563 switch (sp->lcp.state) {
564 case LCP_STATE_CLOSED:
565 sp->lcp.state = LCP_STATE_ACK_SENT;
566 break;
567 case LCP_STATE_ACK_RCVD:
568 sp->lcp.state = LCP_STATE_OPENED;
569 sppp_ipcp_open (sp);
570 break;
571 case LCP_STATE_OPENED:
572 /* Remote magic changed -- close session. */
573 sp->lcp.state = LCP_STATE_CLOSED;
574 sp->ipcp.state = IPCP_STATE_CLOSED;
575 /* Initiate renegotiation. */
576 sppp_lcp_open (sp);
577 /* Send ACK after our REQ in attempt to break loop */
578 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
579 h->ident, len-4, h+1);
580 sp->lcp.state = LCP_STATE_ACK_SENT;
581 break;
582 }
583 break;
584 case LCP_CONF_ACK:
585 if (h->ident != sp->lcp.confid)
586 break;
587 sppp_clear_timeout (sp);
588 if ((sp->pp_link_state != SPPP_LINK_UP) &&
589 (dev->flags & IFF_UP)) {
590 /* Coming out of loopback mode. */
591 sp->pp_link_state=SPPP_LINK_UP;
592 printk (KERN_INFO "%s: protocol up\n", dev->name);
593 }
594 switch (sp->lcp.state) {
595 case LCP_STATE_CLOSED:
596 sp->lcp.state = LCP_STATE_ACK_RCVD;
597 sppp_set_timeout (sp, 5);
598 break;
599 case LCP_STATE_ACK_SENT:
600 sp->lcp.state = LCP_STATE_OPENED;
601 sppp_ipcp_open (sp);
602 break;
603 }
604 break;
605 case LCP_CONF_NAK:
606 if (h->ident != sp->lcp.confid)
607 break;
608 p = (u8*) (h+1);
609 if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
610 rmagic = (u32)p[2] << 24 |
611 (u32)p[3] << 16 | p[4] << 8 | p[5];
612 if (rmagic == ~sp->lcp.magic) {
613 int newmagic;
614 if (sp->pp_flags & PP_DEBUG)
615 printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
616 dev->name);
617 get_random_bytes(&newmagic, sizeof(newmagic));
618 sp->lcp.magic += newmagic;
619 } else
620 sp->lcp.magic = rmagic;
621 }
622 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
623 /* Go to closed state. */
624 sp->lcp.state = LCP_STATE_CLOSED;
625 sp->ipcp.state = IPCP_STATE_CLOSED;
626 }
627 /* The link will be renegotiated after timeout,
628 * to avoid endless req-nack loop. */
629 sppp_clear_timeout (sp);
630 sppp_set_timeout (sp, 2);
631 break;
632 case LCP_CONF_REJ:
633 if (h->ident != sp->lcp.confid)
634 break;
635 sppp_clear_timeout (sp);
636 /* Initiate renegotiation. */
637 sppp_lcp_open (sp);
638 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
639 /* Go to closed state. */
640 sp->lcp.state = LCP_STATE_CLOSED;
641 sp->ipcp.state = IPCP_STATE_CLOSED;
642 }
643 break;
644 case LCP_TERM_REQ:
645 sppp_clear_timeout (sp);
646 /* Send Terminate-Ack packet. */
647 sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
648 /* Go to closed state. */
649 sp->lcp.state = LCP_STATE_CLOSED;
650 sp->ipcp.state = IPCP_STATE_CLOSED;
651 /* Initiate renegotiation. */
652 sppp_lcp_open (sp);
653 break;
654 case LCP_TERM_ACK:
655 case LCP_CODE_REJ:
656 case LCP_PROTO_REJ:
657 /* Ignore for now. */
658 break;
659 case LCP_DISC_REQ:
660 /* Discard the packet. */
661 break;
662 case LCP_ECHO_REQ:
663 if (sp->lcp.state != LCP_STATE_OPENED)
664 break;
665 if (len < 8) {
666 if (sp->pp_flags & PP_DEBUG)
667 printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
668 dev->name, len);
669 break;
670 }
671 if (ntohl (*(u32*)(h+1)) == sp->lcp.magic) {
672 /* Line loopback mode detected. */
673 printk (KERN_WARNING "%s: loopback\n", dev->name);
674 if_down (dev);
675
676 /* Shut down the PPP link. */
677 sp->lcp.state = LCP_STATE_CLOSED;
678 sp->ipcp.state = IPCP_STATE_CLOSED;
679 sppp_clear_timeout (sp);
680 /* Initiate negotiation. */
681 sppp_lcp_open (sp);
682 break;
683 }
684 *(u32*)(h+1) = htonl (sp->lcp.magic);
685 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
686 break;
687 case LCP_ECHO_REPLY:
688 if (h->ident != sp->lcp.echoid)
689 break;
690 if (len < 8) {
691 if (sp->pp_flags & PP_DEBUG)
692 printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
693 dev->name, len);
694 break;
695 }
696 if (ntohl (*(u32*)(h+1)) != sp->lcp.magic)
697 sp->pp_alivecnt = 0;
698 break;
699 }
700}
701
702/*
703 * Handle incoming Cisco keepalive protocol packets.
704 */
705
706static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
707{
708 struct cisco_packet *h;
709 struct net_device *dev = sp->pp_if;
710
711 if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
712 || (skb->len != CISCO_PACKET_LEN
713 && skb->len != CISCO_BIG_PACKET_LEN)) {
714 if (sp->pp_flags & PP_DEBUG)
715 printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
716 dev->name, skb->len);
717 return;
718 }
719 h = (struct cisco_packet *)skb->data;
720 skb_pull(skb, sizeof(struct cisco_packet));
721 if (sp->pp_flags & PP_DEBUG)
722 printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
723 dev->name, skb->len,
724 ntohl (h->type), h->par1, h->par2, h->rel,
725 h->time0, h->time1);
726 switch (ntohl (h->type)) {
727 default:
728 if (sp->pp_flags & PP_DEBUG)
729 printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
730 dev->name, ntohl (h->type));
731 break;
732 case CISCO_ADDR_REPLY:
733 /* Reply on address request, ignore */
734 break;
735 case CISCO_KEEPALIVE_REQ:
736 sp->pp_alivecnt = 0;
737 sp->pp_rseq = ntohl (h->par1);
738 if (sp->pp_seq == sp->pp_rseq) {
739 /* Local and remote sequence numbers are equal.
740 * Probably, the line is in loopback mode. */
741 int newseq;
742 if (sp->pp_loopcnt >= MAXALIVECNT) {
743 printk (KERN_WARNING "%s: loopback\n",
744 dev->name);
745 sp->pp_loopcnt = 0;
746 if (dev->flags & IFF_UP) {
747 if_down (dev);
748 }
749 }
750 ++sp->pp_loopcnt;
751
752 /* Generate new local sequence number */
753 get_random_bytes(&newseq, sizeof(newseq));
754 sp->pp_seq ^= newseq;
755 break;
756 }
757 sp->pp_loopcnt = 0;
758 if (sp->pp_link_state==SPPP_LINK_DOWN &&
759 (dev->flags & IFF_UP)) {
760 sp->pp_link_state=SPPP_LINK_UP;
761 printk (KERN_INFO "%s: protocol up\n", dev->name);
762 }
763 break;
764 case CISCO_ADDR_REQ:
765 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
766 {
767 struct in_device *in_dev;
768 struct in_ifaddr *ifa;
769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
770#ifdef CONFIG_INET
771 rcu_read_lock();
772 if ((in_dev = __in_dev_get(dev)) != NULL)
773 {
774 for (ifa=in_dev->ifa_list; ifa != NULL;
775 ifa=ifa->ifa_next) {
776 if (strcmp(dev->name, ifa->ifa_label) == 0)
777 {
778 addr = ifa->ifa_local;
779 mask = ifa->ifa_mask;
780 break;
781 }
782 }
783 }
784 rcu_read_unlock();
785#endif
786 /* I hope both addr and mask are in the net order */
787 sppp_cisco_send (sp, CISCO_ADDR_REPLY, addr, mask);
788 break;
789 }
790 }
791}
792
793
794/*
795 * Send PPP LCP packet.
796 */
797
798static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
799 u8 ident, u16 len, void *data)
800{
801 struct ppp_header *h;
802 struct lcp_header *lh;
803 struct sk_buff *skb;
804 struct net_device *dev = sp->pp_if;
805
806 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
807 GFP_ATOMIC);
808 if (skb==NULL)
809 return;
810
811 skb_reserve(skb,dev->hard_header_len);
812
813 h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
814 h->address = PPP_ALLSTATIONS; /* broadcast address */
815 h->control = PPP_UI; /* Unnumbered Info */
816 h->protocol = htons (proto); /* Link Control Protocol */
817
818 lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
819 lh->type = type;
820 lh->ident = ident;
821 lh->len = htons (LCP_HEADER_LEN + len);
822
823 if (len)
824 memcpy(skb_put(skb,len),data, len);
825
826 if (sp->pp_flags & PP_DEBUG) {
827 printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
828 dev->name,
829 proto==PPP_LCP ? "lcp" : "ipcp",
830 proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
831 sppp_ipcp_type_name (lh->type), lh->ident,
832 ntohs (lh->len));
833 if (len)
834 sppp_print_bytes ((u8*) (lh+1), len);
835 printk (">\n");
836 }
837 sp->obytes += skb->len;
838 /* Control is high priority so it doesn't get queued behind data */
839 skb->priority=TC_PRIO_CONTROL;
840 skb->dev = dev;
841 skb_queue_tail(&tx_queue, skb);
842}
843
844/*
845 * Send Cisco keepalive packet.
846 */
847
848static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2)
849{
850 struct ppp_header *h;
851 struct cisco_packet *ch;
852 struct sk_buff *skb;
853 struct net_device *dev = sp->pp_if;
854 u32 t = jiffies * 1000/HZ;
855
856 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
857 GFP_ATOMIC);
858
859 if(skb==NULL)
860 return;
861
862 skb_reserve(skb, dev->hard_header_len);
863 h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
864 h->address = CISCO_MULTICAST;
865 h->control = 0;
866 h->protocol = htons (CISCO_KEEPALIVE);
867
868 ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
869 ch->type = htonl (type);
870 ch->par1 = htonl (par1);
871 ch->par2 = htonl (par2);
872 ch->rel = -1;
873 ch->time0 = htons ((u16) (t >> 16));
874 ch->time1 = htons ((u16) t);
875
876 if (sp->pp_flags & PP_DEBUG)
877 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
878 dev->name, ntohl (ch->type), ch->par1,
879 ch->par2, ch->rel, ch->time0, ch->time1);
880 sp->obytes += skb->len;
881 skb->priority=TC_PRIO_CONTROL;
882 skb->dev = dev;
883 skb_queue_tail(&tx_queue, skb);
884}
885
886/**
887 * sppp_close - close down a synchronous PPP or Cisco HDLC link
888 * @dev: The network device to drop the link of
889 *
890 * This drops the logical interface to the channel. It is not
891 * done politely as we assume we will also be dropping DTR. Any
892 * timeouts are killed.
893 */
894
895int sppp_close (struct net_device *dev)
896{
897 struct sppp *sp = (struct sppp *)sppp_of(dev);
898 unsigned long flags;
899
900 spin_lock_irqsave(&sp->lock, flags);
901 sp->pp_link_state = SPPP_LINK_DOWN;
902 sp->lcp.state = LCP_STATE_CLOSED;
903 sp->ipcp.state = IPCP_STATE_CLOSED;
904 sppp_clear_timeout (sp);
905 spin_unlock_irqrestore(&sp->lock, flags);
906
907 return 0;
908}
909
910EXPORT_SYMBOL(sppp_close);
911
912/**
913 * sppp_open - open a synchronous PPP or Cisco HDLC link
914 * @dev: Network device to activate
915 *
916 * Close down any existing synchronous session and commence
917 * from scratch. In the PPP case this means negotiating LCP/IPCP
918 * and friends, while for Cisco HDLC we simply need to start sending
919 * keepalives.
920 */
921
922int sppp_open (struct net_device *dev)
923{
924 struct sppp *sp = (struct sppp *)sppp_of(dev);
925 unsigned long flags;
926
927 sppp_close(dev);
928
929 spin_lock_irqsave(&sp->lock, flags);
930 if (!(sp->pp_flags & PP_CISCO)) {
931 sppp_lcp_open (sp);
932 }
933 sp->pp_link_state = SPPP_LINK_DOWN;
934 spin_unlock_irqrestore(&sp->lock, flags);
935 sppp_flush_xmit();
936
937 return 0;
938}
939
940EXPORT_SYMBOL(sppp_open);
941
942/**
943 * sppp_reopen - notify of physical link loss
944 * @dev: Device that lost the link
945 *
946 * This function informs the synchronous protocol code that
947 * the underlying link died (for example a carrier drop on X.21)
948 *
949 * We increment the magic numbers to ensure that if the other end
950 * failed to notice we will correctly start a new session. Due to the
951 * nature of telco circuits it can happen that you lose carrier on
952 * one end only.
953 *
954 * Having done this we go back to negotiating. This function may
955 * be called from an interrupt context.
956 */
957
958int sppp_reopen (struct net_device *dev)
959{
960 struct sppp *sp = (struct sppp *)sppp_of(dev);
961 unsigned long flags;
962
963 sppp_close(dev);
964
965 spin_lock_irqsave(&sp->lock, flags);
966 if (!(sp->pp_flags & PP_CISCO))
967 {
968 sp->lcp.magic = jiffies;
969 ++sp->pp_seq;
970 sp->lcp.state = LCP_STATE_CLOSED;
971 sp->ipcp.state = IPCP_STATE_CLOSED;
972 /* Give it a moment for the line to settle then go */
973 sppp_set_timeout (sp, 1);
974 }
975 sp->pp_link_state=SPPP_LINK_DOWN;
976 spin_unlock_irqrestore(&sp->lock, flags);
977
978 return 0;
979}
980
981EXPORT_SYMBOL(sppp_reopen);
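/*
 * Sketch of the intended call site, assuming a hypothetical carrier-loss
 * handler in a card driver: on loss of carrier the driver calls
 * sppp_reopen() (safe from interrupt context) so that LCP/IPCP
 * renegotiation restarts once the line comes back.
 */
static void example_carrier_lost(struct net_device *dev)
{
	if (netif_running(dev))
		sppp_reopen(dev);
}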
982
983/**
984 * sppp_change_mtu - Change the link MTU
985 * @dev: Device to change MTU on
986 * @new_mtu: New MTU
987 *
988 * Change the MTU on the link. This can only be called with
989 * the link down. It returns an error if the link is up or
990 * the mtu is out of range.
991 */
992
993int sppp_change_mtu(struct net_device *dev, int new_mtu)
994{
995 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
996 return -EINVAL;
997 dev->mtu=new_mtu;
998 return 0;
999}
1000
1001EXPORT_SYMBOL(sppp_change_mtu);
1002
1003/**
1004 * sppp_do_ioctl - Ioctl handler for ppp/hdlc
1005 * @dev: Device subject to ioctl
1006 * @ifr: Interface request block from the user
1007 * @cmd: Command that is being issued
1008 *
1009 * This function handles the ioctls that may be issued by the user
1010 * to control the settings of a PPP/HDLC link. It does both busy
1011 * and security checks. This function is intended to be wrapped by
1012 * callers who wish to add additional ioctl calls of their own.
1013 */
1014
1015int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1016{
1017 struct sppp *sp = (struct sppp *)sppp_of(dev);
1018
1019 if(dev->flags&IFF_UP)
1020 return -EBUSY;
1021
1022 if(!capable(CAP_NET_ADMIN))
1023 return -EPERM;
1024
1025 switch(cmd)
1026 {
1027 case SPPPIOCCISCO:
1028 sp->pp_flags|=PP_CISCO;
1029 dev->type = ARPHRD_HDLC;
1030 break;
1031 case SPPPIOCPPP:
1032 sp->pp_flags&=~PP_CISCO;
1033 dev->type = ARPHRD_PPP;
1034 break;
1035 case SPPPIOCDEBUG:
1036 sp->pp_flags&=~PP_DEBUG;
1037 if(ifr->ifr_flags)
1038 sp->pp_flags|=PP_DEBUG;
1039 break;
1040 case SPPPIOCGFLAGS:
1041 if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
1042 return -EFAULT;
1043 break;
1044 case SPPPIOCSFLAGS:
1045 if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
1046 return -EFAULT;
1047 break;
1048 default:
1049 return -EINVAL;
1050 }
1051 return 0;
1052}
1053
1054EXPORT_SYMBOL(sppp_do_ioctl);
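/*
 * A minimal wrapper sketch, assuming a hypothetical card driver with one
 * private ioctl: driver-specific commands are handled first and anything
 * else falls through to sppp_do_ioctl() for the generic PPP/Cisco mode,
 * debug and flag handling.
 */
static int example_card_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCDEVPRIVATE:
		/* driver specific configuration would go here */
		return 0;
	default:
		return sppp_do_ioctl(dev, ifr, cmd);
	}
}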
1055
1056/**
1057 * sppp_attach - attach synchronous PPP/HDLC to a device
1058 * @pd: PPP device to initialise
1059 *
1060 * This initialises the PPP/HDLC support on an interface. At the
1061 * time of calling the dev element must point to the network device
1062 * that this interface is attached to. The interface should not yet
1063 * be registered.
1064 */
1065
1066void sppp_attach(struct ppp_device *pd)
1067{
1068 struct net_device *dev = pd->dev;
1069 struct sppp *sp = &pd->sppp;
1070 unsigned long flags;
1071
1072 /* Make sure embedding is safe for sppp_of */
1073 BUG_ON(sppp_of(dev) != sp);
1074
1075 spin_lock_irqsave(&spppq_lock, flags);
1076 /* Initialize keepalive handler. */
1077 if (! spppq)
1078 {
1079 init_timer(&sppp_keepalive_timer);
1080 sppp_keepalive_timer.expires=jiffies+10*HZ;
1081 sppp_keepalive_timer.function=sppp_keepalive;
1082 add_timer(&sppp_keepalive_timer);
1083 }
1084 /* Insert new entry into the keepalive list. */
1085 sp->pp_next = spppq;
1086 spppq = sp;
1087 spin_unlock_irqrestore(&spppq_lock, flags);
1088
1089 sp->pp_loopcnt = 0;
1090 sp->pp_alivecnt = 0;
1091 sp->pp_seq = 0;
1092 sp->pp_rseq = 0;
1093 sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
1094 sp->lcp.magic = 0;
1095 sp->lcp.state = LCP_STATE_CLOSED;
1096 sp->ipcp.state = IPCP_STATE_CLOSED;
1097 sp->pp_if = dev;
1098 spin_lock_init(&sp->lock);
1099
1100 /*
1101 * Device specific setup. All but interrupt handler and
1102 * hard_start_xmit.
1103 */
1104
1105 dev->hard_header = sppp_hard_header;
1106 dev->rebuild_header = sppp_rebuild_header;
1107 dev->tx_queue_len = 10;
1108 dev->type = ARPHRD_HDLC;
1109 dev->addr_len = 0;
1110 dev->hard_header_len = sizeof(struct ppp_header);
1111 dev->mtu = PPP_MTU;
1112 /*
1113 * Drivers may override these but MUST also call the sppp_ functions
1114 */
1115 dev->do_ioctl = sppp_do_ioctl;
1116#if 0
1117 dev->get_stats = NULL; /* Let the driver override these */
1118 dev->open = sppp_open;
1119 dev->stop = sppp_close;
1120#endif
1121 dev->change_mtu = sppp_change_mtu;
1122 dev->hard_header_cache = NULL;
1123 dev->header_cache_update = NULL;
1124 dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
1125}
1126
1127EXPORT_SYMBOL(sppp_attach);
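/*
 * A minimal bring-up sketch, assuming a hypothetical driver probe routine:
 * the driver embeds struct sppp inside a struct ppp_device, points pd->dev
 * at its net_device, calls sppp_attach() before registration and lets the
 * generic sppp_open()/sppp_close() run the link (a real driver would
 * usually wrap them to raise and drop DTR as well).
 */
static int example_card_probe(struct ppp_device *pd)
{
	struct net_device *dev = pd->dev;

	sppp_attach(pd);		/* fills in header ops, MTU, flags, ... */
	dev->open = sppp_open;
	dev->stop = sppp_close;
	return register_netdev(dev);
}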
1128
1129/**
1130 * sppp_detach - release PPP resources from a device
1131 * @dev: Network device to release
1132 *
1133 * Stop and free up any PPP/HDLC resources used by this
1134 * interface. This must be called before the device is
1135 * freed.
1136 */
1137
1138void sppp_detach (struct net_device *dev)
1139{
1140 struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
1141 unsigned long flags;
1142
1143 spin_lock_irqsave(&spppq_lock, flags);
1144 /* Remove the entry from the keepalive list. */
1145 for (q = &spppq; (p = *q); q = &p->pp_next)
1146 if (p == sp) {
1147 *q = p->pp_next;
1148 break;
1149 }
1150
1151 /* Stop keepalive handler. */
1152 if (! spppq)
1153 del_timer(&sppp_keepalive_timer);
1154 sppp_clear_timeout (sp);
1155 spin_unlock_irqrestore(&spppq_lock, flags);
1156}
1157
1158EXPORT_SYMBOL(sppp_detach);
1159
1160/*
1161 * Analyze the LCP Configure-Request options list
1162 * for the presence of unknown options.
1163 * If the request contains unknown options, build and
1164 * send Configure-reject packet, containing only unknown options.
1165 */
1166static int
1167sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
1168 int len, u32 *magic)
1169{
1170 u8 *buf, *r, *p;
1171 int rlen;
1172
1173 len -= 4;
1174 buf = r = kmalloc (len, GFP_ATOMIC);
1175 if (! buf)
1176 return (0);
1177
1178 p = (void*) (h+1);
1179 for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
1180 switch (*p) {
1181 case LCP_OPT_MAGIC:
1182 /* Magic number -- extract. */
1183 if (len >= 6 && p[1] == 6) {
1184 *magic = (u32)p[2] << 24 |
1185 (u32)p[3] << 16 | p[4] << 8 | p[5];
1186 continue;
1187 }
1188 break;
1189 case LCP_OPT_ASYNC_MAP:
1190 /* Async control character map -- check to be zero. */
1191 if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
1192 ! p[4] && ! p[5])
1193 continue;
1194 break;
1195 case LCP_OPT_MRU:
1196 /* Maximum receive unit -- always OK. */
1197 continue;
1198 default:
1199 /* Others not supported. */
1200 break;
1201 }
1202 /* Add the option to rejected list. */
1203 memcpy(r, p, p[1]);
1204 r += p[1];
1205 rlen += p[1];
1206 }
1207 if (rlen)
1208 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
1209 kfree(buf);
1210 return (rlen == 0);
1211}
1212
1213static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
1214{
1215 struct lcp_header *h;
1216 struct net_device *dev = sp->pp_if;
1217 int len = skb->len;
1218
1219 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
1220 if (sp->pp_flags & PP_DEBUG)
1221 printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
1222 dev->name, len);
1223 return;
1224 }
1225 h = (struct lcp_header *)skb->data;
1226 skb_pull(skb,sizeof(struct lcp_header));
1227 if (sp->pp_flags & PP_DEBUG) {
1228 printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
1229 dev->name, len,
1230 sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
1231 if (len > 4)
1232 sppp_print_bytes ((u8*) (h+1), len-4);
1233 printk (">\n");
1234 }
1235 if (len > ntohs (h->len))
1236 len = ntohs (h->len);
1237 switch (h->type) {
1238 default:
1239 /* Unknown packet type -- send Code-Reject packet. */
1240 sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
1241 break;
1242 case IPCP_CONF_REQ:
1243 if (len < 4) {
1244 if (sp->pp_flags & PP_DEBUG)
1245 printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
1246 dev->name, len);
1247 return;
1248 }
1249 if (len > 4) {
1250 sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
1251 len-4, h+1);
1252
1253 switch (sp->ipcp.state) {
1254 case IPCP_STATE_OPENED:
1255 /* Initiate renegotiation. */
1256 sppp_ipcp_open (sp);
1257 /* fall through... */
1258 case IPCP_STATE_ACK_SENT:
1259 /* Go to closed state. */
1260 sp->ipcp.state = IPCP_STATE_CLOSED;
1261 }
1262 } else {
1263 /* Send Configure-Ack packet. */
1264 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
1265 0, NULL);
1266 /* Change the state. */
1267 if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
1268 sp->ipcp.state = IPCP_STATE_OPENED;
1269 else
1270 sp->ipcp.state = IPCP_STATE_ACK_SENT;
1271 }
1272 break;
1273 case IPCP_CONF_ACK:
1274 if (h->ident != sp->ipcp.confid)
1275 break;
1276 sppp_clear_timeout (sp);
1277 switch (sp->ipcp.state) {
1278 case IPCP_STATE_CLOSED:
1279 sp->ipcp.state = IPCP_STATE_ACK_RCVD;
1280 sppp_set_timeout (sp, 5);
1281 break;
1282 case IPCP_STATE_ACK_SENT:
1283 sp->ipcp.state = IPCP_STATE_OPENED;
1284 break;
1285 }
1286 break;
1287 case IPCP_CONF_NAK:
1288 case IPCP_CONF_REJ:
1289 if (h->ident != sp->ipcp.confid)
1290 break;
1291 sppp_clear_timeout (sp);
1292 /* Initiate renegotiation. */
1293 sppp_ipcp_open (sp);
1294 if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
1295 /* Go to closed state. */
1296 sp->ipcp.state = IPCP_STATE_CLOSED;
1297 break;
1298 case IPCP_TERM_REQ:
1299 /* Send Terminate-Ack packet. */
1300 sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
1301 /* Go to closed state. */
1302 sp->ipcp.state = IPCP_STATE_CLOSED;
1303 /* Initiate renegotiation. */
1304 sppp_ipcp_open (sp);
1305 break;
1306 case IPCP_TERM_ACK:
1307 /* Ignore for now. */
1308 case IPCP_CODE_REJ:
1309 /* Ignore for now. */
1310 break;
1311 }
1312}
1313
1314static void sppp_lcp_open (struct sppp *sp)
1315{
1316 char opt[6];
1317
1318 if (! sp->lcp.magic)
1319 sp->lcp.magic = jiffies;
1320 opt[0] = LCP_OPT_MAGIC;
1321 opt[1] = sizeof (opt);
1322 opt[2] = sp->lcp.magic >> 24;
1323 opt[3] = sp->lcp.magic >> 16;
1324 opt[4] = sp->lcp.magic >> 8;
1325 opt[5] = sp->lcp.magic;
1326 sp->lcp.confid = ++sp->pp_seq;
1327 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
1328 sizeof (opt), &opt);
1329 sppp_set_timeout (sp, 2);
1330}
1331
1332static void sppp_ipcp_open (struct sppp *sp)
1333{
1334 sp->ipcp.confid = ++sp->pp_seq;
1335 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
1336 sppp_set_timeout (sp, 2);
1337}
1338
1339/*
1340 * Process PPP control protocol timeouts.
1341 */
1342
1343static void sppp_cp_timeout (unsigned long arg)
1344{
1345 struct sppp *sp = (struct sppp*) arg;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(&sp->lock, flags);
1349
1350 sp->pp_flags &= ~PP_TIMO;
1351 if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
1352 spin_unlock_irqrestore(&sp->lock, flags);
1353 return;
1354 }
1355 switch (sp->lcp.state) {
1356 case LCP_STATE_CLOSED:
1357 /* No ACK for Configure-Request, retry. */
1358 sppp_lcp_open (sp);
1359 break;
1360 case LCP_STATE_ACK_RCVD:
1361 /* ACK got, but no Configure-Request for peer, retry. */
1362 sppp_lcp_open (sp);
1363 sp->lcp.state = LCP_STATE_CLOSED;
1364 break;
1365 case LCP_STATE_ACK_SENT:
1366 /* ACK sent but no ACK for Configure-Request, retry. */
1367 sppp_lcp_open (sp);
1368 break;
1369 case LCP_STATE_OPENED:
1370 /* LCP is already OK, try IPCP. */
1371 switch (sp->ipcp.state) {
1372 case IPCP_STATE_CLOSED:
1373 /* No ACK for Configure-Request, retry. */
1374 sppp_ipcp_open (sp);
1375 break;
1376 case IPCP_STATE_ACK_RCVD:
1377 /* ACK got, but no Configure-Request for peer, retry. */
1378 sppp_ipcp_open (sp);
1379 sp->ipcp.state = IPCP_STATE_CLOSED;
1380 break;
1381 case IPCP_STATE_ACK_SENT:
1382 /* ACK sent but no ACK for Configure-Request, retry. */
1383 sppp_ipcp_open (sp);
1384 break;
1385 case IPCP_STATE_OPENED:
1386 /* IPCP is OK. */
1387 break;
1388 }
1389 break;
1390 }
1391 spin_unlock_irqrestore(&sp->lock, flags);
1392 sppp_flush_xmit();
1393}
1394
1395static char *sppp_lcp_type_name (u8 type)
1396{
1397 static char buf [8];
1398 switch (type) {
1399 case LCP_CONF_REQ: return ("conf-req");
1400 case LCP_CONF_ACK: return ("conf-ack");
1401 case LCP_CONF_NAK: return ("conf-nack");
1402 case LCP_CONF_REJ: return ("conf-rej");
1403 case LCP_TERM_REQ: return ("term-req");
1404 case LCP_TERM_ACK: return ("term-ack");
1405 case LCP_CODE_REJ: return ("code-rej");
1406 case LCP_PROTO_REJ: return ("proto-rej");
1407 case LCP_ECHO_REQ: return ("echo-req");
1408 case LCP_ECHO_REPLY: return ("echo-reply");
1409 case LCP_DISC_REQ: return ("discard-req");
1410 }
1411 sprintf (buf, "%xh", type);
1412 return (buf);
1413}
1414
1415static char *sppp_ipcp_type_name (u8 type)
1416{
1417 static char buf [8];
1418 switch (type) {
1419 case IPCP_CONF_REQ: return ("conf-req");
1420 case IPCP_CONF_ACK: return ("conf-ack");
1421 case IPCP_CONF_NAK: return ("conf-nack");
1422 case IPCP_CONF_REJ: return ("conf-rej");
1423 case IPCP_TERM_REQ: return ("term-req");
1424 case IPCP_TERM_ACK: return ("term-ack");
1425 case IPCP_CODE_REJ: return ("code-rej");
1426 }
1427 sprintf (buf, "%xh", type);
1428 return (buf);
1429}
1430
1431static void sppp_print_bytes (u_char *p, u16 len)
1432{
1433 printk (" %x", *p++);
1434 while (--len > 0)
1435 printk ("-%x", *p++);
1436}
1437
1438/**
1439 * sppp_rcv - receive and process a WAN PPP frame
1440 * @skb: The buffer to process
1441 * @dev: The device it arrived on
1442 * @p: Unused
1443 *
1444 * Protocol glue. This drives the deferred processing mode the poorer
1445 * cards use. This can be called directly by cards that do not have
1446 * timing constraints but is normally called from the network layer
1447 * after interrupt servicing to process frames queued via netif_rx.
1448 */
1449
1450static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p)
1451{
1452 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1453 return NET_RX_DROP;
1454 sppp_input(dev,skb);
1455 return 0;
1456}
1457
1458struct packet_type sppp_packet_type = {
1459 .type = __constant_htons(ETH_P_WAN_PPP),
1460 .func = sppp_rcv,
1461};
1462
1463static char banner[] __initdata =
1464 KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
1465 KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
1466 "Jan \"Yenya\" Kasprzak.\n";
1467
1468static int __init sync_ppp_init(void)
1469{
1470 if(debug)
1471 debug=PP_DEBUG;
1472 printk(banner);
1473 skb_queue_head_init(&tx_queue);
1474 dev_add_pack(&sppp_packet_type);
1475 return 0;
1476}
1477
1478
1479static void __exit sync_ppp_cleanup(void)
1480{
1481 dev_remove_pack(&sppp_packet_type);
1482}
1483
1484module_init(sync_ppp_init);
1485module_exit(sync_ppp_cleanup);
1486module_param(debug, int, 0);
1487MODULE_LICENSE("GPL");
1488