author	David S. Miller <davem@davemloft.net>	2008-11-22 00:30:58 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-22 00:30:58 -0500
commit	c46920dadba65856eb1a1f1ffa1b350875db1228 (patch)
tree	78199a497b70bc6c449c761a6cc4cb223426e0cd /drivers
parent	f5f4cf08467db10de061a1b90037a56a360d3554 (diff)
parent	6476a907b57d9229de7807aeea534ad45e19a4ce (diff)
Merge branch 'for-david' of git://git.kernel.org/pub/scm/linux/kernel/git/chris/linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wan/Makefile	2
-rw-r--r--	drivers/net/wan/c101.c	6
-rw-r--r--	drivers/net/wan/hd64570.c (renamed from drivers/net/wan/hd6457x.c)	254
-rw-r--r--	drivers/net/wan/hd64572.c	640
-rw-r--r--	drivers/net/wan/hdlc_ppp.c	648
-rw-r--r--	drivers/net/wan/n2.c	9
-rw-r--r--	drivers/net/wan/pc300too.c	119
-rw-r--r--	drivers/net/wan/pci200syn.c	77
-rw-r--r--	drivers/net/wan/syncppp.c	1476
9 files changed, 1374 insertions(+), 1857 deletions(-)
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 102549605d09..cec16818a130 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o
 obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
 obj-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o
 obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
-obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o syncppp.o
+obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
 obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
 
 pc300-y := pc300_drv.o
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c8e563106a4a..b46897996f7e 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -88,7 +88,7 @@ static card_t **new_card = &first_card;
 /* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
 #define sca_outw(value, reg, card) do { \
 	writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
-	writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\
+	writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
 } while(0)
 
 #define port_to_card(port)	(port)
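
The macro above is the whole point of this hunk: on the C101's 8-bit ISA bus a 16-bit SCA register must be written as two byte accesses, low half (EDAL) before high half (EDAH). A minimal stand-alone sketch of the same idea, assuming only writeb() from <linux/io.h> (sca_write16 and its base/reg parameters are hypothetical names, not part of the driver):

	/* Write a 16-bit SCA register over an 8-bit bus; the comment in
	 * the driver says the order must be EDAL first, then EDAH. */
	static inline void sca_write16(u8 __iomem *base, unsigned int reg,
				       u16 value)
	{
		writeb(value & 0xFF, base + reg);	/* low byte (EDAL) */
		writeb(value >> 8, base + reg + 1);	/* high byte (EDAH) */
	}
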
@@ -113,7 +113,7 @@ static inline void openwin(card_t *card, u8 page)
 }
 
 
-#include "hd6457x.c"
+#include "hd64570.c"
 
 
 static inline void set_carrier(port_t *port)
@@ -381,7 +381,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
 		return result;
 	}
 
-	sca_init_sync_port(card); /* Set up C101 memory */
+	sca_init_port(card); /* Set up C101 memory */
 	set_carrier(card);
 
 	printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
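
Worth noting while reading the c101.c hunks: the board driver does not link against a separate SCA object; it defines its low-level accessor macros first and then #includes the engine source directly, so the generic code compiles against board-specific I/O. A rough sketch of that pattern (C101_SCA and win0base are the board's own symbols; this readb-based sca_in is illustrative, not copied from the driver):

	/* board file: define accessors, then pull in the generic engine */
	#define sca_in(reg, card)  readb((card)->win0base + C101_SCA + (reg))
	/* ... sca_out/sca_outw etc. defined the same way ... */
	#include "hd64570.c"	/* generic HD64570 code, renamed by this patch */
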
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd64570.c
index 434583a94b32..223238de475c 100644
--- a/drivers/net/wan/hd6457x.c
+++ b/drivers/net/wan/hd64570.c
@@ -1,5 +1,5 @@
 /*
- * Hitachi SCA HD64570 and HD64572 common driver for Linux
+ * Hitachi SCA HD64570 driver for Linux
  *
  * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
  *
@@ -7,9 +7,7 @@
  * under the terms of version 2 of the GNU General Public License
  * as published by the Free Software Foundation.
  *
- * Sources of information:
- *    Hitachi HD64570 SCA User's Manual
- *    Hitachi HD64572 SCA-II User's Manual
+ * Source of information: Hitachi HD64570 SCA User's Manual
  *
  * We use the following SCA memory map:
  *
@@ -26,33 +24,26 @@
  * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
 #include <linux/fcntl.h>
-#include <linux/interrupt.h>
+#include <linux/hdlc.h>
 #include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-
-#include <linux/hdlc.h>
-
-#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
-    (defined (__HD64570_H) && defined (__HD64572_H))
-#error Either hd64570.h or hd64572.h must be included
-#endif
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include "hd64570.h"
 
 #define get_msci(port)	  (phy_node(port) ?   MSCI1_OFFSET :   MSCI0_OFFSET)
 #define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
@@ -62,16 +53,6 @@
 #define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
 #define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
 
-#ifdef __HD64570_H /* HD64570 */
-#define sca_outa(value, reg, card)	sca_outw(value, reg, card)
-#define sca_ina(reg, card)		sca_inw(reg, card)
-#define writea(value, ptr)		writew(value, ptr)
-
-#else /* HD64572 */
-#define sca_outa(value, reg, card)	sca_outl(value, reg, card)
-#define sca_ina(reg, card)		sca_inl(reg, card)
-#define writea(value, ptr)		writel(value, ptr)
-#endif
 
 static inline struct net_device *port_to_dev(port_t *port)
 {
@@ -81,8 +62,6 @@ static inline struct net_device *port_to_dev(port_t *port)
 static inline int sca_intr_status(card_t *card)
 {
 	u8 result = 0;
-
-#ifdef __HD64570_H /* HD64570 */
 	u8 isr0 = sca_in(ISR0, card);
 	u8 isr1 = sca_in(ISR1, card);
 
@@ -93,18 +72,6 @@ static inline int sca_intr_status(card_t *card)
 	if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
 	if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
 
-#else /* HD64572 */
-	u32 isr0 = sca_inl(ISR0, card);
-
-	if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
-	if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
-	if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
-	if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
-	if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
-	if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
-
-#endif /* HD64570 vs HD64572 */
-
 	if (!(result & SCA_INTR_DMAC_TX(0)))
 		if (sca_in(DSR_TX(0), card) & DSR_EOM)
 			result |= SCA_INTR_DMAC_TX(0);
@@ -127,7 +94,6 @@ static inline u16 next_desc(port_t *port, u16 desc, int transmit)
 }
 
 
-
 static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
 {
 	u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
@@ -139,28 +105,26 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
 }
 
 
-
 static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
 {
-	/* Descriptor offset always fits in 16 bytes */
+	/* Descriptor offset always fits in 16 bits */
 	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
 }
 
 
-
-static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, int transmit)
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+					     int transmit)
 {
 #ifdef PAGE0_ALWAYS_MAPPED
 	return (pkt_desc __iomem *)(win0base(port_to_card(port))
 				    + desc_offset(port, desc, transmit));
 #else
 	return (pkt_desc __iomem *)(winbase(port_to_card(port))
 				    + desc_offset(port, desc, transmit));
 #endif
 }
 
 
-
 static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
 {
 	return port_to_card(port)->buff_offset +
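
The two helpers above encode the memory map described in the file header: all descriptor rings are laid out first, then one HDLC_MAX_MRU-sized data buffer per descriptor. A stand-alone sketch of the arithmetic, with the driver's port_t/card_t plumbing simplified into plain parameters (the HDLC_MAX_MRU value of 1600 is an assumption, the <linux/hdlc.h> definition of this era):

	#define HDLC_MAX_MRU 1600	/* assumption: value from <linux/hdlc.h> */

	/* Descriptor index within the whole descriptor area: one block per
	 * channel, RX ring before TX ring inside each block. */
	static unsigned int abs_desc(unsigned int chan, unsigned int rx_buffs,
				     unsigned int tx_buffs, unsigned int desc,
				     int transmit)
	{
		return chan * (rx_buffs + tx_buffs) +
		       (transmit ? rx_buffs : 0) + desc;
	}

	/* Data buffer offset: buffers start at buff_offset, one fixed-size
	 * HDLC_MAX_MRU slot per descriptor, in the same order. */
	static unsigned int buf_off(unsigned int buff_offset, unsigned int abs)
	{
		return buff_offset + abs * HDLC_MAX_MRU;
	}
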
@@ -186,7 +150,7 @@ static inline void sca_set_carrier(port_t *port)
 }
 
 
-static void sca_init_sync_port(port_t *port)
+static void sca_init_port(port_t *port)
 {
 	card_t *card = port_to_card(port);
 	int transmit, i;
@@ -195,7 +159,7 @@ static void sca_init_sync_port(port_t *port)
 	port->txin = 0;
 	port->txlast = 0;
 
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
 	openwin(card, 0);
 #endif
 
@@ -209,7 +173,7 @@ static void sca_init_sync_port(port_t *port)
 			u16 chain_off = desc_offset(port, i + 1, transmit);
 			u32 buff_off = buffer_offset(port, i, transmit);
 
-			writea(chain_off, &desc->cp);
+			writew(chain_off, &desc->cp);
 			writel(buff_off, &desc->bp);
 			writew(0, &desc->len);
 			writeb(0, &desc->stat);
@@ -222,16 +186,14 @@ static void sca_init_sync_port(port_t *port)
 		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
 			DCR_RX(phy_node(port)), card);
 
-#ifdef __HD64570_H
-		sca_out(0, dmac + CPB, card); /* pointer base */
-#endif
 		/* current desc addr */
-		sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
+		sca_out(0, dmac + CPB, card); /* pointer base */
+		sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
 		if (!transmit)
-			sca_outa(desc_offset(port, buffs - 1, transmit),
+			sca_outw(desc_offset(port, buffs - 1, transmit),
 				 dmac + EDAL, card);
 		else
-			sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
+			sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
 				 card);
 
 		/* clear frame end interrupt counter */
@@ -258,7 +220,6 @@ static void sca_init_sync_port(port_t *port)
 }
 
 
-
 #ifdef NEED_SCA_MSCI_INTR
 /* MSCI interrupt service */
 static inline void sca_msci_intr(port_t *port)
@@ -282,17 +243,15 @@ static inline void sca_msci_intr(port_t *port)
 #endif
 
 
-
-static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+			  u16 rxin)
 {
 	struct net_device *dev = port_to_dev(port);
 	struct sk_buff *skb;
 	u16 len;
 	u32 buff;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	u32 maxlen;
 	u8 page;
-#endif
 
 	len = readw(&desc->len);
 	skb = dev_alloc_skb(len);
@@ -302,7 +261,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
 	}
 
 	buff = buffer_offset(port, rxin, 0);
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	page = buff / winsize(card);
 	buff = buff % winsize(card);
 	maxlen = winsize(card) - buff;
@@ -314,12 +272,10 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
 		openwin(card, page + 1);
 		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
 	} else
-#endif
-	memcpy_fromio(skb->data, winbase(card) + buff, len);
+		memcpy_fromio(skb->data, winbase(card) + buff, len);
 
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
-	/* select pkt_desc table page back */
-	openwin(card, 0);
+#ifndef PAGE0_ALWAYS_MAPPED
+	openwin(card, 0);	/* select pkt_desc table page back */
 #endif
 	skb_put(skb, len);
 #ifdef DEBUG_PKT
@@ -333,7 +289,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
 }
 
 
-
 /* Receive DMA interrupt service */
 static inline void sca_rx_intr(port_t *port)
 {
@@ -353,7 +308,7 @@ static inline void sca_rx_intr(port_t *port)
 	while (1) {
 		u32 desc_off = desc_offset(port, port->rxin, 0);
 		pkt_desc __iomem *desc;
-		u32 cda = sca_ina(dmac + CDAL, card);
+		u32 cda = sca_inw(dmac + CDAL, card);
 
 		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
 			break;	/* No frame received */
@@ -377,7 +332,7 @@ static inline void sca_rx_intr(port_t *port)
 			sca_rx(card, port, desc, port->rxin);
 
 		/* Set new error descriptor address */
-		sca_outa(desc_off, dmac + EDAL, card);
+		sca_outw(desc_off, dmac + EDAL, card);
 		port->rxin = next_desc(port, port->rxin, 0);
 	}
 
@@ -386,7 +341,6 @@ static inline void sca_rx_intr(port_t *port)
 }
 
 
-
 /* Transmit DMA interrupt service */
 static inline void sca_tx_intr(port_t *port)
 {
@@ -407,7 +361,7 @@ static inline void sca_tx_intr(port_t *port)
 		pkt_desc __iomem *desc;
 
 		u32 desc_off = desc_offset(port, port->txlast, 1);
-		u32 cda = sca_ina(dmac + CDAL, card);
+		u32 cda = sca_inw(dmac + CDAL, card);
 		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
 			break;	/* Transmitter is/will_be sending this frame */
 
@@ -423,17 +377,13 @@ static inline void sca_tx_intr(port_t *port)
 }
 
 
-
 static irqreturn_t sca_intr(int irq, void* dev_id)
 {
 	card_t *card = dev_id;
 	int i;
 	u8 stat;
 	int handled = 0;
-
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	u8 page = sca_get_page(card);
-#endif
 
 	while((stat = sca_intr_status(card)) != 0) {
 		handled = 1;
@@ -452,14 +402,11 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
 		}
 	}
 
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	openwin(card, page); /* Restore original page */
-#endif
 	return IRQ_RETVAL(handled);
 }
 
 
-
 static void sca_set_port(port_t *port)
 {
 	card_t* card = port_to_card(port);
@@ -497,12 +444,7 @@ static void sca_set_port(port_t *port)
 	port->tmc = tmc;
 
 	/* baud divisor - time constant*/
-#ifdef __HD64570_H
 	sca_out(port->tmc, msci + TMC, card);
-#else
-	sca_out(port->tmc, msci + TMCR, card);
-	sca_out(port->tmc, msci + TMCT, card);
-#endif
 
 	/* Set BRG bits */
 	sca_out(port->rxs, msci + RXS, card);
@@ -518,7 +460,6 @@ static void sca_set_port(port_t *port)
 }
 
 
-
 static void sca_open(struct net_device *dev)
 {
 	port_t *port = dev_to_port(dev);
@@ -540,11 +481,7 @@ static void sca_open(struct net_device *dev)
 	switch(port->parity) {
 	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
 	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
-#ifdef __HD64570_H
 	case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
-#else
-	case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
-#endif
 	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
 	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
 	}
@@ -554,35 +491,20 @@ static void sca_open(struct net_device *dev)
 	sca_out(0x00, msci + MD1, card); /* no address field check */
 	sca_out(md2, msci + MD2, card);
 	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
-#ifdef __HD64570_H
 	sca_out(CTL_IDLE, msci + CTL, card);
-#else
-	/* Skip the rest of underrun frame */
-	sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
-#endif
 
-#ifdef __HD64570_H
 	/* Allow at least 8 bytes before requesting RX DMA operation */
 	/* TX with higher priority and possibly with shorter transfers */
 	sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
 	sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
 	sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
-#else
-	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
-	sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
-	sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
-	sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
-	sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
-#endif
 
 /* We're using the following interrupts:
    - TXINT (DMAC completed all transmisions, underrun or DCD change)
    - all DMA interrupts
 */
-
 	sca_set_carrier(port);
 
-#ifdef __HD64570_H
 	/* MSCI TX INT and RX INT A IRQ enable */
 	sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
 	sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
@@ -591,21 +513,8 @@ static void sca_open(struct net_device *dev)
 	/* enable DMA IRQ */
 	sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
 		IER1, card);
-#else
-	/* MSCI TXINT and RXINTA interrupt enable */
-	sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
-		 card);
-	/* DMA & MSCI IRQ enable */
-	sca_outl(sca_inl(IER0, card) |
-		 (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
-#endif
 
-#ifdef __HD64570_H
 	sca_out(port->tmc, msci + TMC, card); /* Restore registers */
-#else
-	sca_out(port->tmc, msci + TMCR, card);
-	sca_out(port->tmc, msci + TMCT, card);
-#endif
 	sca_out(port->rxs, msci + RXS, card);
 	sca_out(port->txs, msci + TXS, card);
 	sca_out(CMD_TX_ENABLE, msci + CMD, card);
@@ -615,7 +524,6 @@ static void sca_open(struct net_device *dev)
 }
 
 
-
 static void sca_close(struct net_device *dev)
 {
 	port_t *port = dev_to_port(dev);
@@ -623,23 +531,17 @@ static void sca_close(struct net_device *dev)
 
 	/* reset channel */
 	sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
-#ifdef __HD64570_H
 	/* disable MSCI interrupts */
 	sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
 		IER0, card);
 	/* disable DMA interrupts */
 	sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
 		IER1, card);
-#else
-	/* disable DMA & MSCI IRQ */
-	sca_outl(sca_inl(IER0, card) &
-		 (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
-#endif
+
 	netif_stop_queue(dev);
 }
 
 
-
 static int sca_attach(struct net_device *dev, unsigned short encoding,
 		      unsigned short parity)
 {
@@ -653,11 +555,7 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
 	if (parity != PARITY_NONE &&
 	    parity != PARITY_CRC16_PR0 &&
 	    parity != PARITY_CRC16_PR1 &&
-#ifdef __HD64570_H
 	    parity != PARITY_CRC16_PR0_CCITT &&
-#else
-	    parity != PARITY_CRC32_PR1_CCITT &&
-#endif
 	    parity != PARITY_CRC16_PR1_CCITT)
 		return -EINVAL;
 
@@ -667,34 +565,30 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
 }
 
 
-
 #ifdef DEBUG_RINGS
 static void sca_dump_rings(struct net_device *dev)
 {
 	port_t *port = dev_to_port(dev);
 	card_t *card = port_to_card(port);
 	u16 cnt;
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
-	u8 page;
-#endif
+#ifndef PAGE0_ALWAYS_MAPPED
+	u8 page = sca_get_page(card);
 
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
-	page = sca_get_page(card);
 	openwin(card, 0);
 #endif
 
 	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
-	       sca_ina(get_dmac_rx(port) + CDAL, card),
-	       sca_ina(get_dmac_rx(port) + EDAL, card),
+	       sca_inw(get_dmac_rx(port) + CDAL, card),
+	       sca_inw(get_dmac_rx(port) + EDAL, card),
 	       sca_in(DSR_RX(phy_node(port)), card), port->rxin,
-	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in");
+	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
 	for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
 		printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
 
 	printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
 	       "last=%u %sactive",
-	       sca_ina(get_dmac_tx(port) + CDAL, card),
-	       sca_ina(get_dmac_tx(port) + EDAL, card),
+	       sca_inw(get_dmac_tx(port) + CDAL, card),
+	       sca_inw(get_dmac_tx(port) + EDAL, card),
 	       sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
 	       sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
 
@@ -702,12 +596,8 @@ static void sca_dump_rings(struct net_device *dev)
 		printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
 	printk("\n");
 
-	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
-	       "ST: %02x %02x %02x %02x"
-#ifdef __HD64572_H
-	       " %02x"
-#endif
-	       ", FST: %02x CST: %02x %02x\n",
+	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
+	       " FST: %02x CST: %02x %02x\n",
 	       sca_in(get_msci(port) + MD0, card),
 	       sca_in(get_msci(port) + MD1, card),
 	       sca_in(get_msci(port) + MD2, card),
@@ -715,52 +605,33 @@ static void sca_dump_rings(struct net_device *dev)
 	       sca_in(get_msci(port) + ST1, card),
 	       sca_in(get_msci(port) + ST2, card),
 	       sca_in(get_msci(port) + ST3, card),
-#ifdef __HD64572_H
-	       sca_in(get_msci(port) + ST4, card),
-#endif
 	       sca_in(get_msci(port) + FST, card),
 	       sca_in(get_msci(port) + CST0, card),
 	       sca_in(get_msci(port) + CST1, card));
 
-#ifdef __HD64572_H
-	printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
-	       sca_inl(ISR0, card), sca_inl(ISR1, card));
-#else
 	printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
 	       sca_in(ISR1, card), sca_in(ISR2, card));
-#endif
 
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
 	openwin(card, page); /* Restore original page */
 #endif
 }
 #endif /* DEBUG_RINGS */
 
 
-
 static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	port_t *port = dev_to_port(dev);
 	card_t *card = port_to_card(port);
 	pkt_desc __iomem *desc;
 	u32 buff, len;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	u8 page;
 	u32 maxlen;
-#endif
 
 	spin_lock_irq(&port->lock);
 
 	desc = desc_address(port, port->txin + 1, 1);
-	if (readb(&desc->stat)) { /* allow 1 packet gap */
-		/* should never happen - previous xmit should stop queue */
-#ifdef DEBUG_PKT
-		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
-#endif
-		netif_stop_queue(dev);
-		spin_unlock_irq(&port->lock);
-		return 1;	/* request packet to be queued */
-	}
+	BUG_ON(readb(&desc->stat));	/* previous xmit should stop queue */
 
 #ifdef DEBUG_PKT
 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
@@ -770,7 +641,6 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
 	desc = desc_address(port, port->txin, 1);
 	buff = buffer_offset(port, port->txin, 1);
 	len = skb->len;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	page = buff / winsize(card);
 	buff = buff % winsize(card);
 	maxlen = winsize(card) - buff;
@@ -780,12 +650,10 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
 		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
 		openwin(card, page + 1);
 		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
-	}
-	else
-#endif
+	} else
 		memcpy_toio(winbase(card) + buff, skb->data, len);
 
-#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+#ifndef PAGE0_ALWAYS_MAPPED
 	openwin(card, 0);	/* select pkt_desc table page back */
 #endif
 	writew(len, &desc->len);
@@ -793,7 +661,7 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	port->txin = next_desc(port, port->txin, 1);
-	sca_outa(desc_offset(port, port->txin, 1),
+	sca_outw(desc_offset(port, port->txin, 1),
 		 get_dmac_tx(port) + EDAL, card);
 
 	sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
@@ -809,40 +677,29 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-
 #ifdef NEED_DETECT_RAM
-static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+				    u32 ramsize)
 {
 	/* Round RAM size to 32 bits, fill from end to start */
 	u32 i = ramsize &= ~3;
-
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 	u32 size = winsize(card);
 
 	openwin(card, (i - 4) / size); /* select last window */
-#endif
+
 	do {
 		i -= 4;
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 		if ((i + 4) % size == 0)
 			openwin(card, i / size);
 		writel(i ^ 0x12345678, rambase + i % size);
-#else
-		writel(i ^ 0x12345678, rambase + i);
-#endif
-	}while (i > 0);
+	} while (i > 0);
 
 	for (i = 0; i < ramsize ; i += 4) {
-#ifndef ALL_PAGES_ALWAYS_MAPPED
 		if (i % size == 0)
 			openwin(card, i / size);
 
 		if (readl(rambase + i % size) != (i ^ 0x12345678))
 			break;
-#else
-		if (readl(rambase + i) != (i ^ 0x12345678))
-			break;
-#endif
 	}
 
 	return i;
@@ -850,7 +707,6 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
 #endif /* NEED_DETECT_RAM */
 
 
-
 static void __devinit sca_init(card_t *card, int wait_states)
 {
 	sca_out(wait_states, WCRL, card);   /* Wait Control */
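
One technique in the last hunks above deserves a note: sca_detect_ram() sizes the on-board RAM by filling it from the end with an address-derived pattern (i ^ 0x12345678) and reading it back from the start; if the address lines wrap, later high-address writes alias earlier low addresses, and the first readback mismatch marks the real size. A sketch of the probe with the window paging stripped out, i.e. roughly the ALL_PAGES_ALWAYS_MAPPED branch this patch removes:

	static u32 detect_ram(u8 __iomem *rambase, u32 ramsize)
	{
		u32 i = ramsize & ~3;		/* round down to whole words */

		do {				/* fill from end to start */
			i -= 4;
			writel(i ^ 0x12345678, rambase + i);
		} while (i > 0);

		for (i = 0; i < ramsize; i += 4)	/* verify from start */
			if (readl(rambase + i) != (i ^ 0x12345678))
				break;		/* aliased or missing RAM */
		return i;
	}
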
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
new file mode 100644
index 000000000000..0bcc0b5f22d7
--- /dev/null
+++ b/drivers/net/wan/hd64572.c
@@ -0,0 +1,640 @@
+/*
+ * Hitachi (now Renesas) SCA-II HD64572 driver for Linux
+ *
+ * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Source of information: HD64572 SCA-II User's Manual
+ *
+ * We use the following SCA memory map:
+ *
+ * Packet buffer descriptor rings - starting from card->rambase:
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
+ *
+ * Packet data buffers - starting from card->rambase + buff_offset:
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
+ * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers (if used)
+ * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/hdlc.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include "hd64572.h"
+
+#define NAPI_WEIGHT		16
+
+#define get_msci(port)	  (port->chan ?	  MSCI1_OFFSET :   MSCI0_OFFSET)
+#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+#define sca_in(reg, card)	    readb(card->scabase + (reg))
+#define sca_out(value, reg, card)   writeb(value, card->scabase + (reg))
+#define sca_inw(reg, card)	    readw(card->scabase + (reg))
+#define sca_outw(value, reg, card)  writew(value, card->scabase + (reg))
+#define sca_inl(reg, card)	    readl(card->scabase + (reg))
+#define sca_outl(value, reg, card)  writel(value, card->scabase + (reg))
+
+static int sca_poll(struct napi_struct *napi, int budget);
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+	return dev_to_hdlc(dev)->priv;
+}
+
+static inline void enable_intr(port_t *port)
+{
+	/* enable DMIB and MSCI RXINTA interrupts */
+	sca_outl(sca_inl(IER0, port->card) |
+		 (port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
+}
+
+static inline void disable_intr(port_t *port)
+{
+	sca_outl(sca_inl(IER0, port->card) &
+		 (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
+}
+
+static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
+{
+	u16 rx_buffs = port->card->rx_ring_buffers;
+	u16 tx_buffs = port->card->tx_ring_buffers;
+
+	desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
+	return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
+}
+
+
+static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+{
+	/* Descriptor offset always fits in 16 bits */
+	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
+}
+
+
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+					     int transmit)
+{
+	return (pkt_desc __iomem *)(port->card->rambase +
+				    desc_offset(port, desc, transmit));
+}
+
+
+static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
+{
+	return port->card->buff_offset +
+		desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
+}
+
+
+static inline void sca_set_carrier(port_t *port)
+{
+	if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+		       port->netdev.name);
+#endif
+		netif_carrier_on(port->netdev);
+	} else {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+		       port->netdev.name);
+#endif
+		netif_carrier_off(port->netdev);
+	}
+}
+
+
+static void sca_init_port(port_t *port)
+{
+	card_t *card = port->card;
+	u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
+	int transmit, i;
+
+	port->rxin = 0;
+	port->txin = 0;
+	port->txlast = 0;
+
+	for (transmit = 0; transmit < 2; transmit++) {
+		u16 buffs = transmit ? card->tx_ring_buffers
+			: card->rx_ring_buffers;
+
+		for (i = 0; i < buffs; i++) {
+			pkt_desc __iomem *desc = desc_address(port, i, transmit);
+			u16 chain_off = desc_offset(port, i + 1, transmit);
+			u32 buff_off = buffer_offset(port, i, transmit);
+
+			writel(chain_off, &desc->cp);
+			writel(buff_off, &desc->bp);
+			writew(0, &desc->len);
+			writeb(0, &desc->stat);
+		}
+	}
+
+	/* DMA disable - to halt state */
+	sca_out(0, DSR_RX(port->chan), card);
+	sca_out(0, DSR_TX(port->chan), card);
+
+	/* software ABORT - to initial state */
+	sca_out(DCR_ABORT, DCR_RX(port->chan), card);
+	sca_out(DCR_ABORT, DCR_TX(port->chan), card);
+
+	/* current desc addr */
+	sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
+	sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
+		 dmac_rx + EDAL, card);
+	sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
+	sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
+
+	/* clear frame end interrupt counter */
+	sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
+	sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);
+
+	/* Receive */
+	sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
+	sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
+	sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
+	sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */
+
+	/* Transmit */
+	sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
+	sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
+
+	sca_set_carrier(port);
+	netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
+}
+
+
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+	u16 msci = get_msci(port);
+	card_t* card = port->card;
+
+	if (sca_in(msci + ST1, card) & ST1_CDCD) {
+		/* Reset MSCI CDCD status bit */
+		sca_out(ST1_CDCD, msci + ST1, card);
+		sca_set_carrier(port);
+	}
+}
+
+
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+			  u16 rxin)
+{
+	struct net_device *dev = port->netdev;
+	struct sk_buff *skb;
+	u16 len;
+	u32 buff;
+
+	len = readw(&desc->len);
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return;
+	}
+
+	buff = buffer_offset(port, rxin, 0);
+	memcpy_fromio(skb->data, card->rambase + buff, len);
+
+	skb_put(skb, len);
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+	skb->protocol = hdlc_type_trans(skb, dev);
+	netif_receive_skb(skb);
+}
+
+
+/* Receive DMA service */
+static inline int sca_rx_done(port_t *port, int budget)
+{
+	struct net_device *dev = port->netdev;
+	u16 dmac = get_dmac_rx(port);
+	card_t *card = port->card;
+	u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
+	int received = 0;
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_RX(port->chan), card);
+
+	if (stat & DSR_BOF)
+		/* Dropped one or more frames */
+		dev->stats.rx_over_errors++;
+
+	while (received < budget) {
+		u32 desc_off = desc_offset(port, port->rxin, 0);
+		pkt_desc __iomem *desc;
+		u32 cda = sca_inl(dmac + CDAL, card);
+
+		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+			break;	/* No frame received */
+
+		desc = desc_address(port, port->rxin, 0);
+		stat = readb(&desc->stat);
+		if (!(stat & ST_RX_EOM))
+			port->rxpart = 1; /* partial frame received */
+		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+			dev->stats.rx_errors++;
+			if (stat & ST_RX_OVERRUN)
+				dev->stats.rx_fifo_errors++;
+			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+					  ST_RX_RESBIT)) || port->rxpart)
+				dev->stats.rx_frame_errors++;
+			else if (stat & ST_RX_CRC)
+				dev->stats.rx_crc_errors++;
+			if (stat & ST_RX_EOM)
+				port->rxpart = 0; /* received last fragment */
+		} else {
+			sca_rx(card, port, desc, port->rxin);
+			received++;
+		}
+
+		/* Set new error descriptor address */
+		sca_outl(desc_off, dmac + EDAL, card);
+		port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
+	}
+
+	/* make sure RX DMA is enabled */
+	sca_out(DSR_DE, DSR_RX(port->chan), card);
+	return received;
+}
+
+
+/* Transmit DMA service */
+static inline void sca_tx_done(port_t *port)
+{
+	struct net_device *dev = port->netdev;
+	card_t* card = port->card;
+	u8 stat;
+
+	spin_lock(&port->lock);
+
+	stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_TX(port->chan), card);
+
+	while (1) {
+		pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
+		u8 stat = readb(&desc->stat);
+
+		if (!(stat & ST_TX_OWNRSHP))
+			break;	/* not yet transmitted */
+		if (stat & ST_TX_UNDRRUN) {
+			dev->stats.tx_errors++;
+			dev->stats.tx_fifo_errors++;
+		} else {
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += readw(&desc->len);
+		}
+		writeb(0, &desc->stat);	/* Free descriptor */
+		port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
+	}
+
+	netif_wake_queue(dev);
+	spin_unlock(&port->lock);
+}
+
+
+static int sca_poll(struct napi_struct *napi, int budget)
+{
+	port_t *port = container_of(napi, port_t, napi);
+	u32 isr0 = sca_inl(ISR0, port->card);
+	int received = 0;
+
+	if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
+		sca_msci_intr(port);
+
+	if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
+		sca_tx_done(port);
+
+	if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
+		received = sca_rx_done(port, budget);
+
+	if (received < budget) {
+		netif_rx_complete(port->netdev, napi);
+		enable_intr(port);
+	}
+
+	return received;
+}
+
+static irqreturn_t sca_intr(int irq, void *dev_id)
+{
+	card_t *card = dev_id;
+	u32 isr0 = sca_inl(ISR0, card);
+	int i, handled = 0;
+
+	for (i = 0; i < 2; i++) {
+		port_t *port = get_port(card, i);
+		if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
+			handled = 1;
+			disable_intr(port);
+			netif_rx_schedule(port->netdev, &port->napi);
+		}
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+
+static void sca_set_port(port_t *port)
+{
+	card_t* card = port->card;
+	u16 msci = get_msci(port);
+	u8 md2 = sca_in(msci + MD2, card);
+	unsigned int tmc, br = 10, brv = 1024;
+
+
+	if (port->settings.clock_rate > 0) {
+		/* Try lower br for better accuracy*/
+		do {
+			br--;
+			brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
+		}while (br > 1 && tmc <= 128);
+
+		if (tmc < 1) {
+			tmc = 1;
+			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
+			brv = 1;
+		} else if (tmc > 255)
+			tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
+	} else {
+		br = 9; /* Minimum clock rate */
+		tmc = 256;	/* 8bit = 0 */
+		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
+	}
+
+	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+	port->tmc = tmc;
+
+	/* baud divisor - time constant*/
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+
+	/* Set BRG bits */
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+	else
+		md2 &= ~MD2_LOOPBACK;
+
+	sca_out(md2, msci + MD2, card);
+
+}
+
+
+static void sca_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t* card = port->card;
+	u16 msci = get_msci(port);
+	u8 md0, md2;
+
+	switch(port->encoding) {
+	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
+	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
+	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
+	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
+	default:		md2 = MD2_MANCHESTER;
+	}
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+
+	switch(port->parity) {
+	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
+	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
+	case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
+	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
+	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
+	}
+
+	sca_out(CMD_RESET, msci + CMD, card);
+	sca_out(md0, msci + MD0, card);
+	sca_out(0x00, msci + MD1, card); /* no address field check */
+	sca_out(md2, msci + MD2, card);
+	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+	/* Skip the rest of underrun frame */
+	sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
+	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
+	sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
+	sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
+	sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
+	sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
+
+/* We're using the following interrupts:
+   - RXINTA (DCD changes only)
+   - DMIB (EOM - single frame transfer complete)
+*/
+	sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);
+
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+	sca_out(CMD_TX_ENABLE, msci + CMD, card);
+	sca_out(CMD_RX_ENABLE, msci + CMD, card);
+
+	sca_set_carrier(port);
+	enable_intr(port);
+	napi_enable(&port->napi);
+	netif_start_queue(dev);
+}
+
+
+static void sca_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	/* reset channel */
+	sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
+	disable_intr(port);
+	napi_disable(&port->napi);
+	netif_stop_queue(dev);
+}
+
+
+static int sca_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
+{
+	if (encoding != ENCODING_NRZ &&
+	    encoding != ENCODING_NRZI &&
+	    encoding != ENCODING_FM_MARK &&
+	    encoding != ENCODING_FM_SPACE &&
+	    encoding != ENCODING_MANCHESTER)
+		return -EINVAL;
+
+	if (parity != PARITY_NONE &&
+	    parity != PARITY_CRC16_PR0 &&
+	    parity != PARITY_CRC16_PR1 &&
+	    parity != PARITY_CRC32_PR1_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT)
+		return -EINVAL;
+
+	dev_to_port(dev)->encoding = encoding;
+	dev_to_port(dev)->parity = parity;
+	return 0;
+}
+
+
+#ifdef DEBUG_RINGS
+static void sca_dump_rings(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port->card;
+	u16 cnt;
+
+	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
+	       sca_inl(get_dmac_rx(port) + CDAL, card),
+	       sca_inl(get_dmac_rx(port) + EDAL, card),
+	       sca_in(DSR_RX(port->chan), card), port->rxin,
+	       sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
+	for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
+		printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+
+	printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	       "last=%u %sactive",
+	       sca_inl(get_dmac_tx(port) + CDAL, card),
+	       sca_inl(get_dmac_tx(port) + EDAL, card),
+	       sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
+	       sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
+
+	for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
+		printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+	printk("\n");
+
+	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
+	       " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
+	       sca_in(get_msci(port) + MD0, card),
+	       sca_in(get_msci(port) + MD1, card),
+	       sca_in(get_msci(port) + MD2, card),
+	       sca_in(get_msci(port) + ST0, card),
+	       sca_in(get_msci(port) + ST1, card),
+	       sca_in(get_msci(port) + ST2, card),
+	       sca_in(get_msci(port) + ST3, card),
+	       sca_in(get_msci(port) + ST4, card),
+	       sca_in(get_msci(port) + FST, card),
+	       sca_in(get_msci(port) + CST0, card),
+	       sca_in(get_msci(port) + CST1, card));
+
+	printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
+	       sca_inl(ISR0, card), sca_inl(ISR1, card));
+}
+#endif /* DEBUG_RINGS */
+
+
+static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port->card;
+	pkt_desc __iomem *desc;
+	u32 buff, len;
+
+	spin_lock_irq(&port->lock);
+
+	desc = desc_address(port, port->txin + 1, 1);
+	BUG_ON(readb(&desc->stat));	/* previous xmit should stop queue */
+
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+
+	desc = desc_address(port, port->txin, 1);
+	buff = buffer_offset(port, port->txin, 1);
+	len = skb->len;
+	memcpy_toio(card->rambase + buff, skb->data, len);
+
+	writew(len, &desc->len);
+	writeb(ST_TX_EOM, &desc->stat);
+	dev->trans_start = jiffies;
+
+	port->txin = (port->txin + 1) % card->tx_ring_buffers;
+	sca_outl(desc_offset(port, port->txin, 1),
+		 get_dmac_tx(port) + EDAL, card);
+
+	sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
+
+	desc = desc_address(port, port->txin + 1, 1);
+	if (readb(&desc->stat)) /* allow 1 packet gap */
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&port->lock);
+
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+				    u32 ramsize)
+{
+	/* Round RAM size to 32 bits, fill from end to start */
+	u32 i = ramsize &= ~3;
+
+	do {
+		i -= 4;
+		writel(i ^ 0x12345678, rambase + i);
+	} while (i > 0);
+
+	for (i = 0; i < ramsize ; i += 4) {
+		if (readl(rambase + i) != (i ^ 0x12345678))
+			break;
+	}
+
+	return i;
+}
+
+
+static void __devinit sca_init(card_t *card, int wait_states)
+{
+	sca_out(wait_states, WCRL, card); /* Wait Control */
+	sca_out(wait_states, WCRM, card);
+	sca_out(wait_states, WCRH, card);
+
+	sca_out(0, DMER, card);	/* DMA Master disable */
+	sca_out(0x03, PCR, card); /* DMA priority */
+	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+	sca_out(0, DSR_TX(0), card);
+	sca_out(0, DSR_RX(1), card);
+	sca_out(0, DSR_TX(1), card);
+	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
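
The new hd64572.c is also a NAPI conversion: the hard interrupt handler only masks the port's interrupt sources and schedules the poll function, which drains RX within the given budget and re-enables interrupts once it falls below it. A minimal sketch of that handshake using the 2.6.27-era API the driver itself calls above (netif_rx_schedule/netif_rx_complete); example_port and the helper functions are hypothetical stand-ins for the driver's port_t, disable_intr(), enable_intr() and sca_rx_done():

	static irqreturn_t example_intr(int irq, void *dev_id)
	{
		struct example_port *port = dev_id;

		disable_port_irqs(port);	/* mask this port's sources */
		netif_rx_schedule(port->netdev, &port->napi);
		return IRQ_HANDLED;
	}

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_port *port =
			container_of(napi, struct example_port, napi);
		int done = rx_ring_service(port, budget);

		if (done < budget) {		/* ring drained */
			netif_rx_complete(port->netdev, napi);
			enable_port_irqs(port);	/* rearm interrupts */
		}
		return done;	/* == budget keeps the poll scheduled */
	}
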
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 4efe9e6d32d5..72fae217f1c4 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -2,7 +2,7 @@
  * Generic HDLC support routines for Linux
  * Point-to-point protocol support
  *
- * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -18,87 +18,632 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
20#include <linux/poll.h> 20#include <linux/poll.h>
21#include <linux/rtnetlink.h>
22#include <linux/skbuff.h> 21#include <linux/skbuff.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <net/syncppp.h> 23#include <linux/spinlock.h>
24
25#define DEBUG_CP 0 /* also bytes# to dump */
26#define DEBUG_STATE 0
27#define DEBUG_HARD_HEADER 0
28
29#define HDLC_ADDR_ALLSTATIONS 0xFF
30#define HDLC_CTRL_UI 0x03
31
32#define PID_LCP 0xC021
33#define PID_IP 0x0021
34#define PID_IPCP 0x8021
35#define PID_IPV6 0x0057
36#define PID_IPV6CP 0x8057
37
38enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
39enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
40 CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
41 LCP_DISC_REQ, CP_CODES};
42#if DEBUG_CP
43static const char *const code_names[CP_CODES] = {
44 "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
45 "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
46};
47static char debug_buffer[64 + 3 * DEBUG_CP];
48#endif
49
50enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};
51
52struct hdlc_header {
53 u8 address;
54 u8 control;
55 __be16 protocol;
56};
57
58struct cp_header {
59 u8 code;
60 u8 id;
61 __be16 len;
62};
63
25 64
26struct ppp_state { 65struct proto {
27 struct ppp_device pppdev; 66 struct net_device *dev;
28 struct ppp_device *syncppp_ptr; 67 struct timer_list timer;
29 int (*old_change_mtu)(struct net_device *dev, int new_mtu); 68 unsigned long timeout;
69 u16 pid; /* protocol ID */
70 u8 state;
71 u8 cr_id; /* ID of last Configuration-Request */
72 u8 restart_counter;
30}; 73};
31 74
75struct ppp {
76 struct proto protos[IDX_COUNT];
77 spinlock_t lock;
78 unsigned long last_pong;
79 unsigned int req_timeout, cr_retries, term_retries;
80 unsigned int keepalive_interval, keepalive_timeout;
81 u8 seq; /* local sequence number for requests */
82 u8 echo_id; /* ID of last Echo-Request (LCP) */
83};
84
85enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
86 STATES, STATE_MASK = 0xF};
87enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
88 RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
89enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
90 SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
91
92#if DEBUG_STATE
93static const char *const state_names[STATES] = {
94 "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
95 "Opened"
96};
97static const char *const event_names[EVENTS] = {
98 "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
99 "RTR", "RTA", "RUC", "RXJ+", "RXJ-"
100};
101#endif
102
103static struct sk_buff_head tx_queue; /* used when holding the spin lock */
104
32static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr); 105static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
33 106
107static inline struct ppp* get_ppp(struct net_device *dev)
108{
109 return (struct ppp *)dev_to_hdlc(dev)->state;
110}
34 111
35static inline struct ppp_state* state(hdlc_device *hdlc) 112static inline struct proto* get_proto(struct net_device *dev, u16 pid)
36{ 113{
37 return(struct ppp_state *)(hdlc->state); 114 struct ppp *ppp = get_ppp(dev);
115
116 switch (pid) {
117 case PID_LCP:
118 return &ppp->protos[IDX_LCP];
119 case PID_IPCP:
120 return &ppp->protos[IDX_IPCP];
121 case PID_IPV6CP:
122 return &ppp->protos[IDX_IPV6CP];
123 default:
124 return NULL;
125 }
38} 126}
39 127
128static inline const char* proto_name(u16 pid)
129{
130 switch (pid) {
131 case PID_LCP:
132 return "LCP";
133 case PID_IPCP:
134 return "IPCP";
135 case PID_IPV6CP:
136 return "IPV6CP";
137 default:
138 return NULL;
139 }
140}
40 141
41static int ppp_open(struct net_device *dev) 142static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
42{ 143{
43 hdlc_device *hdlc = dev_to_hdlc(dev); 144 struct hdlc_header *data = (struct hdlc_header*)skb->data;
44 int (*old_ioctl)(struct net_device *, struct ifreq *, int); 145
45 int result; 146 if (skb->len < sizeof(struct hdlc_header))
147 return htons(ETH_P_HDLC);
148 if (data->address != HDLC_ADDR_ALLSTATIONS ||
149 data->control != HDLC_CTRL_UI)
150 return htons(ETH_P_HDLC);
151
152 switch (data->protocol) {
153 case __constant_htons(PID_IP):
154 skb_pull(skb, sizeof(struct hdlc_header));
155 return htons(ETH_P_IP);
46 156
47 dev->ml_priv = &state(hdlc)->syncppp_ptr; 157 case __constant_htons(PID_IPV6):
48 state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; 158 skb_pull(skb, sizeof(struct hdlc_header));
49 state(hdlc)->pppdev.dev = dev; 159 return htons(ETH_P_IPV6);
50 160
51 old_ioctl = dev->do_ioctl; 161 default:
52 state(hdlc)->old_change_mtu = dev->change_mtu; 162 return htons(ETH_P_HDLC);
53 sppp_attach(&state(hdlc)->pppdev);
54 /* sppp_attach nukes them. We don't need syncppp's ioctl */
55 dev->do_ioctl = old_ioctl;
56 state(hdlc)->pppdev.sppp.pp_flags &= ~PP_CISCO;
57 dev->type = ARPHRD_PPP;
58 result = sppp_open(dev);
59 if (result) {
60 sppp_detach(dev);
61 return result;
62 } 163 }
164}
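
On the wire every frame starts with the two fixed HDLC bytes (all-stations address 0xFF, UI control 0x03) followed by a 16-bit protocol id; only IP and IPv6 payloads are pulled and handed to the stack, while everything else keeps ETH_P_HDLC so it reaches the control code below. A stand-alone sketch of that classification over raw bytes (classify is a hypothetical helper, not part of the driver):

#include <stdio.h>

static const char *classify(const unsigned char *f, unsigned int len)
{
	unsigned int pid;

	if (len < 4 || f[0] != 0xFF || f[1] != 0x03)
		return "ETH_P_HDLC (short or non-UI frame)";
	pid = (f[2] << 8) | f[3];
	switch (pid) {
	case 0x0021: return "IPv4, payload at offset 4";
	case 0x0057: return "IPv6, payload at offset 4";
	default:     return "ETH_P_HDLC (control, e.g. 0xC021 = LCP)";
	}
}

int main(void)
{
	const unsigned char ip[]  = { 0xFF, 0x03, 0x00, 0x21, 0x45 };
	const unsigned char lcp[] = { 0xFF, 0x03, 0xC0, 0x21, 0x01 };

	printf("%s\n%s\n", classify(ip, sizeof(ip)), classify(lcp, sizeof(lcp)));
	return 0;
}
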
63 165
64 return 0; 166
167static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
168 u16 type, const void *daddr, const void *saddr,
169 unsigned int len)
170{
171 struct hdlc_header *data;
172#if DEBUG_HARD_HEADER
173 printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
174#endif
175
176 skb_push(skb, sizeof(struct hdlc_header));
177 data = (struct hdlc_header*)skb->data;
178
179 data->address = HDLC_ADDR_ALLSTATIONS;
180 data->control = HDLC_CTRL_UI;
181 switch (type) {
182 case ETH_P_IP:
183 data->protocol = htons(PID_IP);
184 break;
185 case ETH_P_IPV6:
186 data->protocol = htons(PID_IPV6);
187 break;
188 case PID_LCP:
189 case PID_IPCP:
190 case PID_IPV6CP:
191 data->protocol = htons(type);
192 break;
193 default: /* unknown protocol */
194 data->protocol = 0;
195 }
196 return sizeof(struct hdlc_header);
65} 197}
66 198
67 199
200static void ppp_tx_flush(void)
201{
202 struct sk_buff *skb;
203 while ((skb = skb_dequeue(&tx_queue)) != NULL)
204 dev_queue_xmit(skb);
205}
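
This helper exists because most control packets are built while ppp->lock is held (in timer or receive context), where dev_queue_xmit() cannot be called directly; ppp_tx_cp() below therefore parks them on the global tx_queue, and the lock holder calls ppp_tx_flush() after unlocking. A toy sketch of the same defer-then-flush pattern with a plain singly linked list (queue_pkt/flush_queue are hypothetical names; the real tx_queue is an sk_buff list with its own internal lock):

#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; int id; };

static struct pkt *head, **tail = &head;

static void queue_pkt(int id)		/* called "under the lock" */
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->next = NULL;
	p->id = id;
	*tail = p;
	tail = &p->next;
}

static void flush_queue(void)		/* called after "unlock" */
{
	struct pkt *p;

	while ((p = head) != NULL) {
		head = p->next;
		if (!head)
			tail = &head;
		printf("xmit packet %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	/* lock(); */
	queue_pkt(1);	/* e.g. a Configure-Request from ppp_cp_event() */
	queue_pkt(2);	/* e.g. an Echo-Reply from ppp_rx() */
	/* unlock(); */
	flush_queue();
	return 0;
}
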
68 206
69static void ppp_close(struct net_device *dev) 207static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
208 u8 id, unsigned int len, const void *data)
70{ 209{
71 hdlc_device *hdlc = dev_to_hdlc(dev); 210 struct sk_buff *skb;
211 struct cp_header *cp;
212 unsigned int magic_len = 0;
213 static u32 magic;
214
215#if DEBUG_CP
216 int i;
217 char *ptr;
218#endif
219
220 if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
221 magic_len = sizeof(magic);
222
223 skb = dev_alloc_skb(sizeof(struct hdlc_header) +
224 sizeof(struct cp_header) + magic_len + len);
225 if (!skb) {
226 printk(KERN_WARNING "%s: out of memory in ppp_tx_cp()\n",
227 dev->name);
228 return;
229 }
230 skb_reserve(skb, sizeof(struct hdlc_header));
231
232 cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
233 cp->code = code;
234 cp->id = id;
235 cp->len = htons(sizeof(struct cp_header) + magic_len + len);
236
237 if (magic_len)
238 memcpy(skb_put(skb, magic_len), &magic, magic_len);
239 if (len)
240 memcpy(skb_put(skb, len), data, len);
241
242#if DEBUG_CP
243 BUG_ON(code >= CP_CODES);
244 ptr = debug_buffer;
245 *ptr = '\x0';
246 for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
247 sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
248 ptr += strlen(ptr);
249 }
250 printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
251 proto_name(pid), code_names[code], id, debug_buffer);
252#endif
72 253
73 sppp_close(dev); 254 ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
74 sppp_detach(dev);
75 255
76 dev->change_mtu = state(hdlc)->old_change_mtu; 256 skb->priority = TC_PRIO_CONTROL;
77 dev->mtu = HDLC_MAX_MTU; 257 skb->dev = dev;
78 dev->hard_header_len = 16; 258 skb_reset_network_header(skb);
259 skb_queue_tail(&tx_queue, skb);
79} 260}
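
The resulting packet image is a 4-byte HDLC header, a 4-byte CP header whose length field covers everything from the CP header onward, an optional 4-byte magic number (Echo-Request/Reply only, and always zero here since the static magic above is never set), then the payload. A sketch that lays out the same bytes by hand (build_cp is a hypothetical helper, not driver code):

#include <stdio.h>
#include <string.h>

/* Build an LCP/IPCP control packet image; returns total length. */
static unsigned int build_cp(unsigned char *buf, unsigned int pid,
			     unsigned char code, unsigned char id,
			     const void *data, unsigned int len)
{
	/* 4-byte zero magic for LCP Echo-Request (9) / Echo-Reply (10) */
	unsigned int magic_len = (pid == 0xC021 && (code == 9 || code == 10)) ? 4 : 0;
	unsigned int cp_len = 4 + magic_len + len;	/* CP header onward */
	unsigned char *p = buf;

	*p++ = 0xFF; *p++ = 0x03;			/* all-stations, UI */
	*p++ = pid >> 8; *p++ = pid & 0xFF;
	*p++ = code; *p++ = id;
	*p++ = cp_len >> 8; *p++ = cp_len & 0xFF;
	memset(p, 0, magic_len);
	p += magic_len;
	memcpy(p, data, len);
	return 8 + magic_len + len;
}

int main(void)
{
	unsigned char buf[64];
	unsigned int i, n = build_cp(buf, 0xC021, 9, 1, "", 0);

	for (i = 0; i < n; i++)
		printf("%02X ", buf[i]);	/* FF 03 C0 21 09 01 00 08 ... */
	printf("\n");
	return 0;
}
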
80 261
81 262
263/* State transition table (compare STD-51)
264 Events Actions
265 TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count
266 TO- = Timeout with counter expired zrc = Zero-Restart-Count
267
268 RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request
269 RCR- = Receive-Configure-Request (Bad)
270 RCA = Receive-Configure-Ack sca = Send-Configure-Ack
271 RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej
272
273 RTR = Receive-Terminate-Request str = Send-Terminate-Request
274 RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack
275
276 RUC = Receive-Unknown-Code scj = Send-Code-Reject
277 RXJ+ = Receive-Code-Reject (permitted)
278 or Receive-Protocol-Reject
279 RXJ- = Receive-Code-Reject (catastrophic)
280 or Receive-Protocol-Reject
281*/
282static int cp_table[EVENTS][STATES] = {
283 /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
284 0 1 2 3 4 5 6 */
285 {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */
286 { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */
287 { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */
288 { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */
289 { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */
290 { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */
291 { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */
292 { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */
293 { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */
294 { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */
295 { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */
296 { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */
297 { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */
298};
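
Each cell packs the next state into the low four bits (STATE_MASK) and ORs independent action flags above them, so a single table lookup drives both the transition and the packets to send. A stand-alone sketch of how ppp_cp_event() below consumes a cell, with the enums copied from above:

#include <stdio.h>

enum { CLOSED, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
       STATES, STATE_MASK = 0xF };
enum { INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
       SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000 };

int main(void)
{
	/* RCR+ received in STOPPED: the cell reads IRC|SCR|SCA|5 */
	int action = IRC | SCR | SCA | ACK_SENT;

	printf("next state = %d (ACK_SENT = %d)\n",
	       action & STATE_MASK, ACK_SENT);
	if (action & IRC)
		printf("Initialize-Restart-Count\n");
	if (action & SCR)
		printf("Send-Configure-Request\n");
	if (action & SCA)
		printf("Send-Configure-Ack\n");
	return 0;
}
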
299
82 300
83static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) 301/* SCA: RCR+ must supply id, len and data
302 SCN: RCR- must supply code, id, len and data
303 STA: RTR must supply id
304 SCJ: RUC must supply CP packet len and data */
305static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
306 u8 id, unsigned int len, void *data)
84{ 307{
85 return __constant_htons(ETH_P_WAN_PPP); 308 int old_state, action;
309 struct ppp *ppp = get_ppp(dev);
310 struct proto *proto = get_proto(dev, pid);
311
312 old_state = proto->state;
313 BUG_ON(old_state >= STATES);
314 BUG_ON(event >= EVENTS);
315
316#if DEBUG_STATE
317 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
318 proto_name(pid), event_names[event], state_names[proto->state]);
319#endif
320
321 action = cp_table[event][old_state];
322
323 proto->state = action & STATE_MASK;
324 if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
325 mod_timer(&proto->timer, proto->timeout =
326 jiffies + ppp->req_timeout * HZ);
327 if (action & ZRC)
328 proto->restart_counter = 0;
329 if (action & IRC)
330 proto->restart_counter = (proto->state == STOPPING) ?
331 ppp->term_retries : ppp->cr_retries;
332
333 if (action & SCR) /* send Configure-Request */
334 ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
335 0, NULL);
336 if (action & SCA) /* send Configure-Ack */
337 ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
338 if (action & SCN) /* send Configure-Nak/Reject */
339 ppp_tx_cp(dev, pid, code, id, len, data);
340 if (action & STR) /* send Terminate-Request */
341 ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
342 if (action & STA) /* send Terminate-Ack */
343 ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
344 if (action & SCJ) /* send Code-Reject */
345 ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
346
347 if (old_state != OPENED && proto->state == OPENED) {
348 printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid));
349 if (pid == PID_LCP) {
350 netif_dormant_off(dev);
351 ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
352 ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
353 ppp->last_pong = jiffies;
354 mod_timer(&proto->timer, proto->timeout =
355 jiffies + ppp->keepalive_interval * HZ);
356 }
357 }
358 if (old_state == OPENED && proto->state != OPENED) {
359 printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid));
360 if (pid == PID_LCP) {
361 netif_dormant_on(dev);
362 ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
363 ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
364 }
365 }
366 if (old_state != CLOSED && proto->state == CLOSED)
367 del_timer(&proto->timer);
368
369#if DEBUG_STATE
370 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
371 proto_name(pid), event_names[event], state_names[proto->state]);
372#endif
86} 373}
87 374
88 375
376static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
377 unsigned int len, u8 *data)
378{
379 static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
380 u8 *opt, *out;
381 unsigned int nak_len = 0, rej_len = 0;
382
383 if (!(out = kmalloc(len, GFP_ATOMIC))) {
384 dev->stats.rx_dropped++;
385 return; /* out of memory, ignore CR packet */
386 }
387
388 for (opt = data; len; len -= opt[1], opt += opt[1]) {
 389 if (len < 2 || opt[1] < 2 || len < opt[1]) {
 390 dev->stats.rx_errors++;
 391 kfree(out); return; /* bad packet: free out first, drop silently */
392 }
393
394 if (pid == PID_LCP)
395 switch (opt[0]) {
396 case LCP_OPTION_MRU:
397 continue; /* MRU always OK and > 1500 bytes? */
398
399 case LCP_OPTION_ACCM: /* async control character map */
400 if (!memcmp(opt, valid_accm,
401 sizeof(valid_accm)))
402 continue;
403 if (!rej_len) { /* NAK it */
404 memcpy(out + nak_len, valid_accm,
405 sizeof(valid_accm));
406 nak_len += sizeof(valid_accm);
407 continue;
408 }
409 break;
410 case LCP_OPTION_MAGIC:
411 if (opt[1] != 6 || (!opt[2] && !opt[3] &&
412 !opt[4] && !opt[5]))
413 break; /* reject invalid magic number */
414 continue;
415 }
416 /* reject this option */
417 memcpy(out + rej_len, opt, opt[1]);
418 rej_len += opt[1];
419 }
420
421 if (rej_len)
422 ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
423 else if (nak_len)
424 ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
425 else
426 ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, len, data);
427
428 kfree(out);
429}
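
Configuration options are (type, length, value) records whose length byte covers the whole record, so a safe walk needs length >= 2 and length <= the bytes remaining before advancing (the guard above enforces exactly that). A stand-alone sketch of the walk over a Configure-Request carrying an ACCM and a Magic-Number option (walk_options is a hypothetical helper):

#include <stdio.h>

static void walk_options(const unsigned char *data, unsigned int len)
{
	const unsigned char *opt;

	for (opt = data; len; len -= opt[1], opt += opt[1]) {
		if (len < 2 || opt[1] < 2 || opt[1] > len) {
			printf("malformed option list\n");
			return;
		}
		printf("option type %u, %u bytes\n", opt[0], opt[1]);
	}
}

int main(void)
{
	const unsigned char cr[] = {
		2, 6, 0x00, 0x00, 0x00, 0x00,	/* ACCM, all-zero map */
		5, 6, 0x12, 0x34, 0x56, 0x78,	/* Magic-Number */
	};

	walk_options(cr, sizeof(cr));
	return 0;
}
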
430
431static int ppp_rx(struct sk_buff *skb)
432{
433 struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
434 struct net_device *dev = skb->dev;
435 struct ppp *ppp = get_ppp(dev);
436 struct proto *proto;
437 struct cp_header *cp;
438 unsigned long flags;
439 unsigned int len;
440 u16 pid;
441#if DEBUG_CP
442 int i;
443 char *ptr;
444#endif
445
446 spin_lock_irqsave(&ppp->lock, flags);
447 /* Check HDLC header */
448 if (skb->len < sizeof(struct hdlc_header))
449 goto rx_error;
450 cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
451 if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
452 hdr->control != HDLC_CTRL_UI)
453 goto rx_error;
454
455 pid = ntohs(hdr->protocol);
456 proto = get_proto(dev, pid);
457 if (!proto) {
458 if (ppp->protos[IDX_LCP].state == OPENED)
459 ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
460 ++ppp->seq, skb->len + 2, &hdr->protocol);
461 goto rx_error;
462 }
463
464 len = ntohs(cp->len);
465 if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
466 skb->len < len /* truncated packet? */)
467 goto rx_error;
468 skb_pull(skb, sizeof(struct cp_header));
469 len -= sizeof(struct cp_header);
470
471 /* HDLC and CP headers stripped from skb */
472#if DEBUG_CP
473 if (cp->code < CP_CODES)
474 sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
475 cp->id);
476 else
477 sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
478 ptr = debug_buffer + strlen(debug_buffer);
479 for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
480 sprintf(ptr, " %02X", skb->data[i]);
481 ptr += strlen(ptr);
482 }
483 printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
484 debug_buffer);
485#endif
486
487 /* LCP only */
488 if (pid == PID_LCP)
489 switch (cp->code) {
490 case LCP_PROTO_REJ:
491 pid = ntohs(*(__be16*)skb->data);
492 if (pid == PID_LCP || pid == PID_IPCP ||
493 pid == PID_IPV6CP)
494 ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
495 0, NULL);
496 goto out;
497
498 case LCP_ECHO_REQ: /* send Echo-Reply */
499 if (len >= 4 && proto->state == OPENED)
500 ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
501 cp->id, len - 4, skb->data + 4);
502 goto out;
503
504 case LCP_ECHO_REPLY:
505 if (cp->id == ppp->echo_id)
506 ppp->last_pong = jiffies;
507 goto out;
508
509 case LCP_DISC_REQ: /* discard */
510 goto out;
511 }
512
513 /* LCP, IPCP and IPV6CP */
514 switch (cp->code) {
515 case CP_CONF_REQ:
516 ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
517 goto out;
518
519 case CP_CONF_ACK:
520 if (cp->id == proto->cr_id)
521 ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
522 goto out;
523
524 case CP_CONF_REJ:
525 case CP_CONF_NAK:
526 if (cp->id == proto->cr_id)
527 ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
528 goto out;
529
530 case CP_TERM_REQ:
531 ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
532 goto out;
533
534 case CP_TERM_ACK:
535 ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
536 goto out;
537
538 case CP_CODE_REJ:
539 ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
540 goto out;
541
542 default:
543 len += sizeof(struct cp_header);
544 if (len > dev->mtu)
545 len = dev->mtu;
546 ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
547 goto out;
548 }
549 goto out;
550
551rx_error:
552 dev->stats.rx_errors++;
553out:
554 spin_unlock_irqrestore(&ppp->lock, flags);
555 dev_kfree_skb_any(skb);
556 ppp_tx_flush();
557 return NET_RX_DROP;
558}
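
Before anything is dispatched the receive path cross-checks the CP length field against the frame: it must cover at least a full CP header and must not exceed what was actually received (frames may legitimately be longer than cp->len because of link padding). A sketch of just those checks over raw bytes (validate is a hypothetical helper):

#include <stdio.h>

/* Returns the CP payload length, or -1 if the headers don't validate. */
static int validate(const unsigned char *f, unsigned int frame_len)
{
	unsigned int cp_len;

	if (frame_len < 4 || f[0] != 0xFF || f[1] != 0x03)
		return -1;			/* bad HDLC header */
	if (frame_len < 8)
		return -1;			/* no room for a CP header */
	cp_len = (f[6] << 8) | f[7];
	if (cp_len < 4 || cp_len > frame_len - 4)
		return -1;			/* inconsistent CP length */
	return (int)(cp_len - 4);		/* payload after CP header */
}

int main(void)
{
	/* LCP Echo-Request, id 7, length 8 = header + 4-byte magic */
	const unsigned char f[] = { 0xFF, 0x03, 0xC0, 0x21,
				    0x09, 0x07, 0x00, 0x08,
				    0x00, 0x00, 0x00, 0x00 };

	printf("payload: %d bytes\n", validate(f, sizeof(f)));
	return 0;
}
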
559
560
561static void ppp_timer(unsigned long arg)
562{
563 struct proto *proto = (struct proto *)arg;
564 struct ppp *ppp = get_ppp(proto->dev);
565 unsigned long flags;
566
567 spin_lock_irqsave(&ppp->lock, flags);
568 switch (proto->state) {
569 case STOPPING:
570 case REQ_SENT:
571 case ACK_RECV:
572 case ACK_SENT:
573 if (proto->restart_counter) {
574 ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
575 0, NULL);
576 proto->restart_counter--;
577 } else
578 ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
579 0, NULL);
580 break;
581
582 case OPENED:
583 if (proto->pid != PID_LCP)
584 break;
585 if (time_after(jiffies, ppp->last_pong +
586 ppp->keepalive_timeout * HZ)) {
587 printk(KERN_INFO "%s: Link down\n", proto->dev->name);
588 ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
589 ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
590 } else { /* send keep-alive packet */
591 ppp->echo_id = ++ppp->seq;
592 ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
593 ppp->echo_id, 0, NULL);
594 proto->timer.expires = jiffies +
595 ppp->keepalive_interval * HZ;
596 add_timer(&proto->timer);
597 }
598 break;
599 }
600 spin_unlock_irqrestore(&ppp->lock, flags);
601 ppp_tx_flush();
602}
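
For LCP the same timer doubles as the keepalive: every keepalive_interval seconds it sends an Echo-Request tagged with the current sequence number, a matching Echo-Reply (handled in ppp_rx() above) refreshes last_pong, and once last_pong falls more than keepalive_timeout behind, the link is torn down and renegotiated. A sketch of that bookkeeping with a fake tick counter standing in for jiffies (struct link and both helpers are hypothetical):

#include <stdio.h>

#define HZ 100
static unsigned long jiffies;	/* fake tick counter */
#define time_after(a, b) ((long)((b) - (a)) < 0)

struct link {
	unsigned long last_pong;
	unsigned int keepalive_timeout;		/* seconds */
	unsigned char seq, echo_id;
};

static void keepalive_tick(struct link *l)
{
	if (time_after(jiffies, l->last_pong + l->keepalive_timeout * HZ)) {
		printf("link down: restart LCP\n");
		l->last_pong = jiffies;
	} else {
		l->echo_id = ++l->seq;
		printf("send Echo-Request id %u\n", l->echo_id);
	}
}

static void echo_reply(struct link *l, unsigned char id)
{
	if (id == l->echo_id)		/* stale replies are ignored */
		l->last_pong = jiffies;
}

int main(void)
{
	struct link l = { .keepalive_timeout = 60 };

	keepalive_tick(&l);		/* sends id 1 */
	jiffies += 10 * HZ;
	echo_reply(&l, 1);		/* refreshes last_pong */
	jiffies += 61 * HZ;
	keepalive_tick(&l);		/* no pong for 61 s: restart */
	return 0;
}
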
603
604
605static void ppp_start(struct net_device *dev)
606{
607 struct ppp *ppp = get_ppp(dev);
608 int i;
609
610 for (i = 0; i < IDX_COUNT; i++) {
611 struct proto *proto = &ppp->protos[i];
612 proto->dev = dev;
613 init_timer(&proto->timer);
614 proto->timer.function = ppp_timer;
615 proto->timer.data = (unsigned long)proto;
616 proto->state = CLOSED;
617 }
618 ppp->protos[IDX_LCP].pid = PID_LCP;
619 ppp->protos[IDX_IPCP].pid = PID_IPCP;
620 ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;
621
622 ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
623}
624
625static void ppp_stop(struct net_device *dev)
626{
627 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
628}
89 629
90static struct hdlc_proto proto = { 630static struct hdlc_proto proto = {
91 .open = ppp_open, 631 .start = ppp_start,
92 .close = ppp_close, 632 .stop = ppp_stop,
93 .type_trans = ppp_type_trans, 633 .type_trans = ppp_type_trans,
94 .ioctl = ppp_ioctl, 634 .ioctl = ppp_ioctl,
635 .netif_rx = ppp_rx,
95 .module = THIS_MODULE, 636 .module = THIS_MODULE,
96}; 637};
97 638
639static const struct header_ops ppp_header_ops = {
640 .create = ppp_hard_header,
641};
98 642
99static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) 643static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
100{ 644{
101 hdlc_device *hdlc = dev_to_hdlc(dev); 645 hdlc_device *hdlc = dev_to_hdlc(dev);
646 struct ppp *ppp;
102 int result; 647 int result;
103 648
104 switch (ifr->ifr_settings.type) { 649 switch (ifr->ifr_settings.type) {
@@ -109,25 +654,35 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
109 return 0; /* return protocol only, no settable parameters */ 654 return 0; /* return protocol only, no settable parameters */
110 655
111 case IF_PROTO_PPP: 656 case IF_PROTO_PPP:
112 if(!capable(CAP_NET_ADMIN)) 657 if (!capable(CAP_NET_ADMIN))
113 return -EPERM; 658 return -EPERM;
114 659
115 if(dev->flags & IFF_UP) 660 if (dev->flags & IFF_UP)
116 return -EBUSY; 661 return -EBUSY;
117 662
118 /* no settable parameters */ 663 /* no settable parameters */
119 664
 120 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 665 result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
121 if (result) 666 if (result)
122 return result; 667 return result;
123 668
124 result = attach_hdlc_protocol(dev, &proto, 669 result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
125 sizeof(struct ppp_state));
126 if (result) 670 if (result)
127 return result; 671 return result;
672
673 ppp = get_ppp(dev);
674 spin_lock_init(&ppp->lock);
675 ppp->req_timeout = 2;
676 ppp->cr_retries = 10;
677 ppp->term_retries = 2;
678 ppp->keepalive_interval = 10;
679 ppp->keepalive_timeout = 60;
680
128 dev->hard_start_xmit = hdlc->xmit; 681 dev->hard_start_xmit = hdlc->xmit;
682 dev->hard_header_len = sizeof(struct hdlc_header);
683 dev->header_ops = &ppp_header_ops;
129 dev->type = ARPHRD_PPP; 684 dev->type = ARPHRD_PPP;
130 netif_dormant_off(dev); 685 netif_dormant_on(dev);
131 return 0; 686 return 0;
132 } 687 }
133 688
@@ -137,12 +692,11 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
137 692
138static int __init mod_init(void) 693static int __init mod_init(void)
139{ 694{
695 skb_queue_head_init(&tx_queue);
140 register_hdlc_protocol(&proto); 696 register_hdlc_protocol(&proto);
141 return 0; 697 return 0;
142} 698}
143 699
144
145
146static void __exit mod_exit(void) 700static void __exit mod_exit(void)
147{ 701{
148 unregister_hdlc_protocol(&proto); 702 unregister_hdlc_protocol(&proto);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 0a566b0daacb..697715ae80f4 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -53,7 +53,7 @@ static const char* devname = "RISCom/N2";
53#define NEED_SCA_MSCI_INTR 53#define NEED_SCA_MSCI_INTR
54#define MAX_TX_BUFFERS 10 54#define MAX_TX_BUFFERS 10
55 55
56static char *hw = NULL; /* pointer to hw=xxx command line string */ 56static char *hw; /* pointer to hw=xxx command line string */
57 57
58/* RISCom/N2 Board Registers */ 58/* RISCom/N2 Board Registers */
59 59
@@ -145,7 +145,6 @@ static card_t **new_card = &first_card;
145 &(card)->ports[port] : NULL) 145 &(card)->ports[port] : NULL)
146 146
147 147
148
149static __inline__ u8 sca_get_page(card_t *card) 148static __inline__ u8 sca_get_page(card_t *card)
150{ 149{
151 return inb(card->io + N2_PSR) & PSR_PAGEBITS; 150 return inb(card->io + N2_PSR) & PSR_PAGEBITS;
@@ -159,9 +158,7 @@ static __inline__ void openwin(card_t *card, u8 page)
159} 158}
160 159
161 160
162 161#include "hd64570.c"
163#include "hd6457x.c"
164
165 162
166 163
167static void n2_set_iface(port_t *port) 164static void n2_set_iface(port_t *port)
@@ -478,7 +475,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
478 n2_destroy_card(card); 475 n2_destroy_card(card);
479 return -ENOBUFS; 476 return -ENOBUFS;
480 } 477 }
481 sca_init_sync_port(port); /* Set up SCA memory */ 478 sca_init_port(port); /* Set up SCA memory */
482 479
483 printk(KERN_INFO "%s: RISCom/N2 node %d\n", 480 printk(KERN_INFO "%s: RISCom/N2 node %d\n",
484 dev->name, port->phy_node); 481 dev->name, port->phy_node);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 222671165223..f247e5d9002a 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cyclades PC300 synchronous serial card driver for Linux 2 * Cyclades PC300 synchronous serial card driver for Linux
3 * 3 *
4 * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -11,7 +11,7 @@
11 * 11 *
12 * Sources of information: 12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual 13 * Hitachi HD64572 SCA-II User's Manual
14 * Cyclades PC300 Linux driver 14 * Original Cyclades PC300 Linux driver
15 * 15 *
16 * This driver currently supports only PC300/RSV (V.24/V.35) and 16 * This driver currently supports only PC300/RSV (V.24/V.35) and
17 * PC300/X21 cards. 17 * PC300/X21 cards.
@@ -37,17 +37,11 @@
37 37
38#include "hd64572.h" 38#include "hd64572.h"
39 39
40static const char* version = "Cyclades PC300 driver version: 1.17";
41static const char* devname = "PC300";
42
43#undef DEBUG_PKT 40#undef DEBUG_PKT
44#define DEBUG_RINGS 41#define DEBUG_RINGS
45 42
46#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */ 43#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
47#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */ 44#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
48#define ALL_PAGES_ALWAYS_MAPPED
49#define NEED_DETECT_RAM
50#define NEED_SCA_MSCI_INTR
51#define MAX_TX_BUFFERS 10 45#define MAX_TX_BUFFERS 10
52 46
53static int pci_clock_freq = 33000000; 47static int pci_clock_freq = 33000000;
@@ -81,7 +75,8 @@ typedef struct {
81 75
82 76
83typedef struct port_s { 77typedef struct port_s {
84 struct net_device *dev; 78 struct napi_struct napi;
79 struct net_device *netdev;
85 struct card_s *card; 80 struct card_s *card;
86 spinlock_t lock; /* TX lock */ 81 spinlock_t lock; /* TX lock */
87 sync_serial_settings settings; 82 sync_serial_settings settings;
@@ -93,7 +88,7 @@ typedef struct port_s {
93 u16 txin; /* tx ring buffer 'in' and 'last' pointers */ 88 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
94 u16 txlast; 89 u16 txlast;
95 u8 rxs, txs, tmc; /* SCA registers */ 90 u8 rxs, txs, tmc; /* SCA registers */
96 u8 phy_node; /* physical port # - 0 or 1 */ 91 u8 chan; /* physical port # - 0 or 1 */
97}port_t; 92}port_t;
98 93
99 94
@@ -114,21 +109,10 @@ typedef struct card_s {
114}card_t; 109}card_t;
115 110
116 111
117#define sca_in(reg, card) readb(card->scabase + (reg))
118#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
119#define sca_inw(reg, card) readw(card->scabase + (reg))
120#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
121#define sca_inl(reg, card) readl(card->scabase + (reg))
122#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
123
124#define port_to_card(port) (port->card)
125#define log_node(port) (port->phy_node)
126#define phy_node(port) (port->phy_node)
127#define winbase(card) (card->rambase)
128#define get_port(card, port) ((port) < (card)->n_ports ? \ 112#define get_port(card, port) ((port) < (card)->n_ports ? \
129 (&(card)->ports[port]) : (NULL)) 113 (&(card)->ports[port]) : (NULL))
130 114
131#include "hd6457x.c" 115#include "hd64572.c"
132 116
133 117
134static void pc300_set_iface(port_t *port) 118static void pc300_set_iface(port_t *port)
@@ -139,8 +123,8 @@ static void pc300_set_iface(port_t *port)
139 u8 rxs = port->rxs & CLK_BRG_MASK; 123 u8 rxs = port->rxs & CLK_BRG_MASK;
140 u8 txs = port->txs & CLK_BRG_MASK; 124 u8 txs = port->txs & CLK_BRG_MASK;
141 125
142 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS, 126 sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
143 port_to_card(port)); 127 port->card);
144 switch(port->settings.clock_type) { 128 switch(port->settings.clock_type) {
145 case CLOCK_INT: 129 case CLOCK_INT:
146 rxs |= CLK_BRG; /* BRG output */ 130 rxs |= CLK_BRG; /* BRG output */
@@ -172,10 +156,10 @@ static void pc300_set_iface(port_t *port)
172 if (port->card->type == PC300_RSV) { 156 if (port->card->type == PC300_RSV) {
173 if (port->iface == IF_IFACE_V35) 157 if (port->iface == IF_IFACE_V35)
174 writel(card->init_ctrl_value | 158 writel(card->init_ctrl_value |
175 PC300_CHMEDIA_MASK(port->phy_node), init_ctrl); 159 PC300_CHMEDIA_MASK(port->chan), init_ctrl);
176 else 160 else
177 writel(card->init_ctrl_value & 161 writel(card->init_ctrl_value &
178 ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl); 162 ~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
179 } 163 }
180} 164}
181 165
@@ -280,10 +264,8 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
280 card_t *card = pci_get_drvdata(pdev); 264 card_t *card = pci_get_drvdata(pdev);
281 265
282 for (i = 0; i < 2; i++) 266 for (i = 0; i < 2; i++)
283 if (card->ports[i].card) { 267 if (card->ports[i].card)
284 struct net_device *dev = port_to_dev(&card->ports[i]); 268 unregister_hdlc_device(card->ports[i].netdev);
285 unregister_hdlc_device(dev);
286 }
287 269
288 if (card->irq) 270 if (card->irq)
289 free_irq(card->irq, card); 271 free_irq(card->irq, card);
@@ -298,10 +280,10 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
298 pci_release_regions(pdev); 280 pci_release_regions(pdev);
299 pci_disable_device(pdev); 281 pci_disable_device(pdev);
300 pci_set_drvdata(pdev, NULL); 282 pci_set_drvdata(pdev, NULL);
301 if (card->ports[0].dev) 283 if (card->ports[0].netdev)
302 free_netdev(card->ports[0].dev); 284 free_netdev(card->ports[0].netdev);
303 if (card->ports[1].dev) 285 if (card->ports[1].netdev)
304 free_netdev(card->ports[1].dev); 286 free_netdev(card->ports[1].netdev);
305 kfree(card); 287 kfree(card);
306} 288}
307 289
@@ -318,12 +300,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
318 u32 scaphys; /* SCA memory base */ 300 u32 scaphys; /* SCA memory base */
319 u32 plxphys; /* PLX registers memory base */ 301 u32 plxphys; /* PLX registers memory base */
320 302
321#ifndef MODULE
322 static int printed_version;
323 if (!printed_version++)
324 printk(KERN_INFO "%s\n", version);
325#endif
326
327 i = pci_enable_device(pdev); 303 i = pci_enable_device(pdev);
328 if (i) 304 if (i)
329 return i; 305 return i;
@@ -343,27 +319,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
343 } 319 }
344 pci_set_drvdata(pdev, card); 320 pci_set_drvdata(pdev, card);
345 321
346 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
347 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
348 card->type = PC300_TE; /* not fully supported */
349 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
350 card->type = PC300_X21;
351 else
352 card->type = PC300_RSV;
353
354 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
355 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
356 card->n_ports = 1;
357 else
358 card->n_ports = 2;
359
360 for (i = 0; i < card->n_ports; i++)
361 if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
362 printk(KERN_ERR "pc300: unable to allocate memory\n");
363 pc300_pci_remove_one(pdev);
364 return -ENOMEM;
365 }
366
367 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE || 322 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
368 pci_resource_len(pdev, 2) != PC300_SCA_SIZE || 323 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
369 pci_resource_len(pdev, 3) < 16384) { 324 pci_resource_len(pdev, 3) < 16384) {
@@ -372,13 +327,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
372 return -EFAULT; 327 return -EFAULT;
373 } 328 }
374 329
375 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK; 330 plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
376 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE); 331 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
377 332
378 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK; 333 scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
379 card->scabase = ioremap(scaphys, PC300_SCA_SIZE); 334 card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
380 335
381 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; 336 ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
382 card->rambase = pci_ioremap_bar(pdev, 3); 337 card->rambase = pci_ioremap_bar(pdev, 3);
383 338
384 if (card->plxbase == NULL || 339 if (card->plxbase == NULL ||
@@ -393,6 +348,27 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
393 card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl); 348 card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
394 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys); 349 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
395 350
351 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
352 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
353 card->type = PC300_TE; /* not fully supported */
354 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
355 card->type = PC300_X21;
356 else
357 card->type = PC300_RSV;
358
359 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
360 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
361 card->n_ports = 1;
362 else
363 card->n_ports = 2;
364
365 for (i = 0; i < card->n_ports; i++)
366 if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
367 printk(KERN_ERR "pc300: unable to allocate memory\n");
368 pc300_pci_remove_one(pdev);
369 return -ENOMEM;
370 }
371
396 /* Reset PLX */ 372 /* Reset PLX */
397 p = &card->plxbase->init_ctrl; 373 p = &card->plxbase->init_ctrl;
398 writel(card->init_ctrl_value | 0x40000000, p); 374 writel(card->init_ctrl_value | 0x40000000, p);
@@ -446,7 +422,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
446 writew(0x0041, &card->plxbase->intr_ctrl_stat); 422 writew(0x0041, &card->plxbase->intr_ctrl_stat);
447 423
448 /* Allocate IRQ */ 424 /* Allocate IRQ */
449 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) { 425 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
450 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n", 426 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
451 pdev->irq); 427 pdev->irq);
452 pc300_pci_remove_one(pdev); 428 pc300_pci_remove_one(pdev);
@@ -463,9 +439,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
463 439
464 for (i = 0; i < card->n_ports; i++) { 440 for (i = 0; i < card->n_ports; i++) {
465 port_t *port = &card->ports[i]; 441 port_t *port = &card->ports[i];
466 struct net_device *dev = port_to_dev(port); 442 struct net_device *dev = port->netdev;
467 hdlc_device *hdlc = dev_to_hdlc(dev); 443 hdlc_device *hdlc = dev_to_hdlc(dev);
468 port->phy_node = i; 444 port->chan = i;
469 445
470 spin_lock_init(&port->lock); 446 spin_lock_init(&port->lock);
471 dev->irq = card->irq; 447 dev->irq = card->irq;
@@ -484,6 +460,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
484 else 460 else
485 port->iface = IF_IFACE_V35; 461 port->iface = IF_IFACE_V35;
486 462
463 sca_init_port(port);
487 if (register_hdlc_device(dev)) { 464 if (register_hdlc_device(dev)) {
488 printk(KERN_ERR "pc300: unable to register hdlc " 465 printk(KERN_ERR "pc300: unable to register hdlc "
489 "device\n"); 466 "device\n");
@@ -491,10 +468,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
491 pc300_pci_remove_one(pdev); 468 pc300_pci_remove_one(pdev);
492 return -ENOBUFS; 469 return -ENOBUFS;
493 } 470 }
494 sca_init_sync_port(port); /* Set up SCA memory */
495 471
496 printk(KERN_INFO "%s: PC300 node %d\n", 472 printk(KERN_INFO "%s: PC300 channel %d\n",
497 dev->name, port->phy_node); 473 dev->name, port->chan);
498 } 474 }
499 return 0; 475 return 0;
500} 476}
@@ -524,9 +500,6 @@ static struct pci_driver pc300_pci_driver = {
524 500
525static int __init pc300_init_module(void) 501static int __init pc300_init_module(void)
526{ 502{
527#ifdef MODULE
528 printk(KERN_INFO "%s\n", version);
529#endif
530 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { 503 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
531 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n"); 504 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
532 return -EINVAL; 505 return -EINVAL;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index bba111cdeebe..1104d3a692f7 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Goramo PCI200SYN synchronous serial card driver for Linux 2 * Goramo PCI200SYN synchronous serial card driver for Linux
3 * 3 *
4 * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -33,17 +33,11 @@
33 33
34#include "hd64572.h" 34#include "hd64572.h"
35 35
36static const char* version = "Goramo PCI200SYN driver version: 1.16";
37static const char* devname = "PCI200SYN";
38
39#undef DEBUG_PKT 36#undef DEBUG_PKT
40#define DEBUG_RINGS 37#define DEBUG_RINGS
41 38
42#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */ 39#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
43#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */ 40#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
44#define ALL_PAGES_ALWAYS_MAPPED
45#define NEED_DETECT_RAM
46#define NEED_SCA_MSCI_INTR
47#define MAX_TX_BUFFERS 10 41#define MAX_TX_BUFFERS 10
48 42
49static int pci_clock_freq = 33000000; 43static int pci_clock_freq = 33000000;
@@ -68,7 +62,8 @@ typedef struct {
68 62
69 63
70typedef struct port_s { 64typedef struct port_s {
71 struct net_device *dev; 65 struct napi_struct napi;
66 struct net_device *netdev;
72 struct card_s *card; 67 struct card_s *card;
73 spinlock_t lock; /* TX lock */ 68 spinlock_t lock; /* TX lock */
74 sync_serial_settings settings; 69 sync_serial_settings settings;
@@ -79,7 +74,7 @@ typedef struct port_s {
79 u16 txin; /* tx ring buffer 'in' and 'last' pointers */ 74 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
80 u16 txlast; 75 u16 txlast;
81 u8 rxs, txs, tmc; /* SCA registers */ 76 u8 rxs, txs, tmc; /* SCA registers */
82 u8 phy_node; /* physical port # - 0 or 1 */ 77 u8 chan; /* physical port # - 0 or 1 */
83}port_t; 78}port_t;
84 79
85 80
@@ -97,17 +92,6 @@ typedef struct card_s {
97}card_t; 92}card_t;
98 93
99 94
100#define sca_in(reg, card) readb(card->scabase + (reg))
101#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
102#define sca_inw(reg, card) readw(card->scabase + (reg))
103#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
104#define sca_inl(reg, card) readl(card->scabase + (reg))
105#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
106
107#define port_to_card(port) (port->card)
108#define log_node(port) (port->phy_node)
109#define phy_node(port) (port->phy_node)
110#define winbase(card) (card->rambase)
111#define get_port(card, port) (&card->ports[port]) 95#define get_port(card, port) (&card->ports[port])
112#define sca_flush(card) (sca_in(IER0, card)); 96#define sca_flush(card) (sca_in(IER0, card));
113 97
@@ -127,7 +111,7 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
127#undef memcpy_toio 111#undef memcpy_toio
128#define memcpy_toio new_memcpy_toio 112#define memcpy_toio new_memcpy_toio
129 113
130#include "hd6457x.c" 114#include "hd64572.c"
131 115
132 116
133static void pci200_set_iface(port_t *port) 117static void pci200_set_iface(port_t *port)
@@ -137,8 +121,8 @@ static void pci200_set_iface(port_t *port)
137 u8 rxs = port->rxs & CLK_BRG_MASK; 121 u8 rxs = port->rxs & CLK_BRG_MASK;
138 u8 txs = port->txs & CLK_BRG_MASK; 122 u8 txs = port->txs & CLK_BRG_MASK;
139 123
140 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS, 124 sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
141 port_to_card(port)); 125 port->card);
142 switch(port->settings.clock_type) { 126 switch(port->settings.clock_type) {
143 case CLOCK_INT: 127 case CLOCK_INT:
144 rxs |= CLK_BRG; /* BRG output */ 128 rxs |= CLK_BRG; /* BRG output */
@@ -180,7 +164,7 @@ static int pci200_open(struct net_device *dev)
180 164
181 sca_open(dev); 165 sca_open(dev);
182 pci200_set_iface(port); 166 pci200_set_iface(port);
183 sca_flush(port_to_card(port)); 167 sca_flush(port->card);
184 return 0; 168 return 0;
185} 169}
186 170
@@ -189,7 +173,7 @@ static int pci200_open(struct net_device *dev)
189static int pci200_close(struct net_device *dev) 173static int pci200_close(struct net_device *dev)
190{ 174{
191 sca_close(dev); 175 sca_close(dev);
192 sca_flush(port_to_card(dev_to_port(dev))); 176 sca_flush(dev_to_port(dev)->card);
193 hdlc_close(dev); 177 hdlc_close(dev);
194 return 0; 178 return 0;
195} 179}
@@ -242,7 +226,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
242 226
243 memcpy(&port->settings, &new_line, size); /* Update settings */ 227 memcpy(&port->settings, &new_line, size); /* Update settings */
244 pci200_set_iface(port); 228 pci200_set_iface(port);
245 sca_flush(port_to_card(port)); 229 sca_flush(port->card);
246 return 0; 230 return 0;
247 231
248 default: 232 default:
@@ -258,10 +242,8 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
258 card_t *card = pci_get_drvdata(pdev); 242 card_t *card = pci_get_drvdata(pdev);
259 243
260 for (i = 0; i < 2; i++) 244 for (i = 0; i < 2; i++)
261 if (card->ports[i].card) { 245 if (card->ports[i].card)
262 struct net_device *dev = port_to_dev(&card->ports[i]); 246 unregister_hdlc_device(card->ports[i].netdev);
263 unregister_hdlc_device(dev);
264 }
265 247
266 if (card->irq) 248 if (card->irq)
267 free_irq(card->irq, card); 249 free_irq(card->irq, card);
@@ -276,10 +258,10 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
276 pci_release_regions(pdev); 258 pci_release_regions(pdev);
277 pci_disable_device(pdev); 259 pci_disable_device(pdev);
278 pci_set_drvdata(pdev, NULL); 260 pci_set_drvdata(pdev, NULL);
279 if (card->ports[0].dev) 261 if (card->ports[0].netdev)
280 free_netdev(card->ports[0].dev); 262 free_netdev(card->ports[0].netdev);
281 if (card->ports[1].dev) 263 if (card->ports[1].netdev)
282 free_netdev(card->ports[1].dev); 264 free_netdev(card->ports[1].netdev);
283 kfree(card); 265 kfree(card);
284} 266}
285 267
@@ -296,12 +278,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
296 u32 scaphys; /* SCA memory base */ 278 u32 scaphys; /* SCA memory base */
297 u32 plxphys; /* PLX registers memory base */ 279 u32 plxphys; /* PLX registers memory base */
298 280
299#ifndef MODULE
300 static int printed_version;
301 if (!printed_version++)
302 printk(KERN_INFO "%s\n", version);
303#endif
304
305 i = pci_enable_device(pdev); 281 i = pci_enable_device(pdev);
306 if (i) 282 if (i)
307 return i; 283 return i;
@@ -320,9 +296,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
320 return -ENOBUFS; 296 return -ENOBUFS;
321 } 297 }
322 pci_set_drvdata(pdev, card); 298 pci_set_drvdata(pdev, card);
323 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); 299 card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
324 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); 300 card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
325 if (!card->ports[0].dev || !card->ports[1].dev) { 301 if (!card->ports[0].netdev || !card->ports[1].netdev) {
326 printk(KERN_ERR "pci200syn: unable to allocate memory\n"); 302 printk(KERN_ERR "pci200syn: unable to allocate memory\n");
327 pci200_pci_remove_one(pdev); 303 pci200_pci_remove_one(pdev);
328 return -ENOMEM; 304 return -ENOMEM;
@@ -398,7 +374,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
398 writew(readw(p) | 0x0040, p); 374 writew(readw(p) | 0x0040, p);
399 375
400 /* Allocate IRQ */ 376 /* Allocate IRQ */
401 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) { 377 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
402 printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", 378 printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
403 pdev->irq); 379 pdev->irq);
404 pci200_pci_remove_one(pdev); 380 pci200_pci_remove_one(pdev);
@@ -410,9 +386,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
410 386
411 for (i = 0; i < 2; i++) { 387 for (i = 0; i < 2; i++) {
412 port_t *port = &card->ports[i]; 388 port_t *port = &card->ports[i];
413 struct net_device *dev = port_to_dev(port); 389 struct net_device *dev = port->netdev;
414 hdlc_device *hdlc = dev_to_hdlc(dev); 390 hdlc_device *hdlc = dev_to_hdlc(dev);
415 port->phy_node = i; 391 port->chan = i;
416 392
417 spin_lock_init(&port->lock); 393 spin_lock_init(&port->lock);
418 dev->irq = card->irq; 394 dev->irq = card->irq;
@@ -426,6 +402,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
426 hdlc->xmit = sca_xmit; 402 hdlc->xmit = sca_xmit;
427 port->settings.clock_type = CLOCK_EXT; 403 port->settings.clock_type = CLOCK_EXT;
428 port->card = card; 404 port->card = card;
405 sca_init_port(port);
429 if (register_hdlc_device(dev)) { 406 if (register_hdlc_device(dev)) {
430 printk(KERN_ERR "pci200syn: unable to register hdlc " 407 printk(KERN_ERR "pci200syn: unable to register hdlc "
431 "device\n"); 408 "device\n");
@@ -433,10 +410,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
433 pci200_pci_remove_one(pdev); 410 pci200_pci_remove_one(pdev);
434 return -ENOBUFS; 411 return -ENOBUFS;
435 } 412 }
436 sca_init_sync_port(port); /* Set up SCA memory */
437 413
438 printk(KERN_INFO "%s: PCI200SYN node %d\n", 414 printk(KERN_INFO "%s: PCI200SYN channel %d\n",
439 dev->name, port->phy_node); 415 dev->name, port->chan);
440 } 416 }
441 417
442 sca_flush(card); 418 sca_flush(card);
@@ -464,9 +440,6 @@ static struct pci_driver pci200_pci_driver = {
464 440
465static int __init pci200_init_module(void) 441static int __init pci200_init_module(void)
466{ 442{
467#ifdef MODULE
468 printk(KERN_INFO "%s\n", version);
469#endif
470 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { 443 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
471 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n"); 444 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
472 return -EINVAL; 445 return -EINVAL;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
deleted file mode 100644
index 58ae8a2223af..000000000000
--- a/drivers/net/wan/syncppp.c
+++ /dev/null
@@ -1,1476 +0,0 @@
1/*
2 * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
3 * as well as a CISCO HDLC implementation. See the copyright
4 * message below for the original source.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the license, or (at your option) any later version.
10 *
11 * Note however. This code is also used in a different form by FreeBSD.
12 * Therefore when making any non OS specific change please consider
13 * contributing it back to the original author under the terms
14 * below in addition.
15 * -- Alan
16 *
17 * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
18 */
19
20/*
21 * Synchronous PPP/Cisco link level subroutines.
22 * Keepalive protocol implemented in both Cisco and PPP modes.
23 *
24 * Copyright (C) 1994 Cronyx Ltd.
25 * Author: Serge Vakulenko, <vak@zebub.msk.su>
26 *
27 * This software is distributed with NO WARRANTIES, not even the implied
28 * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Authors grant any other persons or organisations permission to use
31 * or modify this software as long as this message is kept with the software,
32 * all derivative works or modified versions.
33 *
34 * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
35 *
36 * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
37 */
38#undef DEBUG
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/errno.h>
43#include <linux/init.h>
44#include <linux/if_arp.h>
45#include <linux/skbuff.h>
46#include <linux/route.h>
47#include <linux/netdevice.h>
48#include <linux/inetdevice.h>
49#include <linux/random.h>
50#include <linux/pkt_sched.h>
51#include <linux/spinlock.h>
52#include <linux/rcupdate.h>
53
54#include <net/net_namespace.h>
55#include <net/syncppp.h>
56
57#include <asm/byteorder.h>
58#include <asm/uaccess.h>
59
60#define MAXALIVECNT 6 /* max. alive packets */
61
62#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
63#define PPP_UI 0x03 /* Unnumbered Information */
64#define PPP_IP 0x0021 /* Internet Protocol */
65#define PPP_ISO 0x0023 /* ISO OSI Protocol */
66#define PPP_XNS 0x0025 /* Xerox NS Protocol */
67#define PPP_IPX 0x002b /* Novell IPX Protocol */
68#define PPP_LCP 0xc021 /* Link Control Protocol */
69#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
70
71#define LCP_CONF_REQ 1 /* PPP LCP configure request */
72#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
73#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
74#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
75#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
76#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
77#define LCP_CODE_REJ 7 /* PPP LCP code reject */
78#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
79#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
80#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
81#define LCP_DISC_REQ 11 /* PPP LCP discard request */
82
83#define LCP_OPT_MRU 1 /* maximum receive unit */
84#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
85#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
86#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
87#define LCP_OPT_MAGIC 5 /* magic number */
88#define LCP_OPT_RESERVED 6 /* reserved */
89#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
90#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
91
92#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
93#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
94#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
95#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
96#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
97#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
98#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
99
100#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
101#define CISCO_UNICAST 0x0f /* Cisco unicast address */
102#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
103#define CISCO_ADDR_REQ 0 /* Cisco address request */
104#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
105#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
106
107struct ppp_header {
108 u8 address;
109 u8 control;
110 __be16 protocol;
111};
112#define PPP_HEADER_LEN sizeof (struct ppp_header)
113
114struct lcp_header {
115 u8 type;
116 u8 ident;
117 __be16 len;
118};
119#define LCP_HEADER_LEN sizeof (struct lcp_header)
120
121struct cisco_packet {
122 __be32 type;
123 __be32 par1;
124 __be32 par2;
125 __be16 rel;
126 __be16 time0;
127 __be16 time1;
128};
129#define CISCO_PACKET_LEN 18
130#define CISCO_BIG_PACKET_LEN 20
131
132static struct sppp *spppq;
133static struct timer_list sppp_keepalive_timer;
134static DEFINE_SPINLOCK(spppq_lock);
135
136/* global xmit queue for sending packets while spinlock is held */
137static struct sk_buff_head tx_queue;
138
139static void sppp_keepalive (unsigned long dummy);
140static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
141 u8 ident, u16 len, void *data);
142static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2);
143static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
144static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
145static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
146static void sppp_lcp_open (struct sppp *sp);
147static void sppp_ipcp_open (struct sppp *sp);
148static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
149 int len, u32 *magic);
150static void sppp_cp_timeout (unsigned long arg);
151static char *sppp_lcp_type_name (u8 type);
152static char *sppp_ipcp_type_name (u8 type);
153static void sppp_print_bytes (u8 *p, u16 len);
154
155static int debug;
156
157/* Flush global outgoing packet queue to dev_queue_xmit().
158 *
159 * dev_queue_xmit() must be called with interrupts enabled
160 * which means it can't be called with spinlocks held.
161 * If a packet needs to be sent while a spinlock is held,
162 * then put the packet into tx_queue, and call sppp_flush_xmit()
163 * after spinlock is released.
164 */
165static void sppp_flush_xmit(void)
166{
167 struct sk_buff *skb;
168 while ((skb = skb_dequeue(&tx_queue)) != NULL)
169 dev_queue_xmit(skb);
170}
171
172/*
173 * Interface down stub
174 */
175
176static void if_down(struct net_device *dev)
177{
178 struct sppp *sp = (struct sppp *)sppp_of(dev);
179
180 sp->pp_link_state=SPPP_LINK_DOWN;
181}
182
183/*
184 * Timeout routine activations.
185 */
186
187static void sppp_set_timeout(struct sppp *p,int s)
188{
189 if (! (p->pp_flags & PP_TIMO))
190 {
191 init_timer(&p->pp_timer);
192 p->pp_timer.function=sppp_cp_timeout;
193 p->pp_timer.expires=jiffies+s*HZ;
194 p->pp_timer.data=(unsigned long)p;
195 p->pp_flags |= PP_TIMO;
196 add_timer(&p->pp_timer);
197 }
198}
199
200static void sppp_clear_timeout(struct sppp *p)
201{
202 if (p->pp_flags & PP_TIMO)
203 {
204 del_timer(&p->pp_timer);
205 p->pp_flags &= ~PP_TIMO;
206 }
207}
208
209/**
210 * sppp_input - receive and process a WAN PPP frame
211 * @skb: The buffer to process
212 * @dev: The device it arrived on
213 *
214 * This can be called directly by cards that do not have
215 * timing constraints but is normally called from the network layer
216 * after interrupt servicing to process frames queued via netif_rx().
217 *
218 * We process the options in the card. If the frame is destined for
219 * the protocol stacks then it requeues the frame for the upper level
 220 * protocol. If it is a control frame it is processed and discarded
221 * here.
222 */
223
224static void sppp_input (struct net_device *dev, struct sk_buff *skb)
225{
226 struct ppp_header *h;
227 struct sppp *sp = (struct sppp *)sppp_of(dev);
228 unsigned long flags;
229
230 skb->dev=dev;
231 skb_reset_mac_header(skb);
232
233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
234 /* Too small packet, drop it. */
235 if (sp->pp_flags & PP_DEBUG)
236 printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
237 dev->name, skb->len);
238 kfree_skb(skb);
239 return;
240 }
241
242 /* Get PPP header. */
243 h = (struct ppp_header *)skb->data;
244 skb_pull(skb,sizeof(struct ppp_header));
245
246 spin_lock_irqsave(&sp->lock, flags);
247
248 switch (h->address) {
249 default: /* Invalid PPP packet. */
250 goto invalid;
251 case PPP_ALLSTATIONS:
252 if (h->control != PPP_UI)
253 goto invalid;
254 if (sp->pp_flags & PP_CISCO) {
255 if (sp->pp_flags & PP_DEBUG)
256 printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
257 dev->name,
258 h->address, h->control, ntohs (h->protocol));
259 goto drop;
260 }
261 switch (ntohs (h->protocol)) {
262 default:
263 if (sp->lcp.state == LCP_STATE_OPENED)
264 sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
265 ++sp->pp_seq, skb->len + 2,
266 &h->protocol);
267 if (sp->pp_flags & PP_DEBUG)
268 printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
269 dev->name,
270 h->address, h->control, ntohs (h->protocol));
271 goto drop;
272 case PPP_LCP:
273 sppp_lcp_input (sp, skb);
274 goto drop;
275 case PPP_IPCP:
276 if (sp->lcp.state == LCP_STATE_OPENED)
277 sppp_ipcp_input (sp, skb);
278 else
279 printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
280 goto drop;
281 case PPP_IP:
282 if (sp->ipcp.state == IPCP_STATE_OPENED) {
283 if(sp->pp_flags&PP_DEBUG)
284 printk(KERN_DEBUG "Yow an IP frame.\n");
285 skb->protocol=htons(ETH_P_IP);
286 netif_rx(skb);
287 goto done;
288 }
289 break;
290#ifdef IPX
291 case PPP_IPX:
292 /* IPX IPXCP not implemented yet */
293 if (sp->lcp.state == LCP_STATE_OPENED) {
294 skb->protocol=htons(ETH_P_IPX);
295 netif_rx(skb);
296 goto done;
297 }
298 break;
299#endif
300 }
301 break;
302 case CISCO_MULTICAST:
303 case CISCO_UNICAST:
304 /* Don't check the control field here (RFC 1547). */
305 if (! (sp->pp_flags & PP_CISCO)) {
306 if (sp->pp_flags & PP_DEBUG)
307 printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
308 dev->name,
309 h->address, h->control, ntohs (h->protocol));
310 goto drop;
311 }
312 switch (ntohs (h->protocol)) {
313 default:
314 goto invalid;
315 case CISCO_KEEPALIVE:
316 sppp_cisco_input (sp, skb);
317 goto drop;
318#ifdef CONFIG_INET
319 case ETH_P_IP:
320 skb->protocol=htons(ETH_P_IP);
321 netif_rx(skb);
322 goto done;
323#endif
324#ifdef CONFIG_IPX
325 case ETH_P_IPX:
326 skb->protocol=htons(ETH_P_IPX);
327 netif_rx(skb);
328 goto done;
329#endif
330 }
331 break;
332 }
333 goto drop;
334
335invalid:
336 if (sp->pp_flags & PP_DEBUG)
337 printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
338 dev->name, h->address, h->control, ntohs (h->protocol));
339drop:
340 kfree_skb(skb);
341done:
342 spin_unlock_irqrestore(&sp->lock, flags);
343 sppp_flush_xmit();
344 return;
345}
346
347/*
348 * Handle transmit packets.
349 */
350
351static int sppp_hard_header(struct sk_buff *skb,
352 struct net_device *dev, __u16 type,
353 const void *daddr, const void *saddr,
354 unsigned int len)
355{
356 struct sppp *sp = (struct sppp *)sppp_of(dev);
357 struct ppp_header *h;
358 skb_push(skb,sizeof(struct ppp_header));
359 h=(struct ppp_header *)skb->data;
360 if(sp->pp_flags&PP_CISCO)
361 {
362 h->address = CISCO_UNICAST;
363 h->control = 0;
364 }
365 else
366 {
367 h->address = PPP_ALLSTATIONS;
368 h->control = PPP_UI;
369 }
370 if(sp->pp_flags & PP_CISCO)
371 {
372 h->protocol = htons(type);
373 }
374 else switch(type)
375 {
376 case ETH_P_IP:
377 h->protocol = htons(PPP_IP);
378 break;
379 case ETH_P_IPX:
380 h->protocol = htons(PPP_IPX);
381 break;
382 }
383 return sizeof(struct ppp_header);
384}
385
386static const struct header_ops sppp_header_ops = {
387 .create = sppp_hard_header,
388};
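
/*
 * For reference, the 4-byte header built by sppp_hard_header() looks
 * like this on the wire (a sketch; the byte values follow from the
 * constants used above):
 *
 *   PPP mode:        FF 03 <proto>  e.g. FF 03 00 21 for an IPv4 frame
 *   Cisco HDLC mode: 0F 00 <type>   e.g. 0F 00 08 00 for an IPv4 frame
 *
 * <proto> is a PPP protocol number (PPP_IP == 0x0021), while <type> is
 * a plain Ethertype (ETH_P_IP == 0x0800).
 */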
389
390/*
391 * Send keepalive packets, every 10 seconds.
392 */
393
394static void sppp_keepalive (unsigned long dummy)
395{
396 struct sppp *sp;
397 unsigned long flags;
398
399 spin_lock_irqsave(&spppq_lock, flags);
400
401 for (sp=spppq; sp; sp=sp->pp_next)
402 {
403 struct net_device *dev = sp->pp_if;
404
405 /* Keepalive mode disabled or channel down? */
406 if (! (sp->pp_flags & PP_KEEPALIVE) ||
407 ! (dev->flags & IFF_UP))
408 continue;
409
410 spin_lock(&sp->lock);
411
412 /* No keepalive in PPP mode if LCP not opened yet. */
413 if (! (sp->pp_flags & PP_CISCO) &&
414 sp->lcp.state != LCP_STATE_OPENED) {
415 spin_unlock(&sp->lock);
416 continue;
417 }
418
419 if (sp->pp_alivecnt == MAXALIVECNT) {
420 /* No keepalive packets received. Stop the interface. */
421 printk (KERN_WARNING "%s: protocol down\n", dev->name);
422 if_down (dev);
423 if (! (sp->pp_flags & PP_CISCO)) {
424 /* Shut down the PPP link. */
425 sp->lcp.magic = jiffies;
426 sp->lcp.state = LCP_STATE_CLOSED;
427 sp->ipcp.state = IPCP_STATE_CLOSED;
428 sppp_clear_timeout (sp);
429 /* Initiate negotiation. */
430 sppp_lcp_open (sp);
431 }
432 }
433 if (sp->pp_alivecnt <= MAXALIVECNT)
434 ++sp->pp_alivecnt;
435 if (sp->pp_flags & PP_CISCO)
436 sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
437 sp->pp_rseq);
438 else if (sp->lcp.state == LCP_STATE_OPENED) {
439 __be32 nmagic = htonl (sp->lcp.magic);
440 sp->lcp.echoid = ++sp->pp_seq;
441 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
442 sp->lcp.echoid, 4, &nmagic);
443 }
444
445 spin_unlock(&sp->lock);
446 }
447 spin_unlock_irqrestore(&spppq_lock, flags);
448 sppp_flush_xmit();
449 sppp_keepalive_timer.expires=jiffies+10*HZ;
450 add_timer(&sppp_keepalive_timer);
451}
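
/*
 * In outline: every 10 seconds each attached interface either sends a
 * SLARP keepalive (Cisco mode: our incremented sequence number plus
 * the last one heard from the peer) or an LCP Echo-Request carrying
 * our magic number (PPP mode, only once LCP is OPENED). MAXALIVECNT
 * consecutive unanswered probes mark the protocol down and, in PPP
 * mode, restart LCP negotiation.
 */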
452
453/*
454 * Handle incoming PPP Link Control Protocol packets.
455 */
456
457static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
458{
459 struct lcp_header *h;
460 struct net_device *dev = sp->pp_if;
461 int len = skb->len;
462 u8 *p, opt[6];
463 u32 rmagic = 0;
464
465 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
466 if (sp->pp_flags & PP_DEBUG)
467 printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
468 dev->name, len);
469 return;
470 }
471 h = (struct lcp_header *)skb->data;
472 skb_pull(skb,sizeof(struct lcp_header));
473
474 if (sp->pp_flags & PP_DEBUG)
475 {
476 char state = '?';
477 switch (sp->lcp.state) {
478 case LCP_STATE_CLOSED: state = 'C'; break;
479 case LCP_STATE_ACK_RCVD: state = 'R'; break;
480 case LCP_STATE_ACK_SENT: state = 'S'; break;
481 case LCP_STATE_OPENED: state = 'O'; break;
482 }
483 printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
484 dev->name, state, len,
485 sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
486 if (len > 4)
487 sppp_print_bytes ((u8*) (h+1), len-4);
488 printk (">\n");
489 }
490 if (len > ntohs (h->len))
491 len = ntohs (h->len);
492 switch (h->type) {
493 default:
494 /* Unknown packet type -- send Code-Reject packet. */
495 sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
496 skb->len, h);
497 break;
498 case LCP_CONF_REQ:
499 if (len < 4) {
500 if (sp->pp_flags & PP_DEBUG)
501 printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
502 dev->name, len);
503 break;
504 }
505 if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
506 goto badreq;
507 if (rmagic == sp->lcp.magic) {
508 /* Local and remote magics equal -- loopback? */
509 if (sp->pp_loopcnt >= MAXALIVECNT*5) {
510 printk (KERN_WARNING "%s: loopback\n",
511 dev->name);
512 sp->pp_loopcnt = 0;
513 if (dev->flags & IFF_UP) {
514 if_down (dev);
515 }
516 } else if (sp->pp_flags & PP_DEBUG)
517 printk (KERN_DEBUG "%s: conf req: magic glitch\n",
518 dev->name);
519 ++sp->pp_loopcnt;
520
521 /* MUST send Conf-Nack packet. */
522 rmagic = ~sp->lcp.magic;
523 opt[0] = LCP_OPT_MAGIC;
524 opt[1] = sizeof (opt);
525 opt[2] = rmagic >> 24;
526 opt[3] = rmagic >> 16;
527 opt[4] = rmagic >> 8;
528 opt[5] = rmagic;
529 sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
530 h->ident, sizeof (opt), &opt);
531badreq:
532 switch (sp->lcp.state) {
533 case LCP_STATE_OPENED:
534 /* Initiate renegotiation. */
535 sppp_lcp_open (sp);
536 /* fall through... */
537 case LCP_STATE_ACK_SENT:
538 /* Go to closed state. */
539 sp->lcp.state = LCP_STATE_CLOSED;
540 sp->ipcp.state = IPCP_STATE_CLOSED;
541 }
542 break;
543 }
544 /* Send Configure-Ack packet. */
545 sp->pp_loopcnt = 0;
546 if (sp->lcp.state != LCP_STATE_OPENED) {
547 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
548 h->ident, len-4, h+1);
549 }
550 /* Change the state. */
551 switch (sp->lcp.state) {
552 case LCP_STATE_CLOSED:
553 sp->lcp.state = LCP_STATE_ACK_SENT;
554 break;
555 case LCP_STATE_ACK_RCVD:
556 sp->lcp.state = LCP_STATE_OPENED;
557 sppp_ipcp_open (sp);
558 break;
559 case LCP_STATE_OPENED:
560 /* Remote magic changed -- close session. */
561 sp->lcp.state = LCP_STATE_CLOSED;
562 sp->ipcp.state = IPCP_STATE_CLOSED;
563 /* Initiate renegotiation. */
564 sppp_lcp_open (sp);
565 /* Send ACK after our REQ in an attempt to break the loop */
566 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
567 h->ident, len-4, h+1);
568 sp->lcp.state = LCP_STATE_ACK_SENT;
569 break;
570 }
571 break;
572 case LCP_CONF_ACK:
573 if (h->ident != sp->lcp.confid)
574 break;
575 sppp_clear_timeout (sp);
576 if ((sp->pp_link_state != SPPP_LINK_UP) &&
577 (dev->flags & IFF_UP)) {
578 /* Coming out of loopback mode. */
579 sp->pp_link_state=SPPP_LINK_UP;
580 printk (KERN_INFO "%s: protocol up\n", dev->name);
581 }
582 switch (sp->lcp.state) {
583 case LCP_STATE_CLOSED:
584 sp->lcp.state = LCP_STATE_ACK_RCVD;
585 sppp_set_timeout (sp, 5);
586 break;
587 case LCP_STATE_ACK_SENT:
588 sp->lcp.state = LCP_STATE_OPENED;
589 sppp_ipcp_open (sp);
590 break;
591 }
592 break;
593 case LCP_CONF_NAK:
594 if (h->ident != sp->lcp.confid)
595 break;
596 p = (u8*) (h+1);
597 if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
598 rmagic = (u32)p[2] << 24 |
599 (u32)p[3] << 16 | p[4] << 8 | p[5];
600 if (rmagic == ~sp->lcp.magic) {
601 int newmagic;
602 if (sp->pp_flags & PP_DEBUG)
603 printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
604 dev->name);
605 get_random_bytes(&newmagic, sizeof(newmagic));
606 sp->lcp.magic += newmagic;
607 } else
608 sp->lcp.magic = rmagic;
609 }
610 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
611 /* Go to closed state. */
612 sp->lcp.state = LCP_STATE_CLOSED;
613 sp->ipcp.state = IPCP_STATE_CLOSED;
614 }
615 /* The link will be renegotiated after timeout,
616 * to avoid endless req-nack loop. */
617 sppp_clear_timeout (sp);
618 sppp_set_timeout (sp, 2);
619 break;
620 case LCP_CONF_REJ:
621 if (h->ident != sp->lcp.confid)
622 break;
623 sppp_clear_timeout (sp);
624 /* Initiate renegotiation. */
625 sppp_lcp_open (sp);
626 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
627 /* Go to closed state. */
628 sp->lcp.state = LCP_STATE_CLOSED;
629 sp->ipcp.state = IPCP_STATE_CLOSED;
630 }
631 break;
632 case LCP_TERM_REQ:
633 sppp_clear_timeout (sp);
634 /* Send Terminate-Ack packet. */
635 sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
636 /* Go to closed state. */
637 sp->lcp.state = LCP_STATE_CLOSED;
638 sp->ipcp.state = IPCP_STATE_CLOSED;
639 /* Initiate renegotiation. */
640 sppp_lcp_open (sp);
641 break;
642 case LCP_TERM_ACK:
643 case LCP_CODE_REJ:
644 case LCP_PROTO_REJ:
645 /* Ignore for now. */
646 break;
647 case LCP_DISC_REQ:
648 /* Discard the packet. */
649 break;
650 case LCP_ECHO_REQ:
651 if (sp->lcp.state != LCP_STATE_OPENED)
652 break;
653 if (len < 8) {
654 if (sp->pp_flags & PP_DEBUG)
655 printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
656 dev->name, len);
657 break;
658 }
659 if (ntohl (*(__be32*)(h+1)) == sp->lcp.magic) {
660 /* Line loopback mode detected. */
661 printk (KERN_WARNING "%s: loopback\n", dev->name);
662 if_down (dev);
663
664 /* Shut down the PPP link. */
665 sp->lcp.state = LCP_STATE_CLOSED;
666 sp->ipcp.state = IPCP_STATE_CLOSED;
667 sppp_clear_timeout (sp);
668 /* Initiate negotiation. */
669 sppp_lcp_open (sp);
670 break;
671 }
672 *(__be32 *)(h+1) = htonl (sp->lcp.magic);
673 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
674 break;
675 case LCP_ECHO_REPLY:
676 if (h->ident != sp->lcp.echoid)
677 break;
678 if (len < 8) {
679 if (sp->pp_flags & PP_DEBUG)
680 printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
681 dev->name, len);
682 break;
683 }
684 if (ntohl(*(__be32 *)(h+1)) != sp->lcp.magic)
685 sp->pp_alivecnt = 0;
686 break;
687 }
688}
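
/*
 * Condensed view of the automaton driven above -- an informal sketch
 * of this driver's 4-state subset of RFC 1661, not the full standard
 * state machine:
 *
 *   CLOSED   --(acceptable conf-req received, ack it)--> ACK_SENT
 *   CLOSED   --(conf-ack for our request)--------------> ACK_RCVD
 *   ACK_SENT --(conf-ack for our request)--------------> OPENED
 *   ACK_RCVD --(acceptable conf-req received)----------> OPENED
 *
 * Reaching OPENED kicks off IPCP via sppp_ipcp_open(); nak/rej/term
 * events fall back towards CLOSED and renegotiation.
 */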
689
690/*
691 * Handle incoming Cisco keepalive protocol packets.
692 */
693
694static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
695{
696 struct cisco_packet *h;
697 struct net_device *dev = sp->pp_if;
698
699 if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
700 || (skb->len != CISCO_PACKET_LEN
701 && skb->len != CISCO_BIG_PACKET_LEN)) {
702 if (sp->pp_flags & PP_DEBUG)
703 printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
704 dev->name, skb->len);
705 return;
706 }
707 h = (struct cisco_packet *)skb->data;
708 skb_pull(skb, sizeof(struct cisco_packet));
709 if (sp->pp_flags & PP_DEBUG)
710 printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
711 dev->name, skb->len,
712 ntohl (h->type), h->par1, h->par2, h->rel,
713 h->time0, h->time1);
714 switch (ntohl (h->type)) {
715 default:
716 if (sp->pp_flags & PP_DEBUG)
717 printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
718 dev->name, ntohl (h->type));
719 break;
720 case CISCO_ADDR_REPLY:
721 /* Reply to an address request -- ignore. */
722 break;
723 case CISCO_KEEPALIVE_REQ:
724 sp->pp_alivecnt = 0;
725 sp->pp_rseq = ntohl (h->par1);
726 if (sp->pp_seq == sp->pp_rseq) {
727 /* Local and remote sequence numbers are equal.
728 * Probably, the line is in loopback mode. */
729 int newseq;
730 if (sp->pp_loopcnt >= MAXALIVECNT) {
731 printk (KERN_WARNING "%s: loopback\n",
732 dev->name);
733 sp->pp_loopcnt = 0;
734 if (dev->flags & IFF_UP) {
735 if_down (dev);
736 }
737 }
738 ++sp->pp_loopcnt;
739
740 /* Generate new local sequence number */
741 get_random_bytes(&newseq, sizeof(newseq));
742 sp->pp_seq ^= newseq;
743 break;
744 }
745 sp->pp_loopcnt = 0;
746 if (sp->pp_link_state==SPPP_LINK_DOWN &&
747 (dev->flags & IFF_UP)) {
748 sp->pp_link_state=SPPP_LINK_UP;
749 printk (KERN_INFO "%s: protocol up\n", dev->name);
750 }
751 break;
752 case CISCO_ADDR_REQ:
753 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
754 {
755 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
756#ifdef CONFIG_INET
757 struct in_device *in_dev;
758 struct in_ifaddr *ifa;
759
760 rcu_read_lock();
761 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
762 {
763 for (ifa=in_dev->ifa_list; ifa != NULL;
764 ifa=ifa->ifa_next) {
765 if (strcmp(dev->name, ifa->ifa_label) == 0)
766 {
767 addr = ifa->ifa_local;
768 mask = ifa->ifa_mask;
769 break;
770 }
771 }
772 }
773 rcu_read_unlock();
774#endif
775 sppp_cisco_send (sp, CISCO_ADDR_REPLY, ntohl(addr), ntohl(mask));
776 break;
777 }
778 }
779}
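
/*
 * SLARP keepalive logic in brief (a summary of the handler above):
 * each side transmits <type=2, par1=its own sequence number,
 * par2=last sequence number it received>. Receiving our own number
 * back in par1 suggests the line is looped; after MAXALIVECNT such
 * frames the interface is downed and pp_seq is re-randomized to
 * break the loop.
 */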
780
781
782/*
783 * Send PPP LCP packet.
784 */
785
786static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
787 u8 ident, u16 len, void *data)
788{
789 struct ppp_header *h;
790 struct lcp_header *lh;
791 struct sk_buff *skb;
792 struct net_device *dev = sp->pp_if;
793
794 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
795 GFP_ATOMIC);
796 if (skb==NULL)
797 return;
798
799 skb_reserve(skb,dev->hard_header_len);
800
801 h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
802 h->address = PPP_ALLSTATIONS; /* broadcast address */
803 h->control = PPP_UI; /* Unnumbered Info */
804 h->protocol = htons (proto); /* Link Control Protocol */
805
806 lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
807 lh->type = type;
808 lh->ident = ident;
809 lh->len = htons (LCP_HEADER_LEN + len);
810
811 if (len)
812 memcpy(skb_put(skb,len),data, len);
813
814 if (sp->pp_flags & PP_DEBUG) {
815 printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
816 dev->name,
817 proto==PPP_LCP ? "lcp" : "ipcp",
818 proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
819 sppp_ipcp_type_name (lh->type), lh->ident,
820 ntohs (lh->len));
821 if (len)
822 sppp_print_bytes ((u8*) (lh+1), len);
823 printk (">\n");
824 }
825 /* Control is high priority so it doesn't get queued behind data */
826 skb->priority=TC_PRIO_CONTROL;
827 skb->dev = dev;
828 skb_queue_tail(&tx_queue, skb);
829}
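
/*
 * Wire layout of the control frame queued above (for reference):
 *
 *   FF 03 <proto:2> | <type:1> <ident:1> <len:2> | <data:len-4>
 *    ppp_header        lcp_header
 *
 * The length field covers the LCP header plus data, so e.g. an
 * Echo-Request carrying a 4-byte magic number has len == 8.
 */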
830
831/*
832 * Send Cisco keepalive packet.
833 */
834
835static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
836{
837 struct ppp_header *h;
838 struct cisco_packet *ch;
839 struct sk_buff *skb;
840 struct net_device *dev = sp->pp_if;
841 u32 t = jiffies * 1000/HZ;
842
843 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
844 GFP_ATOMIC);
845
846 if(skb==NULL)
847 return;
848
849 skb_reserve(skb, dev->hard_header_len);
850 h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
851 h->address = CISCO_MULTICAST;
852 h->control = 0;
853 h->protocol = htons (CISCO_KEEPALIVE);
854
855 ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
856 ch->type = htonl (type);
857 ch->par1 = htonl (par1);
858 ch->par2 = htonl (par2);
859 ch->rel = htons(0xffff);
860 ch->time0 = htons ((u16) (t >> 16));
861 ch->time1 = htons ((u16) t);
862
863 if (sp->pp_flags & PP_DEBUG)
864 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
865 dev->name, ntohl (ch->type), ch->par1,
866 ch->par2, ch->rel, ch->time0, ch->time1);
867 skb->priority=TC_PRIO_CONTROL;
868 skb->dev = dev;
869 skb_queue_tail(&tx_queue, skb);
870}
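
/*
 * And the keepalive frame built here, as it goes out (a sketch;
 * 0x8035 is the CISCO_KEEPALIVE/SLARP protocol number used above):
 *
 *   8F 00 80 35 | <type:4> <par1:4> <par2:4> FF FF <time:4>
 *
 * with type 2 == keepalive request, par1 == our sequence number,
 * par2 == the peer's last sequence number, and time == uptime in
 * milliseconds split across time0/time1.
 */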
871
872/**
873 * sppp_close - close down a synchronous PPP or Cisco HDLC link
874 * @dev: The network device to drop the link of
875 *
876 * This drops the logical interface to the channel. It is not
877 * done politely as we assume we will also be dropping DTR. Any
878 * timeouts are killed.
879 */
880
881int sppp_close (struct net_device *dev)
882{
883 struct sppp *sp = (struct sppp *)sppp_of(dev);
884 unsigned long flags;
885
886 spin_lock_irqsave(&sp->lock, flags);
887 sp->pp_link_state = SPPP_LINK_DOWN;
888 sp->lcp.state = LCP_STATE_CLOSED;
889 sp->ipcp.state = IPCP_STATE_CLOSED;
890 sppp_clear_timeout (sp);
891 spin_unlock_irqrestore(&sp->lock, flags);
892
893 return 0;
894}
895
896EXPORT_SYMBOL(sppp_close);
897
898/**
899 * sppp_open - open a synchronous PPP or Cisco HDLC link
900 * @dev: Network device to activate
901 *
902 * Close down any existing synchronous session and commence
903 * from scratch. In the PPP case this means negotiating LCP/IPCP
904 * and friends, while for Cisco HDLC we simply need to start sending
905 * keepalives.
906 */
907
908int sppp_open (struct net_device *dev)
909{
910 struct sppp *sp = (struct sppp *)sppp_of(dev);
911 unsigned long flags;
912
913 sppp_close(dev);
914
915 spin_lock_irqsave(&sp->lock, flags);
916 if (!(sp->pp_flags & PP_CISCO)) {
917 sppp_lcp_open (sp);
918 }
919 sp->pp_link_state = SPPP_LINK_DOWN;
920 spin_unlock_irqrestore(&sp->lock, flags);
921 sppp_flush_xmit();
922
923 return 0;
924}
925
926EXPORT_SYMBOL(sppp_open);
927
928/**
929 * sppp_reopen - notify of physical link loss
930 * @dev: Device that lost the link
931 *
932 * This function informs the synchronous protocol code that
933 * the underlying link died (for example a carrier drop on X.21)
934 *
935 * We increment the magic numbers to ensure that if the other end
936 * failed to notice we will correctly start a new session. Due to the
937 * nature of telco circuits it is quite possible to lose carrier on
938 * one end only.
939 *
940 * Having done this we go back to negotiating. This function may
941 * be called from an interrupt context.
942 */
943
944int sppp_reopen (struct net_device *dev)
945{
946 struct sppp *sp = (struct sppp *)sppp_of(dev);
947 unsigned long flags;
948
949 sppp_close(dev);
950
951 spin_lock_irqsave(&sp->lock, flags);
952 if (!(sp->pp_flags & PP_CISCO))
953 {
954 sp->lcp.magic = jiffies;
955 ++sp->pp_seq;
956 sp->lcp.state = LCP_STATE_CLOSED;
957 sp->ipcp.state = IPCP_STATE_CLOSED;
958 /* Give it a moment for the line to settle then go */
959 sppp_set_timeout (sp, 1);
960 }
961 sp->pp_link_state=SPPP_LINK_DOWN;
962 spin_unlock_irqrestore(&sp->lock, flags);
963
964 return 0;
965}
966
967EXPORT_SYMBOL(sppp_reopen);
968
969/**
970 * sppp_change_mtu - Change the link MTU
971 * @dev: Device to change MTU on
972 * @new_mtu: New MTU
973 *
974 * Change the MTU on the link. This can only be called with
975 * the link down. It returns an error if the link is up or
976 * the mtu is out of range.
977 */
978
979static int sppp_change_mtu(struct net_device *dev, int new_mtu)
980{
981 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
982 return -EINVAL;
983 dev->mtu=new_mtu;
984 return 0;
985}
986
987/**
988 * sppp_do_ioctl - Ioctl handler for ppp/hdlc
989 * @dev: Device subject to ioctl
990 * @ifr: Interface request block from the user
991 * @cmd: Command that is being issued
992 *
993 * This function handles the ioctls that may be issued by the user
994 * to control the settings of a PPP/HDLC link. It does both busy
995 * and security checks. This function is intended to be wrapped by
996 * callers who wish to add additional ioctl calls of their own.
997 */
998
999int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1000{
1001 struct sppp *sp = (struct sppp *)sppp_of(dev);
1002
1003 if(dev->flags&IFF_UP)
1004 return -EBUSY;
1005
1006 if(!capable(CAP_NET_ADMIN))
1007 return -EPERM;
1008
1009 switch(cmd)
1010 {
1011 case SPPPIOCCISCO:
1012 sp->pp_flags|=PP_CISCO;
1013 dev->type = ARPHRD_HDLC;
1014 break;
1015 case SPPPIOCPPP:
1016 sp->pp_flags&=~PP_CISCO;
1017 dev->type = ARPHRD_PPP;
1018 break;
1019 case SPPPIOCDEBUG:
1020 sp->pp_flags&=~PP_DEBUG;
1021 if(ifr->ifr_flags)
1022 sp->pp_flags|=PP_DEBUG;
1023 break;
1024 case SPPPIOCGFLAGS:
1025 if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
1026 return -EFAULT;
1027 break;
1028 case SPPPIOCSFLAGS:
1029 if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
1030 return -EFAULT;
1031 break;
1032 default:
1033 return -EINVAL;
1034 }
1035 return 0;
1036}
1037
1038EXPORT_SYMBOL(sppp_do_ioctl);
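
/*
 * A wrapping driver's ioctl handler would typically handle its own
 * commands first and fall through to this one -- an illustrative
 * sketch only; example_ioctl() is not a function in this tree:
 */
#if 0
static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* driver-private commands would be checked here first... */
	return sppp_do_ioctl(dev, ifr, cmd);
}
#endif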
1039
1040/**
1041 * sppp_attach - attach synchronous PPP/HDLC to a device
1042 * @pd: PPP device to initialise
1043 *
1044 * This initialises the PPP/HDLC support on an interface. At the
1045 * time of calling the dev element must point to the network device
1046 * that this interface is attached to. The interface should not yet
1047 * be registered.
1048 */
1049
1050void sppp_attach(struct ppp_device *pd)
1051{
1052 struct net_device *dev = pd->dev;
1053 struct sppp *sp = &pd->sppp;
1054 unsigned long flags;
1055
1056 /* Make sure embedding is safe for sppp_of */
1057 BUG_ON(sppp_of(dev) != sp);
1058
1059 spin_lock_irqsave(&spppq_lock, flags);
1060 /* Initialize keepalive handler. */
1061 if (! spppq)
1062 {
1063 init_timer(&sppp_keepalive_timer);
1064 sppp_keepalive_timer.expires=jiffies+10*HZ;
1065 sppp_keepalive_timer.function=sppp_keepalive;
1066 add_timer(&sppp_keepalive_timer);
1067 }
1068 /* Insert new entry into the keepalive list. */
1069 sp->pp_next = spppq;
1070 spppq = sp;
1071 spin_unlock_irqrestore(&spppq_lock, flags);
1072
1073 sp->pp_loopcnt = 0;
1074 sp->pp_alivecnt = 0;
1075 sp->pp_seq = 0;
1076 sp->pp_rseq = 0;
1077 sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
1078 sp->lcp.magic = 0;
1079 sp->lcp.state = LCP_STATE_CLOSED;
1080 sp->ipcp.state = IPCP_STATE_CLOSED;
1081 sp->pp_if = dev;
1082 spin_lock_init(&sp->lock);
1083
1084 /*
1085 * Device specific setup. All but interrupt handler and
1086 * hard_start_xmit.
1087 */
1088
1089 dev->header_ops = &sppp_header_ops;
1090
1091 dev->tx_queue_len = 10;
1092 dev->type = ARPHRD_HDLC;
1093 dev->addr_len = 0;
1094 dev->hard_header_len = sizeof(struct ppp_header);
1095 dev->mtu = PPP_MTU;
1096 /*
1097 * These 4 may be overridden by the driver, but any override MUST still call the sppp_ functions
1098 */
1099 dev->do_ioctl = sppp_do_ioctl;
1100#if 0
1101 dev->get_stats = NULL; /* Let the driver override these */
1102 dev->open = sppp_open;
1103 dev->stop = sppp_close;
1104#endif
1105 dev->change_mtu = sppp_change_mtu;
1106 dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
1107}
1108
1109EXPORT_SYMBOL(sppp_attach);
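
/*
 * Hypothetical driver glue showing the intended call sequence -- an
 * illustrative sketch only; the example_* names do not exist in the
 * tree. The driver embeds struct ppp_device, attaches sppp before
 * registering the device, and chains its open/stop methods through
 * the sppp_ helpers as the comments above require:
 */
#if 0
static int example_open(struct net_device *dev)
{
	int err = sppp_open(dev);	/* start LCP/IPCP or keepalives */
	if (err)
		return err;
	/* ...enable the card's receiver and transmitter here... */
	return 0;
}

static void example_setup(struct ppp_device *pd)
{
	sppp_attach(pd);		/* defaults to Cisco HDLC mode */
	pd->dev->open = example_open;
	pd->dev->stop = sppp_close;	/* no extra teardown needed here */
}
#endif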
1110
1111/**
1112 * sppp_detach - release PPP resources from a device
1113 * @dev: Network device to release
1114 *
1115 * Stop and free up any PPP/HDLC resources used by this
1116 * interface. This must be called before the device is
1117 * freed.
1118 */
1119
1120void sppp_detach (struct net_device *dev)
1121{
1122 struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&spppq_lock, flags);
1126 /* Remove the entry from the keepalive list. */
1127 for (q = &spppq; (p = *q); q = &p->pp_next)
1128 if (p == sp) {
1129 *q = p->pp_next;
1130 break;
1131 }
1132
1133 /* Stop keepalive handler. */
1134 if (! spppq)
1135 del_timer(&sppp_keepalive_timer);
1136 sppp_clear_timeout (sp);
1137 spin_unlock_irqrestore(&spppq_lock, flags);
1138}
1139
1140EXPORT_SYMBOL(sppp_detach);
1141
1142/*
1143 * Analyze the LCP Configure-Request options list
1144 * for the presence of unknown options.
1145 * If the request contains unknown options, build and
1146 * send Configure-reject packet, containing only unknown options.
1147 */
1148static int
1149sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
1150 int len, u32 *magic)
1151{
1152 u8 *buf, *r, *p;
1153 int rlen;
1154
1155 len -= 4;
1156 buf = r = kmalloc (len, GFP_ATOMIC);
1157 if (! buf)
1158 return (0);
1159
1160 p = (void*) (h+1);
1161 for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
1162 switch (*p) {
1163 case LCP_OPT_MAGIC:
1164 /* Magic number -- extract. */
1165 if (len >= 6 && p[1] == 6) {
1166 *magic = (u32)p[2] << 24 |
1167 (u32)p[3] << 16 | p[4] << 8 | p[5];
1168 continue;
1169 }
1170 break;
1171 case LCP_OPT_ASYNC_MAP:
1172 /* Async control character map -- check to be zero. */
1173 if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
1174 ! p[4] && ! p[5])
1175 continue;
1176 break;
1177 case LCP_OPT_MRU:
1178 /* Maximum receive unit -- always OK. */
1179 continue;
1180 default:
1181 /* Others not supported. */
1182 break;
1183 }
1184 /* Add the option to rejected list. */
1185 memcpy(r, p, p[1]);
1186 r += p[1];
1187 rlen += p[1];
1188 }
1189 if (rlen)
1190 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
1191 kfree(buf);
1192 return (rlen == 0);
1193}
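
/*
 * Reminder of the encoding walked above: LCP options are type-length-
 * value triples whose length byte includes the two header bytes, e.g.
 * a Magic-Number option is
 *
 *   05 06 m1 m2 m3 m4    (LCP_OPT_MAGIC, length 6, 32-bit magic)
 *
 * which is why the loop advances by p[1] and stops when p[1] == 0.
 */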
1194
1195static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
1196{
1197 struct lcp_header *h;
1198 struct net_device *dev = sp->pp_if;
1199 int len = skb->len;
1200
1201 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
1202 if (sp->pp_flags & PP_DEBUG)
1203 printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
1204 dev->name, len);
1205 return;
1206 }
1207 h = (struct lcp_header *)skb->data;
1208 skb_pull(skb,sizeof(struct lcp_header));
1209 if (sp->pp_flags & PP_DEBUG) {
1210 printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
1211 dev->name, len,
1212 sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
1213 if (len > 4)
1214 sppp_print_bytes ((u8*) (h+1), len-4);
1215 printk (">\n");
1216 }
1217 if (len > ntohs (h->len))
1218 len = ntohs (h->len);
1219 switch (h->type) {
1220 default:
1221 /* Unknown packet type -- send Code-Reject packet. */
1222 sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
1223 break;
1224 case IPCP_CONF_REQ:
1225 if (len < 4) {
1226 if (sp->pp_flags & PP_DEBUG)
1227 printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
1228 dev->name, len);
1229 return;
1230 }
1231 if (len > 4) {
1232 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REJ, h->ident,
1233 len-4, h+1);
1234
1235 switch (sp->ipcp.state) {
1236 case IPCP_STATE_OPENED:
1237 /* Initiate renegotiation. */
1238 sppp_ipcp_open (sp);
1239 /* fall through... */
1240 case IPCP_STATE_ACK_SENT:
1241 /* Go to closed state. */
1242 sp->ipcp.state = IPCP_STATE_CLOSED;
1243 }
1244 } else {
1245 /* Send Configure-Ack packet. */
1246 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
1247 0, NULL);
1248 /* Change the state. */
1249 if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
1250 sp->ipcp.state = IPCP_STATE_OPENED;
1251 else
1252 sp->ipcp.state = IPCP_STATE_ACK_SENT;
1253 }
1254 break;
1255 case IPCP_CONF_ACK:
1256 if (h->ident != sp->ipcp.confid)
1257 break;
1258 sppp_clear_timeout (sp);
1259 switch (sp->ipcp.state) {
1260 case IPCP_STATE_CLOSED:
1261 sp->ipcp.state = IPCP_STATE_ACK_RCVD;
1262 sppp_set_timeout (sp, 5);
1263 break;
1264 case IPCP_STATE_ACK_SENT:
1265 sp->ipcp.state = IPCP_STATE_OPENED;
1266 break;
1267 }
1268 break;
1269 case IPCP_CONF_NAK:
1270 case IPCP_CONF_REJ:
1271 if (h->ident != sp->ipcp.confid)
1272 break;
1273 sppp_clear_timeout (sp);
1274 /* Initiate renegotiation. */
1275 sppp_ipcp_open (sp);
1276 if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
1277 /* Go to closed state. */
1278 sp->ipcp.state = IPCP_STATE_CLOSED;
1279 break;
1280 case IPCP_TERM_REQ:
1281 /* Send Terminate-Ack packet. */
1282 sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
1283 /* Go to closed state. */
1284 sp->ipcp.state = IPCP_STATE_CLOSED;
1285 /* Initiate renegotiation. */
1286 sppp_ipcp_open (sp);
1287 break;
1288 case IPCP_TERM_ACK:
1289 /* Ignore for now. */
1290 case IPCP_CODE_REJ:
1291 /* Ignore for now. */
1292 break;
1293 }
1294}
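
/*
 * Note on the IPCP handling above: this implementation negotiates no
 * IPCP options at all. Its own Configure-Request is empty, and any
 * option the peer asks for is rejected wholesale, so IP addresses
 * must be configured administratively rather than learned from the
 * link.
 */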
1295
1296static void sppp_lcp_open (struct sppp *sp)
1297{
1298 char opt[6];
1299
1300 if (! sp->lcp.magic)
1301 sp->lcp.magic = jiffies;
1302 opt[0] = LCP_OPT_MAGIC;
1303 opt[1] = sizeof (opt);
1304 opt[2] = sp->lcp.magic >> 24;
1305 opt[3] = sp->lcp.magic >> 16;
1306 opt[4] = sp->lcp.magic >> 8;
1307 opt[5] = sp->lcp.magic;
1308 sp->lcp.confid = ++sp->pp_seq;
1309 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
1310 sizeof (opt), &opt);
1311 sppp_set_timeout (sp, 2);
1312}
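
/*
 * The Configure-Request emitted above therefore looks like this on
 * the wire (a sketch, with M1..M4 the magic number bytes):
 *
 *   FF 03 C0 21  01 <id> 00 0A  05 06 M1 M2 M3 M4
 *
 * i.e. PPP framing, protocol 0xC021 (LCP), code 1 (conf-req), length
 * 10, and a single Magic-Number option.
 */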
1313
1314static void sppp_ipcp_open (struct sppp *sp)
1315{
1316 sp->ipcp.confid = ++sp->pp_seq;
1317 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
1318 sppp_set_timeout (sp, 2);
1319}
1320
1321/*
1322 * Process PPP control protocol timeouts.
1323 */
1324
1325static void sppp_cp_timeout (unsigned long arg)
1326{
1327 struct sppp *sp = (struct sppp*) arg;
1328 unsigned long flags;
1329
1330 spin_lock_irqsave(&sp->lock, flags);
1331
1332 sp->pp_flags &= ~PP_TIMO;
1333 if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
1334 spin_unlock_irqrestore(&sp->lock, flags);
1335 return;
1336 }
1337 switch (sp->lcp.state) {
1338 case LCP_STATE_CLOSED:
1339 /* No ACK for Configure-Request, retry. */
1340 sppp_lcp_open (sp);
1341 break;
1342 case LCP_STATE_ACK_RCVD:
1343 /* ACK received, but no Configure-Request from peer, retry. */
1344 sppp_lcp_open (sp);
1345 sp->lcp.state = LCP_STATE_CLOSED;
1346 break;
1347 case LCP_STATE_ACK_SENT:
1348 /* ACK sent but no ACK for Configure-Request, retry. */
1349 sppp_lcp_open (sp);
1350 break;
1351 case LCP_STATE_OPENED:
1352 /* LCP is already OK, try IPCP. */
1353 switch (sp->ipcp.state) {
1354 case IPCP_STATE_CLOSED:
1355 /* No ACK for Configure-Request, retry. */
1356 sppp_ipcp_open (sp);
1357 break;
1358 case IPCP_STATE_ACK_RCVD:
1359 /* ACK received, but no Configure-Request from peer, retry. */
1360 sppp_ipcp_open (sp);
1361 sp->ipcp.state = IPCP_STATE_CLOSED;
1362 break;
1363 case IPCP_STATE_ACK_SENT:
1364 /* ACK sent but no ACK for Configure-Request, retry. */
1365 sppp_ipcp_open (sp);
1366 break;
1367 case IPCP_STATE_OPENED:
1368 /* IPCP is OK. */
1369 break;
1370 }
1371 break;
1372 }
1373 spin_unlock_irqrestore(&sp->lock, flags);
1374 sppp_flush_xmit();
1375}
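
/*
 * Net effect of the timeout handler: whichever control protocol is
 * not yet OPENED has its Configure-Request re-sent, and the
 * sppp_*_open() calls re-arm the 2-second timer, so negotiation keeps
 * retrying for as long as the interface stays up in PPP mode.
 */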
1376
1377static char *sppp_lcp_type_name (u8 type)
1378{
1379 static char buf [8];
1380 switch (type) {
1381 case LCP_CONF_REQ: return ("conf-req");
1382 case LCP_CONF_ACK: return ("conf-ack");
1383 case LCP_CONF_NAK: return ("conf-nack");
1384 case LCP_CONF_REJ: return ("conf-rej");
1385 case LCP_TERM_REQ: return ("term-req");
1386 case LCP_TERM_ACK: return ("term-ack");
1387 case LCP_CODE_REJ: return ("code-rej");
1388 case LCP_PROTO_REJ: return ("proto-rej");
1389 case LCP_ECHO_REQ: return ("echo-req");
1390 case LCP_ECHO_REPLY: return ("echo-reply");
1391 case LCP_DISC_REQ: return ("discard-req");
1392 }
1393 sprintf (buf, "%xh", type);
1394 return (buf);
1395}
1396
1397static char *sppp_ipcp_type_name (u8 type)
1398{
1399 static char buf [8];
1400 switch (type) {
1401 case IPCP_CONF_REQ: return ("conf-req");
1402 case IPCP_CONF_ACK: return ("conf-ack");
1403 case IPCP_CONF_NAK: return ("conf-nack");
1404 case IPCP_CONF_REJ: return ("conf-rej");
1405 case IPCP_TERM_REQ: return ("term-req");
1406 case IPCP_TERM_ACK: return ("term-ack");
1407 case IPCP_CODE_REJ: return ("code-rej");
1408 }
1409 sprintf (buf, "%xh", type);
1410 return (buf);
1411}
1412
1413static void sppp_print_bytes (u_char *p, u16 len)
1414{
1415 printk (" %x", *p++);
1416 while (--len > 0)
1417 printk ("-%x", *p++);
1418}
1419
1420/**
1421 * sppp_rcv - receive and process a WAN PPP frame
1422 * @skb: The buffer to process
1423 * @dev: The device it arrived on
1424 * @p: Unused
1425 * @orig_dev: Unused
1426 *
1427 * Protocol glue. This drives the deferred processing mode the poorer
1428 * cards use. This can be called directly by cards that do not have
1429 * timing constraints but is normally called from the network layer
1430 * after interrupt servicing to process frames queued via netif_rx.
1431 */
1432
1433static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
1434{
1435 if (dev_net(dev) != &init_net) {
1436 kfree_skb(skb);
1437 return 0;
1438 }
1439
1440 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1441 return NET_RX_DROP;
1442 sppp_input(dev,skb);
1443 return 0;
1444}
1445
1446static struct packet_type sppp_packet_type = {
1447 .type = __constant_htons(ETH_P_WAN_PPP),
1448 .func = sppp_rcv,
1449};
1450
1451static char banner[] __initdata =
1452 KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
1453 KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
1454 "Jan \"Yenya\" Kasprzak.\n";
1455
1456static int __init sync_ppp_init(void)
1457{
1458 if(debug)
1459 debug=PP_DEBUG;
1460 printk(banner);
1461 skb_queue_head_init(&tx_queue);
1462 dev_add_pack(&sppp_packet_type);
1463 return 0;
1464}
1465
1466
1467static void __exit sync_ppp_cleanup(void)
1468{
1469 dev_remove_pack(&sppp_packet_type);
1470}
1471
1472module_init(sync_ppp_init);
1473module_exit(sync_ppp_cleanup);
1474module_param(debug, int, 0);
1475MODULE_LICENSE("GPL");
1476