author		Manuel Lauss <mano@roarinelk.homelinux.net>	2008-04-22 16:16:47 -0400
committer	Jean Delvare <khali@hyperion.delvare>	2008-04-22 16:16:47 -0400
commit		a26c20b1fa6d16fd3c402785b943a5e915eda30a (patch)
tree		b02bf7183098c1dbf3cc51114a7bbbea22561765
parent		dba7997a87cd12b815c0d58b2a0522a8bb0cf5ec (diff)
i2c: Renesas SH7760 I2C master driver
Driver for I2C interfaces in master mode on SH7760.

Signed-off-by: Manuel Lauss <mano@roarinelk.homelinux.net>
Signed-off-by: Jean Delvare <khali@linux-fr.org>
-rw-r--r--	drivers/i2c/busses/Kconfig	9
-rw-r--r--	drivers/i2c/busses/Makefile	1
-rw-r--r--	drivers/i2c/busses/i2c-sh7760.c	577
-rw-r--r--	include/asm-sh/i2c-sh7760.h	22
4 files changed, 609 insertions, 0 deletions
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1577bcad288a..d01d0b175e1c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -683,4 +683,13 @@ config I2C_PMCMSP
 	  This driver can also be built as module. If so, the module
 	  will be called i2c-pmcmsp.
 
+config I2C_SH7760
+	tristate "Renesas SH7760 I2C Controller"
+	depends on CPU_SUBTYPE_SH7760
+	help
+	  This driver supports the 2 I2C interfaces on the Renesas SH7760.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called i2c-sh7760.
+
 endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 378d5c07e048..8e72b2649dc7 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o
 obj-$(CONFIG_I2C_PXA)		+= i2c-pxa.o
 obj-$(CONFIG_I2C_S3C2410)	+= i2c-s3c2410.o
 obj-$(CONFIG_I2C_SAVAGE4)	+= i2c-savage4.o
+obj-$(CONFIG_I2C_SH7760)	+= i2c-sh7760.o
 obj-$(CONFIG_I2C_SIBYTE)	+= i2c-sibyte.o
 obj-$(CONFIG_I2C_SIMTEC)	+= i2c-simtec.o
 obj-$(CONFIG_I2C_SIS5595)	+= i2c-sis5595.o
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
new file mode 100644
index 000000000000..5e0e254976de
--- /dev/null
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -0,0 +1,577 @@
+/*
+ * I2C bus driver for the SH7760 I2C Interfaces.
+ *
+ * (c) 2005-2008 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com>
+ *
+ * licensed under the terms outlined in the file COPYING.
+ *
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/clock.h>
+#include <asm/i2c-sh7760.h>
+#include <asm/io.h>
+
+/* register offsets */
+#define I2CSCR		0x0	/* slave ctrl */
+#define I2CMCR		0x4	/* master ctrl */
+#define I2CSSR		0x8	/* slave status */
+#define I2CMSR		0xC	/* master status */
+#define I2CSIER		0x10	/* slave irq enable */
+#define I2CMIER		0x14	/* master irq enable */
+#define I2CCCR		0x18	/* clock dividers */
+#define I2CSAR		0x1c	/* slave address */
+#define I2CMAR		0x20	/* master address */
+#define I2CRXTX		0x24	/* data port */
+#define I2CFCR		0x28	/* fifo control */
+#define I2CFSR		0x2C	/* fifo status */
+#define I2CFIER		0x30	/* fifo irq enable */
+#define I2CRFDR		0x34	/* rx fifo count */
+#define I2CTFDR		0x38	/* tx fifo count */
+
+#define REGSIZE		0x3C
+
+#define MCR_MDBS	0x80	/* non-fifo mode switch */
+#define MCR_FSCL	0x40	/* override SCL pin */
+#define MCR_FSDA	0x20	/* override SDA pin */
+#define MCR_OBPC	0x10	/* override pins */
+#define MCR_MIE		0x08	/* master if enable */
+#define MCR_TSBE	0x04
+#define MCR_FSB		0x02	/* force stop bit */
+#define MCR_ESG		0x01	/* en startbit gen. */
+
+#define MSR_MNR		0x40	/* nack received */
+#define MSR_MAL		0x20	/* arbitration lost */
+#define MSR_MST		0x10	/* sent a stop */
+#define MSR_MDE		0x08
+#define MSR_MDT		0x04
+#define MSR_MDR		0x02
+#define MSR_MAT		0x01	/* slave addr xfer done */
+
+#define MIE_MNRE	0x40	/* nack irq en */
+#define MIE_MALE	0x20	/* arblos irq en */
+#define MIE_MSTE	0x10	/* stop irq en */
+#define MIE_MDEE	0x08
+#define MIE_MDTE	0x04
+#define MIE_MDRE	0x02
+#define MIE_MATE	0x01	/* address sent irq en */
+
+#define FCR_RFRST	0x02	/* reset rx fifo */
+#define FCR_TFRST	0x01	/* reset tx fifo */
+
+#define FSR_TEND	0x04	/* last byte sent */
+#define FSR_RDF		0x02	/* rx fifo trigger */
+#define FSR_TDFE	0x01	/* tx fifo empty */
+
+#define FIER_TEIE	0x04	/* tx fifo empty irq en */
+#define FIER_RXIE	0x02	/* rx fifo trig irq en */
+#define FIER_TXIE	0x01	/* tx fifo trig irq en */
+
+#define FIFO_SIZE	16
+
+struct cami2c {
+	void __iomem *iobase;
+	struct i2c_adapter adap;
+
+	/* message processing */
+	struct i2c_msg *msg;
+#define IDF_SEND	1
+#define IDF_RECV	2
+#define IDF_STOP	4
+	int flags;
+
+#define IDS_DONE	1
+#define IDS_ARBLOST	2
+#define IDS_NACK	4
+	int status;
+	struct completion xfer_done;
+
+	int irq;
+	struct resource *ioarea;
+};
+
+static inline void OUT32(struct cami2c *cam, int reg, unsigned long val)
+{
+	ctrl_outl(val, (unsigned long)cam->iobase + reg);
+}
+
+static inline unsigned long IN32(struct cami2c *cam, int reg)
+{
+	return ctrl_inl((unsigned long)cam->iobase + reg);
+}
+
+static irqreturn_t sh7760_i2c_irq(int irq, void *ptr)
+{
+	struct cami2c *id = ptr;
+	struct i2c_msg *msg = id->msg;
+	char *data = msg->buf;
+	unsigned long msr, fsr, fier, len;
+
+	msr = IN32(id, I2CMSR);
+	fsr = IN32(id, I2CFSR);
+
+	/* arbitration lost */
+	if (msr & MSR_MAL) {
+		OUT32(id, I2CMCR, 0);
+		OUT32(id, I2CSCR, 0);
+		OUT32(id, I2CSAR, 0);
+		id->status |= IDS_DONE | IDS_ARBLOST;
+		goto out;
+	}
+
+	if (msr & MSR_MNR) {
+		/* NACK handling is very screwed up. After receiving a
+		 * NAK IRQ one has to wait a bit before writing to any
+		 * registers, or the ctl will lock up. After that delay
+		 * do a normal i2c stop. Then wait at least 1 ms before
+		 * attempting another transfer or ctl will stop working
+		 */
+		udelay(100);	/* wait or risk ctl hang */
+		OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+		OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+		OUT32(id, I2CFIER, 0);
+		OUT32(id, I2CMIER, MIE_MSTE);
+		OUT32(id, I2CSCR, 0);
+		OUT32(id, I2CSAR, 0);
+		id->status |= IDS_NACK;
+		msr &= ~MSR_MAT;
+		fsr = 0;
+		/* In some cases the MST bit is also set. */
+	}
+
+	/* i2c-stop was sent */
+	if (msr & MSR_MST) {
+		id->status |= IDS_DONE;
+		goto out;
+	}
+
+	/* i2c slave addr was sent; set to "normal" operation */
+	if (msr & MSR_MAT)
+		OUT32(id, I2CMCR, MCR_MIE);
+
+	fier = IN32(id, I2CFIER);
+
+	if (fsr & FSR_RDF) {
+		len = IN32(id, I2CRFDR);
+		if (msg->len <= len) {
+			if (id->flags & IDF_STOP) {
+				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+				OUT32(id, I2CFIER, 0);
+				/* manual says: wait >= 0.5 SCL times */
+				udelay(5);
+				/* next int should be MST */
+			} else {
+				id->status |= IDS_DONE;
+				/* keep the RDF bit: ctrl holds SCL low
+				 * until the setup for the next i2c_msg
+				 * clears this bit.
+				 */
+				fsr &= ~FSR_RDF;
+			}
+		}
+		while (msg->len && len) {
+			*data++ = IN32(id, I2CRXTX);
+			msg->len--;
+			len--;
+		}
+
+		if (msg->len) {
+			len = (msg->len >= FIFO_SIZE) ? FIFO_SIZE - 1
+						      : msg->len - 1;
+
+			OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xf) << 4));
+		}
+
+	} else if (id->flags & IDF_SEND) {
+		if ((fsr & FSR_TEND) && (msg->len < 1)) {
+			if (id->flags & IDF_STOP) {
+				OUT32(id, I2CMCR, MCR_MIE | MCR_FSB);
+			} else {
+				id->status |= IDS_DONE;
+				/* keep the TEND bit: ctl holds SCL low
+				 * until the setup for the next i2c_msg
+				 * clears this bit.
+				 */
+				fsr &= ~FSR_TEND;
+			}
+		}
+		if (fsr & FSR_TDFE) {
+			while (msg->len && (IN32(id, I2CTFDR) < FIFO_SIZE)) {
+				OUT32(id, I2CRXTX, *data++);
+				msg->len--;
+			}
+
+			if (msg->len < 1) {
+				fier &= ~FIER_TXIE;
+				OUT32(id, I2CFIER, fier);
+			} else {
+				len = (msg->len >= FIFO_SIZE) ? 2 : 0;
+				OUT32(id, I2CFCR,
+				      FCR_RFRST | ((len & 3) << 2));
+			}
+		}
+	}
+out:
+	if (id->status & IDS_DONE) {
+		OUT32(id, I2CMIER, 0);
+		OUT32(id, I2CFIER, 0);
+		id->msg = NULL;
+		complete(&id->xfer_done);
+	}
+	/* clear status flags and ctrl resumes work */
+	OUT32(id, I2CMSR, ~msr);
+	OUT32(id, I2CFSR, ~fsr);
+	OUT32(id, I2CSSR, 0);
+
+	return IRQ_HANDLED;
+}
+
+
+/* prepare and start a master receive operation */
+static void sh7760_i2c_mrecv(struct cami2c *id)
+{
+	int len;
+
+	id->flags |= IDF_RECV;
+
+	/* set the slave addr reg; otherwise rcv won't work! */
+	OUT32(id, I2CSAR, 0xfe);
+	OUT32(id, I2CMAR, (id->msg->addr << 1) | 1);
+
+	/* adjust rx fifo trigger */
+	if (id->msg->len >= FIFO_SIZE)
+		len = FIFO_SIZE - 1;	/* trigger at fifo full */
+	else
+		len = id->msg->len - 1;	/* trigger before all received */
+
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFCR, FCR_TFRST | ((len & 0xF) << 4));
+
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
+	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
+	OUT32(id, I2CFIER, FIER_RXIE);
+}
+
+/* prepare and start a master send operation */
+static void sh7760_i2c_msend(struct cami2c *id)
+{
+	int len;
+
+	id->flags |= IDF_SEND;
+
+	/* set the slave addr reg; otherwise xmit won't work! */
+	OUT32(id, I2CSAR, 0xfe);
+	OUT32(id, I2CMAR, (id->msg->addr << 1) | 0);
+
+	/* adjust tx fifo trigger */
+	if (id->msg->len >= FIFO_SIZE)
+		len = 2;	/* trig: 2 bytes left in TX fifo */
+	else
+		len = 0;	/* trig: 8 bytes left in TX fifo */
+
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFCR, FCR_RFRST | ((len & 3) << 2));
+
+	while (id->msg->len && IN32(id, I2CTFDR) < FIFO_SIZE) {
+		OUT32(id, I2CRXTX, *(id->msg->buf));
+		(id->msg->len)--;
+		(id->msg->buf)++;
+	}
+
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMCR, MCR_MIE | MCR_ESG);
+	OUT32(id, I2CFSR, 0);
+	OUT32(id, I2CMIER, MIE_MNRE | MIE_MALE | MIE_MSTE | MIE_MATE);
+	OUT32(id, I2CFIER, FIER_TEIE | (id->msg->len ? FIER_TXIE : 0));
+}
+
+static inline int sh7760_i2c_busy_check(struct cami2c *id)
+{
+	return (IN32(id, I2CMCR) & MCR_FSDA);
+}
+
+static int sh7760_i2c_master_xfer(struct i2c_adapter *adap,
+				  struct i2c_msg *msgs,
+				  int num)
+{
+	struct cami2c *id = adap->algo_data;
+	int i, retr;
+
+	if (sh7760_i2c_busy_check(id)) {
+		dev_err(&adap->dev, "sh7760-i2c%d: bus busy!\n", adap->nr);
+		return -EBUSY;
+	}
+
+	i = 0;
+	while (i < num) {
+		retr = adap->retries;
+retry:
+		id->flags = ((i == (num-1)) ? IDF_STOP : 0);
+		id->status = 0;
+		id->msg = msgs;
+		init_completion(&id->xfer_done);
+
+		if (msgs->flags & I2C_M_RD)
+			sh7760_i2c_mrecv(id);
+		else
+			sh7760_i2c_msend(id);
+
+		wait_for_completion(&id->xfer_done);
+
+		if (id->status == 0) {
+			num = -EIO;
+			break;
+		}
+
+		if (id->status & IDS_NACK) {
+			/* wait a bit or i2c module stops working */
+			mdelay(1);
+			num = -EREMOTEIO;
+			break;
+		}
+
+		if (id->status & IDS_ARBLOST) {
+			if (retr--) {
+				mdelay(2);
+				goto retry;
+			}
+			num = -EREMOTEIO;
+			break;
+		}
+
+		msgs++;
+		i++;
+	}
+
+	id->msg = NULL;
+	id->flags = 0;
+	id->status = 0;
+
+	OUT32(id, I2CMCR, 0);
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMIER, 0);
+	OUT32(id, I2CFIER, 0);
+
+	/* reset slave module registers too: master mode enables slave
+	 * module for receive ops (ack, data). Without this reset,
+	 * eternal bus activity might be reported after NACK / ARBLOST.
+	 */
+	OUT32(id, I2CSCR, 0);
+	OUT32(id, I2CSAR, 0);
+	OUT32(id, I2CSSR, 0);
+
+	return num;
+}
+
+static u32 sh7760_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm sh7760_i2c_algo = {
+	.master_xfer	= sh7760_i2c_master_xfer,
+	.functionality	= sh7760_i2c_func,
+};
+
+/* calculate CCR register setting for a desired scl clock. SCL clock is
+ * derived from I2C module clock (iclk) which in turn is derived from
+ * peripheral module clock (mclk, usually around 33MHz):
+ * iclk = mclk/(CDF + 1). iclk must be < 20MHz.
+ * scl = iclk/(SCGD*8 + 20).
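+ * (Worked example with illustrative numbers, not taken from the code:
+ *  mclk = 33 MHz and CDF = 1 give iclk = 16.5 MHz; SCGD = 18 then
+ *  gives scl = 16500000/(18*8 + 20) ~= 100.6 kHz.)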
+ */
+static int __devinit calc_CCR(unsigned long scl_hz)
+{
+	struct clk *mclk;
+	unsigned long mck, m1, dff, odff, iclk;
+	signed char cdf, cdfm;
+	int scgd, scgdm, scgds;
+
+	mclk = clk_get(NULL, "module_clk");
+	if (IS_ERR(mclk)) {
+		return PTR_ERR(mclk);
+	} else {
+		mck = mclk->rate;
+		clk_put(mclk);
+	}
+
+	odff = scl_hz;
+	scgdm = cdfm = m1 = 0;
+	for (cdf = 3; cdf >= 0; cdf--) {
+		iclk = mck / (1 + cdf);
+		if (iclk >= 20000000)
+			continue;
+		scgds = ((iclk / scl_hz) - 20) >> 3;
+		for (scgd = scgds; (scgd < 63) && scgd <= scgds + 1; scgd++) {
+			m1 = iclk / (20 + (scgd << 3));
+			dff = abs(scl_hz - m1);
+			if (dff < odff) {
+				odff = dff;
+				cdfm = cdf;
+				scgdm = scgd;
+			}
+		}
+	}
+	/* fail if more than 25% off of requested SCL */
+	if (odff > (scl_hz >> 2))
+		return -EINVAL;
+
+	/* create a CCR register value */
+	return ((scgdm << 2) | cdfm);
+}
+
+static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
+{
+	struct sh7760_i2c_platdata *pd;
+	struct resource *res;
+	struct cami2c *id;
+	int ret;
+
+	pd = pdev->dev.platform_data;
+	if (!pd) {
+		dev_err(&pdev->dev, "no platform_data!\n");
+		ret = -ENODEV;
+		goto out0;
+	}
+
+	id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
+	if (!id) {
+		dev_err(&pdev->dev, "no mem for private data\n");
+		ret = -ENOMEM;
+		goto out0;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no mmio resources\n");
+		ret = -ENODEV;
+		goto out1;
+	}
+
+	id->ioarea = request_mem_region(res->start, REGSIZE, pdev->name);
+	if (!id->ioarea) {
+		dev_err(&pdev->dev, "mmio already reserved\n");
+		ret = -EBUSY;
+		goto out1;
+	}
+
+	id->iobase = ioremap(res->start, REGSIZE);
+	if (!id->iobase) {
+		dev_err(&pdev->dev, "cannot ioremap\n");
+		ret = -ENODEV;
+		goto out2;
+	}
+
+	id->irq = platform_get_irq(pdev, 0);
+
+	id->adap.nr = pdev->id;
+	id->adap.algo = &sh7760_i2c_algo;
+	id->adap.class = I2C_CLASS_ALL;
+	id->adap.retries = 3;
+	id->adap.algo_data = id;
+	id->adap.dev.parent = &pdev->dev;
+	snprintf(id->adap.name, sizeof(id->adap.name),
+		 "SH7760 I2C at %08lx", (unsigned long)res->start);
+
+	OUT32(id, I2CMCR, 0);
+	OUT32(id, I2CMSR, 0);
+	OUT32(id, I2CMIER, 0);
+	OUT32(id, I2CMAR, 0);
+	OUT32(id, I2CSIER, 0);
+	OUT32(id, I2CSAR, 0);
+	OUT32(id, I2CSCR, 0);
+	OUT32(id, I2CSSR, 0);
+	OUT32(id, I2CFIER, 0);
+	OUT32(id, I2CFCR, FCR_RFRST | FCR_TFRST);
+	OUT32(id, I2CFSR, 0);
+
+	ret = calc_CCR(pd->speed_khz * 1000);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "invalid SCL clock: %dkHz\n",
+			pd->speed_khz);
+		goto out3;
+	}
+	OUT32(id, I2CCCR, ret);
+
+	if (request_irq(id->irq, sh7760_i2c_irq, IRQF_DISABLED,
+			SH7760_I2C_DEVNAME, id)) {
+		dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
+		ret = -EBUSY;
+		goto out3;
+	}
+
+	ret = i2c_add_numbered_adapter(&id->adap);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+		goto out4;
+	}
+
+	platform_set_drvdata(pdev, id);
+
+	dev_info(&pdev->dev, "%d kHz mmio %08x irq %d\n",
+		 pd->speed_khz, res->start, id->irq);
+
+	return 0;
+
+out4:
+	free_irq(id->irq, id);
+out3:
+	iounmap(id->iobase);
+out2:
+	release_resource(id->ioarea);
+	kfree(id->ioarea);
+out1:
+	kfree(id);
+out0:
+	return ret;
+}
+
+static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
+{
+	struct cami2c *id = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&id->adap);
+	free_irq(id->irq, id);
+	iounmap(id->iobase);
+	release_resource(id->ioarea);
+	kfree(id->ioarea);
+	kfree(id);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver sh7760_i2c_drv = {
+	.driver	= {
+		.name	= SH7760_I2C_DEVNAME,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sh7760_i2c_probe,
+	.remove		= __devexit_p(sh7760_i2c_remove),
+};
+
+static int __init sh7760_i2c_init(void)
+{
+	return platform_driver_register(&sh7760_i2c_drv);
+}
+
+static void __exit sh7760_i2c_exit(void)
+{
+	platform_driver_unregister(&sh7760_i2c_drv);
+}
+
+module_init(sh7760_i2c_init);
+module_exit(sh7760_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SH7760 I2C bus driver");
+MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/include/asm-sh/i2c-sh7760.h b/include/asm-sh/i2c-sh7760.h
new file mode 100644
index 000000000000..24182116711f
--- /dev/null
+++ b/include/asm-sh/i2c-sh7760.h
@@ -0,0 +1,22 @@
+/*
+ * MMIO/IRQ and platform data for SH7760 I2C channels
+ */
+
+#ifndef _I2C_SH7760_H_
+#define _I2C_SH7760_H_
+
+#define SH7760_I2C_DEVNAME	"sh7760-i2c"
+
+#define SH7760_I2C0_MMIO	0xFE140000
+#define SH7760_I2C0_MMIOEND	0xFE14003B
+#define SH7760_I2C0_IRQ		62
+
+#define SH7760_I2C1_MMIO	0xFE150000
+#define SH7760_I2C1_MMIOEND	0xFE15003B
+#define SH7760_I2C1_IRQ		63
+
+struct sh7760_i2c_platdata {
+	unsigned int speed_khz;
+};
+
+#endif
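
How a board would hook this driver up, as a minimal sketch under assumptions
(not part of this patch: the variable names and the 100 kHz figure are
illustrative; only the SH7760_I2C* constants and SH7760_I2C_DEVNAME come from
the header above). The probe routine reads the first MEM and IRQ resources,
takes the fixed bus number from pdev->id via i2c_add_numbered_adapter(), and
gets the target SCL rate from platform_data:

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <asm/i2c-sh7760.h>

	/* target SCL rate; calc_CCR() fails if it cannot get within 25% */
	static struct sh7760_i2c_platdata i2c0_pdata = {
		.speed_khz = 100,
	};

	static struct resource i2c0_res[] = {
		{
			.start	= SH7760_I2C0_MMIO,
			.end	= SH7760_I2C0_MMIOEND,
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= SH7760_I2C0_IRQ,
			.end	= SH7760_I2C0_IRQ,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device i2c0_dev = {
		.name		= SH7760_I2C_DEVNAME,
		.id		= 0,	/* becomes i2c bus number 0 */
		.resource	= i2c0_res,
		.num_resources	= ARRAY_SIZE(i2c0_res),
		.dev		= {
			.platform_data	= &i2c0_pdata,
		},
	};

	/* from the board's init code: */
	platform_device_register(&i2c0_dev);

Since the bus number is fixed by pdev->id, client devices can then be
declared against bus 0 with i2c_register_board_info(0, ...) as usual.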