aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/plip.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/plip.c
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/plip.c')
-rw-r--r--drivers/net/plip.c1427
1 files changed, 1427 insertions, 0 deletions
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
new file mode 100644
index 000000000000..f4b62405d2e5
--- /dev/null
+++ b/drivers/net/plip.c
@@ -0,0 +1,1427 @@
1/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2/* PLIP: A parallel port "network" driver for Linux. */
3/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4/*
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
12 *
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
21 *
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
28 *
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
32 *
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
37 */
38
39/*
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
42 *
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
48 *
49 * This version follows original PLIP protocol.
 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
51 */
52
53/*
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
56 */
/* Driver identification string. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58
59/*
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
63
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
72
73 The packet is encapsulated as if it were ethernet.
74
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
89*/
90
91#include <linux/module.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/string.h>
97#include <linux/if_ether.h>
98#include <linux/in.h>
99#include <linux/errno.h>
100#include <linux/delay.h>
101#include <linux/lp.h>
102#include <linux/init.h>
103#include <linux/netdevice.h>
104#include <linux/etherdevice.h>
105#include <linux/inetdevice.h>
106#include <linux/skbuff.h>
107#include <linux/if_plip.h>
108#include <linux/workqueue.h>
109#include <linux/ioport.h>
110#include <linux/spinlock.h>
111#include <linux/parport.h>
112#include <linux/bitops.h>
113
114#include <net/neighbour.h>
115
116#include <asm/system.h>
117#include <asm/irq.h>
118#include <asm/byteorder.h>
119#include <asm/semaphore.h>
120
121/* Maximum number of devices to support. */
122#define PLIP_MAX 8
123
124/* Use 0 for production, 1 for verification, >2 for debug */
125#ifndef NET_DEBUG
126#define NET_DEBUG 1
127#endif
128static unsigned int net_debug = NET_DEBUG;
129
130#define ENABLE(irq) if (irq != -1) enable_irq(irq)
131#define DISABLE(irq) if (irq != -1) disable_irq(irq)
132
133/* In micro second */
134#define PLIP_DELAY_UNIT 1
135
136/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
137#define PLIP_TRIGGER_WAIT 500
138
139/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
140#define PLIP_NIBBLE_WAIT 3000
141
142/* Bottom halves */
143static void plip_kick_bh(struct net_device *dev);
144static void plip_bh(struct net_device *dev);
145static void plip_timer_bh(struct net_device *dev);
146
147/* Interrupt handler */
148static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
149
150/* Functions for DEV methods */
151static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
152static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
153 unsigned short type, void *daddr,
154 void *saddr, unsigned len);
155static int plip_hard_header_cache(struct neighbour *neigh,
156 struct hh_cache *hh);
157static int plip_open(struct net_device *dev);
158static int plip_close(struct net_device *dev);
159static struct net_device_stats *plip_get_stats(struct net_device *dev);
160static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
161static int plip_preempt(void *handle);
162static void plip_wakeup(void *handle);
163
/* State of the link as a whole; stored in nl->connection and used to
   index connection_state_table[] from the bottom half. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle, no transfer in progress */
	PLIP_CN_RECEIVE,	/* incoming packet in progress */
	PLIP_CN_SEND,		/* outgoing packet in progress */
	PLIP_CN_CLOSING,	/* transfer finished, closing down */
	PLIP_CN_ERROR		/* fault; waiting for the other end to settle */
};
171
/* Which part of the packet is currently on the wire; lets the
   send/receive state machines resume after a TIMEOUT return. */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no packet / packet complete */
	PLIP_PK_TRIGGER,	/* initial handshake with the other end */
	PLIP_PK_LENGTH_LSB,	/* low byte of the length */
	PLIP_PK_LENGTH_MSB,	/* high byte of the length */
	PLIP_PK_DATA,		/* payload bytes */
	PLIP_PK_CHECKSUM	/* trailing additive checksum byte */
};
180
/* Which half of the current byte is being transferred; bytes go over
   the 5-bit cable as two nibbles (see plip_send/plip_receive). */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
186
/* Per-direction transfer state; net_local holds one instance for
   send (snd_data) and one for receive (rcv_data). */
struct plip_local {
	enum plip_packet_state state;	/* which packet field is next */
	enum plip_nibble_state nibble;	/* which nibble of the byte is next */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte- or word-accessed */
	unsigned short byte;		/* index of the payload byte in flight */
	unsigned char checksum;		/* running 8-bit additive checksum */
	unsigned char data;		/* scratch byte (e.g. received checksum) */
	struct sk_buff *skb;		/* packet being sent or received */
};
209
/* Per-device private data, reached via netdev_priv(dev). */
struct net_local {
	struct net_device_stats enet_stats;	/* interface statistics */
	struct work_struct immediate;		/* runs plip_bh */
	struct work_struct deferred;		/* delayed retry, runs plip_kick_bh */
	struct work_struct timer;		/* poll-mode tick, runs plip_timer_bh */
	struct plip_local snd_data;		/* transmit-side state */
	struct plip_local rcv_data;		/* receive-side state */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long trigger;			/* handshake timeout (PLIP_TRIGGER_WAIT) */
	unsigned long nibble;			/* per-nibble timeout (PLIP_NIBBLE_WAIT) */
	enum plip_connection_state connection;	/* link state machine */
	unsigned short timeout_count;		/* consecutive timeouts so far */
	int is_deferred;			/* retry pending on ->deferred */
	int port_owner;				/* nonzero while we hold the parport */
	int should_relinquish;			/* release port once transfer ends */
	int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
	                        unsigned short type, void *daddr,
	                        void *saddr, unsigned len);	/* wrapped eth helper */
	int (*orig_hard_header_cache)(struct neighbour *neigh,
	                              struct hh_cache *hh);	/* wrapped eth helper */
	spinlock_t lock;			/* protects connection/state fields */
	atomic_t kill_timer;			/* asks the poll-mode timer to stop */
	struct semaphore killed_timer_sem;	/* timer signals it has stopped */
};
234
235inline static void enable_parport_interrupts (struct net_device *dev)
236{
237 if (dev->irq != -1)
238 {
239 struct parport *port =
240 ((struct net_local *)dev->priv)->pardev->port;
241 port->ops->enable_irq (port);
242 }
243}
244
245inline static void disable_parport_interrupts (struct net_device *dev)
246{
247 if (dev->irq != -1)
248 {
249 struct parport *port =
250 ((struct net_local *)dev->priv)->pardev->port;
251 port->ops->disable_irq (port);
252 }
253}
254
255inline static void write_data (struct net_device *dev, unsigned char data)
256{
257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port;
259
260 port->ops->write_data (port, data);
261}
262
263inline static unsigned char read_status (struct net_device *dev)
264{
265 struct parport *port =
266 ((struct net_local *)dev->priv)->pardev->port;
267
268 return port->ops->read_status (port);
269}
270
271/* Entry point of PLIP driver.
272 Probe the hardware, and register/initialize the driver.
273
274 PLIP is rather weird, because of the way it interacts with the parport
275 system. It is _not_ initialised from Space.c. Instead, plip_init()
276 is called, and that function makes up a "struct net_device" for each port, and
277 then calls us here.
278
279 */
/* Set up the net_device methods and private state for one PLIP
   device; the generic ethernet setup has already run, we only
   override what PLIP does differently. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->hard_start_xmit = plip_tx_packet;
	dev->open = plip_open;
	dev->stop = plip_close;
	dev->get_stats = plip_get_stats;
	dev->do_ioctl = plip_ioctl;
	dev->header_cache_update = NULL;
	dev->tx_queue_len = 10;
	dev->flags = IFF_POINTOPOINT|IFF_NOARP;
	/* 0xfc prefix; the last four bytes are filled in from the
	   interface's IP address in plip_open(). */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	/* Wrap the inherited header ops so that the addresses can be
	   rewritten (see plip_rewrite_address). */
	nl->orig_hard_header = dev->hard_header;
	dev->hard_header = plip_hard_header;

	nl->orig_hard_header_cache = dev->hard_header_cache;
	dev->hard_header_cache = plip_hard_header_cache;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger = PLIP_TRIGGER_WAIT;
	nl->nibble = PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
	INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);

	/* No IRQ available: drive the protocol from a self-rescheduling
	   work item instead (see plip_timer_bh). */
	if (dev->irq == -1)
		INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);

	spin_lock_init(&nl->lock);
}
319
320/* Bottom half handler for the delayed request.
321 This routine is kicked by do_timer().
322 Request `plip_bh' to be invoked. */
323static void
324plip_kick_bh(struct net_device *dev)
325{
326 struct net_local *nl = netdev_priv(dev);
327
328 if (nl->is_deferred)
329 schedule_work(&nl->immediate);
330}
331
332/* Forward declarations of internal routines */
333static int plip_none(struct net_device *, struct net_local *,
334 struct plip_local *, struct plip_local *);
335static int plip_receive_packet(struct net_device *, struct net_local *,
336 struct plip_local *, struct plip_local *);
337static int plip_send_packet(struct net_device *, struct net_local *,
338 struct plip_local *, struct plip_local *);
339static int plip_connection_close(struct net_device *, struct net_local *,
340 struct plip_local *, struct plip_local *);
341static int plip_error(struct net_device *, struct net_local *,
342 struct plip_local *, struct plip_local *);
343static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
344 struct plip_local *snd,
345 struct plip_local *rcv,
346 int error);
347
348#define OK 0
349#define TIMEOUT 1
350#define ERROR 2
351#define HS_TIMEOUT 3
352
353typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
354 struct plip_local *snd, struct plip_local *rcv);
355
/* Bottom-half dispatch table, indexed by nl->connection; the order
   must match enum plip_connection_state. */
static plip_func connection_state_table[] =
{
	plip_none,		/* PLIP_CN_NONE */
	plip_receive_packet,	/* PLIP_CN_RECEIVE */
	plip_send_packet,	/* PLIP_CN_SEND */
	plip_connection_close,	/* PLIP_CN_CLOSING */
	plip_error		/* PLIP_CN_ERROR */
};
364
365/* Bottom half handler of PLIP. */
366static void
367plip_bh(struct net_device *dev)
368{
369 struct net_local *nl = netdev_priv(dev);
370 struct plip_local *snd = &nl->snd_data;
371 struct plip_local *rcv = &nl->rcv_data;
372 plip_func f;
373 int r;
374
375 nl->is_deferred = 0;
376 f = connection_state_table[nl->connection];
377 if ((r = (*f)(dev, nl, snd, rcv)) != OK
378 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
379 nl->is_deferred = 1;
380 schedule_delayed_work(&nl->deferred, 1);
381 }
382}
383
384static void
385plip_timer_bh(struct net_device *dev)
386{
387 struct net_local *nl = netdev_priv(dev);
388
389 if (!(atomic_read (&nl->kill_timer))) {
390 plip_interrupt (-1, dev, NULL);
391
392 schedule_delayed_work(&nl->timer, 1);
393 }
394 else {
395 up (&nl->killed_timer_sem);
396 }
397}
398
/* Handle a non-OK return from a state function: count or report the
   timeout/error, free any half-transferred packets, and move the
   link to PLIP_CN_ERROR until the other end settles.  Returns
   TIMEOUT to request a retry, or OK if nothing more is needed. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts are retried up to 10 times,
			   other timeouts up to 3. */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR from send: IRQ still enabled, so treat it
			   like HS_TIMEOUT (see comment above). */
			error = HS_TIMEOUT;
		nl->enet_stats.tx_errors++;
		nl->enet_stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		nl->enet_stats.rx_dropped++;
	}
	/* Give up on both directions and free pending skbs. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Only in this case is the IRQ still enabled (see above). */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
474
/* PLIP_NONE --- link is idle; nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
481
482/* PLIP_RECEIVE --- receive a byte(two nibbles)
483 Returns OK on success, TIMEOUT on timeout */
inline static int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	/* The cases fall through so that a byte interrupted between
	   nibbles (TIMEOUT return) resumes at the right point next call. */
	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the low nibble: status bit 0x80 goes low.
		   Read twice and compare to debounce the lines. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble: status bit 0x80 goes high. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
530
531/*
532 * Determine the packet's protocol ID. The rule here is that we
533 * assume 802.3 if the type field is short enough to be a length.
534 * This is normal practice and works for any 'now in use' protocol.
535 *
536 * PLIP is ethernet ish but the daddr might not be valid if unicast.
537 * PLIP fortunately has no bus architecture (its Point-to-point).
538 *
539 * We can't fix the daddr thing as that quirk (more bug) is embedded
540 * in far too many old systems not all even running Linux.
541 */
542
/* Classify a received frame (like eth_type_trans): strip the
   link-level header, set skb->pkt_type, and return the protocol id
   in network byte order. */
static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb->mac.raw=skb->data;
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Group bit set in the destination address? */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= 1536 are real ethertypes; smaller values are
	   802.3 length fields. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
584
585
586/* PLIP_RECEIVE_PACKET --- receive a packet */
/* PLIP_RECEIVE_PACKET --- receive a packet.
   Resumable fall-through state machine: a TIMEOUT return re-enters
   at the same rcv->state on the next bottom-half invocation. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a packet to send: use the shorter
			   trigger timeout so a collision is noticed fast. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Compute our additive checksum over the payload. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			nl->enet_stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->last_rx = jiffies;
		nl->enet_stats.rx_bytes += rcv->length.h;
		nl->enet_stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: go straight to SEND. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
702
703/* PLIP_SEND --- send a byte (two nibbles)
704 Returns OK on success, TIMEOUT when timeout */
inline static int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	/* Fall-through state machine, resumable after a TIMEOUT just
	   like plip_receive(). */
	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble on the data lines. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise bit 0x10, then wait for the receiver's ACK
		   (status bit 0x80 low). */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		/* Present the high nibble, bit 0x10 still set. */
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop bit 0x10 and wait for ACK (status bit 0x80 high). */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
747
748/* PLIP_SEND_PACKET --- send a packet */
/* PLIP_SEND_PACKET --- send a packet.
   Resumable fall-through state machine (see plip_receive_packet). */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* The other end must be idle (status 0x80) before we
		   may trigger it. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				nl->enet_stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Remote end answered the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					nl->enet_stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Additive checksum over the payload; verified by the
		   receiver in plip_receive_packet(). */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		nl->enet_stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		nl->enet_stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
863
/* PLIP_CONNECTION_CLOSE --- wind down after a completed transfer:
   reopen the tx queue, and hand back the parport if someone asked
   for it while we were busy (see plip_preempt). */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
880
881/* PLIP_ERROR --- wait till other end settled */
882static int
883plip_error(struct net_device *dev, struct net_local *nl,
884 struct plip_local *snd, struct plip_local *rcv)
885{
886 unsigned char status;
887
888 status = read_status(dev);
889 if ((status & 0xf8) == 0x80) {
890 if (net_debug > 2)
891 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
892 nl->connection = PLIP_CN_NONE;
893 nl->should_relinquish = 0;
894 netif_start_queue (dev);
895 enable_parport_interrupts (dev);
896 ENABLE(dev->irq);
897 netif_wake_queue (dev);
898 } else {
899 nl->is_deferred = 1;
900 schedule_delayed_work(&nl->deferred, 1);
901 }
902
903 return OK;
904}
905
906/* Handle the parallel port interrupts. */
static void
plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;

	if (dev == NULL) {
		printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
		return;
	}

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irq (&nl->lock);

	/* A genuine trigger reads 0xc0 in the checked status bits;
	   anything else is treated as spurious. */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irq (&nl->lock);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		/* Switch to receiving; the bottom half does the work. */
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irq(&nl->lock);
}
960
/* Queue a packet for transmission (dev->hard_start_xmit method).
   Returns 0 on acceptance, 1 to make the stack requeue the skb. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return 1;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return 1;
		nl->port_owner = 1;
	}

	/* Only one packet in flight at a time. */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return 1;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	/* Hand the skb to the state machine and kick the bottom half. */
	spin_lock_irq(&nl->lock);
	dev->trans_start = jiffies;
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return 0;
}
1002
1003static void
1004plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1005{
1006 struct in_device *in_dev;
1007
1008 if ((in_dev=dev->ip_ptr) != NULL) {
1009 /* Any address will do - we take the first */
1010 struct in_ifaddr *ifa=in_dev->ifa_list;
1011 if (ifa != NULL) {
1012 memcpy(eth->h_source, dev->dev_addr, 6);
1013 memset(eth->h_dest, 0xfc, 2);
1014 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1015 }
1016 }
1017}
1018
1019static int
1020plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1021 unsigned short type, void *daddr,
1022 void *saddr, unsigned len)
1023{
1024 struct net_local *nl = netdev_priv(dev);
1025 int ret;
1026
1027 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1028 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1029
1030 return ret;
1031}
1032
1033int plip_hard_header_cache(struct neighbour *neigh,
1034 struct hh_cache *hh)
1035{
1036 struct net_local *nl = neigh->dev->priv;
1037 int ret;
1038
1039 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1040 {
1041 struct ethhdr *eth;
1042
1043 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1044 HH_DATA_OFF(sizeof(*eth)));
1045 plip_rewrite_address (neigh->dev, eth);
1046 }
1047
1048 return ret;
1049}
1050
1051/* Open/initialize the board. This is called (in the current kernel)
1052 sometime after booting when the 'ifconfig' program is run.
1053
1054 This routine gets exclusive access to the parallel port by allocating
1055 its IRQ line.
1056 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* Poll mode: start the self-rescheduling timer work. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1114
/* The inverse routine to plip_open ().
   Stops the transmit queue and interrupts, kills the poll timer (in
   IRQ-less mode), releases the parallel port, and frees any skbs still
   held by the send/receive state machines. Always returns 0. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	/* Make sure no plip_interrupt() instance is still running on
	   another CPU before we start tearing state down. */
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Poll mode: ask the timer work item to stop
		   (kill_timer = 1) and block until it acknowledges by
		   upping killed_timer_sem. */
		init_MUTEX_LOCKED (&nl->killed_timer_sem);
		atomic_set (&nl->kill_timer, 1);
		down (&nl->killed_timer_sem);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any packet the transmit state machine was working on. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	/* Likewise for a partially received packet. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1161
1162static int
1163plip_preempt(void *handle)
1164{
1165 struct net_device *dev = (struct net_device *)handle;
1166 struct net_local *nl = netdev_priv(dev);
1167
1168 /* Stand our ground if a datagram is on the wire */
1169 if (nl->connection != PLIP_CN_NONE) {
1170 nl->should_relinquish = 1;
1171 return 1;
1172 }
1173
1174 nl->port_owner = 0; /* Remember that we released the bus */
1175 return 0;
1176}
1177
1178static void
1179plip_wakeup(void *handle)
1180{
1181 struct net_device *dev = (struct net_device *)handle;
1182 struct net_local *nl = netdev_priv(dev);
1183
1184 if (nl->port_owner) {
1185 /* Why are we being woken up? */
1186 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1187 if (!parport_claim(nl->pardev))
1188 /* bus_owner is already set (but why?) */
1189 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1190 else
1191 return;
1192 }
1193
1194 if (!(dev->flags & IFF_UP))
1195 /* Don't need the port when the interface is down */
1196 return;
1197
1198 if (!parport_claim(nl->pardev)) {
1199 nl->port_owner = 1;
1200 /* Clear the data port. */
1201 write_data (dev, 0x00);
1202 }
1203
1204 return;
1205}
1206
1207static struct net_device_stats *
1208plip_get_stats(struct net_device *dev)
1209{
1210 struct net_local *nl = netdev_priv(dev);
1211 struct net_device_stats *r = &nl->enet_stats;
1212
1213 return r;
1214}
1215
1216static int
1217plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1218{
1219 struct net_local *nl = netdev_priv(dev);
1220 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1221
1222 if (cmd != SIOCDEVPLIP)
1223 return -EOPNOTSUPP;
1224
1225 switch(pc->pcmd) {
1226 case PLIP_GET_TIMEOUT:
1227 pc->trigger = nl->trigger;
1228 pc->nibble = nl->nibble;
1229 break;
1230 case PLIP_SET_TIMEOUT:
1231 if(!capable(CAP_NET_ADMIN))
1232 return -EPERM;
1233 nl->trigger = pc->trigger;
1234 nl->nibble = pc->nibble;
1235 break;
1236 default:
1237 return -EOPNOTSUPP;
1238 }
1239 return 0;
1240}
1241
/* Module parameters: "parport" is the list of parport numbers to bind
   (-1 entries are unused slots); "timid" makes the driver skip ports
   that already have other devices attached.
   NOTE(review): "timid" has no MODULE_PARM_DESC - consider adding one. */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* One net_device per bound port, indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1250
1251static inline int
1252plip_searchfor(int list[], int a)
1253{
1254 int i;
1255 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1256 if (list[i] == a) return 1;
1257 }
1258 return 0;
1259}
1260
1261/* plip_attach() is called (by the parport code) when a port is
1262 * available to use. */
1263static void plip_attach (struct parport *port)
1264{
1265 static int unit;
1266 struct net_device *dev;
1267 struct net_local *nl;
1268 char name[IFNAMSIZ];
1269
1270 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1271 plip_searchfor(parport, port->number)) {
1272 if (unit == PLIP_MAX) {
1273 printk(KERN_ERR "plip: too many devices\n");
1274 return;
1275 }
1276
1277 sprintf(name, "plip%d", unit);
1278 dev = alloc_etherdev(sizeof(struct net_local));
1279 if (!dev) {
1280 printk(KERN_ERR "plip: memory squeeze\n");
1281 return;
1282 }
1283
1284 strcpy(dev->name, name);
1285
1286 SET_MODULE_OWNER(dev);
1287 dev->irq = port->irq;
1288 dev->base_addr = port->base;
1289 if (port->irq == -1) {
1290 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1291 "which is fairly inefficient!\n", port->name);
1292 }
1293
1294 nl = netdev_priv(dev);
1295 nl->pardev = parport_register_device(port, name, plip_preempt,
1296 plip_wakeup, plip_interrupt,
1297 0, dev);
1298
1299 if (!nl->pardev) {
1300 printk(KERN_ERR "%s: parport_register failed\n", name);
1301 goto err_free_dev;
1302 return;
1303 }
1304
1305 plip_init_netdev(dev);
1306
1307 if (register_netdev(dev)) {
1308 printk(KERN_ERR "%s: network register failed\n", name);
1309 goto err_parport_unregister;
1310 }
1311
1312 printk(KERN_INFO "%s", version);
1313 if (dev->irq != -1)
1314 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1315 "using IRQ %d.\n",
1316 dev->name, dev->base_addr, dev->irq);
1317 else
1318 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1319 "not using IRQ.\n",
1320 dev->name, dev->base_addr);
1321 dev_plip[unit++] = dev;
1322 }
1323 return;
1324
1325err_parport_unregister:
1326 parport_unregister_device(nl->pardev);
1327err_free_dev:
1328 free_netdev(dev);
1329 return;
1330}
1331
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: devices are only torn down in
	   plip_cleanup_module(). */
}
1338
/* Hooks registered with the parport core: plip_attach() runs for each
   (new) parallel port, plip_detach() when one goes away. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
1344
1345static void __exit plip_cleanup_module (void)
1346{
1347 struct net_device *dev;
1348 int i;
1349
1350 parport_unregister_driver (&plip_driver);
1351
1352 for (i=0; i < PLIP_MAX; i++) {
1353 if ((dev = dev_plip[i])) {
1354 struct net_local *nl = netdev_priv(dev);
1355 unregister_netdev(dev);
1356 if (nl->port_owner)
1357 parport_release(nl->pardev);
1358 parport_unregister_device(nl->pardev);
1359 free_netdev(dev);
1360 dev_plip[i] = NULL;
1361 }
1362 }
1363}
1364
1365#ifndef MODULE
1366
1367static int parport_ptr;
1368
1369static int __init plip_setup(char *str)
1370{
1371 int ints[4];
1372
1373 str = get_options(str, ARRAY_SIZE(ints), ints);
1374
1375 /* Ugh. */
1376 if (!strncmp(str, "parport", 7)) {
1377 int n = simple_strtoul(str+7, NULL, 10);
1378 if (parport_ptr < PLIP_MAX)
1379 parport[parport_ptr++] = n;
1380 else
1381 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1382 str);
1383 } else if (!strcmp(str, "timid")) {
1384 timid = 1;
1385 } else {
1386 if (ints[0] == 0 || ints[1] == 0) {
1387 /* disable driver on "plip=" or "plip=0" */
1388 parport[0] = -2;
1389 } else {
1390 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1391 ints[1]);
1392 }
1393 }
1394 return 1;
1395}
1396
1397__setup("plip=", plip_setup);
1398
1399#endif /* !MODULE */
1400
1401static int __init plip_init (void)
1402{
1403 if (parport[0] == -2)
1404 return 0;
1405
1406 if (parport[0] != -1 && timid) {
1407 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1408 timid = 0;
1409 }
1410
1411 if (parport_register_driver (&plip_driver)) {
1412 printk (KERN_WARNING "plip: couldn't register driver\n");
1413 return 1;
1414 }
1415
1416 return 0;
1417}
1418
1419module_init(plip_init);
1420module_exit(plip_cleanup_module);
1421MODULE_LICENSE("GPL");
1422
1423/*
1424 * Local variables:
1425 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1426 * End:
1427 */